repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
wsmith323/django | django/db/models/fields/files.py | 7 | 18911 | import datetime
import os
from django import forms
from django.core import checks
from django.core.files.base import File
from django.core.files.images import ImageFile
from django.core.files.storage import default_storage
from django.db.models import signals
from django.db.models.fields import Field
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
def __eq__(self, other):
# Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
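    # Illustration of the operators above (the stored name is hypothetical):
    # a FieldFile compares and hashes like the plain string it wraps:
    #
    #   >>> instance.file == 'uploads/hello.txt'
    #   True
    #   >>> hash(instance.file) == hash('uploads/hello.txt')
    #   True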
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content, max_length=self.field.max_length)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
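# A minimal sketch of the round-trip __getstate__ enables (the model instance
# is hypothetical). Only the file's name is pickled; FileDescriptor below
# re-attaches the instance, field and storage on the next attribute access:
#
#   >>> import pickle
#   >>> restored = pickle.loads(pickle.dumps(instance))  # doctest: +SKIP
#   >>> restored.file.name == instance.file.name
#   True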
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
return self
# This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
# the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
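    # Summarizing the three cases above with hypothetical values:
    #
    #   instance.file = 'docs/a.txt'      # wrapped in attr_class on access
    #   instance.file = File(open(path))  # wrapped and marked uncommitted
    #   pickle.loads(data).file           # field/instance/storage restored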
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length") == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
# If upload_to is a callable, make sure that the path it returns is
# passed through get_valid_name() of the underlying storage.
if callable(self.upload_to):
directory_name, filename = os.path.split(self.upload_to(instance, filename))
filename = self.storage.get_valid_name(filename)
return os.path.normpath(os.path.join(directory_name, filename))
return os.path.join(self.get_directory_name(), self.get_filename(filename))
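    # Illustrative only (the model and helper names are hypothetical): with a
    # callable upload_to, only the basename of the returned path is passed
    # through the storage's get_valid_name():
    #
    #   def user_directory_path(instance, filename):
    #       return 'user_%d/%s' % (instance.user.id, filename)
    #
    #   class Document(models.Model):
    #       upload = models.FileField(upload_to=user_directory_path)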
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
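    # A hedged sketch of the convention above (instance is hypothetical):
    #
    #   field.save_form_data(instance, None)      # no change
    #   field.save_form_data(instance, False)     # clears: '' is stored
    #   field.save_form_data(instance, uploaded)  # assigns the new file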
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
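    # Sketch of the effect (the path is hypothetical; assumes 'required' is
    # not passed explicitly, in which case it would win via the update):
    #
    #   field.formfield(initial='docs/a.txt').required  # False
    #   field.formfield().required  # whatever the base Field logic decides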
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install Pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
| bsd-3-clause |
rcarrillocruz/ansible | test/units/test_constants.py | 187 | 3203 | # -*- coding: utf-8 -*-
# (c) 2017 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import pwd
import os
import pytest
from ansible import constants
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves import configparser
from ansible.module_utils._text import to_text
@pytest.fixture
def cfgparser():
CFGDATA = StringIO("""
[defaults]
defaults_one = 'data_defaults_one'
[level1]
level1_one = 'data_level1_one'
""")
p = configparser.ConfigParser()
p.readfp(CFGDATA)
return p
@pytest.fixture
def user():
user = {}
user['uid'] = os.geteuid()
pwd_entry = pwd.getpwuid(user['uid'])
user['username'] = pwd_entry.pw_name
user['home'] = pwd_entry.pw_dir
return user
@pytest.fixture
def cfg_file():
data = '/ansible/test/cfg/path'
old_cfg_file = constants.CONFIG_FILE
constants.CONFIG_FILE = os.path.join(data, 'ansible.cfg')
yield data
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def null_cfg_file():
old_cfg_file = constants.CONFIG_FILE
del constants.CONFIG_FILE
yield
constants.CONFIG_FILE = old_cfg_file
@pytest.fixture
def cwd():
data = '/ansible/test/cwd/'
old_cwd = os.getcwd
os.getcwd = lambda: data
old_cwdu = None
if hasattr(os, 'getcwdu'):
old_cwdu = os.getcwdu
os.getcwdu = lambda: to_text(data)
yield data
os.getcwd = old_cwd
if hasattr(os, 'getcwdu'):
os.getcwdu = old_cwdu
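# Usage sketch (the test name is hypothetical): any test that accepts the
# ``cwd`` fixture sees the patched working directory:
#
#   def test_reads_cfg_from_cwd(cwd):
#       assert os.getcwd() == '/ansible/test/cwd/'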
class TestMkBoolean:
def test_bools(self):
assert constants.mk_boolean(True) is True
assert constants.mk_boolean(False) is False
def test_none(self):
assert constants.mk_boolean(None) is False
def test_numbers(self):
assert constants.mk_boolean(1) is True
assert constants.mk_boolean(0) is False
assert constants.mk_boolean(0.0) is False
# Current mk_boolean doesn't consider these to be true values
# def test_other_numbers(self):
# assert constants.mk_boolean(2) is True
# assert constants.mk_boolean(-1) is True
# assert constants.mk_boolean(0.1) is True
def test_strings(self):
assert constants.mk_boolean("true") is True
assert constants.mk_boolean("TRUE") is True
assert constants.mk_boolean("t") is True
assert constants.mk_boolean("yes") is True
assert constants.mk_boolean("y") is True
assert constants.mk_boolean("on") is True
| gpl-3.0 |
joone/chromium-crosswalk | chrome/common/extensions/docs/server2/features_bundle.py | 78 | 14840 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from copy import copy
from branch_utility import BranchUtility
from compiled_file_system import SingleFile, Unicode
from docs_server_utils import StringIdentity
from extensions_paths import API_PATHS, JSON_TEMPLATES
from file_system import FileNotFoundError
from future import All, Future
from path_util import Join
from platform_util import GetExtensionTypes, PlatformToExtensionType
from third_party.json_schema_compiler.json_parse import Parse
_API_FEATURES = '_api_features.json'
_MANIFEST_FEATURES = '_manifest_features.json'
_PERMISSION_FEATURES = '_permission_features.json'
def HasParent(feature_name, feature, all_feature_names):
# A feature has a parent if it has a . in its name, its parent exists,
# and it does not explicitly specify that it has no parent.
return ('.' in feature_name and
feature_name.rsplit('.', 1)[0] in all_feature_names and
not feature.get('noparent'))
def GetParentName(feature_name, feature, all_feature_names):
'''Returns the name of the parent feature, or None if it does not have a
parent.
'''
if not HasParent(feature_name, feature, all_feature_names):
return None
return feature_name.rsplit('.', 1)[0]
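# For example (feature names are hypothetical):
#
#   GetParentName('app.window', {}, {'app', 'app.window'})
#       -> 'app'
#   GetParentName('app.window', {'noparent': True}, {'app', 'app.window'})
#       -> None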
def _CreateFeaturesFromJSONFutures(json_futures):
'''Returns a dict of features. The value of each feature is a list with
all of its possible values.
'''
def ignore_feature(name, value):
'''Returns true if this feature should be ignored. Features are ignored if
they are only available to whitelisted apps or component extensions/apps, as
in these cases the APIs are not available to public developers.
Private APIs are also unavailable to public developers, but logic elsewhere
makes sure they are not listed. So they shouldn't be ignored via this
mechanism.
'''
if name.endswith('Private'):
return False
return value.get('location') == 'component' or 'whitelist' in value
features = {}
for json_future in json_futures:
try:
features_json = Parse(json_future.Get())
except FileNotFoundError:
# Not all file system configurations have the extra files.
continue
for name, rawvalue in features_json.iteritems():
if name not in features:
features[name] = []
for value in (rawvalue if isinstance(rawvalue, list) else (rawvalue,)):
if not ignore_feature(name, value):
features[name].append(value)
return features
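# The nested ignore_feature() defined inside the function above behaves like
# this (feature names and values are hypothetical):
#
#   ignore_feature('tabs', {'location': 'component'})             -> True
#   ignore_feature('tabs', {'whitelist': ['abc']})                -> True
#   ignore_feature('musicManagerPrivate', {'whitelist': ['abc']}) -> False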
def _CopyParentFeatureValues(child, parent):
'''Takes data from feature dict |parent| and copies/merges it
into feature dict |child|. Two passes are run over the features,
and on the first pass features are not resolved across caches,
so a None value for |parent| may be passed in.
'''
if parent is None:
return child
merged = copy(parent)
merged.pop('noparent', None)
merged.pop('name', None)
merged.update(child)
return merged
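# A small worked example (values are hypothetical):
#
#   child  = {'channel': 'dev'}
#   parent = {'channel': 'stable', 'extension_types': ['extension'],
#             'noparent': True, 'name': 'app'}
#   _CopyParentFeatureValues(child, parent)
#       -> {'channel': 'dev', 'extension_types': ['extension']}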
def _ResolveFeature(feature_name,
feature_values,
extra_feature_values,
platform,
features_type,
features_map):
'''Filters and combines the possible values for a feature into one dict.
It uses |features_map| to resolve dependencies for each value and inherit
unspecified platform and channel data. |feature_values| is then filtered
by platform and all values with the most stable platform are merged into one
dict. All values in |extra_feature_values| get merged into this dict.
Returns |resolve_successful| and |feature|. |resolve_successful| is False
if the feature's dependencies have not been merged yet themselves, meaning
that this feature can not be reliably resolved yet. |feature| is the
resulting feature dict, or None if the feature does not exist on the
platform specified.
'''
feature = None
most_stable_channel = None
for value in feature_values:
# If 'extension_types' or 'channel' is unspecified, these values should
# be inherited from dependencies. If they are specified, these values
# should override anything specified by dependencies.
inherit_valid_platform = 'extension_types' not in value
if inherit_valid_platform:
valid_platform = None
else:
valid_platform = (value['extension_types'] == 'all' or
platform in value['extension_types'])
inherit_channel = 'channel' not in value
channel = value.get('channel')
dependencies = value.get('dependencies', [])
parent = GetParentName(
feature_name, value, features_map[features_type]['all_names'])
if parent is not None:
# The parent data needs to be resolved so the child can inherit it.
if parent in features_map[features_type].get('unresolved', ()):
return False, None
value = _CopyParentFeatureValues(
value, features_map[features_type]['resolved'].get(parent))
# Add the parent as a dependency to ensure proper platform filtering.
dependencies.append(features_type + ':' + parent)
for dependency in dependencies:
dep_type, dep_name = dependency.split(':')
if (dep_type not in features_map or
dep_name in features_map[dep_type].get('unresolved', ())):
# The dependency itself has not been merged yet or the features map
# does not have the needed data. Fail to resolve.
return False, None
dep = features_map[dep_type]['resolved'].get(dep_name)
if inherit_valid_platform and (valid_platform is None or valid_platform):
# If dep is None, the dependency does not exist because it has been
# filtered out by platform. This feature value does not explicitly
# specify platform data, so filter this feature value out.
# Only run this check if valid_platform is True or None so that it
# can't be reset once it is False.
valid_platform = dep is not None
if inherit_channel and dep and 'channel' in dep:
if channel is None or BranchUtility.NewestChannel(
(dep['channel'], channel)) != channel:
# Inherit the least stable channel from the dependencies.
channel = dep['channel']
# Default to stable on all platforms.
if valid_platform is None:
valid_platform = True
if valid_platform and channel is None:
channel = 'stable'
if valid_platform:
# The feature value is valid. Merge it into the feature dict.
if feature is None or BranchUtility.NewestChannel(
(most_stable_channel, channel)) != channel:
# If this is the first feature value to be merged, copy the dict.
# If this feature value has a more stable channel than the most stable
# channel so far, replace the old dict so that it only merges values
# from the most stable channel.
feature = copy(value)
most_stable_channel = channel
elif channel == most_stable_channel:
feature.update(value)
if feature is None:
# Nothing was left after filtering the values, but all dependency resolves
# were successful. This feature does not exist on |platform|.
return True, None
# Merge in any extra values.
for value in extra_feature_values:
feature.update(value)
# Cleanup, fill in missing fields.
if 'name' not in feature:
feature['name'] = feature_name
feature['channel'] = most_stable_channel
return True, feature
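# A tiny worked example (the feature data is hypothetical and the name
# contains no '.', so no parent lookup happens). With platform == 'extension'
# and
#
#   feature_values = [
#       {'channel': 'beta', 'extension_types': ['extension']},
#       {'channel': 'stable', 'extension_types': ['platform_app']},
#   ]
#
# only the first value survives the platform filter, so the resolved feature
# is {'name': feature_name, 'channel': 'beta',
#     'extension_types': ['extension']}.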
class _FeaturesCache(object):
def __init__(self,
file_system,
compiled_fs_factory,
json_paths,
extra_paths,
platform,
features_type):
self._cache = compiled_fs_factory.Create(
file_system, self._CreateCache, type(self), category=platform)
self._text_cache = compiled_fs_factory.ForUnicode(file_system)
self._json_paths = json_paths
self._extra_paths = extra_paths
self._platform = platform
self._features_type = features_type
@Unicode
def _CreateCache(self, _, features_json):
json_path_futures = [self._text_cache.GetFromFile(path)
for path in self._json_paths[1:]]
extra_path_futures = [self._text_cache.GetFromFile(path)
for path in self._extra_paths]
features_values = _CreateFeaturesFromJSONFutures(
[Future(value=features_json)] + json_path_futures)
extra_features_values = _CreateFeaturesFromJSONFutures(extra_path_futures)
features = {
'resolved': {},
'unresolved': copy(features_values),
'extra': extra_features_values,
'all_names': set(features_values.keys())
}
# Merges as many feature values as possible without resolving dependencies
# from other FeaturesCaches. Pass in a features_map with just this
# FeatureCache's features_type. Makes repeated passes until no new
# resolves are successful.
new_resolves = True
while new_resolves:
new_resolves = False
for feature_name, feature_values in features_values.iteritems():
if feature_name not in features['unresolved']:
continue
resolve_successful, feature = _ResolveFeature(
feature_name,
feature_values,
extra_features_values.get(feature_name, ()),
self._platform,
self._features_type,
{self._features_type: features})
if resolve_successful:
del features['unresolved'][feature_name]
new_resolves = True
if feature is not None:
features['resolved'][feature_name] = feature
return features
def GetFeatures(self):
if not self._json_paths:
return Future(value={})
return self._cache.GetFromFile(self._json_paths[0])
class FeaturesBundle(object):
'''Provides access to properties of API, Manifest, and Permission features.
'''
def __init__(self,
file_system,
compiled_fs_factory,
object_store_creator,
platform):
def create_features_cache(features_type, feature_file, *extra_paths):
return _FeaturesCache(
file_system,
compiled_fs_factory,
[Join(path, feature_file) for path in API_PATHS],
extra_paths,
self._platform,
features_type)
if platform not in GetExtensionTypes():
self._platform = PlatformToExtensionType(platform)
else:
self._platform = platform
self._caches = {
'api': create_features_cache('api', _API_FEATURES),
'manifest': create_features_cache(
'manifest',
_MANIFEST_FEATURES,
Join(JSON_TEMPLATES, 'manifest.json')),
'permission': create_features_cache(
'permission',
_PERMISSION_FEATURES,
Join(JSON_TEMPLATES, 'permissions.json'))
}
# Namespace the object store by the file system ID because this class is
# used by the availability finder cross-channel.
self._object_store = object_store_creator.Create(
_FeaturesCache,
category=StringIdentity(file_system.GetIdentity(), self._platform))
def GetPermissionFeatures(self):
return self.GetFeatures('permission', ('permission',))
def GetManifestFeatures(self):
return self.GetFeatures('manifest', ('manifest',))
def GetAPIFeatures(self):
return self.GetFeatures('api', ('api', 'manifest', 'permission'))
def GetFeatures(self, features_type, dependencies):
'''Resolves all dependencies in the categories specified by |dependencies|.
Returns the features in the |features_type| category.
'''
def next_(features):
if features is not None:
return Future(value=features)
dependency_futures = []
cache_types = []
for cache_type in dependencies:
cache_types.append(cache_type)
dependency_futures.append(self._object_store.Get(cache_type))
def load_features(dependency_features_list):
futures = []
for dependency_features, cache_type in zip(dependency_features_list,
cache_types):
if dependency_features is not None:
# Get cached dependencies if possible. If it has been cached, all
# of its features have been resolved, so the other fields are
# unnecessary.
futures.append(Future(value={'resolved': dependency_features}))
else:
futures.append(self._caches[cache_type].GetFeatures())
def resolve(features):
features_map = {}
for cache_type, feature in zip(cache_types, features):
# Copy down to features_map level because the 'resolved' and
# 'unresolved' dicts will be modified.
features_map[cache_type] = dict((c, copy(d))
for c, d in feature.iteritems())
def has_unresolved():
'''Determines if there are any unresolved features left over in any
of the categories in |dependencies|.
'''
return any(cache.get('unresolved')
for cache in features_map.itervalues())
# Iterate until everything is resolved. If dependencies are multiple
# levels deep, it might take multiple passes to inherit data to the
# topmost feature.
while has_unresolved():
for cache_type, cache in features_map.iteritems():
if 'unresolved' not in cache:
continue
to_remove = []
for name, values in cache['unresolved'].iteritems():
resolve_successful, feature = _ResolveFeature(
name,
values,
cache['extra'].get(name, ()),
self._platform,
cache_type,
features_map)
if not resolve_successful:
continue # Try again on the next iteration of the while loop
# When successfully resolved, remove it from the unresolved
# dict. Add it to the resolved dict if it didn't get deleted.
to_remove.append(name)
if feature is not None:
cache['resolved'][name] = feature
for key in to_remove:
del cache['unresolved'][key]
for cache_type, cache in features_map.iteritems():
self._object_store.Set(cache_type, cache['resolved'])
return features_map[features_type]['resolved']
return All(futures).Then(resolve)
return All(dependency_futures).Then(load_features)
return self._object_store.Get(features_type).Then(next_)
| bsd-3-clause |
markofu/scripts | nmap/nmap/zenmap/build/lib/zenmapGUI/higwidgets/higboxes.py | 4 | 10511 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
"""
higwidgets/higboxes.py
box related classes
"""
__all__ = ['HIGHBox', 'HIGVBox']
import gtk
class HIGBox(gtk.Box):
def _pack_noexpand_nofill(self, widget):
self.pack_start(widget, expand=False, fill=False)
def _pack_expand_fill(self, widget):
self.pack_start(widget, expand=True, fill=True)
class HIGHBox(gtk.HBox, HIGBox):
def __init__(self, homogeneous=False, spacing=12):
gtk.HBox.__init__(self, homogeneous, spacing)
pack_section_label = HIGBox._pack_noexpand_nofill
pack_label = HIGBox._pack_noexpand_nofill
pack_entry = HIGBox._pack_expand_fill
class HIGVBox(gtk.VBox, HIGBox):
def __init__(self, homogeneous=False, spacing=12):
gtk.VBox.__init__(self, homogeneous, spacing)
# Packs a widget as a line, so it doesn't expand vertically
pack_line = HIGBox._pack_noexpand_nofill
class HIGSpacer(HIGHBox):
def __init__(self, widget=None):
HIGHBox.__init__(self)
self.set_spacing(6)
self._pack_noexpand_nofill(hig_box_space_holder())
if widget:
self._pack_expand_fill(widget)
self.child = widget
def get_child(self):
return self.child
def hig_box_space_holder():
return gtk.Label(" ")
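# Usage sketch (the label text is hypothetical): HIGSpacer indents its child
# by packing a fixed-width space holder before it:
#
#   box = HIGVBox()
#   box.pack_line(HIGSpacer(gtk.Label("indented content")))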
| gpl-2.0 |
chaos95/fluxxbot | pyGBot/Plugins/system/CommandSpec/PlayGame.py | 1 | 2071 | ##
## pyGBot - Versatile IRC Bot
## Copyright (C) 2008 Morgan Lokhorst-Blight, Alex Soborov
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
##
from pyGBot.Plugins.system.Commands import BaseCommand
from pyGBot.Plugins.system.Auth import AuthLevels as AL
class PlayGame(BaseCommand):
level = AL.Mod
def __init__(self, bot, channel, user, args):
args = args.split()
        if channel == user or channel is None:
            bot.noteout(user, "This command must be run in a channel.")
return
if len(args) == 0:
bot.pubout(channel, 'Please specify a game. use the list command to see what I can play.')
return
for p in bot.activeplugins:
if p.startswith('games.'):
bot.pubout(channel, 'There is a game already active. Use the stop command to deactivate it.')
return
actname = ""
for pn in bot.plugins.iterkeys():
if pn.startswith('games.') and pn.lower() == ('games.%s' % args[0].lower()):
actname = pn
if actname == "":
bot.pubout(channel, "That game is not in my databanks.")
elif bot.activatePlugin(actname, channel):
bot.pubout(channel, "%s started." % actname.split('.')[1])
else:
bot.pubout(channel, "Error activating %s" % actname.split('.')[1])
| gpl-3.0 |
GitYiheng/reinforcement_learning_test | test04_neural_network/tf_syntax_basics.py | 1 | 1051 | import tensorflow as tf
# print(tf.__version__)
# tensor = n-dimensional array
hello = tf.constant("Hello ")
world = tf.constant("World")
# print(type(hello))
# print(hello)
# with tf.Session() as sess:
# result = sess.run(hello + world)
# print(result)
# a = tf.constant(10)
# b = tf.constant(20)
# print(type(a))
# print(a + b)
# print(a + b)
# print(a + b)
# with tf.Session() as sess:
# result = sess.run(a+b)
# print(result)
# const = tf.constant(10)
# fill_mat = tf.fill((4, 4), 10)
# myzeros = tf.zeros((4, 4))
# myones = tf.ones((4, 4))
# myrandn = tf.random_normal((4, 4), mean=0, stddev=1.0)
# myrandu = tf.random_uniform((4, 4), minval=0, maxval=1)
#
# my_ops = [const, fill_mat, myzeros, myones, myrandn, myrandu]
#
# with tf.Session() as sess:
# for op in my_ops:
# print(sess.run(op))
a = tf.constant([ [1, 2], \
[3, 4]])
print(a.get_shape())
b = tf.constant([ [10], \
[100]])
result = tf.matmul(a, b)
with tf.Session() as sess:
sess.run(result)
print(result.eval())
| mit |
akhof/Python-Sorting-Algorithms | src/Example.py | 1 | 1482 | from PySortAlgos import *
def example_1():
persons = {"Thomas":21, "Ursula":58, "Mike":19, "Max":30, "Tina":21}
person_sortedKeysByAge1 = BubbleSort(key=lambda p: persons[p]).sort(persons.keys())
person_sortedKeysByAge2 = QuickSort(key=lambda p: persons[p]).sort(persons.keys())
print("Sorted with BubbleSort (stable):")
for key in person_sortedKeysByAge1:
print("{} ({})".format(key, persons[key]))
print("\nSorted with QuickSort (not so stable):")
for key in person_sortedKeysByAge2:
print("{} ({})".format(key, persons[key]))
def example_2():
class Person(object):
def __init__(self, name, age):
self.name = name
self.age = age
def __str__(self, *args, **kwargs):
return "{} ({})".format(self.name, self.age)
persons = [Person("Thomas", 21), Person("Ursula", 58), Person("Mike", 19), Person("Max", 30), Person("Tina", 21)]
persons_sortedByName = BubbleSort(compare=lambda x,y: 0 if x==y else (-1 if x<y else 1), key=lambda p: p.name.upper()).sort(persons)
persons_sortedByAge = BubbleSort(key=lambda p: p.age).sort(persons)
print("Index\tpre sorted\tsorted by age\tsorted by name")
for i in range(len(persons)):
print("{}\t{}\t{}\t{}".format(i, persons[i], persons_sortedByAge[i], persons_sortedByName[i]))
if __name__ == "__main__":
example_1()
    example_2()
| mit |
ivesbai/mediadrop | mediadrop/validation/limit_feed_items_validator.py | 10 | 1106 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from formencode.validators import Int
from pylons import request
__all__ = ['LimitFeedItemsValidator']
class LimitFeedItemsValidator(Int):
min = 1
def empty_value(self, value):
return self.default_limit(request.settings)
@property
def if_missing(self):
return self.default_limit(request.settings)
@property
def if_invalid(self):
return self.default_limit(request.settings)
def default_limit(self, settings):
default_feed_results = settings.get('default_feed_results')
if default_feed_results in ('', '-1'):
return None
elif default_feed_results is None:
return 30
return int(default_feed_results)
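# Behaviour sketch (the setting values are hypothetical):
#
#   default_feed_results == '30'        -> missing/empty input becomes 30
#   default_feed_results in ('', '-1')  -> unlimited (None)
#   default_feed_results is None        -> falls back to 30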
| gpl-3.0 |
CoolDevelopment/MoshKernel-amami | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
yceruto/django | tests/view_tests/tests/test_i18n.py | 6 | 9398 | # -*- coding:utf-8 -*-
import gettext
import os
from os import path
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase, TestCase
from django.test.utils import override_settings
from django.utils import six
from django.utils._os import upath
from django.utils.translation import override
from django.utils.text import javascript_quote
try:
from selenium.webdriver.firefox import webdriver as firefox
except ImportError:
firefox = None
from ..urls import locale_dir
class I18NTests(TestCase):
""" Tests django views in django/views/i18n.py """
def test_setlang(self):
"""
The set_language view can be used to change the session language.
The user is redirected to the 'next' argument if provided.
"""
for lang_code, lang_name in settings.LANGUAGES:
post_data = dict(language=lang_code, next='/views/')
response = self.client.post('/views/i18n/setlang/', data=post_data)
self.assertRedirects(response, 'http://testserver/views/')
self.assertEqual(self.client.session['_language'], lang_code)
def test_setlang_unsafe_next(self):
"""
The set_language view only redirects to the 'next' argument if it is
"safe".
"""
lang_code, lang_name = settings.LANGUAGES[0]
post_data = dict(language=lang_code, next='//unsafe/redirection/')
response = self.client.post('/views/i18n/setlang/', data=post_data)
self.assertEqual(response.url, 'http://testserver/')
self.assertEqual(self.client.session['_language'], lang_code)
def test_setlang_reversal(self):
self.assertEqual(reverse('set_language'), '/views/i18n/setlang/')
def test_jsi18n(self):
"""The javascript_catalog can be deployed with language settings"""
for lang_code in ['es', 'fr', 'ru']:
with override(lang_code):
catalog = gettext.translation('djangojs', locale_dir, [lang_code])
if six.PY3:
trans_txt = catalog.gettext('this is to be translated')
else:
trans_txt = catalog.ugettext('this is to be translated')
response = self.client.get('/views/jsi18n/')
# response content must include a line like:
# "this is to be translated": <value of trans_txt Python variable>
# javascript_quote is used to be able to check unicode strings
self.assertContains(response, javascript_quote(trans_txt), 1)
if lang_code == 'fr':
# Message with context (msgctxt)
self.assertContains(response, r'"month name\u0004May": "mai"', 1)
class JsI18NTests(TestCase):
"""
Tests django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE.
"""
def test_jsi18n_with_missing_en_files(self):
"""
The javascript_catalog shouldn't load the fallback language in the
case that the current selected language is actually the one translated
from, and hence missing translation files completely.
This happens easily when you're translating from English to other
languages and you've set settings.LANGUAGE_CODE to some other language
than English.
"""
with self.settings(LANGUAGE_CODE='es'), override('en-us'):
response = self.client.get('/views/jsi18n/')
self.assertNotContains(response, 'esto tiene que ser traducido')
def test_jsi18n_fallback_language(self):
"""
Let's make sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('fi'):
response = self.client.get('/views/jsi18n/')
self.assertContains(response, 'il faut le traduire')
def testI18NLanguageNonEnglishDefault(self):
"""
Check if the Javascript i18n view returns an empty language catalog
if the default language is non-English, the selected language
        is English and there is no 'en' translation available. See #13388,
#3594 and #13726 for more details.
"""
with self.settings(LANGUAGE_CODE='fr'), override('en-us'):
response = self.client.get('/views/jsi18n/')
self.assertNotContains(response, 'Choisir une heure')
def test_nonenglish_default_english_userpref(self):
"""
Same as above with the difference that there IS an 'en' translation
available. The Javascript i18n view must return a NON empty language catalog
with the proper English translations. See #13726 for more details.
"""
extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app0']
with self.settings(LANGUAGE_CODE='fr', INSTALLED_APPS=extended_apps):
with override('en-us'):
response = self.client.get('/views/jsi18n_english_translation/')
self.assertContains(response, javascript_quote('this app0 string is to be translated'))
def testI18NLanguageNonEnglishFallback(self):
"""
Makes sure that the fallback language is still working properly
in cases where the selected language cannot be found.
"""
with self.settings(LANGUAGE_CODE='fr'), override('none'):
response = self.client.get('/views/jsi18n/')
self.assertContains(response, 'Choisir une heure')
class JsI18NTestsMultiPackage(TestCase):
"""
Tests for django views in django/views/i18n.py that need to change
settings.LANGUAGE_CODE and merge JS translation from several packages.
"""
def testI18NLanguageEnglishDefault(self):
"""
Check if the JavaScript i18n view returns a complete language catalog
if the default language is en-us, the selected language has a
translation available and a catalog composed by djangojs domain
translations of multiple Python packages is requested. See #13388,
#3594 and #13514 for more details.
"""
extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app1', 'view_tests.app2']
with self.settings(LANGUAGE_CODE='en-us', INSTALLED_APPS=extended_apps):
with override('fr'):
response = self.client.get('/views/jsi18n_multi_packages1/')
self.assertContains(response, javascript_quote('il faut traduire cette chaîne de caractères de app1'))
def testI18NDifferentNonEnLangs(self):
"""
        Similar to above but with neither default nor requested language being
English.
"""
extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app3', 'view_tests.app4']
with self.settings(LANGUAGE_CODE='fr', INSTALLED_APPS=extended_apps):
with override('es-ar'):
response = self.client.get('/views/jsi18n_multi_packages2/')
self.assertContains(response, javascript_quote('este texto de app3 debe ser traducido'))
def testI18NWithLocalePaths(self):
extended_locale_paths = settings.LOCALE_PATHS + (
path.join(path.dirname(
path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),)
with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
with override('es-ar'):
response = self.client.get('/views/jsi18n/')
self.assertContains(response,
javascript_quote('este texto de app3 debe ser traducido'))
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)
@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
@unittest.skipUnless(firefox, 'Selenium not installed')
class JavascriptI18nTests(LiveServerTestCase):
available_apps = []
urls = 'view_tests.urls'
@classmethod
def setUpClass(cls):
cls.selenium = firefox.WebDriver()
super(JavascriptI18nTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.refresh() # see ticket #21227
cls.selenium.quit()
super(JavascriptI18nTests, cls).tearDownClass()
@override_settings(LANGUAGE_CODE='de')
def test_javascript_gettext(self):
self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))
elem = self.selenium.find_element_by_id("gettext")
self.assertEqual(elem.text, "Entfernen")
elem = self.selenium.find_element_by_id("ngettext_sing")
self.assertEqual(elem.text, "1 Element")
elem = self.selenium.find_element_by_id("ngettext_plur")
self.assertEqual(elem.text, "455 Elemente")
elem = self.selenium.find_element_by_id("pgettext")
self.assertEqual(elem.text, "Kann")
elem = self.selenium.find_element_by_id("npgettext_sing")
self.assertEqual(elem.text, "1 Resultat")
elem = self.selenium.find_element_by_id("npgettext_plur")
self.assertEqual(elem.text, "455 Resultate")
def test_escaping(self):
# Force a language via GET otherwise the gettext functions are a noop!
response = self.client.get('/jsi18n_admin/?language=de')
self.assertContains(response, '\\x04')
| bsd-3-clause |
mgrygoriev/CloudFerry | cloudferrylib/os/discovery/keystone.py | 1 | 1594 | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from marshmallow import fields
from keystoneclient import exceptions
from cloudferrylib.os.discovery import model
LOG = logging.getLogger(__name__)
@model.type_alias('tenants')
class Tenant(model.Model):
class Schema(model.Schema):
object_id = model.PrimaryKey('id')
name = fields.String(required=True)
enabled = fields.Boolean(required=True)
description = fields.String(allow_none=True)
@classmethod
def load_missing(cls, cloud, object_id):
identity_client = cloud.identity_client()
try:
raw_tenant = identity_client.tenants.get(object_id.id)
return cls.load_from_cloud(cloud, raw_tenant)
except exceptions.NotFound:
return None
@classmethod
def discover(cls, cloud):
identity_client = cloud.identity_client()
with model.Session() as session:
for tenant in identity_client.tenants.list():
session.store(Tenant.load_from_cloud(cloud, tenant))
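# Illustrative usage (the cloud object is hypothetical):
#
#   tenant = Tenant.load_missing(cloud, object_id)  # None if it's gone
#   Tenant.discover(cloud)  # stores every visible tenant in a model.Session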
| apache-2.0 |
SenorPez/project-cars-replay-enhancer | test/test_ParticipantPacket.py | 1 | 6889 | """Tests ParticipantPacket.py
"""
import unittest
from hashlib import md5
from struct import pack
from replayenhancer.ParticipantPacket import ParticipantPacket
class TestParticipantPacket(unittest.TestCase):
"""Unit tests against the ParticipantPacket object.
"""
expected_build_version_number = 12345
expected_packet_type = 1
expected_car_name = "F1 W07 Hybrid"
expected_car_class_name = "F1 2016"
expected_track_location = "Abu Dhabi"
expected_track_variation = "Grand Prix"
expected_name = [
"Nico Rosberg",
"Lewis Hamilton",
"Daniel Ricciardo",
"Sebastian Vettel",
"Max Verstappen",
"Kimi Räikkönen",
"Sergio Pérez",
"Valtteri Bottas",
"Nico Hülkenberg",
"Fernando Alonso",
"Felipe Massa",
"Carlos Sainz Jr.",
"Romain Grosjean",
"Daniil Kvyat",
"Jenson Button",
"Kevin Magnussen"
]
expected_packet_length = 1347
@classmethod
def binary_data(cls, **kwargs):
test_data = list()
packet_string = "HB64s64s64s64s"
packet_string += "64s" * 16
packet_string += "64x"
try:
test_data.append(kwargs['build_version_number'])
except KeyError:
test_data.append(cls.expected_build_version_number)
try:
test_data.append(kwargs['packet_type'])
except KeyError:
test_data.append(cls.expected_packet_type)
try:
test_data.append(kwargs['car_name'].encode('utf-8'))
except KeyError:
test_data.append(cls.expected_car_name.encode('utf-8'))
try:
test_data.append(kwargs['car_class_name'].encode('utf-8'))
except KeyError:
test_data.append(cls.expected_car_class_name.encode('utf-8'))
try:
test_data.append(kwargs['track_location'].encode('utf-8'))
except KeyError:
test_data.append(cls.expected_track_location.encode('utf-8'))
try:
test_data.append(kwargs['track_variation'].encode('utf-8'))
except KeyError:
test_data.append(cls.expected_track_variation.encode('utf-8'))
try:
test_data.extend([name.encode('utf-8') for name in kwargs['name']])
except KeyError:
test_data.extend(
[name.encode('utf-8') for name in cls.expected_name])
return pack(packet_string, *test_data)
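# binary_data() packs the class-level expected values by default; individual
# fields can be overridden per test, e.g. (illustrative call):
#   data = cls.binary_data(car_name="F1 W07 Hybrid\x00junk", packet_type=1)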
def test_init(self):
instance = ParticipantPacket(self.binary_data())
expected_result = ParticipantPacket
self.assertIsInstance(instance, expected_result)
def test_init_wrong_packet_length(self):
test_binary_data = pack("H", 42)
from struct import error
with self.assertRaises(error):
ParticipantPacket(test_binary_data)
def test_init_wrong_packet_type(self):
with self.assertRaises(ValueError):
ParticipantPacket(self.binary_data(packet_type=2))
def test_property_build_version_number(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_build_version_number
self.assertEqual(instance.build_version_number, expected_result)
def test_property_car_class_name(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_car_class_name
self.assertEqual(instance.car_class_name, expected_result)
def test_property_car_class_name_split_on_null(self):
instance = ParticipantPacket(self.binary_data(
car_class_name=self.expected_car_class_name + "\x00Garbage Data"))
expected_result = self.expected_car_class_name
self.assertEqual(instance.car_class_name, expected_result)
def test_property_car_name(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_car_name
self.assertEqual(instance.car_name, expected_result)
def test_property_car_name_split_on_null(self):
instance = ParticipantPacket(self.binary_data(
car_name=self.expected_car_name + "\x00Garbage Data"))
expected_result = self.expected_car_name
self.assertEqual(instance.car_name, expected_result)
def test_property_data_hash(self):
instance = ParticipantPacket(self.binary_data())
expected_result = md5(self.binary_data()).hexdigest()
self.assertEqual(instance.data_hash, expected_result)
def test_property_name(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_name
self.assertListEqual(instance.name, expected_result)
def test_property_name_split_on_null(self):
instance = ParticipantPacket(self.binary_data(
name=[name+'\x00Garbage Data' for name in self.expected_name]))
expected_result = self.expected_name
self.assertListEqual(instance.name, expected_result)
def test_property_packet_type(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_packet_type
self.assertEqual(instance.packet_type, expected_result)
def test_property_track_location(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_track_location
self.assertEqual(instance.track_location, expected_result)
def test_property_track_location_split_on_null(self):
instance = ParticipantPacket(self.binary_data(
track_location=self.expected_track_location + "\x00Garbage Data"))
expected_result = self.expected_track_location
self.assertEqual(instance.track_location, expected_result)
def test_property_track_variation(self):
instance = ParticipantPacket(self.binary_data())
expected_result = self.expected_track_variation
self.assertEqual(instance.track_variation, expected_result)
def test_property_track_variation_split_on_null(self):
instance = ParticipantPacket(self.binary_data(
track_variation=self.expected_track_variation + "\x00Garbage Data"))
expected_result = self.expected_track_variation
self.assertEqual(instance.track_variation, expected_result)
def test_method_repr(self):
instance = ParticipantPacket(self.binary_data())
expected_result = "ParticipantPacket"
self.assertEqual(repr(instance), expected_result)
def test_method_str(self):
instance = ParticipantPacket(self.binary_data())
expected_result = "ParticipantPacket"
self.assertEqual(str(instance), expected_result)
| mit |
z1gm4/desarrollo_web_udp | env/lib/python2.7/site-packages/coverage/python.py | 27 | 5903 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Python source expertise for coverage.py"""
import os.path
import zipimport
from coverage import env, files
from coverage.misc import contract, expensive, NoSource, join_regex, isolate_module
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
os = isolate_module(os)
@contract(returns='bytes')
def read_python_source(filename):
"""Read the Python source text from `filename`.
Returns bytes.
"""
with open(filename, "rb") as f:
return f.read().replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
exts = [".py", ".pyw"]
else:
exts = [ext]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
source = read_python_source(try_filename)
break
# Maybe it's in a zip file?
source = get_zip_bytes(try_filename)
if source is not None:
break
else:
# Couldn't find source.
raise NoSource("No source for code: '%s'." % filename)
source = source.decode(source_encoding(source), "replace")
# Python code should always end with a line with a newline.
if source and source[-1] != '\n':
source += '\n'
return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
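# For example (illustrative path): '/deps/vendor.zip/pkg/mod.py' is split on
# the '.zip' + os.sep marker and 'pkg/mod.py' is read out of vendor.zip.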
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
def __init__(self, morf, coverage=None):
self.coverage = coverage
if hasattr(morf, '__file__'):
filename = morf.__file__
else:
filename = morf
filename = files.unicode_filename(filename)
# .pyc files should always refer to a .py instead.
if filename.endswith(('.pyc', '.pyo')):
filename = filename[:-1]
elif filename.endswith('$py.class'): # Jython
filename = filename[:-9] + ".py"
super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
if hasattr(morf, '__name__'):
name = morf.__name__
name = name.replace(".", os.sep) + ".py"
name = files.unicode_filename(name)
else:
name = files.relative_filename(filename)
self.relname = name
self._source = None
self._parser = None
self._statements = None
self._excluded = None
@contract(returns='unicode')
def relative_filename(self):
return self.relname
@property
def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
return self._parser
@expensive
def lines(self):
"""Return the line numbers of statements in the file."""
if self._statements is None:
self._statements, self._excluded = self.parser.parse_source()
return self._statements
@expensive
def excluded_lines(self):
"""Return the line numbers of statements in the file."""
if self._excluded is None:
self._statements, self._excluded = self.parser.parse_source()
return self._excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
@expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
return no_branch
@expensive
def arcs(self):
return self.parser.arcs()
@expensive
def exit_counts(self):
return self.parser.exit_counts()
@contract(returns='unicode')
def source(self):
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
def source_token_lines(self):
return source_token_lines(self.source())
| gpl-3.0 |
AllisonWang/incubator-airflow | tests/contrib/hooks/test_spark_submit_hook.py | 7 | 13102 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import six
import sys
import unittest
from airflow import configuration, models
from airflow.utils import db
from mock import patch, call
from airflow.contrib.hooks.spark_submit_hook import SparkSubmitHook
class TestSparkSubmitHook(unittest.TestCase):
_spark_job_file = 'test_application.py'
_config = {
'conf': {
'parquet.compression': 'SNAPPY'
},
'conn_id': 'default_spark',
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'jars': 'parquet.jar',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/spark@airflow.org',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f', 'foo',
'--bar', 'bar',
'--with-spaces', 'args should keep embedded spaces',
'baz'
]
}
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
# Assumes every "--" flag is immediately followed by its value; enumerate
# avoids the first-occurrence pitfall of list.index() for repeated flags.
for pos, arg in enumerate(list_cmd):
if arg.startswith("--"):
return_dict[arg] = list_cmd[pos + 1]
return return_dict
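# Illustrative mapping: ['spark-submit', '--master', 'yarn', '--queue', 'root.etl']
# -> {'--master': 'yarn', '--queue': 'root.etl'}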
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='spark_yarn_cluster', conn_type='spark',
host='yarn://yarn-master', extra='{"queue": "root.etl", "deploy-mode": "cluster"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_default_mesos', conn_type='spark',
host='mesos://host', port=5050)
)
db.merge_conn(
models.Connection(
conn_id='spark_home_set', conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_home_not_set', conn_type='spark',
host='yarn://yarn-master')
)
db.merge_conn(
models.Connection(
conn_id='spark_binary_set', conn_type='spark',
host='yarn', extra='{"spark-binary": "custom-spark-submit"}')
)
db.merge_conn(
models.Connection(
conn_id='spark_binary_and_home_set', conn_type='spark',
host='yarn', extra='{"spark-home": "/path/to/spark_home", "spark-binary": "custom-spark-submit"}')
)
def test_build_command(self):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_command(self._spark_job_file)
# Then
expected_build_cmd = [
'spark-submit',
'--master', 'yarn',
'--conf', 'parquet.compression=SNAPPY',
'--files', 'hive-site.xml',
'--py-files', 'sample_library.py',
'--jars', 'parquet.jar',
'--num-executors', '10',
'--total-executor-cores', '4',
'--executor-cores', '4',
'--executor-memory', '22g',
'--driver-memory', '3g',
'--keytab', 'privileged_user.keytab',
'--principal', 'user/spark@airflow.org',
'--name', 'spark-job',
'--class', 'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f', 'foo',
'--bar', 'bar',
'--with-spaces', 'args should keep embedded spaces',
'baz'
]
self.assertEqual(expected_build_cmd, cmd)
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id='')
hook.submit()
# Then
self.assertEqual(
mock_popen.mock_calls[0],
call(['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
stderr=-2, stdout=-1, universal_newlines=True, bufsize=-1))
def test_resolve_connection_yarn_default(self):
# Given
hook = SparkSubmitHook(conn_id='')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn")
self.assertEqual(dict_cmd["--queue"], "root.default")
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default_mesos')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "mesos://host:5050")
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(dict_cmd["--master"], "yarn://yarn-master")
self.assertEqual(dict_cmd["--queue"], "root.etl")
self.assertEqual(dict_cmd["--deploy-mode"], "cluster")
def test_resolve_connection_spark_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/opt/myspark/bin/spark-submit')
def test_resolve_connection_spark_home_not_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_not_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'spark-submit')
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], 'custom-spark-submit')
def test_resolve_connection_spark_binary_and_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_command(self._spark_job_file)
# Then
expected_spark_connection = {"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home"}
self.assertEqual(connection, expected_spark_connection)
self.assertEqual(cmd[0], '/path/to/spark_home/bin/custom-spark-submit')
def test_process_log(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
'INFO Client: Submitting application application_1486558679801_1820 to ResourceManager'
]
# When
hook._process_log(log_lines)
# Then
self.assertEqual(hook._yarn_application_id, 'application_1486558679801_1820')
@patch('airflow.contrib.hooks.spark_submit_hook.subprocess.Popen')
def test_spark_process_on_kill(self, mock_popen):
# Given
mock_popen.return_value.stdout = six.StringIO('stdout')
mock_popen.return_value.stderr = six.StringIO('stderr')
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.wait.return_value = 0
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 NodeManagerapplication_1486558679801_1820s',
'INFO Client: Submitting application application_1486558679801_1820 to ResourceManager'
]
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook._process_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
self.assertIn(
call(['yarn', 'application', '-kill', 'application_1486558679801_1820'],
stderr=-1, stdout=-1),
mock_popen.mock_calls)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
AeroNotix/django-timetracker | tracker/api.py | 1 | 1202 | from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import ApiKeyAuthentication
from tastypie.authorization import Authorization
from timetracker.tracker.models import Tbluser
from timetracker.tracker.trackingentry import TrackingEntry
from timetracker.utils.datemaps import MARKET_CHOICES_LIST
class TbluserResource(ModelResource):
class Meta:
queryset = Tbluser.objects.all()
resource_name = 'users'
excludes = ["password"]
authentication = ApiKeyAuthentication()
authorization = Authorization()
resources = []
for market in MARKET_CHOICES_LIST:
class Klass(ModelResource):
class Meta:
queryset = Tbluser.objects.filter(market=market)
resource_name = 'user/%s' % market
excludes = ["password"]
authentication = ApiKeyAuthentication()
resources.append(Klass)
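# Each market gets its own filtered endpoint named 'user/<market>', so a GET
# against e.g. /api/user/<market>/ (URL shape illustrative; it depends on how
# the Api instance registers these resources) only returns that market's users.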
class TrackingEntryResource(ModelResource):
user = fields.ToOneField("timetracker.tracker.api.TbluserResource", "user")
class Meta:
queryset = TrackingEntry.objects.all()
resource_name = 'trackingentries'
authentication = ApiKeyAuthentication()
| bsd-3-clause |
heke123/chromium-crosswalk | tools/perf/page_sets/tough_image_cases.py | 35 | 1060 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class ToughImageCasesPage(page_module.Page):
def __init__(self, url, page_set):
super(ToughImageCasesPage, self).__init__(
url=url, page_set=page_set,
shared_page_state_class=shared_page_state.SharedDesktopPageState)
class ToughImageCasesPageSet(story.StorySet):
""" A collection of image-heavy sites. """
def __init__(self):
super(ToughImageCasesPageSet, self).__init__()
urls_list = [
'http://www.free-pictures-photos.com/aviation/airplane-306.jpg',
('http://upload.wikimedia.org/wikipedia/commons/c/cb/'
'General_history%2C_Alaska_Yukon_Pacific_Exposition%'
'2C_fully_illustrated_-_meet_me_in_Seattle_1909_-_Page_78.jpg')
]
for url in urls_list:
self.AddStory(ToughImageCasesPage(url, self))
| bsd-3-clause |
batxes/4c2vhic | SHH_INV_models/SHH_INV_models_final_output_0.2_-0.1_10000/mtx1_models/SHH_INV_models48063.py | 4 | 17576 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((5063.15, -154.98, 6315.39), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((4322.75, 1633.54, 5975.19), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((4870.32, 2859.89, 4820.13), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((5560.18, 1921.99, 2802.96), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((4656.44, 2633.83, 2407.37), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3107.87, 3754.73, 3760.01), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((400.803, 2771.4, 3345.73), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((-384.527, 2224.06, 2312.51), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((1334.23, 4427.65, 5908.77), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((1289.5, 4071.13, 6945.97), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2986.8, 3666.11, 7442.29), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((4576.72, 2817.07, 8379.3), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4807.13, 4411.12, 8300.53), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((5867.92, 5159.79, 6999.38), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((6976.38, 3793.58, 6959.53), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((8860.58, 2397.8, 7910.15), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((6960.03, 2546.33, 7884.02), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((5557.63, 1958.32, 7881.47), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((6765.88, 1817.89, 7019), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((7124.09, 3097.04, 7833.72), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5900.98, 4377.84, 7466.37), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((6324.49, 4011.73, 7381.52), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5528.31, 3569.04, 8357.9), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4618.44, 2684.36, 8103.9), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((5370.24, 2975.98, 6711.04), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6154.27, 4060.79, 7142.87), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((6695.85, 3113.9, 8018.73), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((6006.56, 4517.13, 8317.11), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((6801.09, 4808.39, 7924.31), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((7219.9, 5800.7, 7280.65), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5985.47, 4939.81, 6734.09), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((5060.57, 4795.71, 5501.53), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((4828.77, 4890.03, 7018.47), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((5367.6, 4494.67, 8376.69), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((5909.43, 3645.79, 8491.54), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5271.69, 3856.93, 9862.91), (0.7, 0.7, 0.7), 697.612)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((4381.88, 5456.24, 7529.69), (0.7, 0.7, 0.7), 799.808)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2277.39, 5797.85, 6641.03), (0.7, 0.7, 0.7), 1132.58)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((2700.37, 6548.93, 6475.7), (0.7, 0.7, 0.7), 1011.94)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((2089.27, 7807.13, 5562.14), (0.7, 0.7, 0.7), 782.592)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((419.582, 7614.39, 6172.68), (0.7, 0.7, 0.7), 856.575)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((-483.809, 7502.97, 6624.77), (1, 0.7, 0), 706.579)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((447.876, 7295.74, 6047.47), (0.7, 0.7, 0.7), 1015.96)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((2798.73, 7518.37, 4429.21), (0.7, 0.7, 0.7), 1205.72)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((4418.07, 8668.47, 3736.66), (0.7, 0.7, 0.7), 841.939)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((3803.79, 7404.65, 3036.43), (1, 0.7, 0), 806.999)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((4113.87, 7761.28, 2604.77), (0.7, 0.7, 0.7), 958.856)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((5643.34, 7115.76, 1696.03), (0.7, 0.7, 0.7), 952.892)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((6209.02, 7719.4, 1430.43), (0.7, 0.7, 0.7), 809.284)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((5369.43, 8220.88, 1513.97), (0.7, 0.7, 0.7), 709.159)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((6962.18, 7746.57, 2120.11), (0.7, 0.7, 0.7), 859.832)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((7446.42, 6401.53, 3122.2), (0.7, 0.7, 0.7), 800.866)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((7538.32, 8038.57, 3955.86), (0.7, 0.7, 0.7), 949.508)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((6269.45, 8675.69, 5249.81), (0.7, 0.7, 0.7), 891.98)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((6736.25, 6744.55, 5180.8), (0.7, 0.7, 0.7), 890.034)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((7072.98, 7443.21, 3650.57), (0.7, 0.7, 0.7), 804.165)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((8280.97, 8458.31, 3097.68), (0.7, 0.7, 0.7), 826.796)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((8753.07, 8760.38, 4572.16), (0.7, 0.7, 0.7), 1085.8)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((6534.85, 7864.44, 5379.48), (0.7, 0.7, 0.7), 906.997)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((7036.97, 8786.52, 3977.86), (0.7, 0.7, 0.7), 708.694)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((7126.91, 8235.7, 2533.92), (0.7, 0.7, 0.7), 780.223)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((6163.6, 9181.57, 1732.73), (0.7, 0.7, 0.7), 757.424)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((6077.64, 8483.79, 284.178), (0.7, 0.7, 0.7), 817.574)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((5780.23, 8535.28, -346.838), (0.7, 0.7, 0.7), 782.423)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((5589.82, 7173.04, 679.26), (0.7, 0.7, 0.7), 906.404)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((6316.09, 8467, 1731.79), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((6201.09, 7185.38, 804.563), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((5634.94, 6423.59, 611.194), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((6635.37, 8247.48, 551.183), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((5213.16, 8207.15, 509.083), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((6571.34, 7517.72, 603.44), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
pdonadeo/django-oscar | tests/functional/catalogue/catalogue_tests.py | 36 | 4107 | from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.utils.six.moves import http_client
from oscar.apps.catalogue.models import Category
from oscar.test.decorators import ignore_deprecation_warnings
from oscar.test.testcases import WebTestCase
from oscar.test.factories import create_product
class TestProductDetailView(WebTestCase):
def test_enforces_canonical_url(self):
p = create_product()
kwargs = {'product_slug': '1_wrong-but-valid-slug_1',
'pk': p.id}
wrong_url = reverse('catalogue:detail', kwargs=kwargs)
response = self.app.get(wrong_url)
self.assertEqual(http_client.MOVED_PERMANENTLY, response.status_code)
self.assertTrue(p.get_absolute_url() in response.location)
def test_child_to_parent_redirect(self):
parent_product = create_product(structure='parent')
kwargs = {'product_slug': parent_product.slug,
'pk': parent_product.id}
parent_product_url = reverse('catalogue:detail', kwargs=kwargs)
child = create_product(
title="Variant 1", structure='child', parent=parent_product)
kwargs = {'product_slug': child.slug,
'pk': child.id}
child_url = reverse('catalogue:detail', kwargs=kwargs)
response = self.app.get(parent_product_url)
self.assertEqual(http_client.OK, response.status_code)
response = self.app.get(child_url)
self.assertEqual(http_client.MOVED_PERMANENTLY, response.status_code)
class TestProductListView(WebTestCase):
def test_shows_add_to_basket_button_for_available_product(self):
product = create_product(num_in_stock=1)
page = self.app.get(reverse('catalogue:index'))
self.assertContains(page, product.title)
self.assertContains(page, "Add to basket")
def test_shows_not_available_for_out_of_stock_product(self):
product = create_product(num_in_stock=0)
page = self.app.get(reverse('catalogue:index'))
self.assertContains(page, product.title)
self.assertContains(page, "Unavailable")
def test_shows_pagination_navigation_for_multiple_pages(self):
per_page = settings.OSCAR_PRODUCTS_PER_PAGE
title = u"Product #%d"
for idx in range(0, int(1.5 * per_page)):
create_product(title=title % idx)
page = self.app.get(reverse('catalogue:index'))
self.assertContains(page, "Page 1 of 2")
class TestProductCategoryView(WebTestCase):
def setUp(self):
self.category = Category.add_root(name="Products")
def test_browsing_works(self):
correct_url = self.category.get_absolute_url()
response = self.app.get(correct_url)
self.assertEqual(http_client.OK, response.status_code)
def test_enforces_canonical_url(self):
kwargs = {'category_slug': '1_wrong-but-valid-slug_1',
'pk': self.category.pk}
wrong_url = reverse('catalogue:category', kwargs=kwargs)
response = self.app.get(wrong_url)
self.assertEqual(http_client.MOVED_PERMANENTLY, response.status_code)
self.assertTrue(self.category.get_absolute_url() in response.location)
@ignore_deprecation_warnings
def test_can_chop_off_last_part_of_url(self):
# We cache category URLs, which normally is a safe thing to do, as
# the primary key stays the same and ProductCategoryView only looks
# at the key any way.
# But this test chops the URLs, and hence relies on the URLs being
# correct. So in this case, we start with a clean cache to ensure
# our URLs are correct.
cache.clear()
child_category = self.category.add_child(name='Cool products')
full_url = child_category.get_absolute_url()
chopped_url = full_url.rsplit('/', 2)[0]
parent_url = self.category.get_absolute_url()
response = self.app.get(chopped_url).follow() # fails if no redirect
self.assertTrue(response.url.endswith(parent_url))
| bsd-3-clause |
BonexGu/Blik2D-SDK | Blik2D/addon/opencv-3.1.0_for_blik/modules/python/test/ticket_6.py | 13 | 2971 | #!/usr/bin/env python
import urllib
import cv2.cv as cv
import Image
import unittest
class TestLoadImage(unittest.TestCase):
def setUp(self):
open("large.jpg", "w").write(urllib.urlopen("http://www.cs.ubc.ca/labs/lci/curious_george/img/ROS_bug_imgs/IMG_3560.jpg").read())
def test_load(self):
pilim = Image.open("large.jpg")
cvim = cv.LoadImage("large.jpg")
self.assert_(len(pilim.tostring()) == len(cvim.tostring()))
class Creating(unittest.TestCase):
size=(640, 480)
repeat=100
def test_0_Create(self):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cnt=cv.CountNonZero(image)
self.assertEqual(cnt, 0, msg="Created image is not black. CountNonZero=%i" % cnt)
def test_2_CreateRepeat(self):
cnt=0
for i in range(self.repeat):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cnt+=cv.CountNonZero(image)
self.assertEqual(cnt, 0, msg="Created images are not black. Mean CountNonZero=%.3f" % (1.*cnt/self.repeat))
def test_2a_MemCreated(self):
cnt=0
v=[]
for i in range(self.repeat):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cv.FillPoly(image, [[(0, 0), (0, 100), (100, 0)]], 0)
cnt+=cv.CountNonZero(image)
v.append(image)
self.assertEqual(cnt, 0, msg="Memorized images are not black. Mean CountNonZero=%.3f" % (1.*cnt/self.repeat))
def test_3_tostring(self):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
image.tostring()
cnt=cv.CountNonZero(image)
self.assertEqual(cnt, 0, msg="After tostring(): CountNonZero=%i" % cnt)
def test_40_tostringRepeat(self):
cnt=0
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cv.Set(image, cv.Scalar(0,0,0,0))
for i in range(self.repeat*100):
image.tostring()
cnt=cv.CountNonZero(image)
self.assertEqual(cnt, 0, msg="Repeating tostring(): Mean CountNonZero=%.3f" % (1.*cnt/self.repeat))
def test_41_CreateToStringRepeat(self):
cnt=0
for i in range(self.repeat*100):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cv.Set(image, cv.Scalar(0,0,0,0))
image.tostring()
cnt+=cv.CountNonZero(image)
self.assertEqual(cnt, 0, msg="Repeating create and tostring(): Mean CountNonZero=%.3f" % (1.*cnt/self.repeat))
def test_4a_MemCreatedToString(self):
cnt=0
v=[]
for i in range(self.repeat):
image = cv.CreateImage(self.size, cv.IPL_DEPTH_8U, 1)
cv.Set(image, cv.Scalar(0,0,0,0))
image.tostring()
cnt+=cv.CountNonZero(image)
v.append(image)
self.assertEqual(cnt, 0, msg="Repeating and memorizing after tostring(): Mean CountNonZero=%.3f" % (1.*cnt/self.repeat))
if __name__ == '__main__':
unittest.main()
| mit |
0359xiaodong/viewfinder | backend/www/test/update_user_photo_test.py | 13 | 4568 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Tests update_user_photo method.
"""
__author__ = ['ben@emailscrubbed.com (Ben Darnell)']
import time
from copy import deepcopy
from viewfinder.backend.base import util
from viewfinder.backend.db.db_client import DBKey
from viewfinder.backend.db.episode import Episode
from viewfinder.backend.db.photo import Photo
from viewfinder.backend.db.user_photo import UserPhoto
from viewfinder.backend.www.test import service_base_test
class UpdateUserPhotoTestCase(service_base_test.ServiceBaseTestCase):
def testUpdateUserPhoto(self):
"""Assign different asset keys to the same photo from different users."""
episode_id, photo_id = self._UploadEpisodeWithPhoto()
# Assign an asset key for user 1
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1'])
# User 2 doesn't own the photo (and doesn't even have access to it!) but can still set asset keys.
self._tester.UpdateUserPhoto(self._cookie2, photo_id, asset_keys=['a/#asset-key-2'])
# User 2 can't read the episode yet.
self.assertEqual([],
self._tester.QueryEpisodes(self._cookie2,
[{'episode_id': episode_id, 'get_photos': True}])['episodes'])
# Share the episode with user 2, and then try fetching the asset key
vp_id, new_ep_ids = self._tester.ShareNew(self._cookie, [(episode_id, [photo_id])], [self._user2.user_id])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie2, new_ep_ids[0])), ['a/#asset-key-2'])
def testReplaceUserPhoto(self):
"""Change the asset keys associated with a user/photo."""
episode_id, photo_id = self._UploadEpisodeWithPhoto()
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1'])
self.assertEqual(self._GetAssetKeys(self._cookie, episode_id), ['a/#asset-key-1'])
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=['a/#asset-key-1', 'a/#asset-key-2'])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie, episode_id)), ['a/#asset-key-1', 'a/#asset-key-2'])
# Asset keys are append-only; an empty update doesn't remove what's there.
self._tester.UpdateUserPhoto(self._cookie, photo_id, asset_keys=[])
self.assertEqual(sorted(self._GetAssetKeys(self._cookie, episode_id)), ['a/#asset-key-1', 'a/#asset-key-2'])
def _GetAssetKeys(self, cookie, ep_id):
episodes = self._tester.QueryEpisodes(cookie, [{'episode_id': ep_id, 'get_photos': True}])
photo = episodes['episodes'][0]['photos'][0]
return photo.get('asset_keys')
def _UploadEpisodeWithPhoto(self):
"""Create episode with photo and upload.
Returns: (episode_id, photo_id) for the uploaded episode and photo.
"""
timestamp = time.time()
episode_id = Episode.ConstructEpisodeId(timestamp, self._device_ids[0], 100)
ep_dict = {'episode_id': episode_id,
'timestamp': timestamp,
'title': 'Episode Title'}
photo_id = Photo.ConstructPhotoId(timestamp, self._device_ids[0], 100)
ph_dict = {'aspect_ratio': 1.3333,
'timestamp': timestamp,
'tn_md5': util.ComputeMD5Hex('thumbnail image data'),
'med_md5': util.ComputeMD5Hex('medium image data'),
'full_md5': util.ComputeMD5Hex('full image data'),
'orig_md5': util.ComputeMD5Hex('original image data'),
'tn_size': 5*1024,
'med_size': 10*1024,
'full_size': 150*1024,
'orig_size': 1200*1024,
'caption': 'a photo',
'photo_id': photo_id}
self._tester.UploadEpisode(self._cookie, ep_dict, [ph_dict])
return episode_id, photo_id
def _TestUpdateUserPhoto(tester, user_cookie, request_dict):
validator = tester.validator
user_id, device_id = tester.GetIdsFromCookie(user_cookie)
request_dict = deepcopy(request_dict)
actual_dict = tester.SendRequest('update_user_photo', user_cookie, request_dict)
existing = validator.GetModelObject(UserPhoto, DBKey(user_id, request_dict['photo_id']), must_exist=False)
if existing is None:
asset_keys = request_dict['asset_keys']
else:
asset_keys = set(request_dict['asset_keys'])
asset_keys.update(existing.asset_keys)
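# Asset keys are append-only: the validator mirrors the server-side merge by
# unioning the new keys with whatever the model object already holds.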
up_dict = {'user_id': user_id,
'photo_id': request_dict['photo_id'],
'asset_keys': asset_keys}
validator.ValidateUpdateDBObject(UserPhoto, **up_dict)
tester._CompareResponseDicts('update_user_photo', user_id, request_dict, {}, actual_dict)
return actual_dict
| apache-2.0 |
seccomp/libseccomp | tests/39-basic-api_level.py | 1 | 2177 | #!/usr/bin/env python
#
# Seccomp Library test program
#
# Copyright (c) 2016 Red Hat <pmoore@redhat.com>
# Copyright (c) 2017 Canonical Ltd.
# Authors: Paul Moore <paul@paul-moore.com>
# Tyler Hicks <tyhicks@canonical.com>
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of version 2.1 of the GNU Lesser General Public License as
# published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, see <http://www.gnu.org/licenses>.
#
import argparse
import sys
import util
from seccomp import *
def test():
api = get_api()
if (api < 1):
raise RuntimeError("Failed getting initial API level")
set_api(1)
api = get_api()
if api != 1:
raise RuntimeError("Failed getting API level 1")
set_api(2)
api = get_api()
if api != 2:
raise RuntimeError("Failed getting API level 2")
set_api(3)
api = get_api()
if api != 3:
raise RuntimeError("Failed getting API level 3")
set_api(4)
api = get_api()
if api != 4:
raise RuntimeError("Failed getting API level 4")
set_api(5)
api = get_api()
if api != 5:
raise RuntimeError("Failed getting API level 5")
set_api(6)
api = get_api()
if api != 6:
raise RuntimeError("Failed getting API level 6")
# Attempt to set a high, invalid API level
try:
set_api(1024)
except ValueError:
pass
else:
raise RuntimeError("Missing failure when setting invalid API level")
# Ensure that the previously set API level didn't change
api = get_api()
if api != 6:
raise RuntimeError("Failed getting old API level after setting an invalid API level")
test()
# kate: syntax python;
# kate: indent-mode python; space-indent on; indent-width 4; mixedindent off;
| lgpl-2.1 |
chand3040/cloud_that | pavelib/paver_tests/test_paver_bok_choy_cmds.py | 74 | 3080 | """
Tests for the bok-choy paver commands themselves.
Run just this test with: paver test_lib -t pavelib/paver_tests/test_paver_bok_choy_cmds.py
"""
import os
import unittest
from pavelib.utils.test.suites import BokChoyTestSuite
REPO_DIR = os.getcwd()
class TestPaverBokChoyCmd(unittest.TestCase):
def _expected_command(self, name, store=None):
"""
Returns the command that is expected to be run for the given test spec
and store.
"""
shard = os.environ.get('SHARD')
expected_statement = (
"DEFAULT_STORE={default_store} "
"SCREENSHOT_DIR='{repo_dir}/test_root/log{shard_str}' "
"BOK_CHOY_HAR_DIR='{repo_dir}/test_root/log{shard_str}/hars' "
"SELENIUM_DRIVER_LOG_DIR='{repo_dir}/test_root/log{shard_str}' "
"nosetests {repo_dir}/common/test/acceptance/{exp_text} "
"--with-xunit "
"--xunit-file={repo_dir}/reports/bok_choy{shard_str}/xunit.xml "
"--verbosity=2 "
).format(
default_store=store,
repo_dir=REPO_DIR,
shard_str='/shard_' + shard if shard else '',
exp_text=name,
)
return expected_statement
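# Illustrative expansion: with SHARD unset, name='tests' and store=None, the
# command starts with "DEFAULT_STORE=None SCREENSHOT_DIR='<repo>/test_root/log' ..."
# and ends with the nosetests invocation against common/test/acceptance/tests.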
def test_default(self):
suite = BokChoyTestSuite('')
name = 'tests'
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_suite_spec(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_class_spec(self):
spec = 'test_foo.py:FooTest'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_testcase_spec(self):
spec = 'test_foo.py:FooTest.test_bar'
suite = BokChoyTestSuite('', test_spec=spec)
name = 'tests/{}'.format(spec)
self.assertEqual(suite.cmd, self._expected_command(name=name))
def test_spec_with_draft_default_store(self):
spec = 'test_foo.py'
suite = BokChoyTestSuite('', test_spec=spec, default_store='draft')
name = 'tests/{}'.format(spec)
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='draft')
)
def test_invalid_default_store(self):
# the cmd will dumbly compose whatever we pass in for the default_store
suite = BokChoyTestSuite('', default_store='invalid')
name = 'tests'
self.assertEqual(
suite.cmd,
self._expected_command(name=name, store='invalid')
)
def test_serversonly(self):
suite = BokChoyTestSuite('', serversonly=True)
self.assertEqual(suite.cmd, "")
def test_test_dir(self):
test_dir = 'foo'
suite = BokChoyTestSuite('', test_dir=test_dir)
self.assertEqual(
suite.cmd,
self._expected_command(name=test_dir)
)
| agpl-3.0 |
bslatkin/pycon2014 | lib/asyncio-0.4.1/asyncio/base_events.py | 6 | 30987 | """Base implementation of event loop.
The event loop can be broken up into a multiplexer (the part
responsible for notifying us of IO events) and the event loop proper,
which wraps a multiplexer with functionality for scheduling callbacks,
immediately or at a given time in the future.
Whenever a public API takes a callback, subsequent positional
arguments will be passed to the callback if/when it is called. This
avoids the proliferation of trivial lambdas implementing closures.
Keyword arguments for the callback are not supported; this is a
conscious design decision, leaving the door open for keyword arguments
to modify the meaning of the API call itself.
"""
import collections
import concurrent.futures
import heapq
import logging
import socket
import subprocess
import time
import os
import sys
from . import events
from . import futures
from . import tasks
from .log import logger
__all__ = ['BaseEventLoop', 'Server']
# Argument for default thread pool executor creation.
_MAX_WORKERS = 5
class _StopError(BaseException):
"""Raised to stop the event loop."""
def _check_resolved_address(sock, address):
# Ensure that the address is already resolved to avoid the trap of hanging
# the entire event loop when the address requires doing a DNS lookup.
family = sock.family
if family == socket.AF_INET:
host, port = address
elif family == socket.AF_INET6:
host, port = address[:2]
else:
return
type_mask = 0
if hasattr(socket, 'SOCK_NONBLOCK'):
type_mask |= socket.SOCK_NONBLOCK
if hasattr(socket, 'SOCK_CLOEXEC'):
type_mask |= socket.SOCK_CLOEXEC
# Use getaddrinfo(AI_NUMERICHOST) to ensure that the address is
# already resolved.
try:
socket.getaddrinfo(host, port,
family=family,
type=(sock.type & ~type_mask),
proto=sock.proto,
flags=socket.AI_NUMERICHOST)
except socket.gaierror as err:
raise ValueError("address must be resolved (IP address), got %r: %s"
% (address, err))
def _raise_stop_error(*args):
raise _StopError
class Server(events.AbstractServer):
def __init__(self, loop, sockets):
self.loop = loop
self.sockets = sockets
self.active_count = 0
self.waiters = []
def attach(self, transport):
assert self.sockets is not None
self.active_count += 1
def detach(self, transport):
assert self.active_count > 0
self.active_count -= 1
if self.active_count == 0 and self.sockets is None:
self._wakeup()
def close(self):
sockets = self.sockets
if sockets is not None:
self.sockets = None
for sock in sockets:
self.loop._stop_serving(sock)
if self.active_count == 0:
self._wakeup()
def _wakeup(self):
waiters = self.waiters
self.waiters = None
for waiter in waiters:
if not waiter.done():
waiter.set_result(waiter)
@tasks.coroutine
def wait_closed(self):
if self.sockets is None or self.waiters is None:
return
waiter = futures.Future(loop=self.loop)
self.waiters.append(waiter)
yield from waiter
class BaseEventLoop(events.AbstractEventLoop):
def __init__(self):
self._ready = collections.deque()
self._scheduled = []
self._default_executor = None
self._internal_fds = 0
self._running = False
self._clock_resolution = time.get_clock_info('monotonic').resolution
self._exception_handler = None
self._debug = False
def _make_socket_transport(self, sock, protocol, waiter=None, *,
extra=None, server=None):
"""Create socket transport."""
raise NotImplementedError
def _make_ssl_transport(self, rawsock, protocol, sslcontext, waiter, *,
server_side=False, server_hostname=None,
extra=None, server=None):
"""Create SSL transport."""
raise NotImplementedError
def _make_datagram_transport(self, sock, protocol,
address=None, extra=None):
"""Create datagram transport."""
raise NotImplementedError
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create read pipe transport."""
raise NotImplementedError
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
"""Create write pipe transport."""
raise NotImplementedError
@tasks.coroutine
def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
"""Create subprocess transport."""
raise NotImplementedError
def _read_from_self(self):
"""XXX"""
raise NotImplementedError
def _write_to_self(self):
"""XXX"""
raise NotImplementedError
def _process_events(self, event_list):
"""Process selector events."""
raise NotImplementedError
def run_forever(self):
"""Run until stop() is called."""
if self._running:
raise RuntimeError('Event loop is running.')
self._running = True
try:
while True:
try:
self._run_once()
except _StopError:
break
finally:
self._running = False
def run_until_complete(self, future):
"""Run until the Future is done.
If the argument is a coroutine, it is wrapped in a Task.
XXX TBD: It would be disastrous to call run_until_complete()
with the same coroutine twice -- it would wrap it in two
different Tasks and that can't be good.
Return the Future's result, or raise its exception.
"""
future = tasks.async(future, loop=self)
future.add_done_callback(_raise_stop_error)
self.run_forever()
future.remove_done_callback(_raise_stop_error)
if not future.done():
raise RuntimeError('Event loop stopped before Future completed.')
return future.result()
def stop(self):
"""Stop running the event loop.
Every callback scheduled before stop() is called will run.
Callbacks scheduled after stop() is called won't run. However,
those callbacks will run if run_forever() is called again later.
"""
self.call_soon(_raise_stop_error)
def close(self):
"""Close the event loop.
This clears the queues and shuts down the executor,
but does not wait for the executor to finish.
"""
self._ready.clear()
self._scheduled.clear()
executor = self._default_executor
if executor is not None:
self._default_executor = None
executor.shutdown(wait=False)
def is_running(self):
"""Returns running status of event loop."""
return self._running
def time(self):
"""Return the time according to the event loop's clock."""
return time.monotonic()
def call_later(self, delay, callback, *args):
"""Arrange for a callback to be called at a given time.
Return a Handle: an opaque object with a cancel() method that
can be used to cancel the call.
The delay can be an int or float, expressed in seconds. It is
always a relative time.
Each callback will be called exactly once. If two callbacks
are scheduled for exactly the same time, it is undefined which
will be called first.
Any positional arguments after the callback will be passed to
the callback when it is called.
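A hedged usage sketch (assumes `loop` is a running event loop and
`callback` is defined):

    handle = loop.call_later(5.0, callback, arg)
    handle.cancel()  # cancels the call before it fires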
"""
return self.call_at(self.time() + delay, callback, *args)
def call_at(self, when, callback, *args):
"""Like call_later(), but uses an absolute time."""
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_at()")
timer = events.TimerHandle(when, callback, args, self)
heapq.heappush(self._scheduled, timer)
return timer
def call_soon(self, callback, *args):
"""Arrange for a callback to be called as soon as possible.
This operates as a FIFO queue: callbacks are called in the
order in which they are registered. Each callback will be
called exactly once.
Any positional arguments after the callback will be passed to
the callback when it is called.
"""
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with call_soon()")
handle = events.Handle(callback, args, self)
self._ready.append(handle)
return handle
def call_soon_threadsafe(self, callback, *args):
"""XXX"""
handle = self.call_soon(callback, *args)
self._write_to_self()
return handle
def run_in_executor(self, executor, callback, *args):
if tasks.iscoroutinefunction(callback):
raise TypeError("coroutines cannot be used with run_in_executor()")
if isinstance(callback, events.Handle):
assert not args
assert not isinstance(callback, events.TimerHandle)
if callback._cancelled:
f = futures.Future(loop=self)
f.set_result(None)
return f
callback, args = callback._callback, callback._args
if executor is None:
executor = self._default_executor
if executor is None:
executor = concurrent.futures.ThreadPoolExecutor(_MAX_WORKERS)
self._default_executor = executor
return futures.wrap_future(executor.submit(callback, *args), loop=self)
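# Illustrative use (assumed names): offload a blocking call to the default
# thread pool and wait for its result from a coroutine:
#
#     result = yield from loop.run_in_executor(None, blocking_fn, arg)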
def set_default_executor(self, executor):
self._default_executor = executor
def getaddrinfo(self, host, port, *,
family=0, type=0, proto=0, flags=0):
return self.run_in_executor(None, socket.getaddrinfo,
host, port, family, type, proto, flags)
def getnameinfo(self, sockaddr, flags=0):
return self.run_in_executor(None, socket.getnameinfo, sockaddr, flags)
@tasks.coroutine
def create_connection(self, protocol_factory, host=None, port=None, *,
ssl=None, family=0, proto=0, flags=0, sock=None,
local_addr=None, server_hostname=None):
"""XXX"""
if server_hostname is not None and not ssl:
raise ValueError('server_hostname is only meaningful with ssl')
if server_hostname is None and ssl:
# Use host as default for server_hostname. It is an error
# if host is empty or not set, e.g. when an
# already-connected socket was passed or when only a port
# is given. To avoid this error, you can pass
# server_hostname='' -- this will bypass the hostname
# check. (This also means that if host is a numeric
# IP/IPv6 address, we will attempt to verify that exact
# address; this will probably fail, but it is possible to
# create a certificate for a specific IP address, so we
# don't judge it here.)
if not host:
raise ValueError('You must set server_hostname '
'when using ssl without a host')
server_hostname = host
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
f1 = self.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
fs = [f1]
if local_addr is not None:
f2 = self.getaddrinfo(
*local_addr, family=family,
type=socket.SOCK_STREAM, proto=proto, flags=flags)
fs.append(f2)
else:
f2 = None
yield from tasks.wait(fs, loop=self)
infos = f1.result()
if not infos:
raise OSError('getaddrinfo() returned empty list')
if f2 is not None:
laddr_infos = f2.result()
if not laddr_infos:
raise OSError('getaddrinfo() returned empty list')
exceptions = []
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
if f2 is not None:
for _, _, _, _, laddr in laddr_infos:
try:
sock.bind(laddr)
break
except OSError as exc:
exc = OSError(
exc.errno, 'error while '
'attempting to bind on address '
'{!r}: {}'.format(
laddr, exc.strerror.lower()))
exceptions.append(exc)
else:
sock.close()
sock = None
continue
yield from self.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
else:
# If they all have the same str(), raise one.
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
# Raise a combined exception so the user can see all
# the various error messages.
raise OSError('Multiple exceptions: {}'.format(
', '.join(str(exc) for exc in exceptions)))
elif sock is None:
raise ValueError(
'host and port were not specified and no sock was specified')
sock.setblocking(False)
transport, protocol = yield from self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname)
return transport, protocol
@tasks.coroutine
def _create_connection_transport(self, sock, protocol_factory, ssl,
server_hostname):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
if ssl:
sslcontext = None if isinstance(ssl, bool) else ssl
transport = self._make_ssl_transport(
sock, protocol, sslcontext, waiter,
server_side=False, server_hostname=server_hostname)
else:
transport = self._make_socket_transport(sock, protocol, waiter)
yield from waiter
return transport, protocol
@tasks.coroutine
def create_datagram_endpoint(self, protocol_factory,
local_addr=None, remote_addr=None, *,
family=0, proto=0, flags=0):
"""Create datagram connection."""
if not (local_addr or remote_addr):
if family == 0:
raise ValueError('unexpected address family')
addr_pairs_info = (((family, proto), (None, None)),)
else:
# join address infos by (family, protocol)
addr_infos = collections.OrderedDict()
for idx, addr in ((0, local_addr), (1, remote_addr)):
if addr is not None:
assert isinstance(addr, tuple) and len(addr) == 2, (
'2-tuple is expected')
infos = yield from self.getaddrinfo(
*addr, family=family, type=socket.SOCK_DGRAM,
proto=proto, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
for fam, _, pro, _, address in infos:
key = (fam, pro)
if key not in addr_infos:
addr_infos[key] = [None, None]
addr_infos[key][idx] = address
# each addr has to have info for each (family, proto) pair
addr_pairs_info = [
(key, addr_pair) for key, addr_pair in addr_infos.items()
if not ((local_addr and addr_pair[0] is None) or
(remote_addr and addr_pair[1] is None))]
if not addr_pairs_info:
raise ValueError('can not get address information')
exceptions = []
for ((family, proto),
(local_address, remote_address)) in addr_pairs_info:
sock = None
r_addr = None
try:
sock = socket.socket(
family=family, type=socket.SOCK_DGRAM, proto=proto)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(False)
if local_addr:
sock.bind(local_address)
if remote_addr:
yield from self.sock_connect(sock, remote_address)
r_addr = remote_address
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
else:
break
else:
raise exceptions[0]
protocol = protocol_factory()
transport = self._make_datagram_transport(sock, protocol, r_addr)
return transport, protocol
@tasks.coroutine
def create_server(self, protocol_factory, host=None, port=None,
*,
family=socket.AF_UNSPEC,
flags=socket.AI_PASSIVE,
sock=None,
backlog=100,
ssl=None,
reuse_address=None):
"""XXX"""
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if host is not None or port is not None:
if sock is not None:
raise ValueError(
'host/port and sock can not be specified at the same time')
AF_INET6 = getattr(socket, 'AF_INET6', 0)
if reuse_address is None:
reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
sockets = []
if host == '':
host = None
infos = yield from self.getaddrinfo(
host, port, family=family,
type=socket.SOCK_STREAM, proto=0, flags=flags)
if not infos:
raise OSError('getaddrinfo() returned empty list')
completed = False
try:
for res in infos:
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
except socket.error:
# Assume it's a bad family/type/protocol combination.
continue
sockets.append(sock)
if reuse_address:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,
True)
# Disable IPv4/IPv6 dual stack support (enabled by
# default on Linux) which makes a single socket
# listen on both address families.
if af == AF_INET6 and hasattr(socket, 'IPPROTO_IPV6'):
sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_V6ONLY,
True)
try:
sock.bind(sa)
except OSError as err:
raise OSError(err.errno, 'error while attempting '
'to bind on address %r: %s'
% (sa, err.strerror.lower()))
completed = True
finally:
if not completed:
for sock in sockets:
sock.close()
else:
if sock is None:
raise ValueError(
'host and port were not specified and no sock was specified')
sockets = [sock]
server = Server(self, sockets)
for sock in sockets:
sock.listen(backlog)
sock.setblocking(False)
self._start_serving(protocol_factory, sock, ssl, server)
return server
@tasks.coroutine
def connect_read_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_read_pipe_transport(pipe, protocol, waiter)
yield from waiter
return transport, protocol
@tasks.coroutine
def connect_write_pipe(self, protocol_factory, pipe):
protocol = protocol_factory()
waiter = futures.Future(loop=self)
transport = self._make_write_pipe_transport(pipe, protocol, waiter)
yield from waiter
return transport, protocol
@tasks.coroutine
def subprocess_shell(self, protocol_factory, cmd, *, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
universal_newlines=False, shell=True, bufsize=0,
**kwargs):
if not isinstance(cmd, (bytes, str)):
raise ValueError("cmd must be a string")
if universal_newlines:
raise ValueError("universal_newlines must be False")
if not shell:
raise ValueError("shell must be True")
if bufsize != 0:
raise ValueError("bufsize must be 0")
protocol = protocol_factory()
transport = yield from self._make_subprocess_transport(
protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
return transport, protocol
@tasks.coroutine
def subprocess_exec(self, protocol_factory, program, *args,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=False,
shell=False, bufsize=0, **kwargs):
if universal_newlines:
raise ValueError("universal_newlines must be False")
if shell:
raise ValueError("shell must be False")
if bufsize != 0:
raise ValueError("bufsize must be 0")
popen_args = (program,) + args
for arg in popen_args:
if not isinstance(arg, (str, bytes)):
raise TypeError("program arguments must be "
"a bytes or text string, not %s"
% type(arg).__name__)
protocol = protocol_factory()
transport = yield from self._make_subprocess_transport(
protocol, popen_args, False, stdin, stdout, stderr,
bufsize, **kwargs)
return transport, protocol
def set_exception_handler(self, handler):
"""Set handler as the new event loop exception handler.
If handler is None, the default exception handler will
be set.
If handler is a callable object, it should have a
matching signature to '(loop, context)', where 'loop'
will be a reference to the active event loop, 'context'
will be a dict object (see `call_exception_handler()`
documentation for details about context).
"""
if handler is not None and not callable(handler):
raise TypeError('A callable object or None is expected, '
'got {!r}'.format(handler))
self._exception_handler = handler
def default_exception_handler(self, context):
"""Default exception handler.
This is called when an exception occurs and no exception
handler is set, and can be called by a custom exception
handler that wants to defer to the default behavior.
context parameter has the same meaning as in
`call_exception_handler()`.
"""
message = context.get('message')
if not message:
message = 'Unhandled exception in event loop'
exception = context.get('exception')
if exception is not None:
exc_info = (type(exception), exception, exception.__traceback__)
else:
exc_info = False
log_lines = [message]
for key in sorted(context):
if key in {'message', 'exception'}:
continue
log_lines.append('{}: {!r}'.format(key, context[key]))
logger.error('\n'.join(log_lines), exc_info=exc_info)
def call_exception_handler(self, context):
"""Call the current event loop exception handler.
context is a dict object containing the following keys
(new keys may be introduced later):
- 'message': Error message;
- 'exception' (optional): Exception object;
- 'future' (optional): Future instance;
- 'handle' (optional): Handle instance;
- 'protocol' (optional): Protocol instance;
- 'transport' (optional): Transport instance;
- 'socket' (optional): Socket instance.
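An illustrative context dict (hypothetical values):

    {'message': 'Task exception was never retrieved',
     'exception': exc, 'future': fut}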
Note: this method should not be overridden in subclassed
event loops. For any custom exception handling, use
`set_exception_handler()` method.
"""
if self._exception_handler is None:
try:
self.default_exception_handler(context)
except Exception:
# Second protection layer for unexpected errors
# in the default implementation, as well as for subclassed
# event loops with overridden "default_exception_handler".
logger.error('Exception in default exception handler',
exc_info=True)
else:
try:
self._exception_handler(self, context)
except Exception as exc:
# Exception in the user set custom exception handler.
try:
# Let's try default handler.
self.default_exception_handler({
'message': 'Unhandled error in exception handler',
'exception': exc,
'context': context,
})
except Exception:
# Guard 'default_exception_handler' in case it's
# overridden.
logger.error('Exception in default exception handler '
'while handling an unexpected error '
'in custom exception handler',
exc_info=True)
def _add_callback(self, handle):
"""Add a Handle to ready or scheduled."""
assert isinstance(handle, events.Handle), 'A Handle is required here'
if handle._cancelled:
return
if isinstance(handle, events.TimerHandle):
heapq.heappush(self._scheduled, handle)
else:
self._ready.append(handle)
def _add_callback_signalsafe(self, handle):
"""Like _add_callback() but called from a signal handler."""
self._add_callback(handle)
self._write_to_self()
def _run_once(self):
"""Run one full iteration of the event loop.
This calls all currently ready callbacks, polls for I/O,
schedules the resulting callbacks, and finally schedules
'call_later' callbacks.
"""
# Remove delayed calls that were cancelled from head of queue.
while self._scheduled and self._scheduled[0]._cancelled:
heapq.heappop(self._scheduled)
timeout = None
if self._ready:
timeout = 0
elif self._scheduled:
# Compute the desired timeout.
when = self._scheduled[0]._when
deadline = max(0, when - self.time())
if timeout is None:
timeout = deadline
else:
timeout = min(timeout, deadline)
# TODO: Instrumentation only in debug mode?
if logger.isEnabledFor(logging.INFO):
t0 = self.time()
event_list = self._selector.select(timeout)
t1 = self.time()
if t1-t0 >= 1:
level = logging.INFO
else:
level = logging.DEBUG
if timeout is not None:
logger.log(level, 'poll %.3f took %.3f seconds',
timeout, t1-t0)
else:
logger.log(level, 'poll took %.3f seconds', t1-t0)
else:
event_list = self._selector.select(timeout)
self._process_events(event_list)
# Handle 'later' callbacks that are ready.
end_time = self.time() + self._clock_resolution
while self._scheduled:
handle = self._scheduled[0]
if handle._when >= end_time:
break
handle = heapq.heappop(self._scheduled)
self._ready.append(handle)
# This is the only place where callbacks are actually *called*.
# All other places just add them to ready.
# Note: We run all currently scheduled callbacks, but not any
# callbacks scheduled by callbacks run this time around --
# they will be run the next time (after another I/O poll).
# Use an idiom that is threadsafe without using locks.
ntodo = len(self._ready)
for i in range(ntodo):
handle = self._ready.popleft()
if not handle._cancelled:
handle._run()
handle = None # Needed to break cycles when an exception occurs.
def get_debug(self):
return self._debug
def set_debug(self, enabled):
self._debug = enabled
| apache-2.0 |
KyleJamesWalker/ansible | test/runner/lib/manage_ci.py | 49 | 6035 | """Access Ansible Core CI remote services."""
from __future__ import absolute_import, print_function
import pipes
from time import sleep
import lib.pytar
from lib.util import (
SubprocessError,
ApplicationError,
run_command,
)
from lib.core_ci import (
AnsibleCoreCI,
)
from lib.ansible_util import (
ansible_environment,
)
class ManageWindowsCI(object):
"""Manage access to a Windows instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
def wait(self):
"""Wait for instance to respond to ansible ping."""
extra_vars = [
'ansible_connection=winrm',
'ansible_host=%s' % self.core_ci.connection.hostname,
'ansible_user=%s' % self.core_ci.connection.username,
'ansible_password=%s' % self.core_ci.connection.password,
'ansible_port=%s' % self.core_ci.connection.port,
'ansible_winrm_server_cert_validation=ignore',
]
name = 'windows_%s' % self.core_ci.version
env = ansible_environment(self.core_ci.args)
cmd = ['ansible', '-m', 'win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
for _ in range(1, 120):
try:
run_command(self.core_ci.args, cmd, env=env)
return
except SubprocessError:
sleep(10)
continue
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
class ManageNetworkCI(object):
"""Manage access to a network instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
def wait(self):
"""Wait for instance to respond to ansible ping."""
extra_vars = [
'ansible_host=%s' % self.core_ci.connection.hostname,
'ansible_user=%s' % self.core_ci.connection.username,
'ansible_port=%s' % self.core_ci.connection.port,
'ansible_connection=local',
'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key,
'ansible_network_os=%s' % self.core_ci.platform,
]
name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '_'))
env = ansible_environment(self.core_ci.args)
cmd = ['ansible', '-m', 'net_command', '-a', '?', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
for _ in range(1, 90):
try:
run_command(self.core_ci.args, cmd, env=env)
return
except SubprocessError:
sleep(10)
continue
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
class ManagePosixCI(object):
"""Manage access to a POSIX instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
self.ssh_args = ['-o', 'BatchMode=yes', '-o', 'StrictHostKeyChecking=no', '-i', self.core_ci.ssh_key.key]
if self.core_ci.platform == 'freebsd':
self.become = ['su', '-l', 'root', '-c']
elif self.core_ci.platform == 'osx':
self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH']
else:
    # Assumed default so ssh() below never hits a missing attribute.
    self.become = []
def setup(self):
"""Start instance and wait for it to become ready and respond to an ansible ping."""
self.wait()
self.configure()
self.upload_source()
def wait(self):
"""Wait for instance to respond to SSH."""
for _ in range(1, 90):
try:
self.ssh('id')
return
except SubprocessError:
sleep(10)
continue
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
def configure(self):
"""Configure remote host for testing."""
self.upload('test/runner/setup/remote.sh', '/tmp')
self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s' % self.core_ci.platform)
def upload_source(self):
"""Upload and extract source."""
if not self.core_ci.args.explain:
lib.pytar.create_tarfile('/tmp/ansible.tgz', '.', lib.pytar.ignore)
self.upload('/tmp/ansible.tgz', '/tmp')
self.ssh('rm -rf ~/ansible && mkdir ~/ansible && cd ~/ansible && tar oxzf /tmp/ansible.tgz')
def download(self, remote, local):
"""
:type remote: str
:type local: str
"""
self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
def upload(self, local, remote):
"""
:type local: str
:type remote: str
"""
self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
def ssh(self, command):
"""
:type command: str | list[str]
"""
if isinstance(command, list):
command = ' '.join(pipes.quote(c) for c in command)
run_command(self.core_ci.args,
['ssh', '-tt', '-q'] + self.ssh_args +
['-p', str(self.core_ci.connection.port),
'%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
self.become + [pipes.quote(command)])
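# Illustrative use (hypothetical core_ci instance): run a command remotely,
# quoting each argument:
#
#     ManagePosixCI(core_ci).ssh(['uname', '-a'])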
def scp(self, src, dst):
"""
:type src: str
:type dst: str
"""
run_command(self.core_ci.args,
['scp'] + self.ssh_args +
['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst])
| gpl-3.0 |
liberatorqjw/scikit-learn | sklearn/feature_extraction/tests/test_image.py | 14 | 10178 | # Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import scipy as sp
from scipy import ndimage
from nose.tools import assert_equal, assert_true
from numpy.testing import assert_raises
from ..image import img_to_graph, grid_to_graph
from ..image import (extract_patches_2d, reconstruct_from_patches_2d,
PatchExtractor, extract_patches)
from ...utils.graph import connected_components
def test_img_to_graph():
x, y = np.mgrid[:4, :4] - 10
grad_x = img_to_graph(x)
grad_y = img_to_graph(y)
assert_equal(grad_x.nnz, grad_y.nnz)
# Negative elements are the diagonal: the elements of the original
# image. Positive elements are the values of the gradient, they
# should all be equal on grad_x and grad_y
np.testing.assert_array_equal(grad_x.data[grad_x.data > 0],
grad_y.data[grad_y.data > 0])
def test_grid_to_graph():
# Checking that the function works with graphs containing no edges
size = 2
roi_size = 1
# Generating two convex parts with one vertex
# Thus, edges will be empty in _to_graph
mask = np.zeros((size, size), dtype=np.bool)
mask[0:roi_size, 0:roi_size] = True
mask[-roi_size:, -roi_size:] = True
mask = mask.reshape(size ** 2)
A = grid_to_graph(n_x=size, n_y=size, mask=mask, return_as=np.ndarray)
assert_true(connected_components(A)[0] == 2)
# Checking that the function works whatever the type of mask is
mask = np.ones((size, size), dtype=np.int16)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask)
assert_true(connected_components(A)[0] == 1)
# Checking dtype of the graph
mask = np.ones((size, size))
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.bool)
assert_true(A.dtype == np.bool)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.int)
assert_true(A.dtype == np.int)
A = grid_to_graph(n_x=size, n_y=size, n_z=size, mask=mask, dtype=np.float)
assert_true(A.dtype == np.float)
def test_connect_regions():
lena = sp.misc.lena()
for thr in (50, 150):
mask = lena > thr
graph = img_to_graph(lena, mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def test_connect_regions_with_grid():
lena = sp.misc.lena()
mask = lena > 50
graph = grid_to_graph(*lena.shape, mask=mask)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
mask = lena > 150
graph = grid_to_graph(*lena.shape, mask=mask, dtype=None)
assert_equal(ndimage.label(mask)[1], connected_components(graph)[0])
def _downsampled_lena():
lena = sp.misc.lena().astype(np.float32)
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = (lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2]
+ lena[1::2, 1::2])
lena = lena.astype(np.float)
lena /= 16.0
return lena
def _orange_lena(lena=None):
lena = _downsampled_lena() if lena is None else lena
lena_color = np.zeros(lena.shape + (3,))
lena_color[:, :, 0] = 256 - lena
lena_color[:, :, 1] = 256 - lena / 2
lena_color[:, :, 2] = 256 - lena / 4
return lena_color
def _make_images(lena=None):
lena = _downsampled_lena() if lena is None else lena
# make a collection of lenas
images = np.zeros((3,) + lena.shape)
images[0] = lena
images[1] = lena + 1
images[2] = lena + 2
return images
downsampled_lena = _downsampled_lena()
orange_lena = _orange_lena(downsampled_lena)
lena_collection = _make_images(downsampled_lena)
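# Sliding-window patch count used throughout these tests: an image of shape
# (i_h, i_w) with patches of shape (p_h, p_w) yields
# (i_h - p_h + 1) * (i_w - p_w + 1) fully contained patches; e.g. a 4x4
# image with 2x2 patches gives 3 * 3 = 9.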
def test_extract_patches_all():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_all_color():
lena = orange_lena
i_h, i_w = lena.shape[:2]
p_h, p_w = 16, 16
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_all_rect():
lena = downsampled_lena
lena = lena[:, 32:97]
i_h, i_w = lena.shape
p_h, p_w = 16, 12
expected_n_patches = (i_h - p_h + 1) * (i_w - p_w + 1)
patches = extract_patches_2d(lena, (p_h, p_w))
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
def test_extract_patches_max_patches():
lena = downsampled_lena
i_h, i_w = lena.shape
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=100)
assert_equal(patches.shape, (100, p_h, p_w))
expected_n_patches = int(0.5 * (i_h - p_h + 1) * (i_w - p_w + 1))
patches = extract_patches_2d(lena, (p_h, p_w), max_patches=0.5)
assert_equal(patches.shape, (expected_n_patches, p_h, p_w))
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=2.0)
assert_raises(ValueError, extract_patches_2d, lena, (p_h, p_w),
max_patches=-1.0)
def test_reconstruct_patches_perfect():
lena = downsampled_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_reconstruct_patches_perfect_color():
lena = orange_lena
p_h, p_w = 16, 16
patches = extract_patches_2d(lena, (p_h, p_w))
lena_reconstructed = reconstruct_from_patches_2d(patches, lena.shape)
np.testing.assert_array_equal(lena, lena_reconstructed)
def test_patch_extractor_fit():
lenas = lena_collection
extr = PatchExtractor(patch_size=(8, 8), max_patches=100, random_state=0)
assert_true(extr == extr.fit(lenas))
def test_patch_extractor_max_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
max_patches = 100
expected_n_patches = len(lenas) * max_patches
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
max_patches = 0.5
expected_n_patches = len(lenas) * int((i_h - p_h + 1) * (i_w - p_w + 1)
* max_patches)
extr = PatchExtractor(patch_size=(p_h, p_w), max_patches=max_patches,
random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_max_patches_default():
lenas = lena_collection
extr = PatchExtractor(max_patches=100, random_state=0)
patches = extr.transform(lenas)
assert_equal(patches.shape, (len(lenas) * 100, 12, 12))
def test_patch_extractor_all_patches():
lenas = lena_collection
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w))
def test_patch_extractor_color():
lenas = _make_images(orange_lena)
i_h, i_w = lenas.shape[1:3]
p_h, p_w = 8, 8
expected_n_patches = len(lenas) * (i_h - p_h + 1) * (i_w - p_w + 1)
extr = PatchExtractor(patch_size=(p_h, p_w), random_state=0)
patches = extr.transform(lenas)
assert_true(patches.shape == (expected_n_patches, p_h, p_w, 3))
def test_extract_patches_strided():
image_shapes_1D = [(10,), (10,), (11,), (10,)]
patch_sizes_1D = [(1,), (2,), (3,), (8,)]
patch_steps_1D = [(1,), (1,), (4,), (2,)]
expected_views_1D = [(10,), (9,), (3,), (2,)]
last_patch_1D = [(10,), (8,), (8,), (2,)]
image_shapes_2D = [(10, 20), (10, 20), (10, 20), (11, 20)]
patch_sizes_2D = [(2, 2), (10, 10), (10, 11), (6, 6)]
patch_steps_2D = [(5, 5), (3, 10), (3, 4), (4, 2)]
expected_views_2D = [(2, 4), (1, 2), (1, 3), (2, 8)]
last_patch_2D = [(5, 15), (0, 10), (0, 8), (4, 14)]
image_shapes_3D = [(5, 4, 3), (3, 3, 3), (7, 8, 9), (7, 8, 9)]
patch_sizes_3D = [(2, 2, 3), (2, 2, 2), (1, 7, 3), (1, 3, 3)]
patch_steps_3D = [(1, 2, 10), (1, 1, 1), (2, 1, 3), (3, 3, 4)]
expected_views_3D = [(4, 2, 1), (2, 2, 2), (4, 2, 3), (3, 2, 2)]
last_patch_3D = [(3, 2, 0), (1, 1, 1), (6, 1, 6), (6, 3, 4)]
image_shapes = image_shapes_1D + image_shapes_2D + image_shapes_3D
patch_sizes = patch_sizes_1D + patch_sizes_2D + patch_sizes_3D
patch_steps = patch_steps_1D + patch_steps_2D + patch_steps_3D
expected_views = expected_views_1D + expected_views_2D + expected_views_3D
last_patches = last_patch_1D + last_patch_2D + last_patch_3D
for (image_shape, patch_size, patch_step, expected_view,
last_patch) in zip(image_shapes, patch_sizes, patch_steps,
expected_views, last_patches):
image = np.arange(np.prod(image_shape)).reshape(image_shape)
patches = extract_patches(image, patch_shape=patch_size,
extraction_step=patch_step)
ndim = len(image_shape)
assert_true(patches.shape[:ndim] == expected_view)
last_patch_slices = [slice(i, i + j, None) for i, j in
zip(last_patch, patch_size)]
assert_true((patches[[slice(-1, None, None)] * ndim] ==
image[last_patch_slices].squeeze()).all())
def test_extract_patches_square():
# test same patch size for all dimensions
lena = downsampled_lena
i_h, i_w = lena.shape
p = 8
expected_n_patches = ((i_h - p + 1), (i_w - p + 1))
patches = extract_patches(lena, patch_shape=p)
assert_true(patches.shape == (expected_n_patches[0], expected_n_patches[1],
p, p))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
amarian12/e-pool.net | nattraverso/pynupnp/__init__.py | 288 | 1088 | """
This package offers ways to retrieve IP addresses of the machine, and map ports
through UPnP devices.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
from nattraverso.pynupnp.upnp import search_upnp_device, UPnPMapper
def get_external_ip():
"""
Returns a deferred which will be called with the WAN ip address
retrieved through UPnP. The IP is a string of the form "x.x.x.x"
@return: A deferred called with the external ip address of this host
@rtype: L{twisted.internet.defer.Deferred}
"""
return search_upnp_device().addCallback(lambda x: x.get_external_ip())
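# Illustrative use under Twisted (assumes a reactor is running):
#
#     def show_ip(ip):
#         print "External IP: %s" % ip
#     get_external_ip().addCallback(show_ip)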
def get_port_mapper():
"""
Returns a deferred which will be called with a L{UPnPMapper} instance.
This is a L{nattraverso.portmapper.NATMapper} implementation.
@return: A deferred called with the L{UPnPMapper} instance.
@rtype: L{twisted.internet.defer.Deferred}
"""
return search_upnp_device().addCallback(lambda x: UPnPMapper(x))
| gpl-3.0 |
spisneha25/django | django/contrib/admindocs/views.py | 146 | 15441 | import inspect
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.admindocs import utils
from django.core import urlresolvers
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.db import models
from django.http import Http404
from django.template.engine import Engine
from django.utils.decorators import method_decorator
from django.utils.inspect import func_has_no_args
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
"""
Base view for admindocs views.
"""
@method_decorator(staff_member_required)
def dispatch(self, request, *args, **kwargs):
if not utils.docutils_is_available:
# Display an error message for people without docutils
self.template_name = 'admin_doc/missing_docutils.html'
return self.render_to_response(admin.site.each_context(request))
return super(BaseAdminDocsView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs.update({'root_path': urlresolvers.reverse('admin:index')})
kwargs.update(admin.site.each_context(self.request))
return super(BaseAdminDocsView, self).get_context_data(**kwargs)
class BookmarkletsView(BaseAdminDocsView):
template_name = 'admin_doc/bookmarklets.html'
def get_context_data(self, **kwargs):
context = super(BookmarkletsView, self).get_context_data(**kwargs)
context.update({
'admin_url': "%s://%s%s" % (
self.request.scheme, self.request.get_host(), context['root_path'])
})
return context
class TemplateTagIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_tag_index.html'
def get_context_data(self, **kwargs):
tags = []
try:
engine = Engine.get_default()
except ImproperlyConfigured:
# Non-trivial TEMPLATES settings aren't supported (#24125).
pass
else:
app_libs = sorted(engine.template_libraries.items())
builtin_libs = [('', lib) for lib in engine.template_builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'tags': tags})
return super(TemplateTagIndexView, self).get_context_data(**kwargs)
class TemplateFilterIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_filter_index.html'
def get_context_data(self, **kwargs):
filters = []
try:
engine = Engine.get_default()
except ImproperlyConfigured:
# Non-trivial TEMPLATES settings aren't supported (#24125).
pass
else:
app_libs = sorted(engine.template_libraries.items())
builtin_libs = [('', lib) for lib in engine.template_builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'filters': filters})
return super(TemplateFilterIndexView, self).get_context_data(**kwargs)
class ViewIndexView(BaseAdminDocsView):
template_name = 'admin_doc/view_index.html'
def get_context_data(self, **kwargs):
views = []
urlconf = import_module(settings.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, namespace, name) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'url': simplify_regex(regex),
'url_name': ':'.join((namespace or []) + (name and [name] or [])),
'namespace': ':'.join((namespace or [])),
'name': name,
})
kwargs.update({'views': views})
return super(ViewIndexView, self).get_context_data(**kwargs)
class ViewDetailView(BaseAdminDocsView):
template_name = 'admin_doc/view_detail.html'
def get_context_data(self, **kwargs):
view = self.kwargs['view']
urlconf = urlresolvers.get_urlconf()
if urlresolvers.get_resolver(urlconf)._is_callback(view):
mod, func = urlresolvers.get_mod_func(view)
view_func = getattr(import_module(mod), func)
else:
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
kwargs.update({
'name': view,
'summary': title,
'body': body,
'meta': metadata,
})
return super(ViewDetailView, self).get_context_data(**kwargs)
class ModelIndexView(BaseAdminDocsView):
template_name = 'admin_doc/model_index.html'
def get_context_data(self, **kwargs):
m_list = [m._meta for m in apps.get_models()]
kwargs.update({'models': m_list})
return super(ModelIndexView, self).get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
template_name = 'admin_doc/model_detail.html'
def get_context_data(self, **kwargs):
model_name = self.kwargs['model_name']
# Get the model class.
try:
app_config = apps.get_app_config(self.kwargs['app_label'])
except LookupError:
raise Http404(_("App %(app_label)r not found") % self.kwargs)
try:
model = app_config.get_model(model_name)
except LookupError:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
opts = model._meta
title, body, metadata = utils.parse_docstring(model.__doc__)
if title:
title = utils.parse_rst(title, 'model', _('model:') + model_name)
if body:
body = utils.parse_rst(body, 'model', _('model:') + model_name)
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.remote_field.model.__name__
app_label = field.remote_field.model._meta.app_label
verbose = utils.parse_rst(
(_("the related `%(app_label)s.%(data_type)s` object") % {
'app_label': app_label, 'data_type': data_type,
}),
'model',
_('model:') + data_type,
)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.remote_field.model.__name__
app_label = field.remote_field.model._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': app_label,
'object_name': data_type,
}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % field.name,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if inspect.isfunction(func) and func_has_no_args(func):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.related_objects:
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': rel.related_model._meta.app_label,
'object_name': rel.related_model._meta.object_name,
}
accessor = rel.get_accessor_name()
fields.append({
'name': "%s.all" % accessor,
'data_type': 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % accessor,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
kwargs.update({
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': title,
'description': body,
'fields': fields,
})
return super(ModelDetailView, self).get_context_data(**kwargs)
class TemplateDetailView(BaseAdminDocsView):
template_name = 'admin_doc/template_detail.html'
def get_context_data(self, **kwargs):
template = self.kwargs['template']
templates = []
try:
default_engine = Engine.get_default()
except ImproperlyConfigured:
# Non-trivial TEMPLATES settings aren't supported (#24125).
pass
else:
# This doesn't account for template loaders (#24128).
for index, directory in enumerate(default_engine.dirs):
template_file = os.path.join(directory, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '',
'order': index,
})
kwargs.update({
'name': template,
'templates': templates,
})
return super(TemplateDetailView, self).get_context_data(**kwargs)
####################
# Helper functions #
####################
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
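# Illustrative interpolation (hypothetical field): a CharField whose
# description is 'String (up to %(max_length)s)' renders as
# 'String (up to 100)' when field.max_length == 100.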
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a four-tuple: (view_func, regex, namespace, name)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(
patterns,
base + p.regex.pattern,
(namespace or []) + (p.namespace and [p.namespace] or [])
))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern,
namespace, p.name))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
| bsd-3-clause |
froyobin/ceilometer | ceilometer/tests/ipmi/pollsters/test_sensor.py | 6 | 4747 | # Copyright 2014 Intel Corp.
#
# Author: Zhai Edwin <edwin.zhai@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from ceilometer.ipmi.pollsters import sensor
from ceilometer.tests.ipmi.notifications import ipmi_test_data
from ceilometer.tests.ipmi.pollsters import base
CONF = cfg.CONF
CONF.import_opt('host', 'ceilometer.service')
TEMPERATURE_SENSOR_DATA = {
'Temperature': ipmi_test_data.TEMPERATURE_DATA
}
CURRENT_SENSOR_DATA = {
'Current': ipmi_test_data.CURRENT_DATA
}
FAN_SENSOR_DATA = {
'Fan': ipmi_test_data.FAN_DATA
}
VOLTAGE_SENSOR_DATA = {
'Voltage': ipmi_test_data.VOLTAGE_DATA
}
MISSING_SENSOR_DATA = ipmi_test_data.MISSING_SENSOR['payload']['payload']
MALFORMED_SENSOR_DATA = ipmi_test_data.BAD_SENSOR['payload']['payload']
MISSING_ID_SENSOR_DATA = ipmi_test_data.NO_SENSOR_ID['payload']['payload']
class TestTemperatureSensorPollster(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return TEMPERATURE_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.TemperatureSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(10, float(32), CONF.host)
class TestMissingSensorData(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return MISSING_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.TemperatureSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(0)
class TestMalformedSensorData(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return MALFORMED_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.TemperatureSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(0)
class TestMissingSensorId(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return MISSING_ID_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.TemperatureSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(0)
class TestFanSensorPollster(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return FAN_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.FanSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(12, float(7140), CONF.host)
class TestCurrentSensorPollster(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return CURRENT_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.CurrentSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(1, float(130), CONF.host)
class TestVoltageSensorPollster(base.TestPollsterBase):
def fake_sensor_data(self, sensor_type):
return VOLTAGE_SENSOR_DATA
def fake_data(self):
# No use for Sensor test
return None
def make_pollster(self):
return sensor.VoltageSensorPollster()
@mock.patch('ceilometer.pipeline.setup_pipeline', mock.MagicMock())
def test_get_samples(self):
self._test_get_samples()
self._verify_metering(4, float(3.309), CONF.host)
| apache-2.0 |
Neurita/pelican-plugins | events/events.py | 37 | 4126 | # -*- coding: utf-8 -*-
"""
events plugin for Pelican
=========================
This plugin looks for and parses an "events" directory and generates
blog posts with a user-defined event date. (typically in the future)
It also generates an ICalendar v2.0 calendar file.
https://en.wikipedia.org/wiki/ICalendar
Author: Federico Ceratto <federico.ceratto@gmail.com>
Released under AGPLv3+ license, see LICENSE
"""
from datetime import datetime, timedelta
from pelican import signals, utils
import icalendar
import logging
import os.path
import pytz
log = logging.getLogger(__name__)
TIME_MULTIPLIERS = {
'w': 'weeks',
'd': 'days',
'h': 'hours',
'm': 'minutes',
's': 'seconds'
}
events = []
def parse_tstamp(ev, field_name):
"""Parse a timestamp string in format "YYYY-MM-DD HH:MM"
:returns: datetime
"""
try:
return datetime.strptime(ev[field_name], '%Y-%m-%d %H:%M')
except Exception, e:
log.error("Unable to parse the '%s' field in the event named '%s': %s" \
% (field_name, ev['title'], e))
raise
def parse_timedelta(ev):
"""Parse a timedelta string in format [<num><multiplier> ]*
e.g. 2h 30m
:returns: timedelta
"""
chunks = ev['event-duration'].split()
tdargs = {}
for c in chunks:
try:
m = TIME_MULTIPLIERS[c[-1]]
val = int(c[:-1])
tdargs[m] = val
except KeyError:
log.error("""Unknown time multiplier '%s' value in the \
'event-duration' field in the '%s' event. Supported multipliers \
are: '%s'.""" % (c, ev['title'], ' '.join(TIME_MULTIPLIERS)))
raise RuntimeError("Unknown time multiplier '%s'" % c)
except ValueError:
log.error("""Unable to parse '%s' value in the 'event-duration' \
field in the '%s' event.""" % (c, ev['title']))
raise ValueError("Unable to parse '%s'" % c)
return timedelta(**tdargs)
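# Illustrative parse (hypothetical metadata): an 'event-duration' value of
# "1d 2h 30m" yields timedelta(days=1, hours=2, minutes=30).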
def parse_article(generator, metadata):
"""Collect articles metadata to be used for building the event calendar
:returns: None
"""
if 'event-start' not in metadata:
return
dtstart = parse_tstamp(metadata, 'event-start')
if 'event-end' in metadata:
dtend = parse_tstamp(metadata, 'event-end')
elif 'event-duration' in metadata:
dtdelta = parse_timedelta(metadata)
dtend = dtstart + dtdelta
else:
msg = "Either 'event-end' or 'event-duration' must be" + \
" speciefied in the event named '%s'" % metadata['title']
log.error(msg)
raise ValueError(msg)
events.append((dtstart, dtend, metadata))
def generate_ical_file(generator):
"""Generate an iCalendar file
"""
global events
ics_fname = generator.settings['PLUGIN_EVENTS']['ics_fname']
if not ics_fname:
return
ics_fname = os.path.join(generator.settings['OUTPUT_PATH'], ics_fname)
log.debug("Generating calendar at %s with %d events" % (ics_fname, len(events)))
tz = generator.settings.get('TIMEZONE', 'UTC')
tz = pytz.timezone(tz)
ical = icalendar.Calendar()
ical.add('prodid', '-//My calendar product//mxm.dk//')
ical.add('version', '2.0')
for e in events:
dtstart, dtend, metadata = e
ie = icalendar.Event(
summary=metadata['summary'],
dtstart=dtstart,
dtend=dtend,
dtstamp=metadata['date'],
priority=5,
uid=metadata['title'] + metadata['summary'],
)
if 'event-location' in metadata:
ie.add('location', metadata['event-location'])
ical.add_component(ie)
with open(ics_fname, 'wb') as f:
f.write(ical.to_ical())
def generate_events_list(generator):
"""Populate the event_list variable to be used in jinja templates"""
generator.context['events_list'] = sorted(events, reverse=True)
def register():
signals.article_generator_context.connect(parse_article)
signals.article_generator_finalized.connect(generate_ical_file)
signals.article_generator_finalized.connect(generate_events_list)
| agpl-3.0 |
sanjuro/RCJK | api/xmlrpc.py | 32 | 4100 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCDispatcher
from django.conf import settings
from django import http
from oauth import oauth
from common import api
from common import legacy
from common import exception
from common import oauth_util
def _xmlrpc_url():
if settings.SUBDOMAINS_ENABLED:
return "http://api.%s/xmlrpc" % settings.HOSTED_DOMAIN
else:
return "http://%s/api/xmlrpc" % settings.DOMAIN
URL = _xmlrpc_url()
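# Illustrative values: with SUBDOMAINS_ENABLED and HOSTED_DOMAIN set to
# 'example.com', URL becomes 'http://api.example.com/xmlrpc'.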
class XmlRpcDispatcher(object):
_PERSONAL_KEY = 'personal_key'
_API_USER_NICK_KEY = 'user'
# for when the client makes a non-post xmlrpc request
_ONLY_POST_ALLOWED = xmlrpclib.dumps(xmlrpclib.Fault(
exception.INVALID_ARGUMENTS,
'XML-RPC message must be an HTTP-POST request'))
@staticmethod
def _get_api_user(params):
if (settings.API_ALLOW_LEGACY_AUTH
and params.has_key(XmlRpcDispatcher._PERSONAL_KEY)
and params.has_key(XmlRpcDispatcher._API_USER_NICK_KEY)):
return legacy.authenticate_user_personal_key(
params[XmlRpcDispatcher._API_USER_NICK_KEY],
params[XmlRpcDispatcher._PERSONAL_KEY])
oauth_request = oauth.OAuthRequest(http_method='POST',
http_url=URL,
parameters=params)
oauth_util.verify_oauth_request(oauth_request)
return oauth_util.get_api_user_from_oauth_request(oauth_request)
@staticmethod
def _wrap_api_call(function):
def _wrapped(params):
try:
api_user = XmlRpcDispatcher._get_api_user(params)
if not api_user:
raise xmlrpclib.Fault(0x00, 'Invalid API user')
method_args = oauth_util.get_non_oauth_params(params)
if XmlRpcDispatcher._PERSONAL_KEY in method_args:
del method_args[XmlRpcDispatcher._PERSONAL_KEY]
if XmlRpcDispatcher._API_USER_NICK_KEY in method_args:
del method_args[XmlRpcDispatcher._API_USER_NICK_KEY]
return function(api_user, **method_args).to_api()
except oauth_util.OAuthError, e:
raise xmlrpclib.Fault(exception.OAUTH_ERROR, e.message)
except TypeError, e:
raise xmlrpclib.Fault(exception.INVALID_ARGUMENTS, str(e))
return _wrapped
def __init__(self, public_api_methods):
if sys.version_info[:3] >= (2,5,):
self._dispatcher = SimpleXMLRPCDispatcher(allow_none=True,
encoding=None)
else:
self._dispatcher = SimpleXMLRPCDispatcher()
for name, method in public_api_methods.iteritems():
self._dispatcher.register_function(
name=name,
function=XmlRpcDispatcher._wrap_api_call(method))
def dispatch(self, request):
return http.HttpResponse(content=self._dispatch(request),
mimetype='text/xml')
def _dispatch(self, request):
if request.method != 'POST':
return XmlRpcDispatcher._ONLY_POST_ALLOWED
# SimpleXMLRPCDispatcher in python 2.4 does not allow None in response.
# That prevents us from calling _marshaled_dispatch directly.
try:
params, method = xmlrpclib.loads(request.raw_post_data)
rv = self._dispatcher._dispatch(method, params)
return xmlrpclib.dumps((rv,),
methodresponse=True,
allow_none=True)
except xmlrpclib.Fault, fault:
return xmlrpclib.dumps(fault)
except:
return xmlrpclib.dumps(xmlrpclib.Fault(
0x00, '%s:%s' % (sys.exc_type, sys.exc_value)))
| apache-2.0 |
darktears/chromium-crosswalk | chrome/test/data/nacl/debug_stub_browser_tests.py | 42 | 3536 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import sys
import xml.etree.ElementTree
import gdb_rsp
def AssertRaises(exc_class, func):
try:
func()
except exc_class:
pass
else:
raise AssertionError('Function did not raise %r' % exc_class)
def GetTargetArch(connection):
"""Get the CPU architecture of the NaCl application."""
reply = connection.RspRequest('qXfer:features:read:target.xml:0,fff')
assert reply[0] == 'l', reply
tree = xml.etree.ElementTree.fromstring(reply[1:])
arch_tag = tree.find('architecture')
assert arch_tag is not None, reply
return arch_tag.text.strip()
def ReverseBytes(byte_string):
"""Reverse bytes in the hex string: '09ab' -> 'ab09'. This converts
little-endian number in the hex string to its normal string representation.
"""
assert len(byte_string) % 2 == 0, byte_string
return ''.join([byte_string[i - 2 : i]
for i in xrange(len(byte_string), 0, -2)])
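# Worked example: ReverseBytes('78563412') == '12345678', i.e. a 32-bit
# little-endian register dump read back in its natural hex order.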
def GetProgCtrString(connection, arch):
"""Get current execution point."""
registers = connection.RspRequest('g')
# PC register indices can be found in
# native_client/src/trusted/debug_stub/abi.cc in AbiInit function.
if arch == 'i386':
# eip index is 8
return ReverseBytes(registers[8 * 8 : 8 * 8 + 8])
if arch == 'i386:x86-64':
# rip index is 16
return ReverseBytes(registers[16 * 16 : 16 * 16 + 8])
if arch == 'iwmmxt':
# pc index is 15
return ReverseBytes(registers[15 * 8 : 15 * 8 + 8])
raise AssertionError('Unknown architecture: %s' % arch)
def TestContinue(connection):
# Once the NaCl test module reports that the test passed, the NaCl <embed>
# element is removed from the page. The NaCl module will be killed by the
# browser which will appear as EOF (end-of-file) on the debug stub socket.
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def TestBreakpoint(connection):
# Breakpoints and single-stepping might interfere with Chrome sandbox. So we
# check that they work properly in this test.
arch = GetTargetArch(connection)
registers = connection.RspRequest('g')
pc = GetProgCtrString(connection, arch)
# Set breakpoint
result = connection.RspRequest('Z0,%s,1' % pc)
assert result == 'OK', result
# Check that we stopped at breakpoint
result = connection.RspRequest('vCont;c')
stop_reply = re.compile(r'T05thread:(\d+);')
assert stop_reply.match(result), result
thread = stop_reply.match(result).group(1)
# Check that registers haven't changed
result = connection.RspRequest('g')
assert result == registers, (result, registers)
# Remove breakpoint
result = connection.RspRequest('z0,%s,1' % pc)
assert result == 'OK', result
# Check single stepping
result = connection.RspRequest('vCont;s:%s' % thread)
assert result == 'T05thread:%s;' % thread, result
assert pc != GetProgCtrString(connection, arch)
# Check that we terminate normally
AssertRaises(gdb_rsp.EofOnReplyException,
lambda: connection.RspRequest('vCont;c'))
def Main(args):
port = int(args[0])
name = args[1]
connection = gdb_rsp.GdbRspConnection(('localhost', port))
if name == 'continue':
TestContinue(connection)
elif name == 'breakpoint':
TestBreakpoint(connection)
else:
raise AssertionError('Unknown test name: %r' % name)
if __name__ == '__main__':
Main(sys.argv[1:])
| bsd-3-clause |
encima/NeuroSocket | pysocket.py | 1 | 5179 | import socket
import sys
import platform
import json
import pprint
from datetime import datetime
import time
import subprocess
import couchdb
from multiprocessing import Process
import re
import config
import argparse
pp = pprint.PrettyPrinter(indent=4)
parser = argparse.ArgumentParser(description='Log all the productivity')
#parser.add_argument('-o','--output',help='Output file name', required=False)
parser.add_argument('-d','--dbname',help='DB name', required=False, default="readings")
parser.add_argument('-l', '--logdir', help='Directory to save logs', required=False)
parser.add_argument('-i','--interval',help='Interval for readings', required=False, default=30, type=int)
parser.add_argument("-m", "--mindwave", help="Connect to mindwave", action="store_true")
args = parser.parse_args()
server = couchdb.Server()
server.resource.credentials = (config.DB_USERNAME, config.DB_PWD)
db = None
log_file = None
if not args.logdir and args.dbname:
print(args.dbname)
try:
db = server[args.dbname]
except:
db = server.create(args.dbname)
print("DB Connected")
else:
print(datetime.today().toordinal())
logpath = "{0}/log_{1}.json".format(args.logdir, datetime.today().toordinal())
log_file = open(logpath, 'w+', encoding="utf8")
print("Opened {} for writing".format(logpath))
LOG_NAME = "output/log_{0}.json"
HOST_INFO = platform.uname()
print(HOST_INFO)
print("Running on {}".format(HOST_INFO.system))
def save_reading(reading):
if args.logdir:
json.dump(reading, log_file)
elif db is not None:
db.save(reading)
def get_app(host):
foreground_app = None
try:
foreground_app = subprocess.check_output(config.FG_CMD[host], stderr=subprocess.STDOUT, shell=True, timeout=5)
# foreground_app = subprocess.Popen(config.FG_CMD[host], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# foreground_app, err = foreground_app.communicate()
foreground_app = foreground_app.decode()
except subprocess.TimeoutExpired as e:
print("Command timed out")
return {'program': 'Could not detect'}
except subprocess.CalledProcessError as e:
print("Error running script")
return {'program': 'Could not detect'}
# raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
if host == 'Linux':
foreground_app = foreground_app.replace("\"","")
foreground_app = re.split("= |\n", foreground_app)
idleTime = subprocess.check_output("xprintidle", stderr=subprocess.STDOUT,shell=True).decode()
idleTime = float(idleTime)/1000
foreground_app = {'program': foreground_app[3], 'title': foreground_app[1], 'idleTime':idleTime}
elif host == 'Windows' or host == 'Darwin':
foreground_app = json.loads(foreground_app)
return foreground_app
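# The Linux branch above assumes FG_CMD emits "KEY = value" lines (e.g.
# from xdotool/xprop), so after stripping quotes and splitting on '= ' and
# newlines, index 1 holds the window title and index 3 the program name.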
def enrich_reading(d_json):
d_json['time'] = str(datetime.now())
d_json['host'] = HOST_INFO.node
d_json['app'] = get_app(HOST_INFO.system)
d_json['platform'] = HOST_INFO.system
sock = None
if args.mindwave:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
readings = []
server_address = (config.HOST, config.PORT)
print('connecting to %s port %s' % server_address)
sock.connect(server_address)
try:
# Send config message to mindwave (the JSON payload comes from config)
if sock:
sock.send(config.CONFSTRING.encode('utf8'))
# Look for the response
while True:
if args.mindwave:
data = sock.recv(1024)
dataform = data.decode()
print(dataform)
try:
struct = json.loads(dataform)
if struct:
if ('status' in struct and struct['status'] != "scanning") or 'status' not in struct:
d_json = struct
#add time and foreground app to json
enrich_reading(d_json)
# pp.pprint(d_json)
if config.BUFFER:
readings.append(d_json)
if len(readings) > MAX_READING_BUFFER:
for reading in readings:
p = Process(target=save_reading, args=(reading,))
p.start()
p.join()
readings = []
else:
save_reading(d_json)
except Exception as e:
print("------")
print(str(e))
print(dataform)
print("------")
else:
d_json = {}
enrich_reading(d_json)
pp.pprint(d_json)
save_reading(d_json)
time.sleep(args.interval)
#TODO add cl option for logging
except KeyboardInterrupt:
print("Quit; saving readings")
finally:
if log_file is not None:
log_file.close()
print("Closing Log")
if sock:
sock.close()
print(sys.stderr, 'closing socket')
| mit |
zainabg/NOX | src/nox/lib/registries.py | 11 | 5332 | import types
class ObjectRegistry:
"""Registry for any type of objects named by a string-type name.
This class is for handling common registries of objects where it is
desirable to be able to look up the object by a string name. It assumes
it is not desirable to overwrite an existing registered object, and
raises an exception if that is attempted."""
@staticmethod
def _default_check(o):
return True
def __init__(self, objcheckfn=None):
"""Create an object registry.
The objcheckfn argument is an optional function to be called when
an object is added to ensure it meets the criteria of the registry.
It is called with a single argument that is the object to check.
If it returns True, the object will be added to the registry; if
it returns False it will not. Note that if it returns False the
attempt to register will appear to be successful as far as the caller
is concerned. If the caller should be informed of an issue, the
check function should raise an appropriate exception, for example
a TypeError. If no objcheckfn is provided, any object is
considered acceptable."""
self._objdict = {}
if objcheckfn == None:
self._objcheckfn = ObjectRegistry._default_check
else:
self._objcheckfn = objcheckfn
def has_registered(self, name):
"Return whether an object is registered in the repository under name."
return name in self._objdict
def register(self, name, obj):
"Register obj in the registry under name."
ntype = type(name)
if ntype != types.StringType and ntype != types.UnicodeType:
raise TypeError("The name for an object must be a regular or unicode string.")
if self.has_registered(name):
raise KeyError("The name %s is already registered." % name)
if self._objcheckfn(obj):
self._objdict[name] = obj
def get(self, name):
"Get the object named by name."
try:
return self._objdict[name]
except KeyError, e:
raise KeyError("The name %s is not yet registered." % name)
def list(self, sort_key=None, sort_reverse=None):
"Return a list of the names of registered objects."
if sort_key == None:
return self._objdict.keys()
o = self._objdict.items()
o.sort(key=lambda i: sort_key(i[1]), reverse=sort_reverse)
return [ i[0] for i in o ]
def __len__(self):
return len(self._objdict)
def __getitem__(self, name):
return self.get(name)
def __setitem__(self, name, obj):
self.register(name, obj)
# __delitem__() is deliberately NOT defined because removal of
# objects from the registry is not allowed. __setitem__() will
# also only allow adding new objects, not overwriting existing
# ones, for the same reason.
def __iter__(self):
return self._objdict.iterkeys()
def __contains__(self, name):
return self.has_registered(name)
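# Brief usage sketch of ObjectRegistry (hypothetical names); register()
# refuses duplicates and get() raises KeyError for unknown names:
#
# registry = ObjectRegistry()
# registry.register('answer', 42)
# registry.get('answer') # -> 42
# 'answer' in registry # -> True
# registry.register('answer', 43) # raises KeyError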
class ClassRegistry(ObjectRegistry):
"""Registry for classes named by a string-type name."""
def _verify_class(self, o):
if self.cls == None:
return True
if self.cls == o:
return False
if not issubclass(o, self.cls):
raise TypeError("Classes registered in this registry must be subclasses of type %s" % self.cls)
return True
def __init__(self, cls=None):
"""Create a class registry.
If the optional class argument is supplied, all classes being
registered must be a subclass of that class. A TypeError will
be raised on attempts to use any other class and attempts to
register the class itself will be ignored."""
self.cls = cls
ObjectRegistry.__init__(self, self._verify_class)
def register(self, cls):
"""Register a class in the registry.
The class must have a class variable called 'name' which is the name
under which it will be registered."""
ObjectRegistry.register(self, cls.name, cls)
def create(self, name, *arg, **kwarg):
"""Create an instance of the class named by 'name'"""
return self.get(name)(*arg, **kwarg)
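# Example sketch: a class is registered via its 'name' class attribute and
# later instantiated by that name (Switch is illustrative, not part of NOX):
#
# class Switch(object):
#     name = 'switch'
# registry = ClassRegistry()
# registry.register(Switch)
# sw = registry.create('switch')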
class InstanceRegistry(ObjectRegistry):
"""Registry for instances of a class named by a string-type name."""
def _verify_instance(self, o):
if self.cls == None:
return True
if not isinstance(o, self.cls):
raise TypeError("Classes registered in this registry must be subclasses of type %s" % self.cls)
return True
def __init__(self, cls=None):
"""Create a class registry.
If the optional class argument is supplied, all instances being
registered must be a subclass of that class. A TypeError will
be raised on attempts to use any other class."""
self.cls = cls
ObjectRegistry.__init__(self, self._verify_instance)
def register(self, name, instance):
"""Register a class in the registry.
The class must have a class variable called 'name' which is the name
under which it will be registered."""
ObjectRegistry.register(self, name, instance)
| gpl-3.0 |
dohoangkhiem/ansible-modules-extras | cloud/vmware/vca_fw.py | 14 | 9993 | #!/usr/bin/python
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vca_fw
short_description: add remove firewall rules in a gateway in a vca
description:
- Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
username:
description:
- The vca username or email address, if not set the environment variable VCA_USER is checked for the username.
required: false
default: None
password:
description:
- The vca password, if not set the environment variable VCA_PASS is checked for the password
required: false
default: None
org:
description:
- The org to login to for creating vapp, mostly set when the service_type is vdc.
required: false
default: None
instance_id:
description:
- The instance id in a vchs environment to be used for creating the vapp
required: false
default: None
host:
description:
- The authentication host to be used when service type is vcd.
required: false
default: None
api_version:
description:
- The api version to be used with the vca
required: false
default: "5.7"
service_type:
description:
- The type of service we are authenticating against
required: false
default: vca
choices: [ "vca", "vchs", "vcd" ]
state:
description:
- if the object should be added or removed
required: false
default: present
choices: [ "present", "absent" ]
verify_certs:
description:
- If the certificates of the authentication is to be verified
required: false
default: True
vdc_name:
description:
- The name of the vdc where the gateway is located.
required: false
default: None
gateway_name:
description:
- The name of the gateway of the vdc where the rule should be added
required: false
default: gateway
fw_rules:
description:
- A list of firewall rules to be added to the gateway, Please see examples on valid entries
required: True
default: false
'''
EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
connection: local
tasks:
- vca_fw:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'absent'
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.168.2.11
- description: "ben testing 2"
source_ip: 192.168.2.100
source_port: "Any"
dest_port: "22"
dest_ip: 192.168.2.13
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"
policy: "allow"
'''
try:
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
except ImportError:
# normally set a flag here but it will be caught when testing for
# the existence of pyvcloud (see module_utils/vca.py). This just
# protects against generating an exception at runtime
pass
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
'dest_ip', 'dest_port', 'source_ip', 'source_port',
'protocol']
def protocol_to_tuple(protocol):
return (protocol.get_Tcp(),
protocol.get_Udp(),
protocol.get_Icmp(),
protocol.get_Other(),
protocol.get_Any())
def protocol_to_string(protocol):
protocol = protocol_to_tuple(protocol)
if protocol[0] is True:
return 'Tcp'
elif protocol[1] is True:
return 'Udp'
elif protocol[2] is True:
return 'Icmp'
elif protocol[3] is True:
return 'Other'
elif protocol[4] is True:
return 'Any'
def protocol_to_type(protocol):
try:
protocols = ProtocolsType()
setattr(protocols, protocol, True)
return protocols
except AttributeError:
raise VcaError("The value in protocol is not valid")
def validate_fw_rules(fw_rules):
for rule in fw_rules:
for k in rule.keys():
if k not in VALID_RULE_KEYS:
raise VcaError("%s is not a valid key in fw rules, please "
"check above.." % k, valid_keys=VALID_RULE_KEYS)
rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower()
rule['dest_ip'] = rule.get('dest_ip', 'Any').lower()
rule['source_port'] = str(rule.get('source_port', 'Any')).lower()
rule['source_ip'] = rule.get('source_ip', 'Any').lower()
rule['protocol'] = rule.get('protocol', 'Any').lower()
rule['policy'] = rule.get('policy', 'allow').lower()
rule['is_enable'] = rule.get('is_enable', True)
rule['enable_logging'] = rule.get('enable_logging', False)
rule['description'] = rule.get('description', 'rule added by Ansible')
return fw_rules
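# For example, a sparse rule such as
# {'description': 'ssh in', 'dest_ip': '10.0.0.1', 'dest_port': 22}
# is normalized by validate_fw_rules() to lowercase strings plus defaults:
# {'description': 'ssh in', 'dest_ip': '10.0.0.1', 'dest_port': '22',
#  'source_ip': 'any', 'source_port': 'any', 'protocol': 'any',
#  'policy': 'allow', 'is_enable': True, 'enable_logging': False}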
def fw_rules_to_dict(rules):
fw_rules = list()
for rule in rules:
fw_rules.append(
dict(
dest_port=rule.get_DestinationPortRange().lower(),
dest_ip=rule.get_DestinationIp().lower(),
source_port=rule.get_SourcePortRange().lower(),
source_ip=rule.get_SourceIp().lower(),
protocol=protocol_to_string(rule.get_Protocols()).lower(),
policy=rule.get_Policy().lower(),
is_enable=rule.get_IsEnabled(),
enable_logging=rule.get_EnableLogging(),
description=rule.get_Description()
)
)
return fw_rules
def create_fw_rule(is_enable, description, policy, protocol, dest_port,
dest_ip, source_port, source_ip, enable_logging):
return FirewallRuleType(IsEnabled=is_enable,
Description=description,
Policy=policy,
Protocols=protocol_to_type(protocol),
DestinationPortRange=dest_port,
DestinationIp=dest_ip,
SourcePortRange=source_port,
SourceIp=source_ip,
EnableLogging=enable_logging)
def main():
argument_spec = vca_argument_spec()
argument_spec.update(
dict(
fw_rules = dict(required=True, type='list'),
gateway_name = dict(default='gateway'),
state = dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
fw_rules = module.params.get('fw_rules')
gateway_name = module.params.get('gateway_name')
vdc_name = module.params['vdc_name']
vca = vca_login(module)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check "
"the gateway_name param" % gateway_name)
fwservice = gateway._getFirewallService()
rules = gateway.get_fw_rules()
current_rules = fw_rules_to_dict(rules)
try:
desired_rules = validate_fw_rules(fw_rules)
except VcaError, e:
module.fail_json(msg=e.message)
result = dict(changed=False)
result['current_rules'] = current_rules
result['desired_rules'] = desired_rules
updates = list()
additions = list()
deletions = list()
for (index, rule) in enumerate(desired_rules):
try:
if rule != current_rules[index]:
updates.append((index, rule))
except IndexError:
additions.append(rule)
if len(current_rules) > len(desired_rules):
for rule in current_rules[len(desired_rules):]:
deletions.append(rule)
for rule in additions:
if not module.check_mode:
rule['protocol'] = rule['protocol'].capitalize()
gateway.add_fw_rule(**rule)
result['changed'] = True
for index, rule in updates:
if not module.check_mode:
rule = create_fw_rule(**rule)
fwservice.replace_FirewallRule_at(index, rule)
result['changed'] = True
keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
for rule in deletions:
if not module.check_mode:
kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
# rules built by fw_rules_to_dict() already carry protocol as a lowercase
# string, so capitalize it for the API instead of re-converting the type
kwargs['protocol'] = kwargs['protocol'].capitalize()
gateway.delete_fw_rule(**kwargs)
result['changed'] = True
if not module.check_mode and result['changed'] == True:
task = gateway.save_services_configuration()
if task:
vca.block_until_completed(task)
result['rules_updated'] = len(updates)
result['rules_added'] = len(additions)
result['rules_deleted'] = len(deletions)
return module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.vca import *
if __name__ == '__main__':
main()
| gpl-3.0 |
Atlas-Sailed-Co/oppia | core/storage/email/gae_models_test.py | 14 | 1370 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Sean Lip'
import datetime
from core.platform import models
(email_models,) = models.Registry.import_models([models.NAMES.email])
import test_utils
class SentEmailModelUnitTests(test_utils.GenericTestBase):
"""Test the SentEmailModel class."""
def test_sent_email_model_instances_are_read_only(self):
email_models.SentEmailModel.create(
'recipient_id', 'recipient@email.com', 'sender_id',
'sender@email.com', email_models.INTENT_SIGNUP,
'Email Subject', 'Email Body', datetime.datetime.utcnow())
model = email_models.SentEmailModel.get_all().fetch()[0]
model.recipient_id = 'new_recipient_id'
with self.assertRaises(Exception):
model.put()
| apache-2.0 |
Gavitron/pipulator | udp_handshake.py | 1 | 2233 | # udp listener
#
# Run this in the background, and you will appear to the pipboy app as an instance of Fallout4
import socket
import struct
import sys
import time
import json
# debouncer magic number
min_delta=100
#return current time in microseconds
def now():
return time.time()*1000000
#return microseconds elapsed since 'then'
def dif(then):
return now() - then
#return true if microseconds since last_seen exceed the minimum debounce interval
def stale(last_seen):
return ( dif(last_seen) > min_delta )
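# Debounce sketch: since now() is time.time() * 1000000, min_delta = 100 is
# 100 microseconds; a packet from an address is handled only when
# stale(last_seen[nodeID]) is True, i.e. the previous packet from that
# "ip:port" tuple is at least min_delta old.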
# set some default globals
multicast_group = '224.3.29.71'
listen_address = ('', 28000)
ttl = struct.pack('b', 127) # Set the time-to-live for UDP messages. should be 1.
# here we go
# Create the socket
hand_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
hand_sock.bind(listen_address)
hand_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
hand_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Receive/respond loop
isRunning=True
last_seen = {}
print >>sys.stderr, '\nHANDSHAKE READY...'
while isRunning:
raw_data, address = hand_sock.recvfrom(1024)
nodeID = ':'.join(map(str,address))
print >>sys.stderr, 'HANDSHAKE received %d bytes, from: %s' % (len(raw_data), nodeID)
if not last_seen.get(nodeID):
print >>sys.stderr, 'HANDSHAKE new tuple: %s' % nodeID
last_seen[nodeID] = 0
if stale(last_seen[nodeID]):
print >>sys.stderr, 'HANDSHAKE old timestamp: %d diff: %d stale: %s' % (last_seen[nodeID],dif(last_seen[nodeID]),stale(last_seen[nodeID]))
udp_msg = json.loads(raw_data)
if udp_msg['cmd'] == 'autodiscover':
print >>sys.stderr, 'HANDSHAKE acknowledging discovery request from %s' % nodeID
reply = {}
reply['IsBusy'] = False
reply['MachineType'] = "PC"
hand_sock.sendto(json.dumps(reply), address)
else:
print >>sys.stderr, 'HANDSHAKE unrecognized request from %s\nHANDSHAKE content: %s' % (nodeID, udp_msg)
last_seen[nodeID] = now()
else:
print >>sys.stderr, 'HANDSHAKE ignoring duplicate request from %s' % nodeID
| bsd-3-clause |
devlin85/p2pool | p2pool/bitcoin/networks/pesetacoin.py | 10 | 1200 | import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'c0c0c0c0'.decode('hex')
P2P_PORT = 16639
ADDRESS_VERSION = 47
RPC_PORT = 26640
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'pesetacoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
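# Subsidy halves every 840000 blocks: the right shift drops the payout by
# one power of two per completed halving epoch.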
SUBSIDY_FUNC = lambda height: 166386*100000 >> (height + 1)//840000
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 60 # s
SYMBOL = 'PTC'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Pesetacoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Pesetacoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.pesetacoin'), 'pesetacoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://pesetacoin.info/block/' #dummy
ADDRESS_EXPLORER_URL_PREFIX = 'http://pesetacoin.info/address/'
TX_EXPLORER_URL_PREFIX = 'http://pesetacoin.info/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.03e8
| gpl-3.0 |
philsch/ansible | lib/ansible/modules/network/fortios/fortios_config.py | 50 | 5677 | #!/usr/bin/python
#
# Ansible module to manage configuration on fortios devices
# (c) 2016, Benjamin Jolivot <bjolivot@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: fortios_config
version_added: "2.3"
author: "Benjamin Jolivot (@bjolivot)"
short_description: Manage config on Fortinet FortiOS firewall devices
description:
- This module provides management of FortiOS Devices configuration.
extends_documentation_fragment: fortios
options:
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote device.
filter:
description:
- Only for partial backup, you can restrict by giving expected configuration path (ex. firewall address).
default: ""
notes:
- This module requires pyFG python library
"""
EXAMPLES = """
- name: Backup current config
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
- name: Backup only address objects
fortios_config:
host: 192.168.0.254
username: admin
password: password
backup: yes
backup_path: /tmp/forti_backup/
filter: "firewall address"
- name: Update configuration from file
fortios_config:
host: 192.168.0.254
username: admin
password: password
src: new_configuration.conf
"""
RETURN = """
running_config:
description: full config string
returned: always
type: string
change_string:
description: The commands really executed by the module
returned: only if config changed
type: string
"""
from ansible.module_utils.fortios import fortios_argument_spec, fortios_required_if
from ansible.module_utils.fortios import backup
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
#check for pyFG lib
try:
from pyFG import FortiOS, FortiConfig
from pyFG.fortios import logger
from pyFG.exceptions import CommandExecutionException, FailedCommit, ForcedCommit
HAS_PYFG = True
except ImportError:
HAS_PYFG = False
# some blocks don't support update, so remove them
NOT_UPDATABLE_CONFIG_OBJECTS=[
"vpn certificate local",
]
def main():
argument_spec = dict(
src = dict(type='str', default=None),
filter = dict(type='str', default=""),
)
argument_spec.update(fortios_argument_spec)
required_if = fortios_required_if
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
)
result = dict(changed=False)
# fail if pyFG not present
if not HAS_PYFG:
module.fail_json(msg='Could not import the python library pyFG required by this module')
#define device
f = FortiOS( module.params['host'],
username=module.params['username'],
password=module.params['password'],
timeout=module.params['timeout'],
vdom=module.params['vdom'])
#connect
try:
f.open()
except Exception:
module.fail_json(msg='Error connecting to device')
#get config
try:
f.load_config(path=module.params['filter'])
result['running_config'] = f.running_config.to_text()
except Exception:
module.fail_json(msg='Error reading running config')
#backup config
if module.params['backup']:
backup(module, f.running_config.to_text())
#update config
if module.params['src'] is not None:
#store config in str
try:
conf_str = open(module.params['src'], 'r').read()
f.load_config(in_candidate=True, config_text=conf_str)
except Exception:
module.fail_json(msg="Can't open configuration file, or configuration invalid")
#get updates lines
change_string = f.compare_config()
#remove not updatable parts
c = FortiConfig()
c.parse_config_output(change_string)
for o in NOT_UPDATABLE_CONFIG_OBJECTS:
c.del_block(o)
change_string = c.to_text()
if change_string != "":
result['change_string'] = change_string
result['changed'] = True
#Commit if not check mode
if module.check_mode is False and change_string != "":
try:
f.commit(change_string)
except CommandExecutionException:
e = get_exception()
module.fail_json(msg="Unable to execute command, check your args, the error was {0}".format(e.message))
except FailedCommit:
e = get_exception()
module.fail_json(msg="Unable to commit, check your args, the error was {0}".format(e.message))
except ForcedCommit:
e = get_exception()
module.fail_json(msg="Failed to force commit, check your args, the error was {0}".format(e.message))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
apporc/cinder | cinder/volume/drivers/emc/scaleio.py | 1 | 42051 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for EMC ScaleIO based on ScaleIO remote CLI.
"""
import base64
import binascii
import json
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import requests
import six
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.i18n import _, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.san import san
from cinder.volume import volume_types
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
scaleio_opts = [
cfg.StrOpt('sio_rest_server_port',
default='443',
help='REST server port.'),
cfg.BoolOpt('sio_verify_server_certificate',
default=False,
help='Whether to verify server certificate.'),
cfg.StrOpt('sio_server_certificate_path',
help='Server certificate path.'),
cfg.BoolOpt('sio_round_volume_capacity',
default=True,
help='Whether to round volume capacity.'),
cfg.BoolOpt('sio_force_delete',
default=False,
help='Whether to allow force delete.'),
cfg.BoolOpt('sio_unmap_volume_before_deletion',
default=False,
help='Whether to unmap volume before deletion.'),
cfg.StrOpt('sio_protection_domain_id',
help='Protection domain id.'),
cfg.StrOpt('sio_protection_domain_name',
help='Protection domain name.'),
cfg.StrOpt('sio_storage_pools',
help='Storage pools.'),
cfg.StrOpt('sio_storage_pool_name',
help='Storage pool name.'),
cfg.StrOpt('sio_storage_pool_id',
help='Storage pool id.')
]
CONF.register_opts(scaleio_opts)
STORAGE_POOL_NAME = 'sio:sp_name'
STORAGE_POOL_ID = 'sio:sp_id'
PROTECTION_DOMAIN_NAME = 'sio:pd_name'
PROTECTION_DOMAIN_ID = 'sio:pd_id'
PROVISIONING_KEY = 'sio:provisioning_type'
IOPS_LIMIT_KEY = 'sio:iops_limit'
BANDWIDTH_LIMIT = 'sio:bandwidth_limit'
BLOCK_SIZE = 8
OK_STATUS_CODE = 200
VOLUME_NOT_FOUND_ERROR = 78
VOLUME_NOT_MAPPED_ERROR = 84
VOLUME_ALREADY_MAPPED_ERROR = 81
class ScaleIODriver(driver.VolumeDriver):
"""EMC ScaleIO Driver."""
VERSION = "2.0"
def __init__(self, *args, **kwargs):
super(ScaleIODriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(san.san_opts)
self.configuration.append_config_values(scaleio_opts)
self.server_ip = self.configuration.san_ip
self.server_port = self.configuration.sio_rest_server_port
self.server_username = self.configuration.san_login
self.server_password = self.configuration.san_password
self.server_token = None
self.verify_server_certificate = (
self.configuration.sio_verify_server_certificate)
self.server_certificate_path = None
if self.verify_server_certificate:
self.server_certificate_path = (
self.configuration.sio_server_certificate_path)
LOG.info(_LI(
"REST server IP: %(ip)s, port: %(port)s, username: %(user)s. "
"Verify server's certificate: %(verify_cert)s."),
{'ip': self.server_ip,
'port': self.server_port,
'user': self.server_username,
'verify_cert': self.verify_server_certificate})
self.storage_pools = None
if self.configuration.sio_storage_pools:
self.storage_pools = [
e.strip() for e in
self.configuration.sio_storage_pools.split(',')
]
self.storage_pool_name = self.configuration.sio_storage_pool_name
self.storage_pool_id = self.configuration.sio_storage_pool_id
if self.storage_pool_name is None and self.storage_pool_id is None:
LOG.warning(_LW("No storage pool name or id was found."))
else:
LOG.info(_LI(
"Storage pools names: %(pools)s, "
"storage pool name: %(pool)s, pool id: %(pool_id)s."),
{'pools': self.storage_pools,
'pool': self.storage_pool_name,
'pool_id': self.storage_pool_id})
self.protection_domain_name = (
self.configuration.sio_protection_domain_name)
LOG.info(_LI(
"Protection domain name: %(domain_name)s."),
{'domain_name': self.protection_domain_name})
self.protection_domain_id = self.configuration.sio_protection_domain_id
LOG.info(_LI(
"Protection domain id: %(domain_id)s."),
{'domain_id': self.protection_domain_id})
self.connector = connector.InitiatorConnector.factory(
connector.SCALEIO, utils.get_root_helper(),
device_scan_attempts=
self.configuration.num_volume_device_scan_tries
)
self.connection_properties = {}
self.connection_properties['scaleIO_volname'] = None
self.connection_properties['hostIP'] = None
self.connection_properties['serverIP'] = self.server_ip
self.connection_properties['serverPort'] = self.server_port
self.connection_properties['serverUsername'] = self.server_username
self.connection_properties['serverPassword'] = self.server_password
self.connection_properties['serverToken'] = self.server_token
self.connection_properties['iopsLimit'] = None
self.connection_properties['bandwidthLimit'] = None
def check_for_setup_error(self):
if (not self.protection_domain_name and
not self.protection_domain_id):
LOG.warning(_LW("No protection domain name or id "
"was specified in configuration."))
if self.protection_domain_name and self.protection_domain_id:
msg = _("Cannot specify both protection domain name "
"and protection domain id.")
raise exception.InvalidInput(reason=msg)
if not self.server_ip:
msg = _("REST server IP must be specified.")
raise exception.InvalidInput(reason=msg)
if not self.server_username:
msg = _("REST server username must be specified.")
raise exception.InvalidInput(reason=msg)
if not self.server_password:
msg = _("REST server password must be specified.")
raise exception.InvalidInput(reason=msg)
if not self.verify_server_certificate:
LOG.warning(_LW("Verify certificate is not set, using default of "
"False."))
if self.verify_server_certificate and not self.server_certificate_path:
msg = _("Path to REST server's certificate must be specified.")
raise exception.InvalidInput(reason=msg)
if self.storage_pool_name and self.storage_pool_id:
msg = _("Cannot specify both storage pool name and storage "
"pool id.")
raise exception.InvalidInput(reason=msg)
if not self.storage_pool_name and not self.storage_pool_id:
msg = _("Must specify storage pool name or id.")
raise exception.InvalidInput(reason=msg)
if not self.storage_pools:
msg = _(
"Must specify storage pools. Option: sio_storage_pools."
)
raise exception.InvalidInput(reason=msg)
def _find_storage_pool_id_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(STORAGE_POOL_ID,
self.storage_pool_id)
def _find_storage_pool_name_from_storage_type(self, storage_type):
return storage_type.get(STORAGE_POOL_NAME,
self.storage_pool_name)
def _find_protection_domain_id_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(PROTECTION_DOMAIN_ID,
self.protection_domain_id)
def _find_protection_domain_name_from_storage_type(self, storage_type):
# Default to what was configured in configuration file if not defined.
return storage_type.get(PROTECTION_DOMAIN_NAME,
self.protection_domain_name)
def _find_provisioning_type(self, storage_type):
return storage_type.get(PROVISIONING_KEY)
def _find_iops_limit(self, storage_type):
return storage_type.get(IOPS_LIMIT_KEY)
def _find_bandwidth_limit(self, storage_type):
return storage_type.get(BANDWIDTH_LIMIT)
def _id_to_base64(self, id):
# Base64 encode the id to get a volume name less than 32 characters due
# to ScaleIO limitation.
name = six.text_type(id).replace("-", "")
try:
name = base64.b16decode(name.upper())
except (TypeError, binascii.Error):
pass
encoded_name = name
if isinstance(encoded_name, six.text_type):
encoded_name = encoded_name.encode('utf-8')
encoded_name = base64.b64encode(encoded_name)
if six.PY3:
encoded_name = encoded_name.decode('ascii')
LOG.debug(
"Converted id %(id)s to scaleio name %(name)s.",
{'id': id, 'name': encoded_name})
return encoded_name
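# For example, a 32-hex-digit UUID (dashes stripped) b16-decodes to 16 raw
# bytes whose base64 form is 24 characters, safely under the 32-char limit.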
def create_volume(self, volume):
"""Creates a scaleIO volume."""
self._check_volume_size(volume.size)
volname = self._id_to_base64(volume.id)
storage_type = self._get_volumetype_extraspecs(volume)
storage_pool_name = self._find_storage_pool_name_from_storage_type(
storage_type)
storage_pool_id = self._find_storage_pool_id_from_storage_type(
storage_type)
protection_domain_id = (
self._find_protection_domain_id_from_storage_type(storage_type))
protection_domain_name = (
self._find_protection_domain_name_from_storage_type(storage_type))
provisioning_type = self._find_provisioning_type(storage_type)
LOG.info(_LI(
"Volume type: %(volume_type)s, storage pool name: %(pool_name)s, "
"storage pool id: %(pool_id)s, protection domain id: "
"%(domain_id)s, protection domain name: %(domain_name)s."),
{'volume_type': storage_type,
'pool_name': storage_pool_name,
'pool_id': storage_pool_id,
'domain_id': protection_domain_id,
'domain_name': protection_domain_name})
verify_cert = self._get_verify_cert()
if storage_pool_name:
self.storage_pool_name = storage_pool_name
self.storage_pool_id = None
if storage_pool_id:
self.storage_pool_id = storage_pool_id
self.storage_pool_name = None
if protection_domain_name:
self.protection_domain_name = protection_domain_name
self.protection_domain_id = None
if protection_domain_id:
self.protection_domain_id = protection_domain_id
self.protection_domain_name = None
domain_id = self.protection_domain_id
if not domain_id:
if not self.protection_domain_name:
msg = _("Must specify protection domain name or"
" protection domain id.")
raise exception.VolumeBackendAPIException(data=msg)
domain_name = self.protection_domain_name
encoded_domain_name = urllib.parse.quote(domain_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
domain_id = r.json()
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
% self.protection_domain_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id:
msg = (_("Error getting domain id from name %(name)s: %(id)s.")
% {'name': self.protection_domain_name,
'id': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
pool_name = self.storage_pool_name
pool_id = self.storage_pool_id
if pool_name:
encoded_domain_name = urllib.parse.quote(pool_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'domain_id': domain_id,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
pool_id = r.json()
if not pool_id:
msg = (_("Pool with name %(pool_name)s wasn't found in "
"domain %(domain_id)s.")
% {'pool_name': pool_name,
'domain_id': domain_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id:
msg = (_("Error getting pool id from name %(pool_name)s: "
"%(err_msg)s.")
% {'pool_name': pool_name,
'err_msg': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
if provisioning_type == 'thin':
provisioning = "ThinProvisioned"
# Default volume type is thick.
else:
provisioning = "ThickProvisioned"
# units.Mi = 1024 ** 2
volume_size_kb = volume.size * units.Mi
params = {'protectionDomainId': domain_id,
'volumeSizeInKb': six.text_type(volume_size_kb),
'name': volname,
'volumeType': provisioning,
'storagePoolId': pool_id}
LOG.info(_LI("Params for add volume request: %s."), params)
r = requests.post(
"https://" +
self.server_ip +
":" +
self.server_port +
"/api/types/Volume/instances",
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Add volume response: %s"), response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Error creating volume: %s.") % response['message'])
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Created volume %(volname)s, volume id %(volid)s."),
{'volname': volname, 'volid': volume.id})
return {'provider_id': response['id']}
def _check_volume_size(self, size):
if size % 8 != 0:
round_volume_capacity = (
self.configuration.sio_round_volume_capacity)
if not round_volume_capacity:
exception_msg = (_(
"Cannot create volume of size %s: not multiple of 8GB.") %
size)
LOG.error(exception_msg)
raise exception.VolumeBackendAPIException(data=exception_msg)
def create_snapshot(self, snapshot):
"""Creates a scaleio snapshot."""
volume_id = snapshot.volume.provider_id
snapname = self._id_to_base64(snapshot.id)
return self._snapshot_volume(volume_id, snapname)
def _snapshot_volume(self, vol_id, snapname):
LOG.info(_LI("Snapshot volume %(vol)s into snapshot %(id)s.") %
{'vol': vol_id, 'id': snapname})
params = {
'snapshotDefs': [{"volumeId": vol_id, "snapshotName": snapname}]}
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/System/action/snapshotVolumes") % req_vars
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False, params)
response = r.json()
LOG.info(_LI("Snapshot volume response: %s."), response)
if r.status_code != OK_STATUS_CODE and "errorCode" in response:
msg = (_("Failed creating snapshot for volume %(volname)s: "
"%(response)s.") %
{'volname': vol_id,
'response': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
return {'provider_id': response['volumeIdList'][0]}
def _check_response(self, response, request, is_get_request=True,
params=None):
if response.status_code == 401 or response.status_code == 403:
LOG.info(_LI("Token is invalid, going to re-login and get "
"a new one."))
login_request = (
"https://" + self.server_ip +
":" + self.server_port + "/api/login")
verify_cert = self._get_verify_cert()
r = requests.get(
login_request,
auth=(
self.server_username,
self.server_password),
verify=verify_cert)
token = r.json()
self.server_token = token
# Repeat request with valid token.
LOG.info(_LI(
"Going to perform request again %s with valid token."),
request)
if is_get_request:
res = requests.get(request,
auth=(self.server_username,
self.server_token),
verify=verify_cert)
else:
res = requests.post(request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=verify_cert)
return res
return response
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
# We interchange 'volume' and 'snapshot' because in ScaleIO
# snapshot is a volume: once a snapshot is generated it
# becomes a new unmapped volume in the system and the user
# may manipulate it in the same manner as any other volume
# exposed by the system
volume_id = snapshot.provider_id
snapname = self._id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create volume from snapshot: snapshot %(snapname)s "
"to volume %(volname)s."),
{'volname': volume_id,
'snapname': snapname})
return self._snapshot_volume(volume_id, snapname)
def _get_headers(self):
return {'content-type': 'application/json'}
def _get_verify_cert(self):
verify_cert = False
if self.verify_server_certificate:
verify_cert = self.server_certificate_path
return verify_cert
def extend_volume(self, volume, new_size):
"""Extends the size of an existing available ScaleIO volume.
This action will round up the volume to the nearest size that is
a granularity of 8 GBs.
"""
vol_id = volume['provider_id']
LOG.info(_LI(
"ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
{'volname': vol_id,
'new_size': new_size})
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'vol_id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/setVolumeSize") % req_vars
LOG.info(_LI("Change volume capacity request: %s."), request)
# Round up the volume size so that it is a granularity of 8 GBs
# because ScaleIO only supports volumes with a granularity of 8 GBs.
if new_size % 8 == 0:
volume_new_size = new_size
else:
volume_new_size = new_size + 8 - (new_size % 8)
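# e.g. new_size = 10 -> 10 + 8 - (10 % 8) = 16, while exact multiples of 8
# fall through the branch above unchanged.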
round_volume_capacity = self.configuration.sio_round_volume_capacity
if (not round_volume_capacity and not new_size % 8 == 0):
LOG.warning(_LW("ScaleIO only supports volumes with a granularity "
"of 8 GBs. The new volume size is: %d."),
volume_new_size)
params = {'sizeInGB': six.text_type(volume_new_size)}
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=self._get_verify_cert())
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
msg = (_("Error extending volume %(vol)s: %(err)s.")
% {'vol': vol_id,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def create_cloned_volume(self, volume, src_vref):
"""Creates a cloned volume."""
volume_id = src_vref['provider_id']
snapname = self._id_to_base64(volume.id)
LOG.info(_LI(
"ScaleIO create cloned volume: source volume %(src)s to target "
"volume %(tgt)s."),
{'src': volume_id,
'tgt': snapname})
return self._snapshot_volume(volume_id, snapname)
def delete_volume(self, volume):
"""Deletes a self.logical volume"""
volume_id = volume['provider_id']
self._delete_volume(volume_id)
def _delete_volume(self, vol_id):
verify_cert = self._get_verify_cert()
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'vol_id': six.text_type(vol_id)}
unmap_before_delete = (
self.configuration.sio_unmap_volume_before_deletion)
# Ensure that the volume is not mapped to any SDC before deletion in
# case unmap_before_deletion is enabled.
if unmap_before_delete:
params = {'allSdcs': ''}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/removeMappedSdc") % req_vars
LOG.info(_LI(
"Trying to unmap volume from all sdcs before deletion: %s."),
request)
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert
)
r = self._check_response(r, request, False, params)
LOG.debug("Unmap volume response: %s.", r.text)
params = {'removeMode': 'ONLY_ME'}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(vol_id)s"
"/action/removeVolume") % req_vars
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=verify_cert
)
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
error_code = response['errorCode']
if error_code == VOLUME_NOT_FOUND_ERROR:
force_delete = self.configuration.sio_force_delete
if force_delete:
LOG.warning(_LW(
"Ignoring error in delete volume %s: volume not found "
"due to force delete settings."), vol_id)
else:
msg = (_("Error deleting volume %s: volume not found.") %
vol_id)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
msg = (_("Error deleting volume %(vol)s: %(err)s.") %
{'vol': vol_id,
'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
def delete_snapshot(self, snapshot):
"""Deletes a ScaleIO snapshot."""
snap_id = snapshot.provider_id
LOG.info(_LI("ScaleIO delete snapshot."))
return self._delete_volume(snap_id)
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
The scaleio driver returns a driver_volume_type of 'scaleio'.
"""
LOG.debug("Connector is %s.", connector)
connection_properties = dict(self.connection_properties)
volname = self._id_to_base64(volume.id)
connection_properties['scaleIO_volname'] = volname
storage_type = self._get_volumetype_extraspecs(volume)
LOG.info(_LI("Volume type is %s."), storage_type)
iops_limit = self._find_iops_limit(storage_type)
LOG.info(_LI("iops limit is: %s."), iops_limit)
bandwidth_limit = self._find_bandwidth_limit(storage_type)
LOG.info(_LI("Bandwidth limit is: %s."), bandwidth_limit)
connection_properties['iopsLimit'] = iops_limit
connection_properties['bandwidthLimit'] = bandwidth_limit
return {'driver_volume_type': 'scaleio',
'data': connection_properties}
def terminate_connection(self, volume, connector, **kwargs):
LOG.debug("scaleio driver terminate connection.")
def _update_volume_stats(self):
stats = {}
backend_name = self.configuration.safe_get('volume_backend_name')
stats['volume_backend_name'] = backend_name or 'scaleio'
stats['vendor_name'] = 'EMC'
stats['driver_version'] = self.VERSION
stats['storage_protocol'] = 'scaleio'
stats['total_capacity_gb'] = 'unknown'
stats['free_capacity_gb'] = 'unknown'
stats['reserved_percentage'] = 0
stats['QoS_support'] = False
pools = []
verify_cert = self._get_verify_cert()
max_free_capacity = 0
total_capacity = 0
for sp_name in self.storage_pools:
splitted_name = sp_name.split(':')
domain_name = splitted_name[0]
pool_name = splitted_name[1]
LOG.debug("domain name is %(domain)s, pool name is %(pool)s.",
{'domain': domain_name,
'pool': pool_name})
# Get domain id from name.
encoded_domain_name = urllib.parse.quote(domain_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'encoded_domain_name': encoded_domain_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Domain/instances/getByName::"
"%(encoded_domain_name)s") % req_vars
LOG.info(_LI("ScaleIO get domain id by name request: %s."),
request)
LOG.info(_LI("username: %(username)s, verify_cert: %(verify)s."),
{'username': self.server_username,
'verify': verify_cert})
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
r = self._check_response(r, request)
LOG.info(_LI("Get domain by name response: %s"), r.text)
domain_id = r.json()
if not domain_id:
msg = (_("Domain with name %s wasn't found.")
% domain_name)
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in domain_id:
msg = (_("Error getting domain id from name %(name)s: "
"%(err)s.")
% {'name': domain_name,
'err': domain_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Domain id is %s."), domain_id)
# Get pool id from name.
encoded_pool_name = urllib.parse.quote(pool_name, '')
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'domain_id': domain_id,
'encoded_pool_name': encoded_pool_name}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/Pool/instances/getByName::"
"%(domain_id)s,%(encoded_pool_name)s") % req_vars
LOG.info(_LI("ScaleIO get pool id by name request: %s."), request)
r = requests.get(
request,
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
pool_id = r.json()
if not pool_id:
msg = (_("Pool with name %(pool)s wasn't found in domain "
"%(domain)s.")
% {'pool': pool_name,
'domain': domain_id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if r.status_code != OK_STATUS_CODE and "errorCode" in pool_id:
msg = (_("Error getting pool id from name %(pool)s: "
"%(err)s.")
% {'pool': pool_name,
'err': pool_id['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Pool id is %s."), pool_id)
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/types/StoragePool/instances/action/"
"querySelectedStatistics") % req_vars
params = {'ids': [pool_id], 'properties': [
"capacityInUseInKb", "capacityLimitInKb"]}
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(
self.server_username,
self.server_token),
verify=verify_cert)
response = r.json()
LOG.info(_LI("Query capacity stats response: %s."), response)
for res in response.values():
capacityInUse = res['capacityInUseInKb']
capacityLimit = res['capacityLimitInKb']
total_capacity_gb = capacityLimit / units.Mi
used_capacity_gb = capacityInUse / units.Mi
free_capacity_gb = total_capacity_gb - used_capacity_gb
LOG.info(_LI(
"free capacity of pool %(pool)s is: %(free)s, "
"total capacity: %(total)s."),
{'pool': pool_name,
'free': free_capacity_gb,
'total': total_capacity_gb})
pool = {'pool_name': sp_name,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'QoS_support': False,
'reserved_percentage': 0
}
pools.append(pool)
if free_capacity_gb > max_free_capacity:
max_free_capacity = free_capacity_gb
total_capacity = total_capacity + total_capacity_gb
stats['volume_backend_name'] = backend_name or 'scaleio'
stats['vendor_name'] = 'EMC'
stats['driver_version'] = self.VERSION
stats['storage_protocol'] = 'scaleio'
# Aggregate the per-pool capacities for the backend-level stats.
stats['total_capacity_gb'] = total_capacity
stats['free_capacity_gb'] = max_free_capacity
LOG.info(_LI(
"Free capacity for backend is: %(free)s, total capacity: "
"%(total)s."),
{'free': max_free_capacity,
'total': total_capacity})
stats['reserved_percentage'] = 0
stats['QoS_support'] = False
stats['pools'] = pools
LOG.info(_LI("Backend name is %s."), stats["volume_backend_name"])
self._stats = stats
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _get_volumetype_extraspecs(self, volume):
specs = {}
ctxt = context.get_admin_context()
type_id = volume['volume_type_id']
if type_id:
volume_type = volume_types.get_volume_type(ctxt, type_id)
specs = volume_type.get('extra_specs')
for key, value in specs.items():
specs[key] = value
return specs
def _sio_attach_volume(self, volume):
"""Call connector.connect_volume() and return the path. """
LOG.debug("Calling os-brick to attach ScaleIO volume.")
connection_properties = dict(self.connection_properties)
connection_properties['scaleIO_volname'] = self._id_to_base64(
volume.id)
device_info = self.connector.connect_volume(connection_properties)
return device_info['path']
def _sio_detach_volume(self, volume):
"""Call the connector.disconnect() """
LOG.info(_LI("Calling os-brick to detach ScaleIO volume."))
connection_properties = dict(self.connection_properties)
connection_properties['scaleIO_volname'] = self._id_to_base64(
volume.id)
self.connector.disconnect_volume(connection_properties, volume)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
LOG.info(_LI(
"ScaleIO copy_image_to_volume volume: %(vol)s image service: "
"%(service)s image id: %(id)s."),
{'vol': volume,
'service': six.text_type(image_service),
'id': six.text_type(image_id)})
try:
image_utils.fetch_to_raw(context,
image_service,
image_id,
self._sio_attach_volume(volume),
BLOCK_SIZE,
size=volume['size'])
finally:
self._sio_detach_volume(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
LOG.info(_LI(
"ScaleIO copy_volume_to_image volume: %(vol)s image service: "
"%(service)s image meta: %(meta)s."),
{'vol': volume,
'service': six.text_type(image_service),
'meta': six.text_type(image_meta)})
try:
image_utils.upload_volume(context,
image_service,
image_meta,
self._sio_attach_volume(volume))
finally:
self._sio_detach_volume(volume)
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return the update from ScaleIO migrated volume.
This method updates the volume name of the new ScaleIO volume to
match the updated volume ID.
The original volume is renamed first since ScaleIO does not allow
multiple volumes to have the same name.
"""
name_id = None
location = None
if original_volume_status == 'available':
# During migration, a new volume is created and will replace
# the original volume at the end of the migration. We need to
# rename the new volume. The current_name of the new volume,
# which is the id of the new volume, will be changed to the
# new_name, which is the id of the original volume.
current_name = new_volume['id']
new_name = volume['id']
vol_id = new_volume['provider_id']
LOG.info(_LI("Renaming %(id)s from %(current_name)s to "
"%(new_name)s."),
{'id': vol_id, 'current_name': current_name,
'new_name': new_name})
# Original volume needs to be renamed first
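# The "ff" prefix is only a temporary, collision-free name; it frees up
# the original volume's name so the new volume can take it over below.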
self._rename_volume(volume, "ff" + new_name)
self._rename_volume(new_volume, new_name)
else:
# The back-end will not be renamed.
name_id = new_volume['_name_id'] or new_volume['id']
location = new_volume['provider_location']
return {'_name_id': name_id, 'provider_location': location}
def _rename_volume(self, volume, new_id):
new_name = self._id_to_base64(new_id)
vol_id = volume['provider_id']
req_vars = {'server_ip': self.server_ip,
'server_port': self.server_port,
'id': vol_id}
request = ("https://%(server_ip)s:%(server_port)s"
"/api/instances/Volume::%(id)s/action/setVolumeName" %
req_vars)
LOG.info(_LI("ScaleIO rename volume request: %s."), request)
params = {'newName': new_name}
r = requests.post(
request,
data=json.dumps(params),
headers=self._get_headers(),
auth=(self.server_username,
self.server_token),
verify=self._get_verify_cert()
)
r = self._check_response(r, request, False, params)
if r.status_code != OK_STATUS_CODE:
response = r.json()
msg = (_("Error renaming volume %(vol)s: %(err)s.") %
{'vol': vol_id, 'err': response['message']})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
else:
LOG.info(_LI("ScaleIO volume %(vol)s was renamed to "
"%(new_name)s."),
{'vol': vol_id, 'new_name': new_name})
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
pass
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
pass
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
pass
def check_for_export(self, context, volume_id):
"""Make sure volume is exported."""
pass
| apache-2.0 |
pbchou/trafficserver | tests/gold_tests/tls/tls_client_cert2.test.py | 6 | 8900 | '''
Test offering client cert to origin
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test client certs to origin selected via wildcard names in sni
'''
ts = Test.MakeATSProcess("ts", command="traffic_server", select_ports=True)
cafile = "{0}/signer.pem".format(Test.RunDirectory)
cafile2 = "{0}/signer2.pem".format(Test.RunDirectory)
server = Test.MakeOriginServer("server",
ssl=True,
options={"--clientCA": cafile,
"--clientverify": ""},
clientcert="{0}/signed-foo.pem".format(Test.RunDirectory),
clientkey="{0}/signed-foo.key".format(Test.RunDirectory))
server2 = Test.MakeOriginServer("server2",
ssl=True,
options={"--clientCA": cafile2,
"--clientverify": ""},
clientcert="{0}/signed2-bar.pem".format(Test.RunDirectory),
clientkey="{0}/signed-bar.key".format(Test.RunDirectory))
server4 = Test.MakeOriginServer("server4")
server.Setup.Copy("ssl/signer.pem")
server.Setup.Copy("ssl/signer2.pem")
server.Setup.Copy("ssl/signed-foo.pem")
server.Setup.Copy("ssl/signed-foo.key")
server.Setup.Copy("ssl/signed2-foo.pem")
server.Setup.Copy("ssl/signed2-bar.pem")
server.Setup.Copy("ssl/signed-bar.key")
server2.Setup.Copy("ssl/signer.pem")
server2.Setup.Copy("ssl/signer2.pem")
server2.Setup.Copy("ssl/signed-foo.pem")
server2.Setup.Copy("ssl/signed-foo.key")
server2.Setup.Copy("ssl/signed2-foo.pem")
server2.Setup.Copy("ssl/signed2-bar.pem")
server2.Setup.Copy("ssl/signed-bar.key")
request_header = {"headers": "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/combo-signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.pem")
ts.addSSLfile("ssl/signed-foo.key")
ts.addSSLfile("ssl/signed2-foo.pem")
ts.addSSLfile("ssl/signed-bar.pem")
ts.addSSLfile("ssl/signed2-bar.pem")
ts.addSSLfile("ssl/signed-bar.key")
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.url_remap.pristine_host_hdr': 1,
'proxy.config.ssl.client.verify.server.policy': 'PERMISSIVE',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map /case1 https://127.0.0.1:{0}/'.format(server.Variables.SSL_Port)
)
ts.Disk.remap_config.AddLine(
'map /case2 https://127.0.0.1:{0}/'.format(server2.Variables.SSL_Port)
)
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: bob.bar.com',
' client_cert: signed-bar.pem',
' client_key: signed-bar.key',
'- fqdn: bob.*.com',
' client_cert: {0}/combo-signed-foo.pem'.format(ts.Variables.SSLDir),
'- fqdn: "*bar.com"',
' client_cert: {0}/signed2-bar.pem'.format(ts.Variables.SSLDir),
' client_key: {0}/signed-bar.key'.format(ts.Variables.SSLDir),
'- fqdn: "foo.com"',
' client_cert: {0}/signed2-foo.pem'.format(ts.Variables.SSLDir),
' client_key: {0}/signed-foo.key'.format(ts.Variables.SSLDir),
])
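# Expected client cert selection (an assumption inferred from the test
# cases below): an exact fqdn entry wins over a wildcard, so bob.bar.com
# offers signed-bar.pem (trusted by server 1), other *bar.com hosts fall
# through to signed2-bar.pem (trusted by server 2), and hosts matching no
# entry offer no client cert at all.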
ts.Disk.logging_yaml.AddLines(
'''
logging:
formats:
- name: testformat
format: '%<pssc> %<cquc> %<pscert> %<cscert>'
logs:
- mode: ascii
format: testformat
filename: squid
'''.split("\n")
)
# Should succeed
tr = Test.AddTestRun("bob.bar.com to server 1")
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(server2)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.Processes.Default.Command = "curl -H host:bob.bar.com http://127.0.0.1:{0}/case1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Should fail
trfail = Test.AddTestRun("bob.bar.com to server 2")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:bob.bar.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should succeed
tr = Test.AddTestRun("bob.foo.com to server 1")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.Processes.Default.Command = "curl -H host:bob.foo.com http://127.0.0.1:{0}/case1".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Should fail
trfail = Test.AddTestRun("bob.foo.com to server 2")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:bob.foo.com http://127.0.0.1:{0}/case2'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should succeed
tr = Test.AddTestRun("random.bar.com to server 2")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.Processes.Default.Command = "curl -H host:random.bar.com http://127.0.0.1:{0}/case2".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ExcludesExpression("Could Not Connect", "Check response")
# Should fail
trfail = Test.AddTestRun("random.bar.com to server 1")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:random.bar.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should fail
tr = Test.AddTestRun("random.foo.com to server 2")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.StillRunningAfter = server2
tr.Processes.Default.Command = "curl -H host:random.foo.com http://127.0.0.1:{0}/case2".format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
# Should fail
trfail = Test.AddTestRun("random.foo.com to server 1")
trfail.StillRunningAfter = ts
trfail.StillRunningAfter = server
trfail.StillRunningAfter = server2
trfail.Processes.Default.Command = 'curl -H host:random.foo.com http://127.0.0.1:{0}/case1'.format(ts.Variables.port)
trfail.Processes.Default.ReturnCode = 0
trfail.Processes.Default.Streams.stdout = Testers.ContainsExpression("Could Not Connect", "Check response")
tr = Test.AddTestRun("Wait for the access log to write out")
tr.Processes.Default.StartBefore(server4, ready=When.FileExists(ts.Disk.squid_log))
tr.StillRunningAfter = ts
tr.Processes.Default.Command = 'echo "Log file exists"'
tr.Processes.Default.ReturnCode = 0
ts.Disk.squid_log.Content = "gold/proxycert2-accesslog.gold"
| apache-2.0 |
admcrae/tensorflow | tensorflow/python/training/saver_large_partitioned_variable_test.py | 141 | 2261 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargePartitionedVariableTest(test.TestCase):
# Need to do this in a separate test because of the amount of memory needed
# to run this test.
def testLargePartitionedVariables(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
var_name = "my_var"
# Saving large partition variable.
with session.Session("", graph=ops.Graph()) as sess:
with ops.device("/cpu:0"):
# Create a partitioned variable which is larger than int32 size but
# split into smaller sized variables.
init = lambda shape, dtype, partition_info: constant_op.constant(
True, dtype, shape)
partitioned_var = partitioned_variables.create_partitioned_variables(
[1 << 31], [4], init, dtype=dtypes.bool, name=var_name)
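# 1 << 31 bool elements split across 4 shards of 2 ** 29 elements each,
# so every individual partition stays below the int32 element limit.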
variables.global_variables_initializer().run()
save = saver.Saver(partitioned_var)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
if __name__ == "__main__":
test.main()
| apache-2.0 |
kumarkrishna/sympy | sympy/physics/quantum/shor.py | 98 | 5800 | """Shor's algorithm and helper functions.
Todo:
* Get the CMod gate working again using the new Gate API.
* Fix everything.
* Update docstrings and reformat.
* Remove print statements. We may want to think about a better API for this.
"""
from __future__ import print_function, division
import math
import random
from sympy import Mul, S
from sympy import log, sqrt
from sympy.core.numbers import igcd
from sympy.core.compatibility import range
from sympy.ntheory import continued_fraction_periodic as continued_fraction
from sympy.utilities.iterables import variations
from sympy.physics.quantum.gate import Gate
from sympy.physics.quantum.qubit import Qubit, measure_partial_oneshot
from sympy.physics.quantum.qapply import qapply
from sympy.physics.quantum.qft import QFT
from sympy.physics.quantum.qexpr import QuantumError
class OrderFindingException(QuantumError):
pass
class CMod(Gate):
"""A controlled mod gate.
This is a black box controlled Mod function for use by Shor's algorithm.
TODO implement a decompose property that returns how to do this in terms
of elementary gates
"""
@classmethod
def _eval_args(cls, args):
# t = args[0]
# a = args[1]
# N = args[2]
raise NotImplementedError('The CMod gate has not been completed.')
@property
def t(self):
"""Size of 1/2 input register. First 1/2 holds output."""
return self.label[0]
@property
def a(self):
"""Base of the controlled mod function."""
return self.label[1]
@property
def N(self):
"""N is the type of modular arithmetic we are doing."""
return self.label[2]
def _apply_operator_Qubit(self, qubits, **options):
"""
This directly calculates the controlled mod of the second half of
the register and puts the result in the second half.
This will look pretty when we get Tensor Symbolically working
"""
n = 1
k = 0
# Determine the value stored in high memory.
for i in range(self.t):
k = k + n*qubits[self.t + i]
n = n*2
# The value to go in low memory will be out.
out = int(self.a**k % self.N)
# Create array for the new qubit-ket which will have high memory unaffected
outarray = list(qubits.args[0][:self.t])
# Place out in low memory
for i in reversed(range(self.t)):
outarray.append((out >> i) & 1)
return Qubit(*outarray)
def shor(N):
"""This function implements Shor's factoring algorithm on the Integer N
The algorithm starts by picking a random number (a) and seeing if it is
coprime with N. If it isn't, then the gcd of the two numbers is a factor
and we are done. Otherwise, it begins the period_finding subroutine which
finds the period of a in modulo N arithmetic. This period, if even, can
be used to calculate factors by taking a**(r/2)-1 and a**(r/2)+1.
These values are returned.
"""
a = random.randrange(N - 2) + 2
if igcd(N, a) != 1:
print("got lucky with rand")
return igcd(N, a)
print("a= ", a)
print("N= ", N)
r = period_find(a, N)
print("r= ", r)
if r % 2 == 1:
print("r is not even, begin again")
return shor(N)
answer = (igcd(a**(r//2) - 1, N), igcd(a**(r//2) + 1, N))
return answer
def getr(x, y, N):
fraction = continued_fraction(x, y)
# Now convert into r
total = ratioize(fraction, N)
return total
def ratioize(list, N):
if list[0] > N:
return S.Zero
if len(list) == 1:
return list[0]
return list[0] + ratioize(list[1:], N)
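# Illustrative example (not from the original source): for x=5, y=8,
# continued_fraction(5, 8) yields the quotients [0, 1, 1, 1, 2], and
# ratioize simply sums them (5 here), truncating at the first term
# that exceeds N.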
def period_find(a, N):
Finds the period of a in modulo N arithmetic.
This is the quantum part of Shor's algorithm. It takes two registers,
puts first in superposition of states with Hadamards so: ``|k>|0>``
with k being all possible choices. It then does a controlled mod and
a QFT to determine the order of a.
"""
epsilon = .5
# picks t such that accuracy is maintained within epsilon
t = int(2*math.ceil(log(N, 2)))
# make the first half of register be 0's |000...000>
start = [0 for x in range(t)]
# Put second half into superposition of states so we have |1>x|0> + |2>x|0> + ... + |k>x|0> + ... + |2**n-1>x|0>
factor = 1/sqrt(2**t)
qubits = 0
for arr in variations(range(2), t, repetition=True):
qbitArray = arr + start
qubits = qubits + Qubit(*qbitArray)
circuit = (factor*qubits).expand()
#Controlled second half of register so that we have:
# |1>x|a**1 % N> + |2>x|a**2 % N> + ... + |k>x|a**k % N> + ... + |2**n-1=k>x|a**k % N>
circuit = CMod(t, a, N)*circuit
#will measure first half of register giving one of the a**k%N's
circuit = qapply(circuit)
print("controlled Mod'd")
for i in range(t):
circuit = measure_partial_oneshot(circuit, i)
print("measured 1")
#Now apply Inverse Quantum Fourier Transform on the second half of the register
circuit = qapply(QFT(t, t*2).decompose()*circuit, floatingPoint=True)
print("QFT'd")
for i in range(t):
circuit = measure_partial_oneshot(circuit, i + t)
print(circuit)
if isinstance(circuit, Qubit):
register = circuit
elif isinstance(circuit, Mul):
register = circuit.args[-1]
else:
register = circuit.args[-1].args[-1]
print(register)
n = 1
answer = 0
for i in range(len(register)//2):
answer += n*register[i + t]
n = n << 1
if answer == 0:
raise OrderFindingException(
"Order finder returned 0. Happens with chance %f" % epsilon)
#turn answer into r using continued fractions
g = getr(answer, 2**t, N)
print(g)
return g
| bsd-3-clause |
belokop/indico_bare | indico/modules/events/util.py | 1 | 22021 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import random
import warnings
from collections import defaultdict
from contextlib import contextmanager
from copy import deepcopy
from mimetypes import guess_extension
from os import path
from tempfile import NamedTemporaryFile
from flask import session, request, g, current_app
from sqlalchemy import inspect
from sqlalchemy.orm import load_only, noload, joinedload
from indico.core import signals
from indico.core.config import Config
from indico.core.db.sqlalchemy.principals import PrincipalType
from indico.core.errors import UserValueError
from indico.core.notifications import send_email, make_email
from indico.modules.api import settings as api_settings
from indico.modules.auth.util import url_for_register
from indico.modules.events import Event
from indico.modules.events.contributions.models.contributions import Contribution
from indico.modules.events.contributions.models.subcontributions import SubContribution
from indico.modules.events.models.persons import EventPerson
from indico.modules.events.models.principals import EventPrincipal
from indico.modules.events.models.report_links import ReportLink
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.timetable.models.breaks import Break
from indico.modules.events.timetable.models.entries import TimetableEntry
from indico.util.i18n import _
from indico.web.forms.colors import get_colors
from indico.web.flask.templating import get_template_module
from indico.web.flask.util import url_for
def preload_events(ids, lightweight=True, persons=False):
"""Preload events so they are in SA's identity cache
This is useful for legacy pages where we have to show large
numbers of events without being able to query them from the
db cleanly.
:param ids: An iterable of IDs or Conference objects
:param lightweight: Only load dates and title
:param persons: Also load the person links
"""
cache = g.setdefault('_event_cache', {})
ids = {int(getattr(id_, 'id', id_)) for id_ in ids} - cache.viewkeys()
query = Event.find(Event.id.in_(ids))
if lightweight:
query = query.options(load_only('id', 'title', 'start_dt', 'end_dt', 'timezone'))
if persons:
query = query.options(joinedload('person_links'))
cache.update((e.id, e) for e in query)
def get_object_from_args(args=None):
"""Retrieves an event object from request arguments.
This utility is meant to be used in cases where the same controller
can deal with objects attached to various parts of an event which
use different URLs to indicate which object to use.
:param args: The request arguments. If unspecified,
``request.view_args`` is used.
:return: An ``(object_type, event, object)`` tuple. The event is
always the :class:`Event` associated with the object.
The object may be an `Event`, `Session`, `Contribution`
or `SubContribution`. If the object does not exist,
``(object_type, None, None)`` is returned.
"""
if args is None:
args = request.view_args
object_type = args['object_type']
event = Event.find_first(id=args['confId'], is_deleted=False)
if event is None:
obj = None
elif object_type == 'event':
obj = event
elif object_type == 'session':
obj = Session.query.with_parent(event).filter_by(id=args['session_id']).first()
elif object_type == 'contribution':
obj = Contribution.query.with_parent(event).filter_by(id=args['contrib_id']).first()
elif object_type == 'subcontribution':
obj = SubContribution.find(SubContribution.id == args['subcontrib_id'], ~SubContribution.is_deleted,
SubContribution.contribution.has(event_new=event, id=args['contrib_id'],
is_deleted=False)).first()
else:
raise ValueError('Unexpected object type: {}'.format(object_type))
if obj is not None:
return object_type, event, obj
else:
return object_type, None, None
def get_events_managed_by(user, from_dt=None, to_dt=None):
"""Gets the IDs of events where the user has management privs.
:param user: A `User`
:param from_dt: The earliest event start time to look for
:param to_dt: The latest event start time to look for
:return: A set of event ids
"""
query = (user.in_event_acls
.join(Event)
.options(noload('user'), noload('local_group'), load_only('event_id'))
.filter(~Event.is_deleted, Event.starts_between(from_dt, to_dt))
.filter(EventPrincipal.has_management_role('ANY')))
return {principal.event_id for principal in query}
def get_events_created_by(user, from_dt=None, to_dt=None):
"""Gets the IDs of events created by the user
:param user: A `User`
:param from_dt: The earliest event start time to look for
:param to_dt: The latest event start time to look for
:return: A set of event ids
"""
query = user.created_events.filter(~Event.is_deleted, Event.starts_between(from_dt, to_dt))
return {event.id for event in query}
def get_events_with_linked_event_persons(user, from_dt=None, to_dt=None):
"""Returns a list of all events for which the user is an EventPerson
:param user: A `User`
:param from_dt: The earliest event start time to look for
:param to_dt: The latest event start time to look for
"""
query = (user.event_persons
.options(load_only('event_id'))
.options(noload('*'))
.join(Event, Event.id == EventPerson.event_id)
.filter(EventPerson.event_links.any())
.filter(~Event.is_deleted, Event.starts_between(from_dt, to_dt)))
return {ep.event_id for ep in query}
def get_random_color(event):
breaks = Break.query.filter(Break.timetable_entry.has(event_new=event))
used_colors = {s.colors for s in event.sessions} | {b.colors for b in breaks}
unused_colors = set(get_colors()) - used_colors
return random.choice(tuple(unused_colors) or get_colors())
def notify_pending(acl_entry):
"""Sends a notification to a user with an email-based ACL entry
:param acl_entry: An email-based EventPrincipal
"""
assert acl_entry.type == PrincipalType.email
if acl_entry.full_access:
template_name = 'events/emails/pending_manager.txt'
endpoint = 'event_mgmt.conferenceModification-managementAccess'
elif acl_entry.has_management_role('submit', explicit=True):
template_name = 'events/emails/pending_submitter.txt'
endpoint = 'event.conferenceDisplay'
else:
return
event = acl_entry.event_new
email = acl_entry.principal.email
template = get_template_module(template_name, event=event, email=email,
url=url_for_register(url_for(endpoint, event), email=email))
send_email(make_email(to_list={email}, template=template), event.as_legacy, module='Protection')
def serialize_event_person(person):
"""Serialize EventPerson to JSON-like object"""
return {'_type': 'EventPerson',
'id': person.id,
'email': person.email,
'name': person.full_name,
'firstName': person.first_name,
'familyName': person.last_name,
'title': person.title,
'affiliation': person.affiliation,
'phone': person.phone,
'address': person.address,
'user_id': person.user_id}
def serialize_person_link(person_link):
"""Serialize PersonLink to JSON-like object"""
return {'_type': 'PersonLink',
'id': person_link.person.id,
'personId': person_link.person.id,
'email': person_link.person.email,
'name': person_link.full_name,
'fullName': person_link.full_name,
'firstName': person_link.first_name,
'familyName': person_link.last_name,
'title': person_link.title,
'affiliation': person_link.affiliation,
'phone': person_link.phone,
'address': person_link.address}
def update_object_principals(obj, new_principals, read_access=False, full_access=False, role=None):
"""Updates an object's ACL with a new list of principals
Exactly one argument out of `read_access`, `full_access` and `role` must be specified.
:param obj: The object to update. Must have ``acl_entries``
:param new_principals: The set containing the new principals
:param read_access: Whether the read access ACL should be updated
:param full_access: Whether the full access ACL should be updated
:param role: The role ACL that should be updated
"""
if read_access + full_access + bool(role) != 1:
raise ValueError('Only one ACL property can be specified')
if full_access:
existing = {acl.principal for acl in obj.acl_entries if acl.full_access}
grant = {'full_access': True}
revoke = {'full_access': False}
elif read_access:
existing = {acl.principal for acl in obj.acl_entries if acl.read_access}
grant = {'read_access': True}
revoke = {'read_access': False}
elif role:
existing = {acl.principal for acl in obj.acl_entries if acl.has_management_role(role, explicit=True)}
grant = {'add_roles': {role}}
revoke = {'del_roles': {role}}
new_principals = set(new_principals)
for principal in new_principals - existing:
obj.update_principal(principal, **grant)
for principal in existing - new_principals:
obj.update_principal(principal, **revoke)
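# A minimal usage sketch (hypothetical principals, for illustration only):
#
#     update_object_principals(event, {user_a, group_b}, role='submit')
#
# grants the 'submit' role to user_a and group_b and revokes it from any
# principal that previously held it explicitly.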
class ReporterBase(object):
"""Base class for classes performing actions on reports.
:param event: The associated `Event`
:param entry_parent: The parent of the entries of the report. If it's None,
the parent is assumed to be the event itself.
"""
#: The endpoint of the report management page
endpoint = None
#: Unique report identifier
report_link_type = None
#: The default report configuration dictionary
default_report_config = None
def __init__(self, event, entry_parent=None):
self.report_event = event
self.entry_parent = entry_parent or event
self.filterable_items = None
self.static_link_used = 'config' in request.args
def _get_config_session_key(self):
"""Compose the unique configuration ID.
This ID will be used as a key to set the report's configuration to the
session.
"""
return '{}_config_{}'.format(self.report_link_type, self.entry_parent.id)
def _get_config(self):
"""Load the report's configuration from the DB and return it."""
session_key = self._get_config_session_key()
if self.static_link_used:
uuid = request.args['config']
configuration = ReportLink.load(self.report_event, self.report_link_type, uuid)
if configuration and configuration['entry_parent_id'] == self.entry_parent.id:
session[session_key] = configuration['data']
return session.get(session_key, self.default_report_config)
def build_query(self):
"""Return the query of the report's entries.
The query should not take into account the user's filtering
configuration, for example::
return Contribution.query.with_parent(self.report_event)
"""
raise NotImplementedError
def filter_report_entries(self):
"""Apply user's filters to query and return it."""
raise NotImplementedError
def get_filters_from_request(self):
"""Get the new filters after the filter form is submitted."""
filters = deepcopy(self.default_report_config['filters'])
for item_id, item in self.filterable_items.iteritems():
if item.get('filter_choices'):
options = [x if x != 'None' else None for x in request.form.getlist('field_{}'.format(item_id))]
if options:
filters['items'][item_id] = options
return filters
def get_report_url(self, uuid=None, external=False):
"""Return the URL of the report management page."""
return url_for(self.endpoint, self.entry_parent, config=uuid, _external=external)
def generate_static_url(self):
"""Return a URL with a uuid referring to the report's configuration."""
session_key = self._get_config_session_key()
configuration = {
'entry_parent_id': self.entry_parent.id,
'data': session.get(session_key)
}
if configuration['data']:
link = ReportLink.create(self.report_event, self.report_link_type, configuration)
return self.get_report_url(uuid=link.uuid, external=True)
else:
return self.get_report_url(external=True)
def store_filters(self):
"""Load the filters from the request and store them in the session."""
filters = self.get_filters_from_request()
session_key = self._get_config_session_key()
self.report_config = session.setdefault(session_key, {})
self.report_config['filters'] = filters
session.modified = True
def get_base_ical_parameters(user, event, detail, session_=None):
"""Returns a dict of all parameters expected by iCal template"""
from indico.web.http_api.util import generate_public_auth_request
api_mode = api_settings.get('security_mode')
persistent_allowed = api_settings.get('allow_persistent')
api_key = user.api_key if user else None
persistent_user_enabled = api_key.is_persistent_allowed if api_key else None
tpl = get_template_module('api/_messages.html')
persistent_agreement = tpl.get_ical_persistent_msg()
if session_:
path = '/export/event/{0}/session/{1}.ics'.format(event.id, session_.id)
else:
path = '/export/event/{0}.ics'.format(event.id)
top_urls = generate_public_auth_request(api_key, path)
urls = generate_public_auth_request(api_key, path, {'detail': detail})
request_urls = {
'publicRequestURL': top_urls['publicRequestURL'],
'authRequestURL': top_urls['authRequestURL'],
'publicRequestDetailedURL': urls['publicRequestURL'],
'authRequestDetailedURL': urls['authRequestURL']
}
return {'api_mode': api_mode, 'api_key': api_key, 'persistent_allowed': persistent_allowed,
'persistent_user_enabled': persistent_user_enabled, 'api_active': api_key is not None,
'api_key_user_agreement': tpl.get_ical_api_key_msg(), 'api_persistent_user_agreement': persistent_agreement,
'user_logged': user is not None, 'request_urls': request_urls}
def create_event_logo_tmp_file(event):
"""Creates a temporary file with the event's logo"""
logo_meta = event.logo_metadata
logo_extension = guess_extension(logo_meta['content_type']) or path.splitext(logo_meta['filename'])[1]
temp_file = NamedTemporaryFile(delete=False, dir=Config.getInstance().getTempDir(), suffix=logo_extension)
temp_file.write(event.logo)
temp_file.flush()
return temp_file
@contextmanager
def track_time_changes(auto_extend=False, user=None):
"""Track time changes of event objects.
This provides a list of changes while the context manager was
active and also triggers `times_changed` signals.
If the code running inside the ``with`` block of this context
manager raises an exception, no signals will be triggered.
:param auto_extend: Whether entry parents will get their boundaries
automatically extended or not. Passing ``'start'`` will
extend only start datetime, ``'end'`` to extend only
end datetime.
:param user: The `User` that will trigger time changes.
"""
if auto_extend:
assert user is not None
if 'old_times' in g:
raise RuntimeError('time change tracking may not be nested')
g.old_times = defaultdict(dict)
changes = defaultdict(dict)
try:
yield changes
except:
del g.old_times
raise
else:
if auto_extend:
by_start = auto_extend in (True, 'start')
by_end = auto_extend in (True, 'end')
initial_changes = set(g.old_times)
# g.old_times changes during iteration
for obj in list(g.old_times):
if not isinstance(obj, Event):
obj.extend_parent(by_start=by_start, by_end=by_end)
cascade_changes = set(g.old_times) - initial_changes
for obj in cascade_changes:
if isinstance(obj, Event):
if not obj.can_manage(user):
# TODO: raise Forbidden exceptions after adding protection check in the UI
raise UserValueError(_("Your action requires modification of event boundaries, but you are "
"not authorized to manage the event."))
elif not obj.object.can_manage(user):
# TODO: raise Forbidden exceptions after adding protection check in the UI
raise UserValueError(_("Your action requires modification of session block boundaries, but you are "
"not authorized to manage the session block."))
old_times = g.pop('old_times')
for obj, info in old_times.iteritems():
if isinstance(obj, TimetableEntry):
obj = obj.object
if obj.start_dt != info['start_dt']:
changes[obj]['start_dt'] = (info['start_dt'], obj.start_dt)
if obj.duration != info['duration']:
changes[obj]['duration'] = (info['duration'], obj.duration)
if obj.end_dt != info['end_dt']:
changes[obj]['end_dt'] = (info['end_dt'], obj.end_dt)
for obj, obj_changes in changes.iteritems():
entry = None if isinstance(obj, Event) else obj.timetable_entry
signals.event.times_changed.send(type(obj), entry=entry, obj=obj, changes=obj_changes)
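# A minimal usage sketch (`entry.move` is a hypothetical mutation, for
# illustration only):
#
#     with track_time_changes(auto_extend=True, user=session.user) as changes:
#         entry.move(new_start_dt)
#
# After the block, `changes` maps each affected object to a dict such as
# {'start_dt': (old, new), 'duration': (old, new), 'end_dt': (old, new)}.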
def register_time_change(entry):
"""Register a time-related change for a timetable entry
This is an internal helper function used in the models to record
changes of the start time or duration. The changes are exposed
through the `track_time_changes` contextmanager function.
"""
# if it's a new object it's not a change so we just ignore it
if not inspect(entry).persistent:
return
try:
old_times = g.old_times
except AttributeError:
msg = 'Time change of {} was not tracked'.format(entry)
if current_app.config.get('REPL'):
warnings.warn(msg + ' (exception converted to a warning since you are using the REPL)', stacklevel=2)
return
elif current_app.config['TESTING']:
warnings.warn(msg + ' (exception converted to a warning during tests)', stacklevel=2)
return
else:
raise RuntimeError(msg)
for field in ('start_dt', 'duration', 'end_dt'):
if old_times[entry].get(field) is None:
old_times[entry][field] = getattr(entry, field)
def register_event_time_change(event):
"""Register a time-related change for an event
This is an internal helper function used in the model to record
changes of the start time or end time. The changes are exposed
through the `track_time_changes` contextmanager function.
"""
# if it's a new object it's not a change so we just ignore it
if not inspect(event).persistent:
return
try:
old_times = g.old_times
except AttributeError:
msg = 'Time change of {} was not tracked'.format(event)
if current_app.config.get('REPL'):
warnings.warn(msg + ' (exception converted to a warning since you are using the REPL)', stacklevel=2)
return
elif current_app.config['TESTING']:
warnings.warn(msg + ' (exception converted to a warning during tests)', stacklevel=2)
return
else:
raise RuntimeError(msg)
for field in ('start_dt', 'duration', 'end_dt'):
if old_times[event].get(field) is None:
old_times[event][field] = getattr(event, field)
def serialize_event_for_ical(event, detail_level):
from indico.modules.events.contributions.util import serialize_contribution_for_ical
fossil = 'conferenceMetadataWithContribs' if detail_level == 'contributions' else 'conferenceMetadata'
data = {'id': event.id, 'title': event.title, 'description': event.description, 'startDate': event.start_dt,
'endDate': event.end_dt, 'url': url_for('event.conferenceDisplay', event, _external=True),
'location': event.venue_name, 'roomFullname': event.room_name, 'speakers': [], '_fossil': fossil,
'contributions': []}
if detail_level == 'contributions':
data['contributions'] = [serialize_contribution_for_ical(c) for c in event.contributions]
return data
| gpl-3.0 |
ActiveState/code | recipes/Python/578776_A_Simple_Timing_Function/recipe-578776.py | 1 | 1205 | ''' Simple Timing Function.
This function prints out a message with the elapsed time from the
previous call. It works with most Python 2.x platforms. The function
uses a simple trick to store a persistent variable (clock) without
using a global variable.
'''
import time
def dur( op=None, clock=[time.time()] ):
if op != None:
duration = time.time() - clock[0]
print '%s finished. Duration %.6f seconds.' % (op, duration)
clock[0] = time.time()
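# The trick works because default arguments are evaluated once, at
# function definition time, so every call to dur() shares the same
# one-element list and the stored timestamp survives between calls.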
# Example
if __name__ == '__main__':
import array
dur() # Initialise the timing clock
opt1 = array.array('H')
for i in range(1000):
for n in range(1000):
opt1.append(n)
dur('Array from append')
opt2 = array.array('H')
seq = range(1000)
for i in range(1000):
opt2.extend(seq)
dur('Array from list extend')
opt3 = array.array('H')
seq = array.array('H', range(1000))
for i in range(1000):
opt3.extend(seq)
dur('Array from array extend')
# Output:
# Array from append finished. Duration 0.175320 seconds.
# Array from list extend finished. Duration 0.068974 seconds.
# Array from array extend finished. Duration 0.001394 seconds.
| mit |
pschmitt/home-assistant | tests/components/media_player/test_reproduce_state.py | 6 | 6353 | """The tests for reproduction of state."""
import pytest
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_ENQUEUE,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_SOUND_MODE,
DOMAIN,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOUND_MODE,
SERVICE_SELECT_SOURCE,
)
from homeassistant.components.media_player.reproduce_state import async_reproduce_states
from homeassistant.const import (
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import Context, State
from tests.common import async_mock_service
ENTITY_1 = "media_player.test1"
ENTITY_2 = "media_player.test2"
@pytest.mark.parametrize(
"service,state",
[
(SERVICE_TURN_ON, STATE_ON),
(SERVICE_TURN_OFF, STATE_OFF),
(SERVICE_MEDIA_PLAY, STATE_PLAYING),
(SERVICE_MEDIA_STOP, STATE_IDLE),
(SERVICE_MEDIA_PAUSE, STATE_PAUSED),
],
)
async def test_state(hass, service, state):
"""Test that we can turn a state into a service call."""
calls_1 = async_mock_service(hass, DOMAIN, service)
await async_reproduce_states(hass, [State(ENTITY_1, state)])
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1}
async def test_turn_on_with_mode(hass):
"""Test that state with additional attributes call multiple services."""
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_SELECT_SOUND_MODE)
await async_reproduce_states(
hass, [State(ENTITY_1, "on", {ATTR_SOUND_MODE: "dummy"})]
)
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1}
assert len(calls_2) == 1
assert calls_2[0].data == {"entity_id": ENTITY_1, ATTR_SOUND_MODE: "dummy"}
async def test_multiple_same_state(hass):
"""Test that multiple states with same state gets calls."""
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await async_reproduce_states(hass, [State(ENTITY_1, "on"), State(ENTITY_2, "on")])
await hass.async_block_till_done()
assert len(calls_1) == 2
# order is not guaranteed
assert any(call.data == {"entity_id": "media_player.test1"} for call in calls_1)
assert any(call.data == {"entity_id": "media_player.test2"} for call in calls_1)
async def test_multiple_different_state(hass):
"""Test that multiple states with different state gets calls."""
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
await async_reproduce_states(hass, [State(ENTITY_1, "on"), State(ENTITY_2, "off")])
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": "media_player.test1"}
assert len(calls_2) == 1
assert calls_2[0].data == {"entity_id": "media_player.test2"}
async def test_state_with_context(hass):
"""Test that context is forwarded."""
calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
context = Context()
await async_reproduce_states(hass, [State(ENTITY_1, "on")], context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {"entity_id": ENTITY_1}
assert calls[0].context == context
async def test_attribute_no_state(hass):
"""Test that no state service call is made with none state."""
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
calls_2 = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
calls_3 = async_mock_service(hass, DOMAIN, SERVICE_SELECT_SOUND_MODE)
value = "dummy"
await async_reproduce_states(
hass, [State(ENTITY_1, None, {ATTR_SOUND_MODE: value})]
)
await hass.async_block_till_done()
assert len(calls_1) == 0
assert len(calls_2) == 0
assert len(calls_3) == 1
assert calls_3[0].data == {"entity_id": ENTITY_1, ATTR_SOUND_MODE: value}
@pytest.mark.parametrize(
"service,attribute",
[
(SERVICE_VOLUME_SET, ATTR_MEDIA_VOLUME_LEVEL),
(SERVICE_VOLUME_MUTE, ATTR_MEDIA_VOLUME_MUTED),
(SERVICE_MEDIA_SEEK, ATTR_MEDIA_SEEK_POSITION),
(SERVICE_SELECT_SOURCE, ATTR_INPUT_SOURCE),
(SERVICE_SELECT_SOUND_MODE, ATTR_SOUND_MODE),
],
)
async def test_attribute(hass, service, attribute):
"""Test that service call is made for each attribute."""
calls_1 = async_mock_service(hass, DOMAIN, service)
value = "dummy"
await async_reproduce_states(hass, [State(ENTITY_1, None, {attribute: value})])
await hass.async_block_till_done()
assert len(calls_1) == 1
assert calls_1[0].data == {"entity_id": ENTITY_1, attribute: value}
async def test_play_media(hass):
"""Test that no state service call is made with none state."""
calls_1 = async_mock_service(hass, DOMAIN, SERVICE_PLAY_MEDIA)
value_1 = "dummy_1"
value_2 = "dummy_2"
value_3 = "dummy_3"
await async_reproduce_states(
hass,
[
State(
ENTITY_1,
None,
{ATTR_MEDIA_CONTENT_TYPE: value_1, ATTR_MEDIA_CONTENT_ID: value_2},
)
],
)
await async_reproduce_states(
hass,
[
State(
ENTITY_1,
None,
{
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
ATTR_MEDIA_ENQUEUE: value_3,
},
)
],
)
await hass.async_block_till_done()
assert len(calls_1) == 2
assert calls_1[0].data == {
"entity_id": ENTITY_1,
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
}
assert calls_1[1].data == {
"entity_id": ENTITY_1,
ATTR_MEDIA_CONTENT_TYPE: value_1,
ATTR_MEDIA_CONTENT_ID: value_2,
ATTR_MEDIA_ENQUEUE: value_3,
}
| apache-2.0 |
dex4er/django | tests/context_processors/tests.py | 63 | 1328 | """
Tests for Django's bundled context processors.
"""
from django.test import TestCase
class RequestContextProcessorTests(TestCase):
"""
Tests for the ``django.core.context_processors.request`` processor.
"""
urls = 'context_processors.urls'
def test_request_attributes(self):
"""
Test that the request object is available in the template and that its
attributes can't be overridden by GET and POST parameters (#3828).
"""
url = '/request_attrs/'
# We should have the request object in the template.
response = self.client.get(url)
self.assertContains(response, 'Have request')
# Test is_secure.
response = self.client.get(url)
self.assertContains(response, 'Not secure')
response = self.client.get(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
response = self.client.post(url, {'is_secure': 'blah'})
self.assertContains(response, 'Not secure')
# Test path.
response = self.client.get(url)
self.assertContains(response, url)
response = self.client.get(url, {'path': '/blah/'})
self.assertContains(response, url)
response = self.client.post(url, {'path': '/blah/'})
self.assertContains(response, url)
| bsd-3-clause |
Lyleo/nupic | nupic/frameworks/opf/previousvaluemodel.py | 15 | 5794 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Module containing the trivial predictor OPF model implementation. """
import itertools
from nupic.data import fieldmeta
from nupic.frameworks.opf import model
from nupic.frameworks.opf import opfutils
from opfutils import InferenceType
class PreviousValueModel(model.Model):
"""Previous value model."""
def __init__(self, inferenceType=InferenceType.TemporalNextStep,
fieldNames=[],
fieldTypes=[],
predictedField=None,
predictionSteps=[]):
""" PVM constructor.
inferenceType: An opfutils.InferenceType value that specifies what type of
inference (i.e. TemporalNextStep, TemporalMultiStep, etc.)
fieldNames: a list of field names
fieldTypes: a list of the types for the fields mentioned in fieldNames
predictedField: the field from fieldNames which is to be predicted
predictionSteps: a list of steps for which a prediction is made. This is
only needed in the case of multi step predictions
"""
super(PreviousValueModel, self).__init__(inferenceType)
self._logger = opfutils.initLogger(self)
self._predictedField = predictedField
self._fieldNames = fieldNames
self._fieldTypes = fieldTypes
# only implement multistep and temporalnextstep
if inferenceType == InferenceType.TemporalNextStep:
self._predictionSteps = [1]
elif inferenceType == InferenceType.TemporalMultiStep:
self._predictionSteps = predictionSteps
else:
assert False, "Previous Value Model only works for next step or multi-step."
def run(self, inputRecord):
"""Run one iteration of this model.
Args:
inputRecord: A record object formatted according to
nupic.data.FileSource.getNext() result format.
Returns:
A ModelResult named tuple (see opfutils.py). The contents of
ModelResult.inferences depends on the specific inference type of this
model, which can be queried by getInferenceType().
TODO: Implement getInferenceType()?
"""
# set the results. note that there is no translation to sensorInput
results = super(PreviousValueModel, self).run(inputRecord)
results.sensorInput = opfutils.SensorInput(dataRow= \
[inputRecord[fn] for fn in self._fieldNames])
# select the current value for the prediction with probablity of 1
results.inferences = { opfutils.InferenceElement.multiStepBestPredictions : \
dict((steps, inputRecord[self._predictedField]) \
for steps in self._predictionSteps),
opfutils.InferenceElement.multiStepPredictions : \
dict((steps, {inputRecord[self._predictedField] : 1}) \
for steps in self._predictionSteps)
}
# set the next step prediction if step of 1 is selected
if 1 in self._predictionSteps:
results.inferences[opfutils.InferenceElement.prediction] = \
inputRecord[self._predictedField]
return results
def finishLearning(self):
"""Places the model in a permanent "finished learning" mode.
The PVM does not learn, so this function has no effect.
"""
pass
def setFieldStatistics(self,fieldStats):
"""
This method is used for the data source to communicate to the
model any statistics that it knows about the fields
Since the PVM has no use for this information, this is a no-op
"""
pass
def getFieldInfo(self):
"""Returns the metadata specifying the format of the model's output.
The result may be different than the list of
nupic.data.fieldmeta.FieldMetaInfo objects supplied at initialization due
to the transcoding of some input fields into meta- fields, such as
datetime -> dayOfWeek, timeOfDay, etc.
"""
return tuple(fieldmeta.FieldMetaInfo(*args) for args in
itertools.izip(
self._fieldNames, self._fieldTypes,
itertools.repeat(fieldmeta.FieldMetaSpecial.none)))
def getRuntimeStats(self):
"""Get the runtime statistics specific to the model.
I.E. activeCellOverlapAvg
Returns:
A dict mapping statistic names to values.
"""
# TODO: Add debugging stats.
# > what sort of stats are we supposed to return?
return dict()
def _getLogger(self):
"""Get the logger created by this subclass.
Returns:
A logging.Logger object. Should not be None.
"""
return self._logger
def resetSequenceStates(self):
"""Called to indicate the start of a new sequence.
The next call to run should not perform learning.
"""
self._reset = True
def __getstate__(self):
del self._logger
return self.__dict__
def __setstate__(self):
self._logger = opfutils.initLogger(self)
| gpl-3.0 |
kooditiimi/linkedevents | events/attic/api.py | 3 | 1863 | class OrganizationOrPersonRelatedField(serializers.RelatedField):
def __init__(self, hide_ld_context=False):
self.hide_ld_context = hide_ld_context
super(OrganizationOrPersonRelatedField, self).__init__(
queryset=Organization.objects, read_only=False)
def to_native(self, value):
if isinstance(value, Organization):
serializer = OrganizationSerializer(
value, hide_ld_context=self.hide_ld_context)
elif isinstance(value, Person):
serializer = PersonSerializer(value,
hide_ld_context=self.hide_ld_context)
else:
raise Exception('Unexpected type of related object')
return serializer.data
def from_native(self, data):
"""
TODO: fix, this is just a skeleton. We should save and fetch the right
content_type (and content_id) on the parent.
"""
if data["@type"] == 'Organization':
pass # Organization is the default queryset
elif data["@type"] == 'Person':
self.queryset = Person.objects
else:
raise ValidationError('Unexpected type of related object')
super(OrganizationOrPersonRelatedField, self).from_native(data)
class PersonSerializer(LinkedEventsSerializer):
# Fall back to URL references to get around the circular serializer problem
creator = JSONLDHyperLinkedRelatedField(view_name='person-detail')
editor = JSONLDHyperLinkedRelatedField(view_name='person-detail')
view_name = 'person-detail'
class Meta:
model = Person
class OrganizationSerializer(LinkedEventsSerializer):
creator = PersonSerializer(hide_ld_context=True)
editor = PersonSerializer(hide_ld_context=True)
view_name = 'organization-detail'
class Meta:
model = Organization
| bsd-3-clause |
mrquim/repository.mrquim | repo/script.module.youtube.dl/lib/youtube_dl/extractor/minoto.py | 66 | 2177 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import int_or_none
class MinotoIE(InfoExtractor):
_VALID_URL = r'(?:minoto:|https?://(?:play|iframe|embed)\.minoto-video\.com/(?P<player_id>[0-9]+)/)(?P<id>[a-zA-Z0-9]+)'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
player_id = mobj.group('player_id') or '1'
video_id = mobj.group('id')
video_data = self._download_json('http://play.minoto-video.com/%s/%s.js' % (player_id, video_id), video_id)
video_metadata = video_data['video-metadata']
formats = []
for fmt in video_data['video-files']:
fmt_url = fmt.get('url')
if not fmt_url:
continue
container = fmt.get('container')
if container == 'hls':
formats.extend(self._extract_m3u8_formats(
fmt_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
fmt_profile = fmt.get('profile') or {}
f = {
'format_id': fmt_profile.get('name-short'),
'format_note': fmt_profile.get('name'),
'url': fmt_url,
'container': container,
'tbr': int_or_none(fmt.get('bitrate')),
'filesize': int_or_none(fmt.get('filesize')),
'width': int_or_none(fmt.get('width')),
'height': int_or_none(fmt.get('height')),
}
codecs = fmt.get('codecs')
if codecs:
codecs = codecs.split(',')
if len(codecs) == 2:
f.update({
'vcodec': codecs[0],
'acodec': codecs[1],
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': video_metadata['title'],
'description': video_metadata.get('description'),
'thumbnail': video_metadata.get('video-poster', {}).get('url'),
'formats': formats,
}
| gpl-2.0 |
Enether/algos | shell_sort/shell_sort.py | 7 | 1158 | def shell_sort(array):
"""
Sorts the given array of integers in place using the Shell Sort algorithm
with Knuth's gap sequence
Time Complexity : O(n^(3/2)) worst case with Knuth's gaps, where n = len(array)
Space Complexity : O(1) auxiliary (the sort is done in place)
:param array: A list of integers.
:return: the same array, sorted in ascending order
"""
# calculate the gap using Knuth's formula
gap = 1
while gap < len(array) // 3:
gap = (gap * 3) + 1
while gap > 0:
# using this gap, exchange elements while you can
for idx in range(gap, len(array)):
val_to_insert = array[idx]
candidate_idx = idx
# shift all bigger elements to the right, creating a hole
while candidate_idx > gap - 1 and array[candidate_idx - gap] > val_to_insert:
array[candidate_idx] = array[candidate_idx - gap]
candidate_idx -= gap
# insert our element at the hole
array[candidate_idx] = val_to_insert
# shrink the gap by inverting Knuth's formula (h = 3h + 1)
gap = (gap - 1) // 3
return array
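# A minimal sketch (not part of the original module; the helper name is
# hypothetical) of the gap schedule the loops above walk through. For n = 9,
# as in the sample array in main() below, it yields 4 and then 1.
def _knuth_gaps(n):
    """Yield the Knuth gaps (h = 3h + 1) used for an array of length n, largest first."""
    gap = 1
    while gap < n // 3:
        gap = (gap * 3) + 1
    while gap > 0:
        yield gap
        gap = (gap - 1) // 3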
def main():
sample_arr = [1, -312, 4, 12, 3, 17, 2542, 20, 18]
print(shell_sort(sample_arr))
if __name__ == '__main__':
main()
| mit |
bowlofstew/zulip | tools/deprecated/finbot/money.py | 114 | 7730 | #!/usr/bin/python
import datetime
import monthdelta
def parse_date(date_str):
return datetime.datetime.strptime(date_str, "%Y-%m-%d")
def unparse_date(date_obj):
return date_obj.strftime("%Y-%m-%d")
class Company(object):
def __init__(self, name):
self.name = name
self.flows = []
self.verbose = False
def __str__(self):
return self.name
def add_flow(self, flow):
self.flows.append(flow)
def cash_at_date_internal(self, start_date, end_date):
cash = 0
for flow in self.flows:
delta = flow.cashflow(start_date, end_date, (end_date - start_date).days)
cash += delta
if self.verbose:
print flow.name, round(delta, 2)
return round(cash, 2)
def cash_at_date(self, start, end):
start_date = parse_date(start)
end_date = parse_date(end)
return self.cash_at_date_internal(start_date, end_date)
def cash_monthly_summary(self, start, end):
start_date = parse_date(start)
cur_date = parse_date(start)
end_date = parse_date(end)
while cur_date <= end_date:
print cur_date, self.cash_at_date_internal(start_date, cur_date)
cur_date += monthdelta.MonthDelta(1)
if self.verbose:
print
# CashFlow objects fundamentally just provide a function that says how
# much cash has flowed from that source between two dates
#
# The API is that one needs to define a method .cashflow(start, end, days)
# returning the (signed) cash delta for the window from start to end
class CashFlow(object):
def __init__(self, name):
self.name = name
class FixedCost(CashFlow):
def __init__(self, name, amount):
super(FixedCost, self).__init__(name)
self.cost = -amount
def cashflow(self, start, end, days):
return self.cost
class ConstantCost(CashFlow):
def __init__(self, name, amount):
super(ConstantCost, self).__init__(name)
self.rate = -amount
def cashflow(self, start, end, days):
return self.rate * days / 365.
class PeriodicCost(CashFlow):
def __init__(self, name, amount, start, interval):
super(PeriodicCost, self).__init__(name)
self.amount = -amount
self.start = parse_date(start)
self.interval = interval
def cashflow(self, start, end, days):
cur = self.start
delta = 0
while (cur <= end):
if cur >= start:
delta += self.amount
cur += datetime.timedelta(days=self.interval)
return delta
class MonthlyCost(CashFlow):
def __init__(self, name, amount, start):
super(MonthlyCost, self).__init__(name)
self.amount = -amount
self.start = parse_date(start)
def cashflow(self, start, end, days):
cur = self.start
delta = 0
while (cur <= end):
if cur >= start:
delta += self.amount
cur += monthdelta.MonthDelta(1)
return delta
class TotalCost(CashFlow):
def __init__(self, name, *args):
self.name = name
self.flows = args
def cashflow(self, start, end, days):
return sum(cost.cashflow(start, end, days) for cost in self.flows)
class SemiMonthlyCost(TotalCost):
def __init__(self, name, amount, start1, start2 = None):
if start2 is None:
start2 = unparse_date(parse_date(start1) + datetime.timedelta(days=14))
super(SemiMonthlyCost, self).__init__(name,
MonthlyCost(name, amount, start1),
MonthlyCost(name, amount, start2)
)
class SemiMonthlyWagesNoTax(SemiMonthlyCost):
def __init__(self, name, wage, start):
super(SemiMonthlyWagesNoTax, self).__init__(name, self.compute_wage(wage), start)
def compute_wage(self, wage):
return wage / 24.
class SemiMonthlyWages(SemiMonthlyWagesNoTax):
def compute_wage(self, wage):
fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
unemp_tax = 450
return (wage + fica_tax + unemp_tax) / 24.
def __init__(self, name, wage, start):
super(SemiMonthlyWages, self).__init__(name, wage, start)
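# Worked example (illustrative numbers): SemiMonthlyWages("Payroll", 4000, ...)
# costs (4000 + 4000*0.062 + 4000*0.0145 + 450) / 24 = 4756 / 24 ~= 198.17 per
# half-month; four pay dates (01-01, 01-15, 02-01, 02-15) give
# 500000 - 4 * 198.1666... ~= 499207.33, the figure asserted in the tests below.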
class DelayedCost(CashFlow):
def __init__(self, start, base_model):
super(DelayedCost, self).__init__("Delayed")
self.base_model = base_model
self.start = parse_date(start)
def cashflow(self, start, end, days):
start = max(start, self.start)
if start > end:
return 0
time_delta = (end-start).days
return self.base_model.cashflow(start, end, time_delta)
class BiweeklyWagesNoTax(PeriodicCost):
def __init__(self, name, wage, start):
super(BiweeklyWagesNoTax, self).__init__(name, self.compute_wage(wage), start, 14)
def compute_wage(self, wage):
# You would think this calculation would be (wage * 14 /
# 365.24), but you'd be wrong -- companies paying biweekly
# wages overpay by about 0.34% by doing the math this way
return wage / 26.
class BiweeklyWages(BiweeklyWagesNoTax):
def compute_wage(self, wage):
fica_tax = min(wage, 110100) * 0.062 + wage * 0.0145
unemp_tax = 450
# You would think this calculation would be (wage * 14 /
# 365.24), but you'd be wrong -- companies paying biweekly
# wages overpay by about 0.34% by doing the math this way
return (wage + fica_tax + unemp_tax) / 26.
def __init__(self, name, wage, start):
super(BiweeklyWages, self).__init__(name, wage, start)
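# Illustrative check of the ~0.34% figure in the comments above (helper name
# is hypothetical, not part of the original module): 26 periods of 14 days
# cover only 364 days, so wage / 26. pays a full year's wage slightly early.
def _biweekly_overpayment_ratio():
    return (365.24 / (26 * 14.)) - 1  # ~= 0.0034, i.e. about 0.34%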
if __name__ == "__main__":
# Tests
c = Company("Example Inc")
c.add_flow(FixedCost("Initial Cash", -500000))
c.add_flow(FixedCost("Incorporation", 500))
assert(c.cash_at_date("2012-01-01", "2012-03-01") == 500000 - 500)
c.add_flow(FixedCost("Incorporation", -500))
c.add_flow(ConstantCost("Office", 50000))
assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000 - round(50000*1/365., 2))
c.add_flow(ConstantCost("Office", -50000))
c.add_flow(PeriodicCost("Payroll", 4000, "2012-01-05", 14))
assert(c.cash_at_date("2012-01-01", "2012-01-02") == 500000)
assert(c.cash_at_date("2012-01-01", "2012-01-06") == 500000 - 4000)
c.add_flow(PeriodicCost("Payroll", -4000, "2012-01-05", 14))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", 50000)))
assert(c.cash_at_date("2012-01-01", "2012-01-05") == 500000)
assert(c.cash_at_date("2012-01-01", "2012-02-05") == 500000 - round(50000*4/365., 2))
c.add_flow(DelayedCost("2012-02-01", ConstantCost("Office", -50000)))
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", 50000)))
assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000)
c.add_flow(DelayedCost("2012-02-01", FixedCost("Financing", -50000)))
c.add_flow(SemiMonthlyCost("Payroll", 4000, "2012-01-01"))
assert(c.cash_at_date("2012-01-01", "2012-01-01") == 500000 - 4000)
assert(c.cash_at_date("2012-01-01", "2012-01-14") == 500000 - 4000)
assert(c.cash_at_date("2012-01-01", "2012-01-15") == 500000 - 4000 * 2)
assert(c.cash_at_date("2012-01-01", "2012-01-31") == 500000 - 4000 * 2)
assert(c.cash_at_date("2012-01-01", "2012-02-01") == 500000 - 4000 * 3)
assert(c.cash_at_date("2012-01-01", "2012-02-15") == 500000 - 4000 * 4)
c.add_flow(SemiMonthlyCost("Payroll", -4000, "2012-01-01"))
c.add_flow(SemiMonthlyWages("Payroll", 4000, "2012-01-01"))
assert(c.cash_at_date("2012-01-01", "2012-02-15") == 499207.33)
c.add_flow(SemiMonthlyWages("Payroll", -4000, "2012-01-01"))
print c
c.cash_monthly_summary("2012-01-01", "2012-07-01")
| apache-2.0 |
Elbagoury/odoo | addons/purchase_requisition/wizard/bid_line_qty.py | 374 | 1711 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class bid_line_qty(osv.osv_memory):
_name = "bid.line.qty"
_description = "Change Bid line quantity"
_columns = {
'qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def change_qty(self, cr, uid, ids, context=None):
active_ids = context.get('active_ids', []) if context else []
data = self.browse(cr, uid, ids, context=context)[0]
self.pool.get('purchase.order.line').write(cr, uid, active_ids, {'quantity_bid': data.qty})
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zj8487/godot | platform/winrt/detect.py | 32 | 8382 |
import os
import sys
import string
def is_active():
return True
def get_name():
return "WinRT"
def can_build():
if (os.name=="nt"):
#building natively on windows!
if (os.getenv("VSINSTALLDIR")):
return True
return False
def get_opts():
return []
def get_flags():
return []
def configure(env):
env.Append(CPPPATH=['#platform/winrt', '#platform/winrt/include'])
arch = ""
if os.getenv('PLATFORM') == "ARM":
# compiler commandline
# debug: /Yu"pch.h" /MP /GS /analyze- /W3 /wd"4453" /wd"28204" /Zc:wchar_t /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.WindowsPhone\" /I"Generated Files\" /I"ARM\Debug\" /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.Shared\" /ZW:nostdlib /Zi /Gm- /Od /sdl /Fd"ARM\Debug\vc120.pdb" /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /D "_DEBUG" /errorReport:prompt /WX- /Zc:forScope /RTC1 /ZW /Gd /Oy- /MDd /Fa"ARM\Debug\" /EHsc /nologo /Fo"ARM\Debug\" /Fp"ARM\Debug\App2.WindowsPhone.pch"
# release: /Yu"pch.h" /MP /GS /GL /analyze- /W3 /wd"4453" /wd"28204" /Gy /Zc:wchar_t /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.WindowsPhone\" /I"Generated Files\" /I"ARM\Release\" /I"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\App2\App2.Shared\" /ZW:nostdlib /Zi /Gm- /O2 /sdl /Fd"ARM\Release\vc120.pdb" /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /ZW /Gd /Oy- /Oi /MD /Fa"ARM\Release\" /EHsc /nologo /Fo"ARM\Release\" /Fp"ARM\Release\App2.WindowsPhone.pch"
# linker commandline
# debug: /OUT:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.exe" /MANIFEST:NO /NXCOMPAT /PDB:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.pdb" /DYNAMICBASE "WindowsPhoneCore.lib" "RuntimeObject.lib" "PhoneAppModelHost.lib" /DEBUG /MACHINE:ARM /NODEFAULTLIB:"kernel32.lib" /NODEFAULTLIB:"ole32.lib" /WINMD /APPCONTAINER /INCREMENTAL /PGD:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.WindowsPhone.pgd" /WINMDFILE:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Debug\App2.WindowsPhone\App2.winmd" /SUBSYSTEM:WINDOWS /MANIFESTUAC:NO /ManifestFile:"ARM\Debug\App2.WindowsPhone.exe.intermediate.manifest" /ERRORREPORT:PROMPT /NOLOGO /TLBID:1
# release: /OUT:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.exe" /MANIFEST:NO /LTCG /NXCOMPAT /PDB:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.pdb" /DYNAMICBASE "WindowsPhoneCore.lib" "RuntimeObject.lib" "PhoneAppModelHost.lib" /DEBUG /MACHINE:ARM /NODEFAULTLIB:"kernel32.lib" /NODEFAULTLIB:"ole32.lib" /WINMD /APPCONTAINER /OPT:REF /PGD:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.WindowsPhone.pgd" /WINMDFILE:"C:\Users\ariel\Documents\Visual Studio 2013\Projects\App2\ARM\Release\App2.WindowsPhone\App2.winmd" /SUBSYSTEM:WINDOWS /MANIFESTUAC:NO /ManifestFile:"ARM\Release\App2.WindowsPhone.exe.intermediate.manifest" /OPT:ICF /ERRORREPORT:PROMPT /NOLOGO /TLBID:1
arch = "arm"
env.Append(LINKFLAGS=['/INCREMENTAL:NO', '/MANIFEST:NO', '/NXCOMPAT', '/DYNAMICBASE', "WindowsPhoneCore.lib", "RuntimeObject.lib", "PhoneAppModelHost.lib", "/DEBUG", "/MACHINE:ARM", '/NODEFAULTLIB:"kernel32.lib"', '/NODEFAULTLIB:"ole32.lib"', '/WINMD', '/APPCONTAINER', '/MANIFESTUAC:NO', '/ERRORREPORT:PROMPT', '/NOLOGO', '/TLBID:1'])
env.Append(LIBPATH=['#platform/winrt/ARM/lib'])
env.Append(CCFLAGS=string.split('/MP /GS /wd"4453" /wd"28204" /analyze- /Zc:wchar_t /Zi /Gm- /Od /fp:precise /fp:precise /D "PSAPI_VERSION=2" /D "WINAPI_FAMILY=WINAPI_FAMILY_PHONE_APP" /DWINDOWSPHONE_ENABLED /D "_UITHREADCTXT_SUPPORT=0" /D "_UNICODE" /D "UNICODE" /errorReport:prompt /WX- /Zc:forScope /Gd /Oy- /Oi /MD /RTC1 /Gd /EHsc /nologo'))
env.Append(CXXFLAGS=string.split('/ZW'))
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
elif (env["target"]=="test"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG', '/D_DEBUG'])
elif (env["target"]=="profile"):
env.Append(CCFLAGS=['-g','-pg'])
env.Append(LINKFLAGS=['-pg'])
env['ENV'] = os.environ
# fix environment for windows phone 8.1
env['ENV']['WINDOWSPHONEKITDIR'] = env['ENV']['WINDOWSPHONEKITDIR'].replace("8.0", "8.1") # wtf
env['ENV']['INCLUDE'] = env['ENV']['INCLUDE'].replace("8.0", "8.1")
env['ENV']['LIB'] = env['ENV']['LIB'].replace("8.0", "8.1")
env['ENV']['PATH'] = env['ENV']['PATH'].replace("8.0", "8.1")
env['ENV']['LIBPATH'] = env['ENV']['LIBPATH'].replace("8.0\\Windows Metadata", "8.1\\References\\CommonConfiguration\\Neutral")
else:
arch = "x64"
env.Append(LINKFLAGS=['/MANIFEST:NO', '/NXCOMPAT', '/DYNAMICBASE', "kernel32.lib", '/MACHINE:X64', '/WINMD', '/APPCONTAINER', '/MANIFESTUAC:NO', '/ERRORREPORT:PROMPT', '/NOLOGO', '/TLBID:1'])
env.Append(LIBPATH=['#platform/winrt/x64/lib'])
if (env["target"]=="release"):
env.Append(CCFLAGS=['/O2'])
env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
elif (env["target"]=="test"):
env.Append(CCFLAGS=['/O2','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['/Zi','/DDEBUG_ENABLED','/DD3D_DEBUG_INFO'])
env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
env.Append(LINKFLAGS=['/DEBUG', '/D_DEBUG'])
elif (env["target"]=="profile"):
env.Append(CCFLAGS=['-g','-pg'])
env.Append(LINKFLAGS=['-pg'])
env.Append(CCFLAGS=string.split('/MP /GS /wd"4453" /wd"28204" /Zc:wchar_t /Gm- /Od /fp:precise /D "_UNICODE" /D "UNICODE" /D "WINAPI_FAMILY=WINAPI_FAMILY_APP" /errorReport:prompt /WX- /Zc:forScope /RTC1 /Gd /MDd /EHsc /nologo'))
env.Append(CXXFLAGS=string.split('/ZW'))
env.Append(CCFLAGS=['/AI', os.environ['VCINSTALLDIR']+'\\vcpackages', '/AI', os.environ['WINDOWSSDKDIR']+'\\References\\CommonConfiguration\\Neutral'])
env.Append(CCFLAGS=['/DWINAPI_FAMILY=WINAPI_FAMILY_APP', '/D_WIN32_WINNT=0x0603', '/DNTDDI_VERSION=0x06030000'])
env['ENV'] = os.environ
env["PROGSUFFIX"]="."+arch+env["PROGSUFFIX"]
env["OBJSUFFIX"]="."+arch+env["OBJSUFFIX"]
env["LIBSUFFIX"]="."+arch+env["LIBSUFFIX"]
#env.Append(CCFLAGS=['/Gd','/GR','/nologo', '/EHsc'])
#env.Append(CXXFLAGS=['/TP', '/ZW'])
#env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
##env.Append(CCFLAGS=['/I'+os.getenv("WindowsSdkDir")+"/Include"])
env.Append(CCFLAGS=['/DWINRT_ENABLED'])
env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
#env.Append(CCFLAGS=['/DWIN32'])
env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
env.Append(CCFLAGS=['/DGLES2_ENABLED'])
#env.Append(CCFLAGS=['/DGLES1_ENABLED'])
LIBS=[
#'winmm',
'libEGL',
'libGLESv2',
'libANGLE',
#'kernel32','ole32','user32', 'advapi32'
]
env.Append(LINKFLAGS=[p+".lib" for p in LIBS])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
#/c/Program Files (x86)/Windows Phone Kits/8.1/lib/ARM/WindowsPhoneCore.lib
| mit |
40223137/cdag7test37 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/_exceptions.py | 625 | 4885 | """Different kinds of SAX Exceptions"""
# in brython the 4 lines below cause a $globals['Exception'] error
#import sys
#if sys.platform[:4] == "java":
# from java.lang import Exception
#del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
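# Minimal usage sketch (illustrative only), reading the wrapped error back
# without re-raising, as the class docstring above suggests:
#
#     try:
#         raise ValueError("bad attribute")
#     except ValueError as exc:
#         e = SAXException("could not parse attribute", exc)
#         print(e.getMessage(), e.getException())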
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.getColumnNumber()
self._linenum = self._locator.getLineNumber()
def getColumnNumber(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.getColumnNumber()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
pass
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
pass
# ===== SAXREADERNOTAVAILABLE =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted."""
pass
| gpl-3.0 |
amitsaha/compose | tests/integration/cli_test.py | 7 | 29305 | from __future__ import absolute_import
import os
import shlex
import sys
from operator import attrgetter
from six import StringIO
from .. import mock
from .testcases import DockerClientTestCase
from compose.cli.command import get_project
from compose.cli.errors import UserError
from compose.cli.main import TopLevelCommand
from compose.project import NoSuchService
class CLITestCase(DockerClientTestCase):
def setUp(self):
super(CLITestCase, self).setUp()
self.old_sys_exit = sys.exit
sys.exit = lambda code=0: None
self.command = TopLevelCommand()
self.command.base_dir = 'tests/fixtures/simple-composefile'
def tearDown(self):
sys.exit = self.old_sys_exit
self.project.kill()
self.project.remove_stopped()
for container in self.project.containers(stopped=True, one_off=True):
container.remove(force=True)
super(CLITestCase, self).tearDown()
@property
def project(self):
# Hack: allow project to be overridden. This needs refactoring so that
# the project object is built exactly once, by the command object, and
# accessed by the test case object.
if hasattr(self, '_project'):
return self._project
return get_project(self.command.base_dir)
def test_help(self):
old_base_dir = self.command.base_dir
self.command.base_dir = 'tests/fixtures/no-composefile'
with self.assertRaises(SystemExit) as exc_context:
self.command.dispatch(['help', 'up'], None)
self.assertIn('Usage: up [options] [SERVICE...]', str(exc_context.exception))
# self.project.kill() fails during teardown
# unless there is a composefile.
self.command.base_dir = old_base_dir
# TODO: address the "Inappropriate ioctl for device" warnings in test output
@mock.patch('sys.stdout', new_callable=StringIO)
def test_ps(self, mock_stdout):
self.project.get_service('simple').create_container()
self.command.dispatch(['ps'], None)
self.assertIn('simplecomposefile_simple_1', mock_stdout.getvalue())
@mock.patch('sys.stdout', new_callable=StringIO)
def test_ps_default_composefile(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
self.command.dispatch(['up', '-d'], None)
self.command.dispatch(['ps'], None)
output = mock_stdout.getvalue()
self.assertIn('multiplecomposefiles_simple_1', output)
self.assertIn('multiplecomposefiles_another_1', output)
self.assertNotIn('multiplecomposefiles_yetanother_1', output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_ps_alternate_composefile(self, mock_stdout):
config_path = os.path.abspath(
'tests/fixtures/multiple-composefiles/compose2.yml')
self._project = get_project(self.command.base_dir, [config_path])
self.command.base_dir = 'tests/fixtures/multiple-composefiles'
self.command.dispatch(['-f', 'compose2.yml', 'up', '-d'], None)
self.command.dispatch(['-f', 'compose2.yml', 'ps'], None)
output = mock_stdout.getvalue()
self.assertNotIn('multiplecomposefiles_simple_1', output)
self.assertNotIn('multiplecomposefiles_another_1', output)
self.assertIn('multiplecomposefiles_yetanother_1', output)
@mock.patch('compose.service.log')
def test_pull(self, mock_logging):
self.command.dispatch(['pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (busybox:latest)...')
@mock.patch('compose.service.log')
def test_pull_with_digest(self, mock_logging):
self.command.dispatch(['-f', 'digest.yml', 'pull'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call(
'Pulling digest (busybox@'
'sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d)...')
@mock.patch('compose.service.log')
def test_pull_with_ignore_pull_failures(self, mock_logging):
self.command.dispatch(['-f', 'ignore-pull-failures.yml', 'pull', '--ignore-pull-failures'], None)
mock_logging.info.assert_any_call('Pulling simple (busybox:latest)...')
mock_logging.info.assert_any_call('Pulling another (nonexisting-image:latest)...')
mock_logging.error.assert_any_call('Error: image library/nonexisting-image:latest not found')
@mock.patch('sys.stdout', new_callable=StringIO)
def test_build_plain(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
self.command.dispatch(['build', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
self.assertNotIn(pull_indicator, output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
self.command.dispatch(['build', '--no-cache', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
self.assertNotIn(pull_indicator, output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_build_pull(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
self.command.dispatch(['build', '--pull', 'simple'], None)
output = mock_stdout.getvalue()
self.assertIn(cache_indicator, output)
self.assertIn(pull_indicator, output)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_build_no_cache_pull(self, mock_stdout):
self.command.base_dir = 'tests/fixtures/simple-dockerfile'
self.command.dispatch(['build', 'simple'], None)
mock_stdout.truncate(0)
cache_indicator = 'Using cache'
pull_indicator = 'Status: Image is up to date for busybox:latest'
self.command.dispatch(['build', '--no-cache', '--pull', 'simple'], None)
output = mock_stdout.getvalue()
self.assertNotIn(cache_indicator, output)
self.assertIn(pull_indicator, output)
def test_up_detached(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
# Ensure containers don't have stdin and stdout connected in -d mode
container, = service.containers()
self.assertFalse(container.get('Config.AttachStderr'))
self.assertFalse(container.get('Config.AttachStdout'))
self.assertFalse(container.get('Config.AttachStdin'))
def test_up_attached(self):
with mock.patch(
'compose.cli.main.attach_to_logs',
autospec=True
) as mock_attach:
self.command.dispatch(['up'], None)
_, args, kwargs = mock_attach.mock_calls[0]
_project, log_printer, _names, _timeout = args
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
self.assertEqual(
set(log_printer.containers),
set(self.project.containers())
)
def test_up_with_links(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
def test_up_with_no_deps(self):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', '--no-deps', 'web'], None)
web = self.project.get_service('web')
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(web.containers()), 1)
self.assertEqual(len(db.containers()), 0)
self.assertEqual(len(console.containers()), 0)
def test_up_with_force_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--force-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertNotEqual(old_ids, new_ids)
def test_up_with_no_recreate(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
old_ids = [c.id for c in service.containers()]
self.command.dispatch(['up', '-d', '--no-recreate'], None)
self.assertEqual(len(service.containers()), 1)
new_ids = [c.id for c in service.containers()]
self.assertEqual(old_ids, new_ids)
def test_up_with_force_recreate_and_no_recreate(self):
with self.assertRaises(UserError):
self.command.dispatch(['up', '-d', '--force-recreate', '--no-recreate'], None)
def test_up_with_timeout(self):
self.command.dispatch(['up', '-d', '-t', '1'], None)
service = self.project.get_service('simple')
another = self.project.get_service('another')
self.assertEqual(len(service.containers()), 1)
self.assertEqual(len(another.containers()), 1)
# Ensure containers don't have stdin and stdout connected in -d mode
config = service.containers()[0].inspect()['Config']
self.assertFalse(config['AttachStderr'])
self.assertFalse(config['AttachStdout'])
self.assertFalse(config['AttachStdin'])
@mock.patch('dockerpty.start')
def test_run_service_without_links(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'console', '/bin/true'], None)
self.assertEqual(len(self.project.containers()), 0)
# Ensure stdin/out was open
container = self.project.containers(stopped=True, one_off=True)[0]
config = container.inspect()['Config']
self.assertTrue(config['AttachStderr'])
self.assertTrue(config['AttachStdout'])
self.assertTrue(config['AttachStdin'])
@mock.patch('dockerpty.start')
def test_run_service_with_links(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', 'web', '/bin/true'], None)
db = self.project.get_service('db')
console = self.project.get_service('console')
self.assertEqual(len(db.containers()), 1)
self.assertEqual(len(console.containers()), 0)
@mock.patch('dockerpty.start')
def test_run_with_no_deps(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['run', '--no-deps', 'web', '/bin/true'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 0)
@mock.patch('dockerpty.start')
def test_run_does_not_recreate_linked_containers(self, _):
self.command.base_dir = 'tests/fixtures/links-composefile'
self.command.dispatch(['up', '-d', 'db'], None)
db = self.project.get_service('db')
self.assertEqual(len(db.containers()), 1)
old_ids = [c.id for c in db.containers()]
self.command.dispatch(['run', 'web', '/bin/true'], None)
self.assertEqual(len(db.containers()), 1)
new_ids = [c.id for c in db.containers()]
self.assertEqual(old_ids, new_ids)
@mock.patch('dockerpty.start')
def test_run_without_command(self, _):
self.command.base_dir = 'tests/fixtures/commands-composefile'
self.check_build('tests/fixtures/simple-dockerfile', tag='composetest_test')
self.command.dispatch(['run', 'implicit'], None)
service = self.project.get_service('implicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/sh -c echo "success"'],
)
self.command.dispatch(['run', 'explicit'], None)
service = self.project.get_service('explicit')
containers = service.containers(stopped=True, one_off=True)
self.assertEqual(
[c.human_readable_command for c in containers],
[u'/bin/true'],
)
@mock.patch('dockerpty.start')
def test_run_service_with_entrypoint_overridden(self, _):
self.command.base_dir = 'tests/fixtures/dockerfile_with_entrypoint'
name = 'service'
self.command.dispatch(
['run', '--entrypoint', '/bin/echo', name, 'helloworld'],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(
shlex.split(container.human_readable_command),
[u'/bin/echo', u'helloworld'],
)
@mock.patch('dockerpty.start')
def test_run_service_with_user_overridden(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '--user={user}'.format(user=user), name]
self.command.dispatch(args, None)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@mock.patch('dockerpty.start')
def test_run_service_with_user_overridden_short_form(self, _):
self.command.base_dir = 'tests/fixtures/user-composefile'
name = 'service'
user = 'sshd'
args = ['run', '-u', user, name]
self.command.dispatch(args, None)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
self.assertEqual(user, container.get('Config.User'))
@mock.patch('dockerpty.start')
def test_run_service_with_environment_overridden(self, _):
name = 'service'
self.command.base_dir = 'tests/fixtures/environment-composefile'
self.command.dispatch(
['run', '-e', 'foo=notbar', '-e', 'allo=moto=bobo',
'-e', 'alpha=beta', name],
None
)
service = self.project.get_service(name)
container = service.containers(stopped=True, one_off=True)[0]
# env overridden
self.assertEqual('notbar', container.environment['foo'])
# keep environment from yaml
self.assertEqual('world', container.environment['hello'])
# added option from command line
self.assertEqual('beta', container.environment['alpha'])
# make sure a value containing '=' doesn't crash out
self.assertEqual('moto=bobo', container.environment['allo'])
@mock.patch('dockerpty.start')
def test_run_service_without_map_ports(self, _):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_random, None)
self.assertEqual(port_assigned, None)
@mock.patch('dockerpty.start')
def test_run_service_with_map_ports(self, _):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '--service-ports', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_random = container.get_local_port(3000)
port_assigned = container.get_local_port(3001)
port_range = container.get_local_port(3002), container.get_local_port(3003)
# close all one off containers we just created
container.stop()
# check the ports
self.assertNotEqual(port_random, None)
self.assertIn("0.0.0.0", port_random)
self.assertEqual(port_assigned, "0.0.0.0:49152")
self.assertEqual(port_range[0], "0.0.0.0:49153")
self.assertEqual(port_range[1], "0.0.0.0:49154")
@mock.patch('dockerpty.start')
def test_run_service_with_explicitly_mapped_ports(self, _):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '-p', '30000:3000', '--publish', '30001:3001', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_short = container.get_local_port(3000)
port_full = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_short, "0.0.0.0:30000")
self.assertEqual(port_full, "0.0.0.0:30001")
@mock.patch('dockerpty.start')
def test_run_service_with_explicitly_mapped_ip_ports(self, _):
# create one off container
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['run', '-d', '-p', '127.0.0.1:30000:3000', '--publish', '127.0.0.1:30001:3001', 'simple'], None)
container = self.project.get_service('simple').containers(one_off=True)[0]
# get port information
port_short = container.get_local_port(3000)
port_full = container.get_local_port(3001)
# close all one off containers we just created
container.stop()
# check the ports
self.assertEqual(port_short, "127.0.0.1:30000")
self.assertEqual(port_full, "127.0.0.1:30001")
@mock.patch('dockerpty.start')
def test_run_with_custom_name(self, _):
self.command.base_dir = 'tests/fixtures/environment-composefile'
name = 'the-container-name'
self.command.dispatch(['run', '--name', name, 'service'], None)
service = self.project.get_service('service')
container, = service.containers(stopped=True, one_off=True)
self.assertEqual(container.name, name)
def test_rm(self):
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '--force'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
service = self.project.get_service('simple')
service.create_container()
service.kill()
self.assertEqual(len(service.containers(stopped=True)), 1)
self.command.dispatch(['rm', '-f'], None)
self.assertEqual(len(service.containers(stopped=True)), 0)
def test_stop(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['stop', '-t', '1'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_pause_unpause(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertFalse(service.containers()[0].is_paused)
self.command.dispatch(['pause'], None)
self.assertTrue(service.containers()[0].is_paused)
self.command.dispatch(['unpause'], None)
self.assertFalse(service.containers()[0].is_paused)
def test_logs_invalid_service_name(self):
with self.assertRaises(NoSuchService):
self.command.dispatch(['logs', 'madeupname'], None)
def test_kill(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_kill_signal_sigstop(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.assertEqual(len(service.containers()), 1)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertEqual(len(service.containers()), 1)
# The container is still running. It has only been paused
self.assertTrue(service.containers()[0].is_running)
def test_kill_stopped_service(self):
self.command.dispatch(['up', '-d'], None)
service = self.project.get_service('simple')
self.command.dispatch(['kill', '-s', 'SIGSTOP'], None)
self.assertTrue(service.containers()[0].is_running)
self.command.dispatch(['kill', '-s', 'SIGKILL'], None)
self.assertEqual(len(service.containers(stopped=True)), 1)
self.assertFalse(service.containers(stopped=True)[0].is_running)
def test_restart(self):
service = self.project.get_service('simple')
container = service.create_container()
service.start_container(container)
started_at = container.dictionary['State']['StartedAt']
self.command.dispatch(['restart', '-t', '1'], None)
container.inspect()
self.assertNotEqual(
container.dictionary['State']['FinishedAt'],
'0001-01-01T00:00:00Z',
)
self.assertNotEqual(
container.dictionary['State']['StartedAt'],
started_at,
)
def test_scale(self):
project = self.project
self.command.scale(project, {'SERVICE=NUM': ['simple=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=3', 'another=2']})
self.assertEqual(len(project.get_service('simple').containers()), 3)
self.assertEqual(len(project.get_service('another').containers()), 2)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=1', 'another=1']})
self.assertEqual(len(project.get_service('simple').containers()), 1)
self.assertEqual(len(project.get_service('another').containers()), 1)
self.command.scale(project, {'SERVICE=NUM': ['simple=0', 'another=0']})
self.assertEqual(len(project.get_service('simple').containers()), 0)
self.assertEqual(len(project.get_service('another').containers()), 0)
def test_port(self):
self.command.base_dir = 'tests/fixtures/ports-composefile'
self.command.dispatch(['up', '-d'], None)
container = self.project.get_service('simple').get_container()
@mock.patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout):
self.command.dispatch(['port', 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), container.get_local_port(3000))
self.assertEqual(get_port(3001), "0.0.0.0:49152")
self.assertEqual(get_port(3002), "0.0.0.0:49153")
def test_port_with_scale(self):
self.command.base_dir = 'tests/fixtures/ports-composefile-scale'
self.command.dispatch(['scale', 'simple=2'], None)
containers = sorted(
self.project.containers(service_names=['simple']),
key=attrgetter('name'))
@mock.patch('sys.stdout', new_callable=StringIO)
def get_port(number, mock_stdout, index=None):
if index is None:
self.command.dispatch(['port', 'simple', str(number)], None)
else:
self.command.dispatch(['port', '--index=' + str(index), 'simple', str(number)], None)
return mock_stdout.getvalue().rstrip()
self.assertEqual(get_port(3000), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=1), containers[0].get_local_port(3000))
self.assertEqual(get_port(3000, index=2), containers[1].get_local_port(3000))
self.assertEqual(get_port(3002), "")
def test_env_file_relative_to_compose_file(self):
config_path = os.path.abspath('tests/fixtures/env-file/docker-compose.yml')
self.command.dispatch(['-f', config_path, 'up', '-d'], None)
self._project = get_project(self.command.base_dir, [config_path])
containers = self.project.containers(stopped=True)
self.assertEqual(len(containers), 1)
self.assertIn("FOO=1", containers[0].get('Config.Env'))
@mock.patch.dict(os.environ)
def test_home_and_env_var_in_volume_path(self):
os.environ['VOLUME_NAME'] = 'my-volume'
os.environ['HOME'] = '/tmp/home-dir'
expected_host_path = os.path.join(os.environ['HOME'], os.environ['VOLUME_NAME'])
self.command.base_dir = 'tests/fixtures/volume-path-interpolation'
self.command.dispatch(['up', '-d'], None)
container = self.project.containers(stopped=True)[0]
actual_host_path = container.get('Volumes')['/container-path']
components = actual_host_path.split('/')
self.assertTrue(components[-2:] == ['home-dir', 'my-volume'],
msg="Last two components differ: %s, %s" % (actual_host_path, expected_host_path))
def test_up_with_default_override_file(self):
self.command.base_dir = 'tests/fixtures/override-files'
self.command.dispatch(['up', '-d'], None)
containers = self.project.containers()
self.assertEqual(len(containers), 2)
web, db = containers
self.assertEqual(web.human_readable_command, 'top')
self.assertEqual(db.human_readable_command, 'top')
def test_up_with_multiple_files(self):
self.command.base_dir = 'tests/fixtures/override-files'
config_paths = [
'docker-compose.yml',
'docker-compose.override.yml',
'extra.yml',
]
self._project = get_project(self.command.base_dir, config_paths)
self.command.dispatch(
[
'-f', config_paths[0],
'-f', config_paths[1],
'-f', config_paths[2],
'up', '-d',
],
None)
containers = self.project.containers()
self.assertEqual(len(containers), 3)
web, other, db = containers
self.assertEqual(web.human_readable_command, 'top')
self.assertTrue({'db', 'other'} <= set(web.links()))
self.assertEqual(db.human_readable_command, 'top')
self.assertEqual(other.human_readable_command, 'top')
def test_up_with_extends(self):
self.command.base_dir = 'tests/fixtures/extends'
self.command.dispatch(['up', '-d'], None)
self.assertEqual(
set([s.name for s in self.project.services]),
set(['mydb', 'myweb']),
)
# Sort by name so we get [db, web]
containers = sorted(
self.project.containers(stopped=True),
key=lambda c: c.name,
)
self.assertEqual(len(containers), 2)
web = containers[1]
self.assertEqual(set(web.links()), set(['db', 'mydb_1', 'extends_mydb_1']))
expected_env = set([
"FOO=1",
"BAR=2",
"BAZ=2",
])
self.assertTrue(expected_env <= set(web.get('Config.Env')))
| apache-2.0 |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/python/kernel_tests/conv2d_backprop_filter_grad_test.py | 101 | 2774 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class Conv2DBackpropFilterGradTest(test.TestCase):
def testGradient(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for stride in [1, 2]:
np.random.seed(1)
in_shape = [5, 8, 6, 4]
in_val = constant_op.constant(
2 * np.random.random_sample(in_shape) - 1, dtype=dtypes.float32)
filter_shape = [3, 3, 4, 6]
# Make a convolution op with the current settings, just to easily get
# the shape of the output.
conv_out = nn_ops.conv2d(in_val,
array_ops.zeros(filter_shape),
[1, stride, stride, 1], padding)
out_backprop_shape = conv_out.get_shape().as_list()
out_backprop_val = constant_op.constant(
2 * np.random.random_sample(out_backprop_shape) - 1,
dtype=dtypes.float32)
output = nn_ops.conv2d_backprop_filter(in_val, filter_shape,
out_backprop_val,
[1, stride, stride, 1],
padding)
err = gradient_checker.compute_gradient_error(
[in_val, out_backprop_val], [in_shape, out_backprop_shape],
output, filter_shape)
print("conv2d_backprop_filter gradient err = %g " % err)
err_tolerance = 2e-3
self.assertLess(err, err_tolerance)
if __name__ == "__main__":
test.main()
| mit |
aioue/ansible | lib/ansible/modules/windows/win_iis_virtualdirectory.py | 78 | 2485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_virtualdirectory
version_added: "2.0"
short_description: Configures a virtual directory in IIS.
description:
- Creates, Removes and configures a virtual directory in IIS.
options:
name:
description:
- The name of the virtual directory to create or remove
required: true
state:
description:
- Whether to add or remove the specified virtual directory
choices:
- absent
- present
required: false
default: present
site:
description:
- The site name under which the virtual directory is created or exists.
required: true
application:
description:
- The application under which the virtual directory is created or exists.
required: false
default: null
physical_path:
description:
- The physical path to the folder in which the new virtual directory is created. The specified folder must already exist.
required: false
default: null
author: Henrik Wallström
'''
EXAMPLES = r'''
- name: Create a virtual directory if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: present
physical_path: c:\virtualdirectory\some
- name: Remove a virtual directory if it exists
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: absent
- name: Create a virtual directory on an application if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
application: someapp
state: present
physical_path: c:\virtualdirectory\some
'''
| gpl-3.0 |
Johnzero/OE7 | openerp/addons-modules/crm/wizard/__init__.py | 60 | 1205 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_partner_binding
import crm_phonecall_to_phonecall
import crm_opportunity_to_phonecall
import crm_lead_to_opportunity
import crm_merge_opportunities
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mihailignatenko/erp | addons/gamification/models/challenge.py | 91 | 43642 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import ustr, DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from datetime import date, datetime, timedelta
import calendar
import logging
_logger = logging.getLogger(__name__)
# display top 3 in ranking, could be db variable
MAX_VISIBILITY_RANKING = 3
def start_end_date_for_period(period, default_start_date=False, default_end_date=False):
"""Return the start and end date for a goal period based on today
:param str default_start_date: string date in DEFAULT_SERVER_DATE_FORMAT format
:param str default_end_date: string date in DEFAULT_SERVER_DATE_FORMAT format
:return: (start_date, end_date), dates in string format, False if the period is
not defined or unknown"""
today = date.today()
if period == 'daily':
start_date = today
end_date = start_date
elif period == 'weekly':
delta = timedelta(days=today.weekday())
start_date = today - delta
end_date = start_date + timedelta(days=7)
elif period == 'monthly':
month_range = calendar.monthrange(today.year, today.month)
start_date = today.replace(day=1)
end_date = today.replace(day=month_range[1])
elif period == 'yearly':
start_date = today.replace(month=1, day=1)
end_date = today.replace(month=12, day=31)
else: # period == 'once':
start_date = default_start_date # for manual goal, start each time
end_date = default_end_date
# the default dates are already strings (or False), return them as-is
return (start_date, end_date)
return (datetime.strftime(start_date, DF), datetime.strftime(end_date, DF))
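# Illustrative values (assuming today is Wednesday 2014-06-18):
#   start_end_date_for_period('weekly')  -> ('2014-06-16', '2014-06-23')
#   start_end_date_for_period('monthly') -> ('2014-06-01', '2014-06-30')
# Note the weekly end date is start + 7 days, i.e. the following Monday.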
class gamification_challenge(osv.Model):
"""Gamification challenge
Set of predefined objectives assigned to people with rules for recurrence and
rewards
If 'user_ids' is defined and 'period' is different from 'once', the set will
be assigned to the users for each period (e.g. every 1st of each month if
'monthly' is selected)
"""
_name = 'gamification.challenge'
_description = 'Gamification challenge'
_inherit = 'mail.thread'
def _get_next_report_date(self, cr, uid, ids, field_name, arg, context=None):
"""Return the next report date based on the last report date and report
period.
:return: a string in DEFAULT_SERVER_DATE_FORMAT representing the date"""
res = {}
for challenge in self.browse(cr, uid, ids, context=context):
last = datetime.strptime(challenge.last_report_date, DF).date()
if challenge.report_message_frequency == 'daily':
next = last + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'weekly':
next = last + timedelta(days=7)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'monthly':
month_range = calendar.monthrange(last.year, last.month)
next = last.replace(day=month_range[1]) + timedelta(days=1)
res[challenge.id] = next.strftime(DF)
elif challenge.report_message_frequency == 'yearly':
res[challenge.id] = last.replace(year=last.year + 1).strftime(DF)
# frequency == 'once', reported when closed only
else:
res[challenge.id] = False
return res
def _get_categories(self, cr, uid, context=None):
return [
('hr', 'Human Resources / Engagement'),
('other', 'Settings / Gamification Tools'),
]
def _get_report_template(self, cr, uid, context=None):
try:
return self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'simple_report_template')[1]
except ValueError:
return False
_order = 'end_date, start_date, name, id'
_columns = {
'name': fields.char('Challenge Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'state': fields.selection([
('draft', 'Draft'),
('inprogress', 'In Progress'),
('done', 'Done'),
], copy=False,
string='State', required=True, track_visibility='onchange'),
'manager_id': fields.many2one('res.users',
string='Responsible', help="The user responsible for the challenge."),
'user_ids': fields.many2many('res.users', 'gamification_challenge_users_rel',
string='Users',
help="List of users participating to the challenge"),
'user_domain': fields.char('User domain', help="Alternative to a list of users"),
'period': fields.selection([
('once', 'Non recurring'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string='Periodicity',
help='Period of automatic goal assignment. If none is selected, the challenge should be launched manually.',
required=True),
'start_date': fields.date('Start Date',
help="The day a new challenge will be automatically started. If no periodicity is set, will use this date as the goal start date."),
'end_date': fields.date('End Date',
help="The day a new challenge will be automatically closed. If no periodicity is set, will use this date as the goal end date."),
'invited_user_ids': fields.many2many('res.users', 'gamification_invited_user_ids_rel',
string="Suggest to users"),
'line_ids': fields.one2many('gamification.challenge.line', 'challenge_id',
string='Lines',
help="List of goals that will be set",
required=True, copy=True),
'reward_id': fields.many2one('gamification.badge', string="For Every Succeeding User"),
'reward_first_id': fields.many2one('gamification.badge', string="For 1st user"),
'reward_second_id': fields.many2one('gamification.badge', string="For 2nd user"),
'reward_third_id': fields.many2one('gamification.badge', string="For 3rd user"),
'reward_failure': fields.boolean('Reward Bests if not Succeeded?'),
'reward_realtime': fields.boolean('Reward as soon as every goal is reached',
help="With this option enabled, a user can receive a badge only once. The top 3 badges are still rewarded only at the end of the challenge."),
'visibility_mode': fields.selection([
('personal', 'Individual Goals'),
('ranking', 'Leader Board (Group Ranking)'),
],
string="Display Mode", required=True),
'report_message_frequency': fields.selection([
('never', 'Never'),
('onchange', 'On change'),
('daily', 'Daily'),
('weekly', 'Weekly'),
('monthly', 'Monthly'),
('yearly', 'Yearly')
],
string="Report Frequency", required=True),
'report_message_group_id': fields.many2one('mail.group',
string='Send a copy to',
help='Group that will receive a copy of the report in addition to the user'),
'report_template_id': fields.many2one('email.template', string="Report Template", required=True),
'remind_update_delay': fields.integer('Non-updated manual goals will be reminded after',
help="Never reminded if no value or zero is specified."),
'last_report_date': fields.date('Last Report Date'),
'next_report_date': fields.function(_get_next_report_date,
type='date', string='Next Report Date', store=True),
'category': fields.selection(lambda s, *a, **k: s._get_categories(*a, **k),
string="Appears in", help="Define the visibility of the challenge through menus", required=True),
}
_defaults = {
'period': 'once',
'state': 'draft',
'visibility_mode': 'personal',
'report_message_frequency': 'never',
'last_report_date': fields.date.today,
'manager_id': lambda s, cr, uid, c: uid,
'category': 'hr',
'reward_failure': False,
'report_template_id': lambda s, *a, **k: s._get_report_template(*a, **k),
'reward_realtime': True,
}
def create(self, cr, uid, vals, context=None):
"""Overwrite the create method to add the user of groups"""
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
return super(gamification_challenge, self).create(cr, uid, vals, context=context)
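# Note on the (4, user_id) tuples above: OpenERP many2many fields are written
# through command tuples. A minimal reference sketch for the two commands used
# in this module (assumed standard ORM semantics):
#
#   (4, id)  link an existing record to the set
#   (3, id)  remove the record from the set without deleting it
#
#   vals['user_ids'] = [(4, 7), (4, 8), (3, 9)]  # add users 7 and 8, drop 9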
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int,long)):
ids = [ids]
if vals.get('user_domain'):
user_ids = self._get_challenger_users(cr, uid, vals.get('user_domain'), context=context)
if not vals.get('user_ids'):
vals['user_ids'] = []
vals['user_ids'] += [(4, user_id) for user_id in user_ids]
write_res = super(gamification_challenge, self).write(cr, uid, ids, vals, context=context)
if vals.get('report_message_frequency', 'never') != 'never':
# _recompute_challenge_users does not subscribe users for challenges without reports, so subscribe them now
for challenge in self.browse(cr, uid, ids, context=context):
self.message_subscribe(cr, uid, [challenge.id], [user.partner_id.id for user in challenge.user_ids], context=context)
if vals.get('state') == 'inprogress':
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
elif vals.get('state') == 'done':
self.check_challenge_reward(cr, uid, ids, force=True, context=context)
elif vals.get('state') == 'draft':
# resetting progress
if self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', 'in', ids), ('state', '=', 'inprogress')], context=context):
raise osv.except_osv("Error", "You can not reset a challenge with unfinished goals.")
return write_res
##### Update #####
def _cron_update(self, cr, uid, context=None, ids=False):
"""Daily cron check.
- Start planned challenges (in draft and with start_date = today)
- Create the missing goals (eg: modified the challenge to add lines)
- Update every running challenge
"""
if context is None:
context = {}
# start scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'draft'),
('start_date', '<=', fields.date.today())])
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'inprogress'}, context=context)
# close scheduled challenges
planned_challenge_ids = self.search(cr, uid, [
('state', '=', 'inprogress'),
('end_date', '<', fields.date.today())])  # the end date has passed
if planned_challenge_ids:
self.write(cr, uid, planned_challenge_ids, {'state': 'done'}, context=context)
if not ids:
ids = self.search(cr, uid, [('state', '=', 'inprogress')], context=context)
# in cron mode, will do intermediate commits
# TODO in trunk: replace by parameter
context = dict(context, commit_gamification=True)
return self._update_all(cr, uid, ids, context=context)
def _update_all(self, cr, uid, ids, context=None):
"""Update the challenges and related goals
:param list(int) ids: the ids of the challenges to update; if empty, nothing
is updated (the cron caller pre-selects the challenges in progress)."""
if not ids:
return True
if isinstance(ids, (int,long)):
ids = [ids]
goal_obj = self.pool.get('gamification.goal')
# include yesterday goals to update the goals that just ended
# exclude goals for users that did not connect since the last update
yesterday = date.today() - timedelta(days=1)
cr.execute("""SELECT gg.id
FROM gamification_goal as gg,
gamification_challenge as gc,
res_users as ru
WHERE gg.challenge_id = gc.id
AND gg.user_id = ru.id
AND gg.write_date < ru.login_date
AND gg.closed IS false
AND gc.id IN %s
AND (gg.state = 'inprogress'
OR (gg.state = 'reached'
AND (gg.end_date >= %s OR gg.end_date IS NULL)))
""", (tuple(ids), yesterday.strftime(DF)))
goal_ids = [res[0] for res in cr.fetchall()]
# update every running goal already generated linked to selected challenges
goal_obj.update(cr, uid, goal_ids, context=context)
self._recompute_challenge_users(cr, uid, ids, context=context)
self._generate_goals_from_challenge(cr, uid, ids, context=context)
for challenge in self.browse(cr, uid, ids, context=context):
if challenge.last_report_date != fields.date.today():
# goals closed but still opened at the last report date
closed_goals_to_report = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('start_date', '>=', challenge.last_report_date),
('end_date', '<=', challenge.last_report_date)
])
if challenge.next_report_date and fields.date.today() >= challenge.next_report_date:
self.report_progress(cr, uid, challenge, context=context)
elif len(closed_goals_to_report) > 0:
# some goals need a final report
self.report_progress(cr, uid, challenge, subset_goal_ids=closed_goals_to_report, context=context)
self.check_challenge_reward(cr, uid, ids, context=context)
return True
def quick_update(self, cr, uid, challenge_id, context=None):
"""Update all the goals of a specific challenge, no generation of new goals"""
goal_ids = self.pool.get('gamification.goal').search(cr, uid, [('challenge_id', '=', challenge_id)], context=context)
self.pool.get('gamification.goal').update(cr, uid, goal_ids, context=context)
return True
def _get_challenger_users(self, cr, uid, domain, context=None):
user_domain = eval(ustr(domain))
return self.pool['res.users'].search(cr, uid, user_domain, context=context)
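# Illustrative sketch (hypothetical domain): 'user_domain' is stored as a
# string and evaluated with safe_eval above, so a challenge could target a
# group of users like this:
#
#   challenge.user_domain = "[('groups_id.name', 'ilike', 'Sales')]"
#   user_ids = self._get_challenger_users(cr, uid, challenge.user_domain)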
def _recompute_challenge_users(self, cr, uid, challenge_ids, context=None):
"""Recompute the domain to add new users and remove the one no longer matching the domain"""
for challenge in self.browse(cr, uid, challenge_ids, context=context):
if challenge.user_domain:
old_user_ids = [user.id for user in challenge.user_ids]
new_user_ids = self._get_challenger_users(cr, uid, challenge.user_domain, context=context)
to_remove_ids = list(set(old_user_ids) - set(new_user_ids))
to_add_ids = list(set(new_user_ids) - set(old_user_ids))
write_op = [(3, user_id) for user_id in to_remove_ids]
write_op += [(4, user_id) for user_id in to_add_ids]
if write_op:
self.write(cr, uid, [challenge.id], {'user_ids': write_op}, context=context)
return True
def action_start(self, cr, uid, ids, context=None):
"""Start a challenge"""
return self.write(cr, uid, ids, {'state': 'inprogress'}, context=context)
def action_check(self, cr, uid, ids, context=None):
"""Check a challenge
Create goals that haven't been created yet (eg: if added users)
Recompute the current value for each goal related"""
return self._update_all(cr, uid, ids=ids, context=context)
def action_report_progress(self, cr, uid, ids, context=None):
"""Manual report of a goal, does not influence automatic report frequency"""
if isinstance(ids, (int,long)):
ids = [ids]
for challenge in self.browse(cr, uid, ids, context=context):
self.report_progress(cr, uid, challenge, context=context)
return True
##### Automatic actions #####
def _generate_goals_from_challenge(self, cr, uid, ids, context=None):
"""Generate the goals for each line and user.
If goals already exist for this line and user, the line is skipped. This
can be called after each change in the list of users or lines.
:param list(int) ids: the list of challenges concerned"""
goal_obj = self.pool.get('gamification.goal')
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period)
to_update = []
# if no periodicity, use challenge dates
if not start_date and challenge.start_date:
start_date = challenge.start_date
if not end_date and challenge.end_date:
end_date = challenge.end_date
for line in challenge.line_ids:
# there is potentially a lot of users
# detect the ones with no goal linked to this line
date_clause = ""
query_params = [line.id]
if start_date:
date_clause += "AND g.start_date = %s"
query_params.append(start_date)
if end_date:
date_clause += "AND g.end_date = %s"
query_params.append(end_date)
query = """SELECT u.id AS user_id
FROM res_users u
LEFT JOIN gamification_goal g
ON (u.id = g.user_id)
WHERE line_id = %s
{date_clause}
""".format(date_clause=date_clause)
cr.execute(query, query_params)
user_with_goal_ids = cr.dictfetchall()
participant_user_ids = [user.id for user in challenge.user_ids]
user_without_goal_ids = list(set(participant_user_ids) - set([user['user_id'] for user in user_with_goal_ids]))
user_squating_challenge_ids = list(set([user['user_id'] for user in user_with_goal_ids]) - set(participant_user_ids))
if user_squating_challenge_ids:
# users that used to match the challenge
goal_to_remove_ids = goal_obj.search(cr, uid, [('challenge_id', '=', challenge.id), ('user_id', 'in', user_squating_challenge_ids)], context=context)
goal_obj.unlink(cr, uid, goal_to_remove_ids, context=context)
values = {
'definition_id': line.definition_id.id,
'line_id': line.id,
'target_goal': line.target_goal,
'state': 'inprogress',
}
if start_date:
values['start_date'] = start_date
if end_date:
values['end_date'] = end_date
# the goal is initialised one step away from the target to make sure it is computed at least once
if line.condition == 'higher':
values['current'] = line.target_goal - 1
else:
values['current'] = line.target_goal + 1
if challenge.remind_update_delay:
values['remind_update_delay'] = challenge.remind_update_delay
for user_id in user_without_goal_ids:
values.update({'user_id': user_id})
goal_id = goal_obj.create(cr, uid, values, context=context)
to_update.append(goal_id)
goal_obj.update(cr, uid, to_update, context=context)
return True
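# Note on the seeding above: new goals start one unit away from their target
# so the first update pass always recomputes them. Sketch for a line with
# target_goal = 10:
#
#   condition == 'higher'  ->  current = 9   (still below the target)
#   condition == 'lower'   ->  current = 11  (still above the target)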
##### JS utilities #####
def _get_serialized_challenge_lines(self, cr, uid, challenge, user_id=False, restrict_goal_ids=False, restrict_top=False, context=None):
"""Return a serialised version of the goals information if the user has not completed every goal
:challenge: browse record of challenge to compute
:user_id: res.users id of the user retrieving progress (False if no distinction, only for ranking challenges)
:restrict_goal_ids: <list(int)> compute only the results for this subset of gamification.goal ids; if False, retrieve every goal of the current running challenge
:restrict_top: <int> for challenge lines where visibility_mode == 'ranking', retrieve only the top results plus the user's own goal; if False, retrieve all
restrict_goal_ids has priority over restrict_top
format list
# if visibility_mode == 'ranking'
{
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'own_goal_id': <gamification.goal id where user_id == uid>,
'goals': [
{
'id': <gamification.goal id>,
'rank': <user ranking>,
'user_id': <res.users id>,
'name': <res.users name>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
]
},
# if visibility_mode == 'personal'
{
'id': <gamification.goal id>,
'name': <gamification.goal.description name>,
'description': <gamification.goal.description description>,
'condition': <reach condition {lower,higher}>,
'computation_mode': <target computation {manually,count,sum,python}>,
'monetary': <{True,False}>,
'suffix': <value suffix>,
'action': <{True,False}>,
'display_mode': <{progress,boolean}>,
'target': <challenge line target>,
'state': <gamification.goal state {draft,inprogress,reached,failed,canceled}>,
'completeness': <percentage>,
'current': <current value>,
}
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period)
res_lines = []
all_reached = True
for line in challenge.line_ids:
line_data = {
'name': line.definition_id.name,
'description': line.definition_id.description,
'condition': line.definition_id.condition,
'computation_mode': line.definition_id.computation_mode,
'monetary': line.definition_id.monetary,
'suffix': line.definition_id.suffix,
'action': True if line.definition_id.action_id else False,
'display_mode': line.definition_id.display_mode,
'target': line.target_goal,
}
domain = [
('line_id', '=', line.id),
('state', '!=', 'draft'),
]
if restrict_goal_ids:
domain.append(('id', 'in', restrict_goal_ids))
else:
# if no subset goals, use the dates for restriction
if start_date:
domain.append(('start_date', '=', start_date))
if end_date:
domain.append(('end_date', '=', end_date))
if challenge.visibility_mode == 'personal':
if not user_id:
raise osv.except_osv(_('Error!'),_("Retrieving progress for personal challenge without user information"))
domain.append(('user_id', '=', user_id))
sorting = goal_obj._order
limit = 1
else:
line_data.update({
'own_goal_id': False,
'goals': [],
})
sorting = "completeness desc, current desc"
limit = False
goal_ids = goal_obj.search(cr, uid, domain, order=sorting, limit=limit, context=context)
ranking = 0
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if challenge.visibility_mode == 'personal':
# limit=1 so only one result
line_data.update({
'id': goal.id,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
else:
ranking += 1
if user_id and goal.user_id.id == user_id:
line_data['own_goal_id'] = goal.id
elif restrict_top and ranking > restrict_top:
# not own goal and too low to be in top
continue
line_data['goals'].append({
'id': goal.id,
'user_id': goal.user_id.id,
'name': goal.user_id.name,
'rank': ranking,
'current': goal.current,
'completeness': goal.completeness,
'state': goal.state,
})
if goal.state != 'reached':
all_reached = False
if goal_ids:
res_lines.append(line_data)
if all_reached:
return []
return res_lines
##### Reporting #####
def report_progress(self, cr, uid, challenge, context=None, users=False, subset_goal_ids=False):
"""Post report about the progress of the goals
:param challenge: the challenge object that needs to be reported
:param users: the list(res.users) of users that are concerned by
the report. If False, will send the report to every user concerned
(goal users and group that receive a copy). Only used for challenge with
a visibility mode set to 'personal'.
:param subset_goal_ids: the list(int) of goal ids to restrict the report to.
If not specified, the goals of the current challenge period are used;
passing goal ids from a previous period produces a report for that period.
"""
if context is None:
context = {}
temp_obj = self.pool.get('email.template')
ctx = context.copy()
if challenge.visibility_mode == 'ranking':
lines_boards = self._get_serialized_challenge_lines(cr, uid, challenge, user_id=False, restrict_goal_ids=subset_goal_ids, restrict_top=False, context=context)
ctx.update({'challenge_lines': lines_boards})
body_html = temp_obj.render_template(cr, uid, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send to every follower and participant of the challenge
self.message_post(cr, uid, challenge.id,
body=body_html,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
else:
# generate individual reports
for user in users or challenge.user_ids:
goals = self._get_serialized_challenge_lines(cr, uid, challenge, user.id, restrict_goal_ids=subset_goal_ids, context=context)
if not goals:
continue
ctx.update({'challenge_lines': goals})
body_html = temp_obj.render_template(cr, user.id, challenge.report_template_id.body_html, 'gamification.challenge', challenge.id, context=ctx)
# send message only to users, not on the challenge
self.message_post(cr, uid, 0,
body=body_html,
partner_ids=[(4, user.partner_id.id)],
context=context,
subtype='mail.mt_comment')
if challenge.report_message_group_id:
self.pool.get('mail.group').message_post(cr, uid, challenge.report_message_group_id.id,
body=body_html,
context=context,
subtype='mail.mt_comment')
return self.write(cr, uid, challenge.id, {'last_report_date': fields.date.today()}, context=context)
##### Challenges #####
# TODO in trunk, remove unused parameter user_id
def accept_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user accept the suggested challenge"""
return self._accept_challenge(cr, uid, uid, challenge_ids, context=context)
def _accept_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has joined the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)], 'user_ids': [(4, user_id)]}, context=context)
return self._generate_goals_from_challenge(cr, SUPERUSER_ID, challenge_ids, context=context)
# TODO in trunk, remove unused parameter user_id
def discard_challenge(self, cr, uid, challenge_ids, context=None, user_id=None):
"""The user discard the suggested challenge"""
return self._discard_challenge(cr, uid, uid, challenge_ids, context=context)
def _discard_challenge(self, cr, uid, user_id, challenge_ids, context=None):
user = self.pool.get('res.users').browse(cr, uid, user_id, context=context)
message = "%s has refused the challenge" % user.name
self.message_post(cr, SUPERUSER_ID, challenge_ids, body=message, context=context)
return self.write(cr, SUPERUSER_ID, challenge_ids, {'invited_user_ids': [(3, user_id)]}, context=context)
def reply_challenge_wizard(self, cr, uid, challenge_id, context=None):
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'gamification', 'challenge_wizard')
id = result and result[1] or False
result = self.pool.get('ir.actions.act_window').read(cr, uid, [id], context=context)[0]
result['res_id'] = challenge_id
return result
def check_challenge_reward(self, cr, uid, ids, force=False, context=None):
"""Actions for the end of a challenge
If a reward was selected, grant it to the correct users.
Rewards granted at:
- the end date for a challenge with no periodicity
- the end of a period for challenge with periodicity
- when a challenge is manually closed
(if no end date, a running challenge is never rewarded)
"""
if isinstance(ids, (int,long)):
ids = [ids]
commit = context.get('commit_gamification', False)
for challenge in self.browse(cr, uid, ids, context=context):
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
yesterday = date.today() - timedelta(days=1)
rewarded_users = []
challenge_ended = end_date == yesterday.strftime(DF) or force
if challenge.reward_id and (challenge_ended or challenge.reward_realtime):
# not using start_date, as intemporal goals have a start date but no end_date
reached_goals = self.pool.get('gamification.goal').read_group(cr, uid, [
('challenge_id', '=', challenge.id),
('end_date', '=', end_date),
('state', '=', 'reached')
], fields=['user_id'], groupby=['user_id'], context=context)
for reach_goals_user in reached_goals:
if reach_goals_user['user_id_count'] == len(challenge.line_ids):
# the user has succeeded every assigned goal
user_id = reach_goals_user['user_id'][0]
if challenge.reward_realtime:
badges = self.pool['gamification.badge.user'].search(cr, uid, [
('challenge_id', '=', challenge.id),
('badge_id', '=', challenge.reward_id.id),
('user_id', '=', user_id),
], count=True, context=context)
if badges > 0:
# has already received the badge for this challenge
continue
self.reward_user(cr, uid, user_id, challenge.reward_id.id, challenge.id, context=context)
rewarded_users.append(user_id)
if commit:
cr.commit()
if challenge_ended:
# open chatter message
message_body = _("The challenge %s is finished." % challenge.name)
if rewarded_users:
user_names = self.pool['res.users'].name_get(cr, uid, rewarded_users, context=context)
message_body += _("<br/>Reward (badge %s) for every succeeding user was sent to %s." % (challenge.reward_id.name, ", ".join([name for (user_id, name) in user_names])))
else:
message_body += _("<br/>Nobody has succeeded to reach every goal, no badge is rewared for this challenge.")
# reward bests
if challenge.reward_first_id:
(first_user, second_user, third_user) = self.get_top3_users(cr, uid, challenge, context=context)
if first_user:
self.reward_user(cr, uid, first_user.id, challenge.reward_first_id.id, challenge.id, context=context)
message_body += _("<br/>Special rewards were sent to the top competing users. The ranking for this challenge is :")
message_body += "<br/> 1. %s - %s" % (first_user.name, challenge.reward_first_id.name)
else:
message_body += _("Nobody reached the required conditions to receive special badges.")
if second_user and challenge.reward_second_id:
self.reward_user(cr, uid, second_user.id, challenge.reward_second_id.id, challenge.id, context=context)
message_body += "<br/> 2. %s - %s" % (second_user.name, challenge.reward_second_id.name)
if third_user and challenge.reward_third_id:
self.reward_user(cr, uid, third_user.id, challenge.reward_third_id.id, challenge.id, context=context)
message_body += "<br/> 3. %s - %s" % (third_user.name, challenge.reward_third_id.name)
self.message_post(cr, uid, challenge.id,
partner_ids=[user.partner_id.id for user in challenge.user_ids],
body=message_body,
context=context)
if commit:
cr.commit()
return True
def get_top3_users(self, cr, uid, challenge, context=None):
"""Get the top 3 users for a defined challenge
Ranking criteria:
1. succeed every goal of the challenge
2. total completeness of each goal (can be over 100)
Top 3 is computed only for users succeeding every goal of the challenge,
except if reward_failure is True, in which case every user is
considered.
:return: ('first', 'second', 'third'), tuple containing the res.users
objects of the top 3 users. If no user meets the criteria for a rank,
it is set to False. Nobody can receive a rank if no one receives the
higher one (eg: if 'second' == False, 'third' will be False)
"""
goal_obj = self.pool.get('gamification.goal')
(start_date, end_date) = start_end_date_for_period(challenge.period, challenge.start_date, challenge.end_date)
challengers = []
for user in challenge.user_ids:
all_reached = True
total_completness = 0
# every goal of the user for the running period
goal_ids = goal_obj.search(cr, uid, [
('challenge_id', '=', challenge.id),
('user_id', '=', user.id),
('start_date', '=', start_date),
('end_date', '=', end_date)
], context=context)
for goal in goal_obj.browse(cr, uid, goal_ids, context=context):
if goal.state != 'reached':
all_reached = False
if goal.definition_condition == 'higher':
# can be over 100
total_completness += 100.0 * goal.current / goal.target_goal
elif goal.state == 'reached':
# for lower goals, a percentage cannot be computed, so count 0 or 100
total_completness += 100
challengers.append({'user': user, 'all_reached': all_reached, 'total_completness': total_completness})
sorted_challengers = sorted(challengers, key=lambda k: (k['all_reached'], k['total_completness']), reverse=True)
if len(sorted_challengers) == 0 or (not challenge.reward_failure and not sorted_challengers[0]['all_reached']):
# nobody succeeded
return (False, False, False)
if len(sorted_challengers) == 1 or (not challenge.reward_failure and not sorted_challengers[1]['all_reached']):
# only one user succeeded
return (sorted_challengers[0]['user'], False, False)
if len(sorted_challengers) == 2 or (not challenge.reward_failure and not sorted_challengers[2]['all_reached']):
# only two users succeeded
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], False)
return (sorted_challengers[0]['user'], sorted_challengers[1]['user'], sorted_challengers[2]['user'])
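# Illustrative sketch of the ranking key above: tuples sort lexicographically,
# so users who reached every goal always outrank those who did not, and total
# completeness only breaks ties (hypothetical data):
#
#   challengers = [
#       {'user': 'a', 'all_reached': False, 'total_completness': 250.0},
#       {'user': 'b', 'all_reached': True, 'total_completness': 120.0},
#       {'user': 'c', 'all_reached': True, 'total_completness': 180.0},
#   ]
#   sorted(challengers, key=lambda k: (k['all_reached'], k['total_completness']),
#          reverse=True)  # -> c, b, a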
def reward_user(self, cr, uid, user_id, badge_id, challenge_id=False, context=None):
"""Create a badge user and send the badge to him
:param user_id: the user to reward
:param badge_id: the concerned badge
"""
badge_user_obj = self.pool.get('gamification.badge.user')
user_badge_id = badge_user_obj.create(cr, uid, {'user_id': user_id, 'badge_id': badge_id, 'challenge_id':challenge_id}, context=context)
return badge_user_obj._send_badge(cr, uid, [user_badge_id], context=context)
class gamification_challenge_line(osv.Model):
"""Gamification challenge line
Predefined goal for 'gamification_challenge'
This is a generic list of goals with only the target goal defined
Should only be created for the gamification_challenge object
"""
_name = 'gamification.challenge.line'
_description = 'Gamification generic goal for challenge'
_order = "sequence, id"
def on_change_definition_id(self, cr, uid, ids, definition_id=False, context=None):
goal_definition = self.pool.get('gamification.goal.definition')
if not definition_id:
return {'value': {'definition_id': False}}
goal_definition = goal_definition.browse(cr, uid, definition_id, context=context)
ret = {
'value': {
'condition': goal_definition.condition,
'definition_full_suffix': goal_definition.full_suffix
}
}
return ret
_columns = {
'name': fields.related('definition_id', 'name', string="Name", type="char"),
'challenge_id': fields.many2one('gamification.challenge',
string='Challenge',
required=True,
ondelete="cascade"),
'definition_id': fields.many2one('gamification.goal.definition',
string='Goal Definition',
required=True,
ondelete="cascade"),
'target_goal': fields.float('Target Value to Reach',
required=True),
'sequence': fields.integer('Sequence',
help='Sequence number for ordering'),
'condition': fields.related('definition_id', 'condition', type="selection",
readonly=True, string="Condition", selection=[('lower', '<='), ('higher', '>=')]),
'definition_suffix': fields.related('definition_id', 'suffix', type="char", readonly=True, string="Unit"),
'definition_monetary': fields.related('definition_id', 'monetary', type="boolean", readonly=True, string="Monetary"),
'definition_full_suffix': fields.related('definition_id', 'full_suffix', type="char", readonly=True, string="Suffix"),
}
_defaults = {
'sequence': 1,
}
| agpl-3.0 |
gemmellr/qpid-proton | python/tests/proton_tests/codec.py | 3 | 19278 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
import sys
from uuid import uuid4
from proton import *
from . import common
class Test(common.Test):
def setUp(self):
self.data = Data()
def tearDown(self):
self.data = None
class DataTest(Test):
def testTopLevelNext(self):
assert self.data.next() is None
self.data.put_null()
self.data.put_bool(False)
self.data.put_int(0)
assert self.data.next() is None
self.data.rewind()
assert self.data.next() == Data.NULL
assert self.data.next() == Data.BOOL
assert self.data.next() == Data.INT
assert self.data.next() is None
def testNestedNext(self):
assert self.data.next() is None
self.data.put_null()
assert self.data.next() is None
self.data.put_list()
assert self.data.next() is None
self.data.put_bool(False)
assert self.data.next() is None
self.data.rewind()
assert self.data.next() is Data.NULL
assert self.data.next() is Data.LIST
self.data.enter()
assert self.data.next() is None
self.data.put_ubyte(0)
assert self.data.next() is None
self.data.put_uint(0)
assert self.data.next() is None
self.data.put_int(0)
assert self.data.next() is None
self.data.exit()
assert self.data.next() is Data.BOOL
assert self.data.next() is None
self.data.rewind()
assert self.data.next() is Data.NULL
assert self.data.next() is Data.LIST
assert self.data.enter()
assert self.data.next() is Data.UBYTE
assert self.data.next() is Data.UINT
assert self.data.next() is Data.INT
assert self.data.next() is None
assert self.data.exit()
assert self.data.next() is Data.BOOL
assert self.data.next() is None
def testEnterExit(self):
assert self.data.next() is None
assert not self.data.enter()
self.data.put_list()
assert self.data.enter()
assert self.data.next() is None
self.data.put_list()
assert self.data.enter()
self.data.put_list()
assert self.data.enter()
assert self.data.exit()
assert self.data.get_list() == 0
assert self.data.exit()
assert self.data.get_list() == 1
assert self.data.exit()
assert self.data.get_list() == 1
assert not self.data.exit()
assert self.data.get_list() == 1
assert self.data.next() is None
self.data.rewind()
assert self.data.next() is Data.LIST
assert self.data.get_list() == 1
assert self.data.enter()
assert self.data.next() is Data.LIST
assert self.data.get_list() == 1
assert self.data.enter()
assert self.data.next() is Data.LIST
assert self.data.get_list() == 0
assert self.data.enter()
assert self.data.next() is None
assert self.data.exit()
assert self.data.get_list() == 0
assert self.data.exit()
assert self.data.get_list() == 1
assert self.data.exit()
assert self.data.get_list() == 1
assert not self.data.exit()
def put(self, putter, v):
"""More informative exception from putters, include bad value"""
try:
putter(v)
except Exception:
etype, value, trace = sys.exc_info()
v = etype("%s(%r): %s" % (putter.__name__, v, value))
if trace is None:
raise v
else:
raise v.with_traceback(trace)
return putter
# (bits, signed) for each integer type
INT_TYPES = {
"byte": (8, True),
"ubyte": (8, False),
"short": (16, True),
"ushort": (16, False),
"int": (32, True),
"uint": (32, False),
"long": (64, True),
"ulong": (64, False)
}
def int_values(self, dtype):
"""Set of test values for integer type dtype, include extreme and medial values"""
bits, signed = self.INT_TYPES[dtype]
values = [0, 1, 2, 5, 42]
if signed:
min, max = -2**(bits - 1), 2**(bits - 1) - 1
values.append(max // 2)
values += [-i for i in values if i]
values += [min, max]
else:
max = 2**(bits) - 1
values += [max // 2, max]
return sorted(values)
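# Illustrative sketch: for dtype "byte" (8 bits, signed) the method above
# yields the medial values, their negations, and both extremes:
#
#   self.int_values("byte")
#   # -> [-128, -63, -42, -5, -2, -1, 0, 1, 2, 5, 42, 63, 127]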
def _testArray(self, dtype, descriptor, atype, *values):
if dtype:
dTYPE = getattr(self.data, dtype.upper())
aTYPE = getattr(self.data, atype.upper())
self.data.put_array(dtype is not None, aTYPE)
self.data.enter()
if dtype is not None:
putter = getattr(self.data, "put_%s" % dtype)
self.put(putter, descriptor)
putter = getattr(self.data, "put_%s" % atype)
for v in values:
self.put(putter, v)
self.data.exit()
self.data.rewind()
assert self.data.next() == Data.ARRAY
count, described, type = self.data.get_array()
assert count == len(values), count
if dtype is None:
assert described == False
else:
assert described
assert type == aTYPE, type
assert self.data.enter()
if described:
assert self.data.next() == dTYPE
getter = getattr(self.data, "get_%s" % dtype)
gotten = getter()
assert gotten == descriptor, gotten
if values:
getter = getattr(self.data, "get_%s" % atype)
for v in values:
assert self.data.next() == aTYPE
gotten = getter()
assert gotten == v, gotten
assert self.data.next() is None
assert self.data.exit()
def testStringArray(self):
self._testArray(None, None, "string", "one", "two", "three")
def testDescribedStringArray(self):
self._testArray("symbol", "url", "string", "one", "two", "three")
def _test_int_array(self, atype):
self._testArray(None, None, atype, *self.int_values(atype))
def testByteArray(self): self._test_int_array("byte")
def testUbyteArray(self): self._test_int_array("ubyte")
def testShortArray(self): self._test_int_array("short")
def testUshortArray(self): self._test_int_array("ushort")
def testIntArray(self): self._test_int_array("int")
def testUintArray(self): self._test_int_array("uint")
def testLongArray(self): self._test_int_array("long")
def testUlongArray(self): self._test_int_array("ulong")
def testUUIDArray(self):
self._testArray(None, None, "uuid", uuid4(), uuid4(), uuid4())
def testEmptyArray(self):
self._testArray(None, None, "null")
def testDescribedEmptyArray(self):
self._testArray("long", 0, "null")
def testPropertyDict(self):
a = PropertyDict(one=1, two=2, three=3)
b = PropertyDict({'one': 1, 'two': 2, 'three': 3})
c = PropertyDict(zip(['one', 'two', 'three'], [1, 2, 3]))
d = PropertyDict([('two', 2), ('one', 1), ('three', 3)])
e = PropertyDict({symbol('three'): 3, symbol('one'): 1, symbol('two'): 2})
f = PropertyDict(a)
g = PropertyDict()
g['one'] = 1
g[symbol('two')] = 2
g['three'] = 3
assert a == b == c == d == e == f == g
for k in a.keys():
assert isinstance(k, symbol)
self.assertRaises(KeyError, PropertyDict, {'one': 1, None: 'none'})
self.assertRaises(KeyError, PropertyDict, {'one': 1, 1.23: 4})
def testPropertyDictNoRaiseError(self):
a = PropertyDict(one=1, two=2, three=3, raise_on_error=False)
a[4] = 'four'
b = PropertyDict({'one': 1, 'two': 2, 'three': 3, 4: 'four'}, raise_on_error=False)
c = PropertyDict(zip(['one', 'two', 'three', 4], [1, 2, 3, 'four']), raise_on_error=False)
d = PropertyDict([('two', 2), ('one', 1), ('three', 3), (4, 'four')], raise_on_error=False)
e = PropertyDict({4: 'four', symbol('three'): 3, symbol('one'): 1, symbol('two'): 2}, raise_on_error=False)
f = PropertyDict(a, raise_on_error=False)
g = PropertyDict(raise_on_error=False)
g['one'] = 1
g[4] = 'four'
g[symbol('two')] = 2
g['three'] = 3
assert a == b == c == d == e == f == g
def testAnnotationDict(self):
# AnnotationDict c'tor calls update(), so this method is also covered
a = AnnotationDict(one=1, two=2, three=3)
a[ulong(4)] = 'four'
b = AnnotationDict({'one': 1, 'two': 2, 'three': 3, ulong(4): 'four'})
c = AnnotationDict(zip(['one', 'two', 'three', ulong(4)], [1, 2, 3, 'four']))
d = AnnotationDict([('two', 2), ('one', 1), ('three', 3), (ulong(4), 'four')])
e = AnnotationDict({symbol('three'): 3, ulong(4): 'four', symbol('one'): 1, symbol('two'): 2})
f = AnnotationDict(a)
g = AnnotationDict()
g[ulong(4)] = 'four'
g['one'] = 1
g[symbol('two')] = 2
g['three'] = 3
assert a == b == c == d == e == f == g
for k in a.keys():
assert isinstance(k, (symbol, ulong))
self.assertRaises(KeyError, AnnotationDict, {'one': 1, None: 'none'})
self.assertRaises(KeyError, AnnotationDict, {'one': 1, 1.23: 4})
def testAnnotationDictNoRaiseError(self):
a = AnnotationDict(one=1, two=2, three=3, raise_on_error=False)
a[ulong(4)] = 'four'
a[5] = 'five'
b = AnnotationDict({'one': 1, 'two': 2, 'three': 3, ulong(4): 'four', 5: 'five'}, raise_on_error=False)
c = AnnotationDict(zip(['one', 'two', 'three', ulong(4), 5], [1, 2, 3, 'four', 'five']), raise_on_error=False)
d = AnnotationDict([('two', 2), ('one', 1), ('three', 3),
(ulong(4), 'four'), (5, 'five')], raise_on_error=False)
e = AnnotationDict({5: 'five', symbol('three'): 3, ulong(4): 'four',
symbol('one'): 1, symbol('two'): 2}, raise_on_error=False)
f = AnnotationDict(a, raise_on_error=False)
g = AnnotationDict(raise_on_error=False)
g[ulong(4)] = 'four'
g['one'] = 1
g[symbol('two')] = 2
g[5] = 'five'
g['three'] = 3
assert a == b == c == d == e == f == g
def testSymbolList(self):
a = SymbolList(['one', 'two', 'three'])
b = SymbolList([symbol('one'), symbol('two'), symbol('three')])
c = SymbolList()
c.append('one')
c.extend([symbol('two'), 'three'])
d1 = SymbolList(['one'])
d2 = SymbolList(['two', symbol('three')])
d = d1 + d2
e = SymbolList(['one'])
e += SymbolList(['two', symbol('three')])
f = SymbolList(['one', 'hello', 'goodbye'])
f[1] = symbol('two')
f[2] = 'three'
g = SymbolList(a)
assert a == b == c == d == e == f == g
for v in a:
assert isinstance(v, symbol)
self.assertRaises(TypeError, SymbolList, ['one', None])
self.assertRaises(TypeError, SymbolList, ['one', 2])
self.assertRaises(TypeError, SymbolList, ['one', ['two']])
self.assertRaises(TypeError, SymbolList, ['one', {'two': 3}])
def testSymbolListNoRaiseError(self):
a = SymbolList(['one', 'two', 'three', 4], raise_on_error=False)
b = SymbolList([symbol('one'), symbol('two'), symbol('three'), 4], raise_on_error=False)
c = SymbolList(raise_on_error=False)
c.append('one')
c.extend([symbol('two'), 'three', 4])
d1 = SymbolList(['one'], raise_on_error=False)
d2 = SymbolList(['two', symbol('three'), 4], raise_on_error=False)
d = d1 + d2
e = SymbolList(['one'], raise_on_error=False)
e += SymbolList(['two', symbol('three'), 4], raise_on_error=False)
f = SymbolList(['one', 'hello', 'goodbye', 'what?'], raise_on_error=False)
f[1] = symbol('two')
f[2] = 'three'
f[3] = 4
g = SymbolList(a, raise_on_error=False)
assert a == b == c == d == e == f == g
def _test(self, dtype, *values, **kwargs):
eq = kwargs.get("eq", lambda x, y: x == y)
ntype = getattr(Data, dtype.upper())
putter = getattr(self.data, "put_%s" % dtype)
getter = getattr(self.data, "get_%s" % dtype)
for v in values:
self.put(putter, v)
gotten = getter()
assert eq(gotten, v), (gotten, v)
self.data.rewind()
for v in values:
vtype = self.data.next()
assert vtype == ntype, vtype
gotten = getter()
assert eq(gotten, v), (gotten, v)
encoded = self.data.encode()
copy = Data(0)
while encoded:
n = copy.decode(encoded)
encoded = encoded[n:]
copy.rewind()
cgetter = getattr(copy, "get_%s" % dtype)
for v in values:
vtype = copy.next()
assert vtype == ntype, vtype
gotten = cgetter()
assert eq(gotten, v), (gotten, v)
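# Note on the decode loop above: Data.decode() consumes a single top-level
# value per call and returns the number of bytes used, hence the incremental
# slicing. A minimal standalone sketch of the same pattern:
#
#   src = Data(); src.put_int(1); src.put_int(2)
#   encoded = src.encode()
#   dst = Data(0)
#   while encoded:
#       encoded = encoded[dst.decode(encoded):]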
def _test_int(self, itype):
self._test(itype, *self.int_values(itype))
def testByte(self): self._test_int("byte")
def testUbyte(self):
self._test_int("ubyte")
self.assertRaises(AssertionError, ubyte, -1)
def testShort(self): self._test_int("short")
def testUshort(self):
self._test("ushort")
self.assertRaises(AssertionError, ushort, -1)
def testInt(self): self._test_int("int")
def testUint(self):
self._test_int("uint")
self.assertRaises(AssertionError, uint, -1)
def testLong(self): self._test_int("long")
def testUlong(self):
self._test_int("ulong")
self.assertRaises(AssertionError, ulong, -1)
def testString(self):
self._test("string", "one", "two", "three", "this is a test", "")
def testFloat(self):
# we have to use a special comparison here because python
# internally only uses doubles and converting between floats and
# doubles is imprecise
self._test("float", 0, 1, 2, 3, 0.1, 0.2, 0.3, -1, -2, -3, -0.1, -0.2, -0.3,
eq=lambda x, y: x - y < 0.000001)
def testDouble(self):
self._test("double", 0, 1, 2, 3, 0.1, 0.2, 0.3, -1, -2, -3, -0.1, -0.2, -0.3)
def testBinary(self):
self._test("binary", b"this", b"is", b"a", b"test", b"of" b"b\x00inary")
def testSymbol(self):
self._test("symbol", symbol("this is a symbol test"), symbol("bleh"), symbol("blah"))
def testTimestamp(self):
self._test("timestamp", timestamp(0), timestamp(12345), timestamp(1000000))
def testChar(self):
self._test("char", char('a'), char('b'), char('c'), char(u'\u20AC'))
def testUUID(self):
self._test("uuid", uuid4(), uuid4(), uuid4())
def testDecimal32(self):
self._test("decimal32", decimal32(0), decimal32(1), decimal32(2), decimal32(3), decimal32(4), decimal32(2**30))
def testDecimal64(self):
self._test("decimal64", decimal64(0), decimal64(1), decimal64(2), decimal64(3), decimal64(4), decimal64(2**60))
def testDecimal128(self):
self._test("decimal128", decimal128(b"fdsaasdf;lkjjkl;"), decimal128(b"x" * 16))
def testCopy(self):
self.data.put_described()
self.data.enter()
self.data.put_ulong(123)
self.data.put_map()
self.data.enter()
self.data.put_string("pi")
self.data.put_double(3.14159265359)
dst = Data()
dst.copy(self.data)
copy = dst.format()
orig = self.data.format()
assert copy == orig, (copy, orig)
def testCopyNested(self):
nested = [1, 2, 3, [4, 5, 6], 7, 8, 9]
self.data.put_object(nested)
dst = Data()
dst.copy(self.data)
assert dst.format() == self.data.format()
def testCopyNestedArray(self):
nested = [Array(UNDESCRIBED, Data.LIST,
["first", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
["second", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
["third", [Array(UNDESCRIBED, Data.INT, 1, 2, 3)]],
),
"end"]
self.data.put_object(nested)
dst = Data()
dst.copy(self.data)
assert dst.format() == self.data.format()
def testRoundTrip(self):
obj = {symbol("key"): timestamp(1234),
ulong(123): "blah",
char("c"): "bleh",
u"desc": Described(symbol("url"), u"http://example.org"),
u"array": Array(UNDESCRIBED, Data.INT, 1, 2, 3),
u"list": [1, 2, 3, None, 4],
u"boolean": True}
self.data.put_object(obj)
enc = self.data.encode()
data = Data()
data.decode(enc)
data.rewind()
assert data.next()
copy = data.get_object()
assert copy == obj, (copy, obj)
def testBuffer(self):
try:
self.data.put_object(buffer(b"foo"))
except NameError:
# python >= 3.0 does not have `buffer`
return
data = Data()
data.decode(self.data.encode())
data.rewind()
assert data.next()
assert data.type() == Data.BINARY
assert data.get_object() == b"foo"
def testMemoryView(self):
self.data.put_object(memoryview(b"foo"))
data = Data()
data.decode(self.data.encode())
data.rewind()
assert data.next()
assert data.type() == Data.BINARY
assert data.get_object() == b"foo"
def testLookup(self):
obj = {symbol("key"): u"value",
symbol("pi"): 3.14159,
symbol("list"): [1, 2, 3, 4]}
self.data.put_object(obj)
self.data.rewind()
self.data.next()
self.data.enter()
self.data.narrow()
assert self.data.lookup("pi")
assert self.data.get_object() == 3.14159
self.data.rewind()
assert self.data.lookup("key")
assert self.data.get_object() == u"value"
self.data.rewind()
assert self.data.lookup("list")
assert self.data.get_object() == [1, 2, 3, 4]
self.data.widen()
self.data.rewind()
assert not self.data.lookup("pi")
| apache-2.0 |
ZazieTheBeast/oscar | oscar/lib/python2.7/site-packages/pip/_vendor/distlib/index.py | 571 | 20976 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
self.rpc_proxy = None
with open(os.devnull, 'w') as sink:
for s in ('gpg2', 'gpg'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
distutils to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of output from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically be a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
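# Illustrative sketch (hypothetical values): for filename 'dist/foo-1.0.tar.gz',
# signer 'Jane <jane@example.com>', a passphrase and no keystore, the command
# returned above resembles:
#
#   ['gpg', '--status-fd', '2', '--no-tty', '--batch', '--passphrase-fd', '0',
#    '--detach-sign', '--armor', '--local-user', 'Jane <jane@example.com>',
#    '--output', '/tmp/.../foo-1.0.tar.gz.asc', 'dist/foo-1.0.tar.gz']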
def run_command(self, cmd, input_data=None):
"""
Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
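# Illustrative usage sketch (hypothetical command): the two reader threads
# above drain stdout and stderr concurrently, so a chatty child process
# cannot deadlock on a full pipe.
#
#   index = PackageIndex()
#   rc, out, err = index.run_command([index.gpg, '--version'])
#   assert rc == 0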
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protcol_version': '1',  # sic: misspelling inherited from distutils' upload command
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
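# Illustrative sketch (assumed filenames): verifying a downloaded archive
# against its detached signature, using the instance's gpg configuration:
#
#     if not index.verify_signature('foo-1.0.tar.gz.asc', 'foo-1.0.tar.gz'):
#         raise DistlibException('signature check failed')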
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from a URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
standard library, except that it allows digest computation to be
done during download, checking that the downloaded data matches
any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where ``hasher`` is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected hex digest.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s', digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
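# Illustrative sketch (assumed URL and digest value): downloading a file
# while checking its SHA-256 as the data arrives; the (hasher, value) tuple
# selects the algorithm:
#
#     index.download_file('https://example.com/foo-1.0.tar.gz',
#                         '/tmp/foo-1.0.tar.gz',
#                         digest=('sha256', expected_hex_digest))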
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuples.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
if self.rpc_proxy is None:
self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
return self.rpc_proxy.search(terms, operator or 'and')
| bsd-3-clause |
maellak/invenio | modules/websubmit/lib/functions/Send_APP_Mail.py | 24 | 12215 | ## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Send_APP_Mail
## This function sends an email informing the original
## submitter of a document that the referee has approved or
## rejected the document. The email is also sent to the
## referee for checking.
## Author: T.Baron
## PARAMETERS:
## newrnin: name of the file containing the 2nd reference
## addressesAPP: email addresses to which the email will
## be sent (additionally to the author)
## categformatAPP: variable needed to derive the addresses
## mentioned above
import os
import re
from invenio.config import CFG_SITE_NAME, \
CFG_SITE_URL, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_CERN_SITE, \
CFG_SITE_RECORD
from invenio.access_control_admin import acc_get_role_users, acc_get_role_id
from invenio.dbquery import run_sql
from invenio.websubmit_config import CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN
from invenio.errorlib import register_exception
from invenio.search_engine import print_record
from invenio.mailutils import scheduled_send_email
from invenio.bibtask import bibtask_allocate_sequenceid
## The field in which to search for the record submitter/owner's email address:
if CFG_CERN_SITE:
## This is a CERN site - we use 859__f for submitter/record owner's email:
CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "859__f"
else:
## Non-CERN site. Use 8560_f for submitter/record owner's email:
CFG_WEBSUBMIT_RECORD_OWNER_EMAIL = "8560_f"
def Send_APP_Mail (parameters, curdir, form, user_info=None):
"""
This function sends an email informing the original submitter of a
document that the referee has approved or rejected the document. The
email is also sent to the referee for checking.
Parameters:
* addressesAPP: email addresses of the people who will receive
this email (comma-separated list). This parameter may contain
the <CATEG> string, in which case the category computed from
the [categformatAPP] parameter replaces this string.
eg.: "<CATEG>-email@cern.ch"
* categformatAPP contains a regular expression used to compute
the category of the document given the reference of the
document.
eg.: if [categformatAPP]="TEST-<CATEG>-.*" and the reference
of the document is "TEST-CATEGORY1-2001-001", then the computed
category equals "CATEGORY1"
* newrnin: Name of the file containing the 2nd reference of the
approved document (if any).
* edsrn: Name of the file containing the reference of the
approved document.
"""
global titlevalue, authorvalue, emailvalue, sysno, rn
FROMADDR = '%s Submission Engine <%s>' % (CFG_SITE_NAME,CFG_SITE_SUPPORT_EMAIL)
sequence_id = bibtask_allocate_sequenceid(curdir)
doctype = form['doctype']
titlevalue = titlevalue.replace("\n"," ")
authorvalue = authorvalue.replace("\n","; ")
# variable declarations
categformat = parameters['categformatAPP']
otheraddresses = parameters['addressesAPP']
newrnpath = parameters['newrnin']
## Get the name of the decision file:
try:
decision_filename = parameters['decision_file']
except KeyError:
decision_filename = ""
## Get the name of the comments file:
try:
comments_filename = parameters['comments_file']
except KeyError:
comments_filename = ""
## Now try to read the comments from the comments_filename:
if comments_filename in (None, "", "NULL"):
## We don't have a name for the comments file.
## For backward compatibility reasons, try to read the comments from
## a file called 'COM' in curdir:
if os.path.exists("%s/COM" % curdir):
try:
fh_comments = open("%s/COM" % curdir, "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Unable to open the comments file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"comments file [%s/COM] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
comment = ""
else:
comment = comment.strip()
else:
comment = ""
else:
## Try to read the comments from the comments file:
if os.path.exists("%s/%s" % (curdir, comments_filename)):
try:
fh_comments = open("%s/%s" % (curdir, comments_filename), "r")
comment = fh_comments.read()
fh_comments.close()
except IOError:
## Oops, unable to open the comments file.
comment = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open comments " \
"file [%s/%s] but was unable to." \
% (curdir, comments_filename)
register_exception(prefix=exception_prefix)
else:
comment = comment.strip()
else:
comment = ""
## Now try to read the decision from the decision_filename:
if decision_filename in (None, "", "NULL"):
## We don't have a name for the decision file.
## For backward compatibility reasons, try to read the decision from
## a file called 'decision' in curdir:
if os.path.exists("%s/decision" % curdir):
try:
fh_decision = open("%s/decision" % curdir, "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Unable to open the decision file
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open " \
"decision file [%s/decision] but was " \
"unable to." % curdir
register_exception(prefix=exception_prefix)
decision = ""
else:
decision = decision.strip()
else:
decision = ""
else:
## Try to read the decision from the decision file:
try:
fh_decision = open("%s/%s" % (curdir, decision_filename), "r")
decision = fh_decision.read()
fh_decision.close()
except IOError:
## Oops, unable to open the decision file.
decision = ""
exception_prefix = "Error in WebSubmit function " \
"Send_APP_Mail. Tried to open decision " \
"file [%s/%s] but was unable to." \
% (curdir, decision_filename)
register_exception(prefix=exception_prefix)
else:
decision = decision.strip()
if os.path.exists("%s/%s" % (curdir,newrnpath)):
fp = open("%s/%s" % (curdir,newrnpath) , "r")
newrn = fp.read()
fp.close()
else:
newrn = ""
# Document name
res = run_sql("SELECT ldocname FROM sbmDOCTYPE WHERE sdocname=%s", (doctype,))
docname = res[0][0]
# retrieve category
categformat = categformat.replace("<CATEG>", "([^-]*)")
m_categ_search = re.match(categformat, rn)
if m_categ_search is not None:
if len(m_categ_search.groups()) > 0:
## Found a match for the category of this document. Get it:
category = m_categ_search.group(1)
else:
## This document has no category.
category = "unknown"
else:
category = "unknown"
## Get the referee email address:
if CFG_CERN_SITE:
## The referee system at CERN now works with listbox membership.
## List names should take the format
## "service-cds-referee-doctype-category@cern.ch"
## Make sure that your list exists!
## FIXME - to be replaced by a mailing alias in webaccess in the
## future.
referee_listname = "service-cds-referee-%s" % doctype.lower()
if category != "":
referee_listname += "-%s" % category.lower()
referee_listname += "@cern.ch"
addresses = referee_listname
else:
# Build referee's email address
refereeaddress = ""
# Try to retrieve the referee's email from the referee's database
for user in acc_get_role_users(acc_get_role_id("referee_%s_%s" % (doctype,category))):
refereeaddress += user[1] + ","
# And if there is a general referee
for user in acc_get_role_users(acc_get_role_id("referee_%s_*" % doctype)):
refereeaddress += user[1] + ","
refereeaddress = re.sub(",$","",refereeaddress)
# Creation of the mail for the referee
otheraddresses = otheraddresses.replace("<CATEG>",category)
addresses = ""
if refereeaddress != "":
addresses = refereeaddress + ","
if otheraddresses != "":
addresses += otheraddresses
else:
addresses = re.sub(",$","",addresses)
## Add the record's submitter(s) into the list of recipients:
## Get the email address(es) of the record submitter(s)/owner(s) from
## the record itself:
record_owners = print_record(sysno, 'tm', \
[CFG_WEBSUBMIT_RECORD_OWNER_EMAIL]).strip()
if record_owners != "":
record_owners_list = record_owners.split("\n")
record_owners_list = [email.lower().strip() \
for email in record_owners_list]
else:
## If the record owner cannot be retrieved from the metadata
## (in case the record has not been inserted yet),
## try to use the global variable emailvalue:
try:
record_owners_list = [emailvalue]
except NameError:
record_owners_list = []
record_owners = ",".join([owner for owner in record_owners_list])
if record_owners != "":
addresses += ",%s" % record_owners
if decision == "approve":
mailtitle = "%s has been approved" % rn
mailbody = "The %s %s has been approved." % (docname,rn)
mailbody += "\nIt will soon be accessible here:\n\n<%s/%s/%s>" % (CFG_SITE_URL,CFG_SITE_RECORD,sysno)
else:
mailtitle = "%s has been rejected" % rn
mailbody = "The %s %s has been rejected." % (docname,rn)
if rn != newrn and decision == "approve" and newrn != "":
mailbody += "\n\nIts new reference number is: %s" % newrn
mailbody += "\n\nTitle: %s\n\nAuthor(s): %s\n\n" % (titlevalue,authorvalue)
if comment != "":
mailbody += "Comments from the referee:\n%s\n" % comment
# Send mail to referee if any recipients or copy to admin
if addresses or CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN:
scheduled_send_email(FROMADDR, addresses, mailtitle, mailbody,
copy_to_admin=CFG_WEBSUBMIT_COPY_MAILS_TO_ADMIN,
other_bibtasklet_arguments=['-I', str(sequence_id)])
return ""
| gpl-2.0 |
piffey/ansible | lib/ansible/utils/module_docs_fragments/shell_common.py | 21 | 1782 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# common shell documentation fragment
DOCUMENTATION = """
options:
remote_tmp:
description:
- Temporary directory to use on targets when executing tasks.
default: '~/.ansible/tmp'
env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
ini:
- section: defaults
key: remote_tmp
vars:
- name: ansible_remote_tmp
system_tmpdirs:
description:
- "List of valid system temporary directories for Ansible to choose when it cannot use
``remote_tmp``, normally due to permission issues. These must be world readable, writable,
and executable."
default: [ /var/tmp, /tmp ]
type: list
env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
ini:
- section: defaults
key: system_tmpdirs
vars:
- name: ansible_system_tmpdirs
async_dir:
description:
- Directory in which Ansible will keep async job information.
default: '~/.ansible_async'
env: [{name: ANSIBLE_ASYNC_DIR}]
ini:
- section: defaults
key: async_dir
vars:
- name: ansible_async_dir
environment:
type: dict
default: {}
description:
- Dictionary of environment variables and their values to use when executing commands.
admin_users:
type: list
default: ['root', 'toor', 'admin']
description:
- List of users expected to have admin privileges; for BSD you might want to add 'toor', for Windows 'Administrator'.
env:
- name: ANSIBLE_ADMIN_USERS
ini:
- section: defaults
key: admin_users
vars:
- name: ansible_admin_users
"""
| gpl-3.0 |
Pal3love/otRebuilder | Package/otRebuilder/Dep/fontTools/ttLib/tables/C_O_L_R_.py | 3 | 5304 | # Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.misc.textTools import safeEval
from . import DefaultTable
import operator
import struct
class table_C_O_L_R_(DefaultTable.DefaultTable):
""" This table is structured so that you can treat it like a dictionary keyed by glyph name.
ttFont['COLR'][<glyphName>] will return the color layers for any glyph
ttFont['COLR'][<glyphName>] = <value> will set the color layers for any glyph.
"""
def decompile(self, data, ttFont):
self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14])
assert (self.version == 0), "Version of COLR table is higher than I know how to handle"
glyphOrder = ttFont.getGlyphOrder()
gids = []
layerLists = []
glyphPos = offsetBaseGlyphRecord
for i in range(numBaseGlyphRecords):
gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6])
glyphPos += 6
gids.append(gid)
assert (firstLayerIndex + numLayers <= numLayerRecords)
layerPos = offsetLayerRecord + firstLayerIndex * 4
layers = []
for j in range(numLayers):
layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4])
try:
layerName = glyphOrder[layerGid]
except IndexError:
layerName = self.getGlyphName(layerGid)
layerPos += 4
layers.append(LayerRecord(layerName, colorID))
layerLists.append(layers)
self.ColorLayers = colorLayerLists = {}
try:
names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids))
except IndexError:
getGlyphName = self.getGlyphName
names = list(map(getGlyphName, gids))
list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists))
def compile(self, ttFont):
ordered = []
ttFont.getReverseGlyphMap(rebuild=True)
glyphNames = self.ColorLayers.keys()
for glyphName in glyphNames:
try:
gid = ttFont.getGlyphID(glyphName)
except:
assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
ordered.sort()
glyphMap = []
layerMap = []
for (gid, glyphName, layers) in ordered:
glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers)))
for layer in layers:
layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), layer.colorID))
dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))]
dataList.extend(glyphMap)
dataList.extend(layerMap)
data = bytesjoin(dataList)
return data
def toXML(self, writer, ttFont):
writer.simpletag("version", value=self.version)
writer.newline()
ordered = []
glyphNames = self.ColorLayers.keys()
for glyphName in glyphNames:
try:
gid = ttFont.getGlyphID(glyphName)
except:
assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
ordered.sort()
for entry in ordered:
writer.begintag("ColorGlyph", name=entry[1])
writer.newline()
for layer in entry[2]:
layer.toXML(writer, ttFont)
writer.endtag("ColorGlyph")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if not hasattr(self, "ColorLayers"):
self.ColorLayers = {}
self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
if name == "ColorGlyph":
glyphName = attrs["name"]
for element in content:
if isinstance(element, basestring):
continue
layers = []
for element in content:
if isinstance(element, basestring):
continue
layer = LayerRecord()
layer.fromXML(element[0], element[1], element[2], ttFont)
layers.append(layer)
operator.setitem(self, glyphName, layers)
elif "value" in attrs:
setattr(self, name, safeEval(attrs["value"]))
def __getitem__(self, glyphSelector):
if isinstance(glyphSelector, int):
# it's a gid; convert to glyph name
glyphSelector = self.getGlyphName(glyphSelector)
if glyphSelector not in self.ColorLayers:
return None
return self.ColorLayers[glyphSelector]
def __setitem__(self, glyphSelector, value):
if isinstance(glyphSelector, int):
# it's a gid; convert to glyph name
glyphSelector = self.getGlyphName(glyphSelector)
if value:
self.ColorLayers[glyphSelector] = value
elif glyphSelector in self.ColorLayers:
del self.ColorLayers[glyphSelector]
def __delitem__(self, glyphSelector):
del self.ColorLayers[glyphSelector]
class LayerRecord(object):
def __init__(self, name=None, colorID=None):
self.name = name
self.colorID = colorID
def toXML(self, writer, ttFont):
writer.simpletag("layer", name=self.name, colorID=self.colorID)
writer.newline()
def fromXML(self, eltname, attrs, content, ttFont):
for (name, value) in attrs.items():
if name == "name":
if isinstance(value, int):
value = ttFont.getGlyphName(value)
setattr(self, name, value)
else:
setattr(self, name, safeEval(value))
| mit |
hendradarwin/VTK | IO/Image/Testing/Python/dem.py | 20 | 3326 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
Scale = 5
lut = vtk.vtkLookupTable()
lut.SetHueRange(0.6, 0)
lut.SetSaturationRange(1.0, 0)
lut.SetValueRange(0.5, 1.0)
demModel = vtk.vtkDEMReader()
demModel.SetFileName(VTK_DATA_ROOT + "/Data/SainteHelens.dem")
demModel.Update()
lo = Scale * demModel.GetElevationBounds()[0]
hi = Scale * demModel.GetElevationBounds()[1]
demActor = vtk.vtkLODActor()
# create a pipeline for each lod mapper
lods = ["4", "8", "16"]
for lod in lods:
exec("shrink" + lod + " = vtk.vtkImageShrink3D()")
eval("shrink" + lod).SetShrinkFactors(int(lod), int(lod), 1)
eval("shrink" + lod).SetInputConnection(demModel.GetOutputPort())
eval("shrink" + lod).AveragingOn()
exec("geom" + lod + " = vtk.vtkImageDataGeometryFilter()")
eval("geom" + lod).SetInputConnection(eval("shrink" + lod).GetOutputPort())
eval("geom" + lod).ReleaseDataFlagOn()
exec("warp" + lod + " = vtk.vtkWarpScalar()")
eval("warp" + lod).SetInputConnection(eval("geom" + lod).GetOutputPort())
eval("warp" + lod).SetNormal(0, 0, 1)
eval("warp" + lod).UseNormalOn()
eval("warp" + lod).SetScaleFactor(Scale)
eval("warp" + lod).ReleaseDataFlagOn()
exec("elevation" + lod + " = vtk.vtkElevationFilter()")
eval("elevation" + lod).SetInputConnection(
eval("warp" + lod).GetOutputPort())
eval("elevation" + lod).SetLowPoint(0, 0, lo)
eval("elevation" + lod).SetHighPoint(0, 0, hi)
eval("elevation" + lod).SetScalarRange(lo, hi)
eval("elevation" + lod).ReleaseDataFlagOn()
exec("toPoly" + lod + " = vtk.vtkCastToConcrete()")
eval("toPoly" + lod).SetInputConnection(
eval("elevation" + lod).GetOutputPort())
exec("normals" + lod + " = vtk.vtkPolyDataNormals()")
eval("normals" + lod).SetInputConnection(
eval("toPoly" + lod).GetOutputPort())
eval("normals" + lod).SetFeatureAngle(60)
eval("normals" + lod).ConsistencyOff()
eval("normals" + lod).SplittingOff()
eval("normals" + lod).ReleaseDataFlagOn()
exec("demMapper" + lod + " = vtk.vtkPolyDataMapper()")
eval("demMapper" + lod).SetInputConnection(
eval("normals" + lod).GetOutputPort())
eval("demMapper" + lod).SetScalarRange(lo, hi)
eval("demMapper" + lod).SetLookupTable(lut)
eval("demMapper" + lod).ImmediateModeRenderingOn()
eval("demMapper" + lod).Update()
demActor.AddLODMapper(eval("demMapper" + lod))
# Add the actors to the renderer, set the background and size
#
ren1.AddActor(demActor)
ren1.SetBackground(.4, .4, .4)
iren.SetDesiredUpdateRate(1)
def TkCheckAbort(object_binding, event_name):
if renWin.GetEventPending() != 0:
renWin.SetAbortRender(1)
renWin.AddObserver("AbortCheckEvent", TkCheckAbort)
ren1.GetActiveCamera().SetViewUp(0, 0, 1)
ren1.GetActiveCamera().SetPosition(-99900, -21354, 131801)
ren1.GetActiveCamera().SetFocalPoint(41461, 41461, 2815)
ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.2)
ren1.ResetCameraClippingRange()
renWin.Render()
#iren.Start()
| bsd-3-clause |
lehins/python-surveymonkey | surveymonkey/calls/batch.py | 1 | 1109 | from surveymonkey.calls.base import Call
class Batch(Call):
def __create_flow(self, survey, collector, email_message, **kwargs):
params = {
'survey': survey,
'collector': collector,
'email_message': email_message
}
assert collector.get('type') == 'email', \
"Only supported collector type for this call is 'email'"
return self.make_call(self.__create_flow, params, kwargs)
__create_flow.allowed_params = [
'survey', 'collector', 'email_message'
]
create_flow = __create_flow
def __send_flow(self, survey_id, collector, email_message, **kwargs):
params = {
'survey_id': survey_id,
'collector': collector,
'email_message': email_message
}
assert collector.get('type') == 'email', \
"Only supported collector type for this call is 'email'"
return self.make_call(self.__send_flow, params, kwargs)
__send_flow.allowed_params = [
'survey_id', 'collector', 'email_message'
]
send_flow = __send_flow
| mit |
DanielNeugebauer/adhocracy | src/adhocracy/migration/versions/046_fix_text_parent_child_column_naming.py | 6 | 2255 | from datetime import datetime
from sqlalchemy import Column, ForeignKey, MetaData, Table
from sqlalchemy import Boolean, DateTime, Integer, Unicode, UnicodeText
metadata = MetaData()
user_table = Table('user', metadata,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=False),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=False),
Column('locale', Unicode(7), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime),
Column('banned', Boolean, default=False),
Column('no_help', Boolean, default=False, nullable=True),
Column('page_size', Integer, default=10, nullable=True),
Column('proposal_sort_order', Unicode(50), default=None, nullable=True)
)
page_table = Table('page', metadata,
Column('id', Integer, ForeignKey('delegateable.id'), primary_key=True),
Column('function', Unicode(20))
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
text_table = Table('text', metadata,
Column('id', Integer, primary_key=True),
Column('page_id', Integer, ForeignKey('page.id'), nullable=False),
Column('user_id', Integer, ForeignKey('user.id'), nullable=False),
Column('parent_id', Integer, ForeignKey('text.id'), nullable=True),
Column('variant', Unicode(255), nullable=True),
Column('title', Unicode(255), nullable=True),
Column('text', UnicodeText(), nullable=True),
Column('wiki', Boolean, default=False),
Column('create_time', DateTime, default=datetime.utcnow),
Column('delete_time', DateTime)
)
text_table.c.parent_id.alter(name='child_id')
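# For reference, the alter() above should emit SQL along these lines
# (dialect-dependent; shown only as an illustration for MySQL):
#
#     ALTER TABLE text CHANGE parent_id child_id INTEGER;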
def downgrade(migrate_engine):
raise NotImplementedError()
| agpl-3.0 |
Nazanin84/python-buildpack | vendor/pip-pop/docopt.py | 283 | 19784 | """Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, vladimir@keleshev.com
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.1'
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, child in enumerate(self.children):
if not hasattr(child, 'children'):
assert child in uniq
self.children[i] = uniq[uniq.index(child)]
else:
child.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(child.children) for child in transform(self).children]
for case in either:
for e in [child for child in case if case.count(child) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
def transform(pattern):
"""Expand pattern into an (almost) equivalent one, but with single Either.
Example: ((-a | -b) (-c | -d)) => (-a -c | -a -d | -b -c | -b -d)
Quirks: [-a] => (-a), (-a...) => (-a -a)
"""
result = []
groups = [[pattern]]
while groups:
children = groups.pop(0)
parents = [Required, Optional, OptionsShortcut, Either, OneOrMore]
if any(t in map(type, children) for t in parents):
child = [c for c in children if type(c) in parents][0]
children.remove(child)
if type(child) is Either:
for c in child.children:
groups.append([c] + children)
elif type(child) is OneOrMore:
groups.append(child.children * 2 + children)
else:
groups.append(child.children + children)
else:
result.append(children)
return Either(*[Required(*e) for e in result])
class LeafPattern(Pattern):
"""Leaf/terminal node of a pattern tree."""
def __init__(self, name, value=None):
self.name, self.value = name, value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
class BranchPattern(Pattern):
"""Branch/inner node of a pattern tree."""
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([child.flat(*types) for child in self.children], [])
class Argument(LeafPattern):
def single_match(self, left):
for n, pattern in enumerate(left):
if type(pattern) is Argument:
return n, Argument(self.name, pattern.value)
return None, None
@classmethod
def parse(class_, source):
name = re.findall(r'(<\S*?>)', source)[0]
value = re.findall(r'\[default: (.*)\]', source, flags=re.I)
return class_(name, value[0] if value else None)
class Command(Argument):
def __init__(self, name, value=False):
self.name, self.value = name, value
def single_match(self, left):
for n, pattern in enumerate(left):
if type(pattern) is Argument:
if pattern.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
class Option(LeafPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long, self.argcount = short, long, argcount
self.value = None if value is False and argcount else value
@classmethod
def parse(class_, option_description):
short, long, argcount, value = None, None, 0, False
options, _, description = option_description.strip().partition(' ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
matched = re.findall(r'\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return class_(short, long, argcount, value)
def single_match(self, left):
for n, pattern in enumerate(left):
if self.name == pattern.name:
return n, pattern
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
class Required(BranchPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for pattern in self.children:
matched, l, c = pattern.match(l, c)
if not matched:
return False, left, collected
return True, l, c
class Optional(BranchPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for pattern in self.children:
m, left, collected = pattern.match(left, collected)
return True, left, collected
class OptionsShortcut(Optional):
"""Marker/placeholder for [options] shortcut."""
class OneOrMore(BranchPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
class Either(BranchPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
for pattern in self.children:
matched, _, _ = outcome = pattern.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
class Tokens(list):
def __init__(self, source, error=DocoptExit):
self += source.split() if hasattr(source, 'split') else source
self.error = error
@staticmethod
def from_pattern(source):
source = re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source)
source = [s for s in re.split(r'\s+|(\S*<.*?>)', source) if s]
return Tokens(source, error=DocoptLanguageError)
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
def parse_long(tokens, options):
"""long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
long, eq, value = tokens.move().partition('=')
assert long.startswith('--')
value = None if eq == value == '' else value
similar = [o for o in options if o.long == long]
if tokens.error is DocoptExit and similar == []: # if no exact match
similar = [o for o in options if o.long and o.long.startswith(long)]
if len(similar) > 1: # might be simply specified ambiguously 2+ times?
raise tokens.error('%s is not a unique prefix: %s?' %
(long, ', '.join(o.long for o in similar)))
elif len(similar) < 1:
argcount = 1 if eq == '=' else 0
o = Option(None, long, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = Option(None, long, argcount, value if argcount else True)
else:
o = Option(similar[0].short, similar[0].long,
similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error('%s must not have an argument' % o.long)
else:
if value is None:
if tokens.current() in [None, '--']:
raise tokens.error('%s requires argument' % o.long)
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def parse_shorts(tokens, options):
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
assert token.startswith('-') and not token.startswith('--')
left = token.lstrip('-')
parsed = []
while left != '':
short, left = '-' + left[0], left[1:]
similar = [o for o in options if o.short == short]
if len(similar) > 1:
raise tokens.error('%s is specified ambiguously %d times' %
(short, len(similar)))
elif len(similar) < 1:
o = Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = Option(short, None, 0, True)
else: # why copying is necessary here?
o = Option(short, similar[0].long,
similar[0].argcount, similar[0].value)
value = None
if o.argcount != 0:
if left == '':
if tokens.current() in [None, '--']:
raise tokens.error('%s requires argument' % short)
value = tokens.move()
else:
value = left
left = ''
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def parse_pattern(source, options):
tokens = Tokens.from_pattern(source)
result = parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
return Required(*result)
def parse_expr(tokens, options):
"""expr ::= seq ( '|' seq )* ;"""
seq = parse_seq(tokens, options)
if tokens.current() != '|':
return seq
result = [Required(*seq)] if len(seq) > 1 else seq
while tokens.current() == '|':
tokens.move()
seq = parse_seq(tokens, options)
result += [Required(*seq)] if len(seq) > 1 else seq
return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result
def parse_atom(tokens, options):
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| long | shorts | argument | command ;
"""
token = tokens.current()
result = []
if token in '([':
tokens.move()
matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
result = pattern(*parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error("unmatched '%s'" % token)
return [result]
elif token == 'options':
tokens.move()
return [OptionsShortcut()]
elif token.startswith('--') and token != '--':
return parse_long(tokens, options)
elif token.startswith('-') and token not in ('-', '--'):
return parse_shorts(tokens, options)
elif token.startswith('<') and token.endswith('>') or token.isupper():
return [Argument(tokens.move())]
else:
return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
parsed = []
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed
def parse_defaults(doc):
defaults = []
for s in parse_section('options:', doc):
# FIXME corner case "bla: options: --foo"
_, _, s = s.partition(':') # get rid of "options:"
split = re.split(r'\n[ \t]*(-\S+?)', '\n' + s)[1:]
split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
options = [Option.parse(s) for s in split if s.startswith('-')]
defaults += options
return defaults
def parse_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
def formal_usage(section):
_, _, section = section.partition(':') # drop "usage:"
pu = section.split()
return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )'
def extras(help, version, options, doc):
if help and any((o.name in ('-h', '--help')) and o.value for o in options):
print(doc.strip("\n"))
sys.exit()
if version and any(o.name == '--version' and o.value for o in options):
print(version)
sys.exit()
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
... Usage:
... my_program tcp <host> <port> [--timeout=<seconds>]
... my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
... my_program (-h | --help | --version)
...
... Options:
... -h, --help Show this screen and exit.
... --baud=<n> Baudrate [default: 9600]
... '''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
argv = sys.argv[1:] if argv is None else argv
usage_sections = parse_section('usage:', doc)
if len(usage_sections) == 0:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_sections) > 1:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
DocoptExit.usage = usage_sections[0]
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(Tokens(argv), list(options), options_first)
pattern_options = set(pattern.flat(Option))
for options_shortcut in pattern.flat(OptionsShortcut):
doc_options = parse_defaults(doc)
options_shortcut.children = list(set(doc_options) - pattern_options)
#if any_options:
# options_shortcut.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit()
| mit |
cbertinato/pandas | pandas/core/indexes/datetimelike.py | 1 | 26175 | """
Base and utility classes for tseries type pandas objects.
"""
import operator
from typing import Set
import warnings
import numpy as np
from pandas._libs import NaT, iNaT, lib
from pandas._libs.algos import unique_deltas
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, is_dtype_equal, is_float, is_integer, is_list_like,
is_period_dtype, is_scalar)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core import algorithms, ops
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import ExtensionOpsMixin
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin, _ensure_datetimelike_to_i8)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.tools.timedeltas import to_timedelta
import pandas.io.formats.printing as printing
from pandas.tseries.frequencies import to_offset
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
"""
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
"""
Common ops mixin to support a unified interface for datetime-like Index subclasses.
"""
_data = None
# DatetimeLikeArrayMixin assumes subclasses are mutable, so these are
# properties there. They can be made into cache_readonly for Index
# subclasses bc they are immutable
inferred_freq = cache_readonly(
DatetimeLikeArrayMixin.inferred_freq.fget) # type: ignore
_isnan = cache_readonly(DatetimeLikeArrayMixin._isnan.fget) # type: ignore
hasnans = cache_readonly(
DatetimeLikeArrayMixin._hasnans.fget) # type: ignore
_hasnans = hasnans # for index / array -agnostic code
_resolution = cache_readonly(
DatetimeLikeArrayMixin._resolution.fget) # type: ignore
resolution = cache_readonly(
DatetimeLikeArrayMixin.resolution.fget) # type: ignore
_maybe_mask_results = ea_passthrough(
DatetimeLikeArrayMixin._maybe_mask_results)
__iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__)
mean = ea_passthrough(DatetimeLikeArrayMixin.mean)
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._data.freq
@freq.setter
def freq(self, value):
# validation is handled by _data setter
self._data.freq = value
@property
def freqstr(self):
"""
Return the frequency object as a string if it is set, otherwise None.
"""
return self._data.freqstr
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self._data.unique()
# Note: if `self` is already unique, then self.unique() should share
# a `freq` with self. If not already unique, then self.freq must be
# None, so again sharing freq is correct.
return self._shallow_copy(result._data)
@classmethod
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = '__{}__'.format(op.__name__)
return wrapper
@property
def _ndarray_values(self):
return self._data._ndarray_values
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self):
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
@property # type: ignore # https://github.com/python/mypy/issues/1362
@Appender(DatetimeLikeArrayMixin.asi8.__doc__)
def asi8(self):
return self._data.asi8
# ------------------------------------------------------------------------
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except Exception:
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
elif is_period_dtype(self):
if not is_period_dtype(other):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
"""
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries,
DatetimeLikeArrayMixin)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _ensure_localized(self, arg, ambiguous='raise', nonexistent='raise',
from_utc=False):
# See DatetimeLikeArrayMixin._ensure_localized.__doc__
if getattr(self, 'tz', None):
# ensure_localized is only relevant for tz-aware DTI
result = self._data._ensure_localized(arg,
ambiguous=ambiguous,
nonexistent=nonexistent,
from_utc=from_utc)
return type(self)._simple_new(result, name=self.name)
return arg
def _box_values(self, values):
return self._data._box_values(values)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return (is_scalar(res) or isinstance(res, slice) or
(is_list_like(res) and len(res)))
except (KeyError, TypeError, ValueError):
return False
contains = __contains__
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, mapper, na_action=None):
try:
result = mapper(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError('The map function must return an Index object')
return result
except Exception:
return self.astype(object).map(mapper)
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index.
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(self.asi8, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT)
# keep freq in PeriodArray/Index, reset otherwise
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(taken, freq=freq)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
@property
def asobject(self):
"""
Return object Index which contains boxed values.
.. deprecated:: 0.23.0
Use ``astype(object)`` instead.
*this is an internal non-public method*
"""
warnings.warn("'asobject' is deprecated. Use 'astype(object)'"
" instead", FutureWarning, stacklevel=2)
return self.astype(object)
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
return tolerance
def tolist(self):
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def _format_with_header(self, header, na_rep='NaT', **kwargs):
return header + list(self._format_native_types(na_rep, **kwargs))
@property
def _formatter_func(self):
raise AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == 'freq':
freq = self.freqstr
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq', freq))
return attrs
# --------------------------------------------------------------------
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return super()._convert_scalar_indexer(key, kind=kind)
@classmethod
def _add_datetimelike_methods(cls):
"""
Add in the datetimelike methods (as we may have to override the
superclass).
"""
def __add__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__add__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__add__ = __add__
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
cls.__radd__ = __radd__
def __sub__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__sub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__sub__ = __sub__
def __rsub__(self, other):
result = self._data.__rsub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__rsub__ = __rsub__
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if len(self) == 0:
return self.copy()
if len(other) == 0:
return other.copy()
if not isinstance(other, type(self)):
result = Index.intersection(self, other, sort=sort)
if isinstance(result, type(self)):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
elif (other.freq is None or self.freq is None or
other.freq != self.freq or
not other.freq.isAnchored() or
(not self.is_monotonic or not other.is_monotonic)):
result = Index.intersection(self, other, sort=sort)
# Invalidate the freq of `result`, which may not be correct at
# this point, depending on the values.
result.freq = None
if hasattr(self, 'tz'):
result = self._shallow_copy(result._values, name=result.name,
tz=result.tz, freq=None)
else:
result = self._shallow_copy(result._values, name=result.name,
freq=None)
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
# and ends with the index whose last element is smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
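# Hedged sketch of the fast path above, assuming two overlapping
# monotonic indexes sharing an anchored freq: the overlap is simply
# start = right[0], end = min(left[-1], right[-1]), sliced out of
# `left` via slice_locs.
#
#   left = pd.date_range('2019-01-01', periods=5, freq='D')
#   right = pd.date_range('2019-01-03', periods=5, freq='D')
#   left.intersection(right)  # 2019-01-03 .. 2019-01-05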
@Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(self.asi8.repeat(repeats), freq=freq)
@Appender(_index_shared_docs['where'] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other, to_utc=True)
values = _ensure_datetimelike_to_i8(self, to_utc=True)
result = np.where(cond, values, other).astype('i8')
result = self._ensure_localized(result, from_utc=True)
return self._shallow_copy(result)
def _summary(self, name=None):
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (printing.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
# display as values, not quoted
result = result.replace("'", "")
return result
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError('to_concat must have the same tz')
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs['freq'] = None
return self._simple_new(new_data, **attribs)
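# Illustrative example of the freq-retention rule above (assuming pd
# is pandas): a concatenation that stays evenly spaced keeps the
# original freq, while a gap in the deltas resets it to None.
#
#   a = pd.date_range('2019-01-01', periods=2, freq='D')
#   b = pd.date_range('2019-01-03', periods=2, freq='D')
#   a.append(b).freq  # <Day> -- all deltas are one day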
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype) and copy is False:
# Ensure that self.astype(self.dtype) is self
return self
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values,
dtype=new_values.dtype, name=self.name, copy=False)
@deprecate_kwarg(old_arg_name='n', new_arg_name='periods')
def shift(self, periods, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
def wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (wrap_arithmetic_op(self, other, result[0]),
wrap_arithmetic_op(self, other, result[1]))
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
def maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, ABCIndexClass):
return obj._data
return obj
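# Hedged example: an Index is unwrapped to its backing ExtensionArray
# via ._data; any other object passes through unchanged.
#
#   idx = pd.date_range('2019', periods=3)
#   maybe_unwrap_index(idx)  # -> DatetimeArray
#   maybe_unwrap_index(5)    # -> 5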
class DatetimelikeDelegateMixin(PandasDelegate):
"""
Delegation mechanism, specific for Datetime, Timedelta, and Period types.
Functionality is delegated from the Index class to an Array class. A
few things can be customized
* _delegate_class : type
The class being delegated to.
* _delegated_methods, _delegated_properties : List
The list of property / method names being delegated.
* _raw_methods : Set
The set of methods whose results should *not* be
boxed in an index, after being returned from the array
* _raw_properties : Set
The set of properties whose results should *not* be
boxed in an index, after being returned from the array
"""
# raw_methods : dispatch methods that shouldn't be boxed in an Index
_raw_methods = set() # type: Set[str]
# raw_properties : dispatch properties that shouldn't be boxed in an Index
_raw_properties = set() # type: Set[str]
name = None
_data = None
@property
def _delegate_class(self):
raise AbstractMethodError(self)
def _delegate_property_get(self, name, *args, **kwargs):
result = getattr(self._data, name)
if name not in self._raw_properties:
result = Index(result, name=self.name)
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
setattr(self._data, name, value)
def _delegate_method(self, name, *args, **kwargs):
result = operator.methodcaller(name, *args, **kwargs)(self._data)
if name not in self._raw_methods:
result = Index(result, name=self.name)
return result
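# Minimal sketch of a concrete subclass (names here are illustrative
# assumptions, not necessarily the shipped pandas API): delegation is
# wired up by naming the array class and the attributes that should
# skip Index-boxing.
#
#   class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
#       _delegate_class = TimedeltaArray    # assumed to be in scope
#       _raw_properties = {'components'}    # returned unboxed
#       _raw_methods = {'to_pytimedelta'}   # returned unboxed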
| bsd-3-clause |
tszym/ansible | lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py | 8 | 21465 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_firewall_policy
short_description: Create/delete/update firewall policies
description:
- Create, delete or update firewall policies on CenturyLink Cloud
version_added: "2.0"
options:
location:
description:
- Target datacenter for the firewall policy
required: True
state:
description:
- Whether to create or delete the firewall policy
default: present
required: False
choices: ['present', 'absent']
source:
description:
- The list of source addresses for traffic on the originating firewall.
This is required when state is 'present'
default: None
required: False
destination:
description:
- The list of destination addresses for traffic on the terminating firewall.
This is required when state is 'present'
default: None
required: False
ports:
description:
- The list of ports associated with the policy.
TCP and UDP can take in single ports or port ranges.
default: None
required: False
choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
firewall_policy_id:
description:
- Id of the firewall policy. This is required to update or delete an existing firewall policy
default: None
required: False
source_account_alias:
description:
- CLC alias for the source account
required: True
destination_account_alias:
description:
- CLC alias for the destination account
default: None
required: False
wait:
description:
- Whether to wait for the provisioning tasks to finish before returning.
default: True
required: False
choices: [True, False]
enabled:
description:
- Whether the firewall policy is enabled or disabled
default: True
required: False
choices: [True, False]
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
---
- name: Create Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create / Verify a Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: present
source: 10.128.216.0/24
destination: 10.128.216.0/24
ports: Any
destination_account_alias: WFAD
---
- name: Delete Firewall Policy
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Delete a Firewall Policy at CenturyLink Cloud
clc_firewall:
source_account_alias: WFAD
location: VA1
state: absent
firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
'''
RETURN = '''
firewall_policy_id:
description: The firewall policy id
returned: success
type: string
sample: fc36f1bfd47242e488a9c44346438c05
firewall_policy:
description: The firewall policy information
returned: success
type: dict
sample:
{
"destination":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"destinationAccount":"wfad",
"enabled":true,
"id":"fc36f1bfd47242e488a9c44346438c05",
"links":[
{
"href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
"rel":"self",
"verbs":[
"GET",
"PUT",
"DELETE"
]
}
],
"ports":[
"any"
],
"source":[
"10.1.1.0/24",
"10.2.2.0/24"
],
"status":"active"
}
'''
__version__ = '${version}'
import os
import urlparse
from time import sleep
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
try:
import clc as clc_sdk
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcFirewallPolicy:
clc = None
def __init__(self, module):
"""
Construct module
"""
self.clc = clc_sdk
self.module = module
self.firewall_dict = {}
if not CLC_FOUND:
self.module.fail_json(
msg='clc-python-sdk required for this module')
if not REQUESTS_FOUND:
self.module.fail_json(
msg='requests library is required for this module')
if requests.__version__ and LooseVersion(
requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
location=dict(required=True),
source_account_alias=dict(required=True),
destination_account_alias=dict(default=None),
firewall_policy_id=dict(default=None),
ports=dict(default=None, type='list'),
source=dict(default=None, type='list'),
destination=dict(default=None, type='list'),
wait=dict(default=True),
state=dict(default='present', choices=['present', 'absent']),
enabled=dict(default=True, choices=[True, False])
)
return argument_spec
def process_request(self):
"""
Execute the main code path, and handle the request
:return: none
"""
changed = False
firewall_policy = None
location = self.module.params.get('location')
source_account_alias = self.module.params.get('source_account_alias')
destination_account_alias = self.module.params.get(
'destination_account_alias')
firewall_policy_id = self.module.params.get('firewall_policy_id')
ports = self.module.params.get('ports')
source = self.module.params.get('source')
destination = self.module.params.get('destination')
wait = self.module.params.get('wait')
state = self.module.params.get('state')
enabled = self.module.params.get('enabled')
self.firewall_dict = {
'location': location,
'source_account_alias': source_account_alias,
'destination_account_alias': destination_account_alias,
'firewall_policy_id': firewall_policy_id,
'ports': ports,
'source': source,
'destination': destination,
'wait': wait,
'state': state,
'enabled': enabled}
self._set_clc_credentials_from_env()
if state == 'absent':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
source_account_alias, location, self.firewall_dict)
elif state == 'present':
changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
source_account_alias, location, self.firewall_dict)
return self.module.exit_json(
changed=changed,
firewall_policy_id=firewall_policy_id,
firewall_policy=firewall_policy)
@staticmethod
def _get_policy_id_from_response(response):
"""
Method to parse out the policy id from creation response
:param response: response from firewall creation API call
:return: policy_id: firewall policy id from creation call
"""
url = response.get('links')[0]['href']
path = urlparse.urlparse(url).path
path_list = os.path.split(path)
policy_id = path_list[-1]
return policy_id
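# Worked example of the parsing above, using a hypothetical response
# payload (the id and URL are made up for illustration):
#
#   response = {'links': [{'href': 'http://api.ctl.io/v2-experimental'
#                                  '/firewallPolicies/wfad/uc1/abc123'}]}
#   ClcFirewallPolicy._get_policy_id_from_response(response)  # 'abc123'
#
# urlparse keeps only the path; os.path.split keeps its last segment.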
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _ensure_firewall_policy_is_present(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: (changed, firewall_policy_id, firewall_policy)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was created/updated
firewall_policy: The firewall_policy object
"""
firewall_policy = None
firewall_policy_id = firewall_dict.get('firewall_policy_id')
if firewall_policy_id is None:
if not self.module.check_mode:
response = self._create_firewall_policy(
source_account_alias,
location,
firewall_dict)
firewall_policy_id = self._get_policy_id_from_response(
response)
changed = True
else:
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if not firewall_policy:
return self.module.fail_json(
msg='Unable to find the firewall policy id : {0}'.format(
firewall_policy_id))
changed = self._compare_get_request_with_dict(
firewall_policy,
firewall_dict)
if not self.module.check_mode and changed:
self._update_firewall_policy(
source_account_alias,
location,
firewall_policy_id,
firewall_dict)
if changed and firewall_policy_id:
firewall_policy = self._wait_for_requests_to_complete(
source_account_alias,
location,
firewall_policy_id)
return changed, firewall_policy_id, firewall_policy
def _ensure_firewall_policy_is_absent(
self,
source_account_alias,
location,
firewall_dict):
"""
Ensures that a given firewall policy is removed if present
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: firewall policy to delete
:return: (changed, firewall_policy_id, response)
changed: flag for if a change occurred
firewall_policy_id: the firewall policy id that was deleted
response: response from CLC API call
"""
changed = False
response = []
firewall_policy_id = firewall_dict.get('firewall_policy_id')
result = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
if result:
if not self.module.check_mode:
response = self._delete_firewall_policy(
source_account_alias,
location,
firewall_policy_id)
changed = True
return changed, firewall_policy_id, response
def _create_firewall_policy(
self,
source_account_alias,
location,
firewall_dict):
"""
Creates the firewall policy for the given account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response from CLC API call
"""
payload = {
'destinationAccount': firewall_dict.get('destination_account_alias'),
'source': firewall_dict.get('source'),
'destination': firewall_dict.get('destination'),
'ports': firewall_dict.get('ports')}
try:
response = self.clc.v2.API.Call(
'POST', '/v2-experimental/firewallPolicies/%s/%s' %
(source_account_alias, location), payload)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to create firewall policy. %s" %
str(e.response_text))
return response
def _delete_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Deletes a given firewall policy for an account alias in a datacenter
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to delete
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to delete the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _update_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id,
firewall_dict):
"""
Updates a firewall policy for a given datacenter and account alias
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: firewall policy id to update
:param firewall_dict: dictionary of request parameters for firewall policy
:return: response: response from CLC API call
"""
try:
response = self.clc.v2.API.Call(
'PUT',
'/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias,
location,
firewall_policy_id),
firewall_dict)
except APIFailedResponse as e:
return self.module.fail_json(
msg="Unable to update the firewall policy id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
@staticmethod
def _compare_get_request_with_dict(response, firewall_dict):
"""
Helper method to compare the json response for getting the firewall policy with the request parameters
:param response: response from the get method
:param firewall_dict: dictionary of request parameters for firewall policy
:return: changed: Boolean that returns true if there are differences between
the response parameters and the playbook parameters
"""
changed = False
response_dest_account_alias = response.get('destinationAccount')
response_enabled = response.get('enabled')
response_source = response.get('source')
response_dest = response.get('destination')
response_ports = response.get('ports')
request_dest_account_alias = firewall_dict.get(
'destination_account_alias')
request_enabled = firewall_dict.get('enabled')
if request_enabled is None:
request_enabled = True
request_source = firewall_dict.get('source')
request_dest = firewall_dict.get('destination')
request_ports = firewall_dict.get('ports')
if (
response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
response_enabled != request_enabled) or (
response_source and response_source != request_source) or (
response_dest and response_dest != request_dest) or (
response_ports and response_ports != request_ports):
changed = True
return changed
def _get_firewall_policy(
self,
source_account_alias,
location,
firewall_policy_id):
"""
Get back details for a particular firewall policy
:param source_account_alias: the source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: id of the firewall policy to get
:return: response - The response from CLC API call
"""
response = None
try:
response = self.clc.v2.API.Call(
'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
(source_account_alias, location, firewall_policy_id))
except APIFailedResponse as e:
if e.response_status_code != 404:
self.module.fail_json(
msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
firewall_policy_id, str(e.response_text)))
return response
def _wait_for_requests_to_complete(
self,
source_account_alias,
location,
firewall_policy_id,
wait_limit=50):
"""
Waits until the CLC requests are complete if the wait argument is True
:param source_account_alias: The source account alias for the firewall policy
:param location: datacenter of the firewall policy
:param firewall_policy_id: The firewall policy id
:param wait_limit: The number of times to check the status for completion
:return: the firewall_policy object
"""
wait = self.module.params.get('wait')
count = 0
firewall_policy = None
while wait:
count += 1
firewall_policy = self._get_firewall_policy(
source_account_alias, location, firewall_policy_id)
status = firewall_policy.get('status')
if status == 'active' or count > wait_limit:
wait = False
else:
# wait for 2 seconds
sleep(2)
return firewall_policy
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
supports_check_mode=True)
clc_firewall = ClcFirewallPolicy(module)
clc_firewall.process_request()
if __name__ == '__main__':
main()
| gpl-3.0 |
KyleJamesWalker/ansible | lib/ansible/module_utils/junos.py | 16 | 6193 | #
# (c) 2017 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from contextlib import contextmanager
from xml.etree.ElementTree import Element, SubElement, tostring
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.netconf import send_request, children
from ansible.module_utils.netconf import discard_changes, validate
from ansible.module_utils.network_common import to_list
from ansible.module_utils.six import string_types
ACTIONS = frozenset(['merge', 'override', 'replace', 'update', 'set'])
JSON_ACTIONS = frozenset(['merge', 'override', 'update'])
FORMATS = frozenset(['xml', 'text', 'json'])
CONFIG_FORMATS = frozenset(['xml', 'text', 'json', 'set'])
junos_argument_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int', default=10),
'provider': dict(type='dict', no_log=True),
'transport': dict()
}
def check_args(module, warnings):
provider = module.params['provider'] or {}
for key in junos_argument_spec:
if key in ('provider',) and module.params[key]:
warnings.append('argument %s has been deprecated and will be '
'removed in a future version' % key)
def _validate_rollback_id(module, value):
try:
if not 0 <= int(value) <= 49:
raise ValueError
except ValueError:
module.fail_json(msg='rollback must be between 0 and 49')
def load_configuration(module, candidate=None, action='merge', rollback=None, format='xml'):
if all((candidate is None, rollback is None)):
module.fail_json(msg='one of candidate or rollback must be specified')
elif all((candidate is not None, rollback is not None)):
module.fail_json(msg='candidate and rollback are mutually exclusive')
if format not in FORMATS:
module.fail_json(msg='invalid format specified')
if format == 'json' and action not in JSON_ACTIONS:
module.fail_json(msg='invalid action for format json')
elif format in ('text', 'xml') and action not in ACTIONS:
module.fail_json(msg='invalid action for format %s' % format)
if action == 'set' and not format == 'text':
module.fail_json(msg='format must be text when action is set')
if rollback is not None:
_validate_rollback_id(module, rollback)
xattrs = {'rollback': str(rollback)}
else:
xattrs = {'action': action, 'format': format}
obj = Element('load-configuration', xattrs)
if candidate is not None:
lookup = {'xml': 'configuration', 'text': 'configuration-text',
'set': 'configuration-set', 'json': 'configuration-json'}
if action == 'set':
cfg = SubElement(obj, 'configuration-set')
else:
cfg = SubElement(obj, lookup[format])
if isinstance(candidate, string_types):
cfg.text = candidate
else:
cfg.append(candidate)
return send_request(module, obj)
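# Usage sketch, assuming `module` is an AnsibleModule wired for
# netconf: loading one line of Junos "set" syntax builds
# <load-configuration action="set" format="text"> with a
# <configuration-set> child and sends it as an RPC.
#
#   candidate = 'set system host-name router1'
#   load_configuration(module, candidate, action='set', format='text')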
def get_configuration(module, compare=False, format='xml', rollback='0'):
if format not in CONFIG_FORMATS:
module.fail_json(msg='invalid config format specified')
xattrs = {'format': format}
if compare:
_validate_rollback_id(module, rollback)
xattrs['compare'] = 'rollback'
xattrs['rollback'] = str(rollback)
return send_request(module, Element('get-configuration', xattrs))
def commit_configuration(module, confirm=False, check=False, comment=None, confirm_timeout=None):
obj = Element('commit-configuration')
if confirm:
SubElement(obj, 'confirmed')
if check:
SubElement(obj, 'check')
if comment:
subele = SubElement(obj, 'log')
subele.text = str(comment)
if confirm_timeout:
subele = SubElement(obj, 'confirm-timeout')
subele.text = str(confirm_timeout)
return send_request(module, obj)
def command(module, command, format='text', rpc_only=False):
xattrs = {'format': format}
if rpc_only:
command += ' | display xml rpc'
xattrs['format'] = 'text'
return send_request(module, Element('command', xattrs, text=command))
lock_configuration = lambda x: send_request(x, Element('lock-configuration'))
unlock_configuration = lambda x: send_request(x, Element('unlock-configuration'))
@contextmanager
def locked_config(module):
try:
lock_configuration(module)
yield
finally:
unlock_configuration(module)
def get_diff(module):
reply = get_configuration(module, compare=True, format='text')
output = reply.find('.//configuration-output')
if output is not None:
return output.text
def load_config(module, candidate, action='merge', commit=False, format='xml',
comment=None, confirm=False, confirm_timeout=None):
with locked_config(module):
if isinstance(candidate, list):
candidate = '\n'.join(candidate)
reply = load_configuration(module, candidate, action=action, format=format)
validate(module)
diff = get_diff(module)
if diff:
diff = str(diff).strip()
if commit:
commit_configuration(module, confirm=confirm, comment=comment,
confirm_timeout=confirm_timeout)
else:
discard_changes(module)
return diff
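# Hedged end-to-end sketch: diff-and-commit under the configuration
# lock. The arguments mirror commit_configuration; error handling and
# rollback are left to the caller.
#
#   diff = load_config(module, ['set system services netconf ssh'],
#                      action='set', format='text', commit=True,
#                      comment='enable netconf')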
| gpl-3.0 |
aneeshusa/servo | tests/wpt/css-tests/tools/webdriver/webdriver/webelement.py | 251 | 1846 | """Element-level WebDriver operations."""
import searchcontext
class WebElement(searchcontext.SearchContext):
"""Corresponds to a DOM element in the current page."""
def __init__(self, driver, id):
self._driver = driver
self._id = id
# Set value of mode used by SearchContext
self.mode = driver.mode
def execute(self, method, path, name, body=None):
"""Execute a command against this WebElement."""
return self._driver.execute(
method, '/element/%s%s' % (self._id, path), name, body)
def is_displayed(self):
"""Is this element displayed?"""
return self.execute('GET', '/displayed', 'isDisplayed')
def is_selected(self):
"""Is this checkbox, radio button, or option selected?"""
return self.execute('GET', '/selected', 'isSelected')
def get_attribute(self, name):
"""Get the value of an element property or attribute."""
return self.execute('GET', '/attribute/%s' % name, 'getElementAttribute')
@property
def text(self):
"""Get the visible text for this element."""
return self.execute('GET', '/text', 'text')
@property
def tag_name(self):
"""Get the tag name for this element"""
return self.execute('GET', '/name', 'getElementTagName')
def click(self):
"""Click on this element."""
return self.execute('POST', '/click', 'click')
def clear(self):
"""Clear the contents of the this text input."""
self.execute('POST', '/clear', 'clear')
def send_keys(self, keys):
"""Send keys to this text input or body element."""
if isinstance(keys, str):
keys = [keys]
self.execute('POST', '/value', 'sendKeys', {'value': keys})
def to_json(self):
return {'ELEMENT': self._id}
| mpl-2.0 |
xuyuhan/depot_tools | third_party/logilab/common/modutils.py | 64 | 23589 | # -*- coding: utf-8 -*-
# copyright 2003-2013 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extension
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names as key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import splitext, join, abspath, isdir, dirname, exists, basename
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
from six.moves import range
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from logilab.common import STD_BLACKLIST, _handle_blacklist
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=1)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError as ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package split on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
module = load_module(curname, mp_file, mp_filename, mp_desc)
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]) )
path = [dirname( _file )]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
as value. This is usually useful to handle package splitted in multiple
directories using __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
:return: the corresponding split module's name
"""
base = splitext(abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = abspath(path_)
if path and base[:len(path)] == path:
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = abspath(path)
if path and base.startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
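# Hedged example, assuming a conventional CPython layout where the
# stdlib directory is on sys.path:
#
#   >>> modpath_from_file('/usr/lib/python2.7/os.py')
#   ['os']
#
# The absolute path is matched against sys.path prefixes, the
# remainder is split on os.sep, and package dirs must carry __init__.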
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
split module's name (i.e. name of a module or package split
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
# first check for builtin module which won't be considered latter
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
name (the filename will be returned identically if it's already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
cleaned = []
for modname, module in list(sys.modules.items()):
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
cleaned.append(modname)
del sys.modules[modname]
break
return cleaned
def is_python_source(filename):
"""
rtype: bool
return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
:param std_path: list of path considered has standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError as ex:
# import failed, i'm probably not so wrong by supposing it's
# not standard...
return 0
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return 1
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return 0
for path in std_path:
if filename.startswith(abspath(path)):
return 1
return False
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
'.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
try:
import pkg_resources
except ImportError:
pkg_resources = None
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
split module's name (i.e. name of a module or package split
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
# pkg_resources support (aka setuptools namespace packages)
if (pkg_resources is not None
and modpath[0] in pkg_resources._namespace_packages
and modpath[0] in sys.modules
and len(modpath) > 1):
# setuptools has added into sys.modules a module object with proper
# __path__, get back information from there
module = sys.modules[modpath.pop(0)]
path = module.__path__
imported = []
while modpath:
modname = modpath[0]
# take care to changes in find_module implementation wrt builtin modules
#
# Python 2.6.6 (r266:84292, Sep 11 2012, 08:34:23)
# >>> imp.find_module('posix')
# (None, 'posix', ('', '', 6))
#
# Python 3.3.1 (default, Apr 26 2013, 12:08:46)
# >>> imp.find_module('posix')
# (None, None, ('', '', 6))
try:
_, mp_filename, mp_desc = find_module(modname, path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs and mp_filename:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
# XXX guess if package is using pkgutil.extend_path by looking for
# those keywords in the first four Kbytes
try:
with open(join(mp_filename, '__init__.py')) as stream:
data = stream.read(4096)
except IOError:
path = [mp_filename]
else:
if 'pkgutil' in data and 'extend_path' in data:
# extend_path is called, search sys.path for module/packages
# of this name see pkgutil.extend_path documentation
path = [join(p, *imported) for p in sys.path
if isdir(join(p, *imported))]
else:
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
| bsd-3-clause |
kthordarson/youtube-dl-ruv | test/helper.py | 1 | 6495 | from __future__ import unicode_literals
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.utils import (
compat_str,
preferredencoding,
write_string,
)
def get_params(override=None):
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
if override:
parameters.update(override)
return parameters
def try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def report_warning(message):
'''
Print the message to stderr, it will be prefixed with 'WARNING:'
If stderr is a tty file the 'WARNING:' will be colored
'''
if sys.stderr.isatty() and os.name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
class FakeYDL(YoutubeDL):
def __init__(self, override=None):
# Different instances of the downloader can't share the same dictionary:
# some tests set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, s, tb=None):
raise Exception(s)
def download(self, x):
self.result.append(x)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message): return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
def gettestcases(include_onlymatching=False):
for ie in youtube_dl.extractor.gen_extractors():
t = getattr(ie, '_TEST', None)
if t:
assert not hasattr(ie, '_TESTS'), \
'%s has _TEST and _TESTS' % type(ie).__name__
tests = [t]
else:
tests = getattr(ie, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(ie).__name__[:-len('IE')]
yield t
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_info_dict(self, expected_dict, got_dict):
for info_field, expected in expected_dict.items():
if isinstance(expected, compat_str) and expected.startswith('re:'):
got = got_dict.get(info_field)
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, info_field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (info_field, got, match_str))
elif isinstance(expected, type):
got = got_dict.get(info_field)
self.assertTrue(isinstance(got, expected),
'Expected type %r for field %s, but got value %r of type %r' % (expected, info_field, got, type(got)))
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got_dict.get(info_field))
else:
got = got_dict.get(info_field)
self.assertEqual(expected, got,
'invalid value for field %s, expected %r, got %r' % (info_field, expected, got))
# Check for the presence of mandatory fields
if got_dict.get('_type') != 'playlist':
for key in ('id', 'url', 'title', 'ext'):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'")
else:
return repr(v)
info_dict_str = ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items())
write_string('\n"info_dict": {\n' + info_dict_str + '}\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
', '.join(sorted(missing_keys))))
def assertRegexpMatches(self, text, regexp, msg=None):
if hasattr(self, 'assertRegexp'):
return self.assertRegexp(text, regexp, msg)
else:
m = re.match(regexp, text)
if not m:
note = 'Regexp didn\'t match: %r not found in %r' % (regexp, text)
if msg is None:
msg = note
else:
msg = note + ', ' + msg
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning
| unlicense |
arnfred/heroku-buildpack-python-hdf5-blas-lapack | vendor/pip-1.5.4/pip/_vendor/requests/packages/chardet/chardetect.py | 743 | 1141 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
if __name__ == '__main__':
main()
| mit |
jamesls/boto | boto/ec2/securitygroup.py | 13 | 13788 | # Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents an EC2 Security Group
"""
from boto.ec2.ec2object import TaggedEC2Object
from boto.exception import BotoClientError
class SecurityGroup(TaggedEC2Object):
def __init__(self, connection=None, owner_id=None,
name=None, description=None, id=None):
TaggedEC2Object.__init__(self, connection)
self.id = id
self.owner_id = owner_id
self.name = name
self.description = description
self.vpc_id = None
self.rules = IPPermissionsList()
self.rules_egress = IPPermissionsList()
def __repr__(self):
return 'SecurityGroup:%s' % self.name
def startElement(self, name, attrs, connection):
retval = TaggedEC2Object.startElement(self, name, attrs, connection)
if retval is not None:
return retval
if name == 'ipPermissions':
return self.rules
elif name == 'ipPermissionsEgress':
return self.rules_egress
else:
return None
def endElement(self, name, value, connection):
if name == 'ownerId':
self.owner_id = value
elif name == 'groupId':
self.id = value
elif name == 'groupName':
self.name = value
elif name == 'vpcId':
self.vpc_id = value
elif name == 'groupDescription':
self.description = value
elif name == 'ipRanges':
pass
elif name == 'return':
if value == 'false':
self.status = False
elif value == 'true':
self.status = True
else:
raise Exception(
'Unexpected value of status %s for group %s' % (
value,
self.name
)
)
else:
setattr(self, name, value)
def delete(self):
if self.vpc_id:
return self.connection.delete_security_group(group_id=self.id)
else:
return self.connection.delete_security_group(self.name)
def add_rule(self, ip_protocol, from_port, to_port,
src_group_name, src_group_owner_id, cidr_ip, src_group_group_id):
"""
Add a rule to the SecurityGroup object. Note that this method
only changes the local version of the object. No information
is sent to EC2.
"""
rule = IPPermissions(self)
rule.ip_protocol = ip_protocol
rule.from_port = from_port
rule.to_port = to_port
self.rules.append(rule)
rule.add_grant(src_group_name, src_group_owner_id, cidr_ip, src_group_group_id)
def remove_rule(self, ip_protocol, from_port, to_port,
src_group_name, src_group_owner_id, cidr_ip, src_group_group_id):
"""
Remove a rule from the SecurityGroup object. Note that this method
only changes the local version of the object. No information
is sent to EC2.
"""
target_rule = None
for rule in self.rules:
if rule.ip_protocol == ip_protocol:
if rule.from_port == from_port:
if rule.to_port == to_port:
target_rule = rule
target_grant = None
for grant in rule.grants:
if grant.name == src_group_name or grant.group_id == src_group_group_id:
if grant.owner_id == src_group_owner_id:
if grant.cidr_ip == cidr_ip:
target_grant = grant
if target_grant:
rule.grants.remove(target_grant)
if len(rule.grants) == 0:
self.rules.remove(target_rule)
def authorize(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None):
"""
Add a new rule to this security group.
You need to pass in either src_group
OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string or list of strings
:param cidr_ip: The CIDR block you are providing access to.
See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
:type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
:class:`boto.ec2.securitygroup.GroupOrCIDR`
:param src_group: The Security Group you are granting access to.
:rtype: bool
:return: True if successful.
"""
group_name = None
if not self.vpc_id:
group_name = self.name
group_id = None
if self.vpc_id:
group_id = self.id
src_group_name = None
src_group_owner_id = None
src_group_group_id = None
if src_group:
cidr_ip = None
src_group_owner_id = src_group.owner_id
if not self.vpc_id:
src_group_name = src_group.name
else:
if hasattr(src_group, 'group_id'):
src_group_group_id = src_group.group_id
else:
src_group_group_id = src_group.id
status = self.connection.authorize_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
cidr_ip,
group_id,
src_group_group_id)
if status:
if not isinstance(cidr_ip, list):
cidr_ip = [cidr_ip]
for single_cidr_ip in cidr_ip:
self.add_rule(ip_protocol, from_port, to_port, src_group_name,
src_group_owner_id, single_cidr_ip, src_group_group_id)
return status
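# Illustrative usage sketch (hypothetical values, not part of boto): a caller
# authorizes either an IP-based rule or another group, not both, e.g.:
#
#     sg.authorize('tcp', 22, 22, cidr_ip='203.0.113.0/24')  # ip-based rule
#     sg.authorize(src_group=other_sg)                       # group-based rule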
def revoke(self, ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, src_group=None):
group_name = None
if not self.vpc_id:
group_name = self.name
group_id = None
if self.vpc_id:
group_id = self.id
src_group_name = None
src_group_owner_id = None
src_group_group_id = None
if src_group:
cidr_ip = None
src_group_owner_id = src_group.owner_id
if not self.vpc_id:
src_group_name = src_group.name
else:
if hasattr(src_group, 'group_id'):
src_group_group_id = src_group.group_id
else:
src_group_group_id = src_group.id
status = self.connection.revoke_security_group(group_name,
src_group_name,
src_group_owner_id,
ip_protocol,
from_port,
to_port,
cidr_ip,
group_id,
src_group_group_id)
if status:
self.remove_rule(ip_protocol, from_port, to_port, src_group_name,
src_group_owner_id, cidr_ip, src_group_group_id)
return status
def copy_to_region(self, region, name=None):
"""
Create a copy of this security group in another region.
Note that the new security group will be a separate entity
and will not stay in sync automatically after the copy
operation.
:type region: :class:`boto.ec2.regioninfo.RegionInfo`
:param region: The region to which this security group will be copied.
:type name: string
:param name: The name of the copy. If not supplied, the copy
will have the same name as this security group.
:rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
:return: The new security group.
"""
if region.name == self.region.name:
raise BotoClientError('Unable to copy to the same Region')
conn_params = self.connection.get_params()
rconn = region.connect(**conn_params)
sg = rconn.create_security_group(name or self.name, self.description)
source_groups = []
for rule in self.rules:
for grant in rule.grants:
grant_nom = grant.name or grant.group_id
if grant_nom:
if grant_nom not in source_groups:
source_groups.append(grant_nom)
sg.authorize(None, None, None, None, grant)
else:
sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
grant.cidr_ip)
return sg
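# Illustrative usage sketch (hypothetical names, not part of boto):
#
#     import boto.ec2
#     us_west = boto.ec2.get_region('us-west-2')
#     copied = sg.copy_to_region(us_west, name='web-servers-copy')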
def instances(self):
"""
Find all of the current instances that are running within this
security group.
:rtype: list of :class:`boto.ec2.instance.Instance`
:return: A list of Instance objects
"""
# It would be more efficient to do this with filters now
# but not all services that implement EC2 API support filters.
instances = []
rs = self.connection.get_all_instances()
for reservation in rs:
uses_group = [g.name for g in reservation.groups if g.name == self.name]
if uses_group:
instances.extend(reservation.instances)
return instances
class IPPermissionsList(list):
def startElement(self, name, attrs, connection):
if name == 'item':
self.append(IPPermissions(self))
return self[-1]
return None
def endElement(self, name, value, connection):
pass
class IPPermissions(object):
def __init__(self, parent=None):
self.parent = parent
self.ip_protocol = None
self.from_port = None
self.to_port = None
self.grants = []
def __repr__(self):
return 'IPPermissions:%s(%s-%s)' % (self.ip_protocol,
self.from_port, self.to_port)
def startElement(self, name, attrs, connection):
if name == 'item':
self.grants.append(GroupOrCIDR(self))
return self.grants[-1]
return None
def endElement(self, name, value, connection):
if name == 'ipProtocol':
self.ip_protocol = value
elif name == 'fromPort':
self.from_port = value
elif name == 'toPort':
self.to_port = value
else:
setattr(self, name, value)
def add_grant(self, name=None, owner_id=None, cidr_ip=None, group_id=None):
grant = GroupOrCIDR(self)
grant.owner_id = owner_id
grant.group_id = group_id
grant.name = name
grant.cidr_ip = cidr_ip
self.grants.append(grant)
return grant
class GroupOrCIDR(object):
def __init__(self, parent=None):
self.owner_id = None
self.group_id = None
self.name = None
self.cidr_ip = None
def __repr__(self):
if self.cidr_ip:
return '%s' % self.cidr_ip
else:
return '%s-%s' % (self.name or self.group_id, self.owner_id)
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'userId':
self.owner_id = value
elif name == 'groupId':
self.group_id = value
elif name == 'groupName':
self.name = value
elif name == 'cidrIp':
self.cidr_ip = value
else:
setattr(self, name, value)
| mit |
bmya/odoo_addons | smile_account_voucher_group/models/tools.py | 3 | 1202 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Smile (<http://www.smile.fr>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import orm, osv
def _get_exception_message(exception):
msg = exception.value if isinstance(exception, (osv.except_osv, orm.except_orm)) else exception
return tools.ustr(msg)
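# Illustrative usage sketch (hypothetical values, not part of this module):
# OpenERP exceptions carry their message in .value, plain exceptions do not,
# and both normalize to a unicode string here:
#
#     _get_exception_message(osv.except_osv('Error', 'Missing journal'))
#     # -> u'Missing journal'
#     _get_exception_message(ValueError('boom'))
#     # -> u'boom'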
| agpl-3.0 |
avanzosc/hr-addons | hr_timesheet_sheet_fix_analytic/tests/test_hr_timesheet_sheet_fix_analytic.py | 1 | 2027 | # Copyright 2021 Alfredo de la Fuente - AvanzOSC
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
from odoo import fields
@common.at_install(False)
@common.post_install(True)
class TestHrTimesheetSheetFixAnalytic(common.SavepointCase):
@classmethod
def setUpClass(cls):
super(TestHrTimesheetSheetFixAnalytic, cls).setUpClass()
cls.analytic_line_obj = cls.env['account.analytic.line']
cls.partner = cls.env.ref("base.partner_admin")
cond = [('partner_id', '=', cls.partner.id)]
cls.user = cls.env['res.users'].search(cond)
cond = [('user_id', '=', cls.user.id)]
cls.employee = cls.env['hr.employee'].search(cond)
vals = {'employee_id': cls.employee.id,
'date_start': fields.Date.today(),
'date_end': fields.Date.today()}
cls.sheet = cls.env['hr_timesheet.sheet'].create(vals)
cond = [('move_id', '=', False)]
cls.analytic_line = cls.analytic_line_obj.search(cond, limit=1)
cls.analytic_line.write({
'date': fields.Date.today(),
'sheet_id': cls.sheet.id})
cls.journal = cls.env['account.journal'].search([], limit=1)
cls.account = cls.env['account.account'].search([], limit=1)
vals = {'date': fields.Date.today(),
'ref': 'aaaaaaaa',
'journal_id': cls.journal.id}
line_vals = {'account_id': cls.account.id}
vals['line_ids'] = [(0, 0, line_vals)]
cls.account_move = cls.env['account.move'].create(vals)
def test_hr_timesheet_sheet_fix_analytic(self):
cond = self.sheet._get_timesheet_sheet_lines_domain()
lines = self.analytic_line_obj.search(cond)
self.assertEqual(len(lines), 1)
self.analytic_line.move_id = self.account_move.line_ids[0].id
cond = self.sheet._get_timesheet_sheet_lines_domain()
lines = self.analytic_line_obj.search(cond)
self.assertEqual(len(lines), 0)
| agpl-3.0 |
zhouhaibing089/kubernetes | cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py | 67 | 71563 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import os
import re
import random
import shutil
import socket
import string
import json
import ipaddress
from charms.leadership import leader_get, leader_set
from shutil import move
from tempfile import TemporaryDirectory
from pathlib import Path
from shlex import split
from subprocess import check_call
from subprocess import check_output
from subprocess import CalledProcessError
from urllib.request import Request, urlopen
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import when, when_any, when_not, when_none
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
from charms.kubernetes.common import retry
from charms.layer import tls_client
from charmhelpers.core import hookenv
from charmhelpers.core import host
from charmhelpers.core import unitdata
from charmhelpers.core.host import service_stop
from charmhelpers.core.templating import render
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = r'[\.A-Za-z0-9-_]+$'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
snap_resources = ['kubectl', 'kube-apiserver', 'kube-controller-manager',
'kube-scheduler', 'cdk-addons']
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
def set_upgrade_needed(forced=False):
set_state('kubernetes-master.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
hookenv.log('set upgrade needed')
if previous_channel is None or not require_manual or forced:
hookenv.log('forcing upgrade')
set_state('kubernetes-master.upgrade-specified')
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
def service_cidr():
''' Return the charm's service-cidr config '''
frozen_cidr = db.get('kubernetes-master.service-cidr')
return frozen_cidr or hookenv.config('service-cidr')
def freeze_service_cidr():
''' Freeze the service CIDR. Once the apiserver has started, we can no
longer safely change this value. '''
db.set('kubernetes-master.service-cidr', service_cidr())
@hook('upgrade-charm')
def check_for_upgrade_needed():
'''An upgrade charm event was triggered by Juju, react to that here.'''
hookenv.status_set('maintenance', 'Checking resources')
# migrate to new flags
if is_state('kubernetes-master.restarted-for-cloud'):
remove_state('kubernetes-master.restarted-for-cloud')
set_state('kubernetes-master.cloud.ready')
if is_state('kubernetes-master.cloud-request-sent'):
# minor change, just for consistency
remove_state('kubernetes-master.cloud-request-sent')
set_state('kubernetes-master.cloud.request-sent')
migrate_from_pre_snaps()
add_rbac_roles()
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
if not db.get('snap.resources.fingerprint.initialised'):
# We are here on an upgrade from non-rolling master
# Since this upgrade might also include resource updates eg
# juju upgrade-charm kubernetes-master --resource kube-any=my.snap
# we take no risk and forcibly upgrade the snaps.
# Forcibly means we do not prompt the user to call the upgrade action.
set_upgrade_needed(forced=True)
migrate_resource_checksums()
check_resources_for_upgrade_needed()
# Set the auto storage backend to etcd2.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if not auto_storage_backend and is_leader:
leader_set(auto_storage_backend='etcd2')
def get_resource_checksum_db_key(resource):
''' Convert a resource name to a resource checksum database key. '''
return 'kubernetes-master.resource-checksums.' + resource
def calculate_resource_checksum(resource):
''' Calculate a checksum for a resource '''
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
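# Illustrative note: when no resource is attached, hookenv.resource_get()
# is falsy and calculate_resource_checksum() returns the md5 of zero bytes,
# 'd41d8cd98f00b204e9800998ecf8427e' -- the same sentinel that
# migrate_resource_checksums() below stores for missing resources.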
def migrate_resource_checksums():
''' Migrate resource checksums from the old schema to the new one '''
for resource in snap_resources:
new_key = get_resource_checksum_db_key(resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = 'reactive.files_changed.' + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
set_upgrade_needed()
def calculate_and_store_resource_checksums():
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
def add_rbac_roles():
'''Update the known_tokens file with proper groups.'''
tokens_fname = '/root/cdk/known_tokens.csv'
tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
move(tokens_fname, tokens_backup_fname)
with open(tokens_fname, 'w') as ftokens:
with open(tokens_backup_fname, 'r') as stream:
for line in stream:
record = line.strip().split(',')
# token, username, user, groups
if record[2] == 'admin' and len(record) == 3:
towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
record[1],
record[2],
'system:masters')
ftokens.write(towrite)
continue
if record[2] == 'kube_proxy':
towrite = '{0},{1},{2}\n'.format(record[0],
'system:kube-proxy',
'kube-proxy')
ftokens.write(towrite)
continue
if record[2] == 'kubelet' and record[1] == 'kubelet':
continue
ftokens.write('{}'.format(line))
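# Illustrative sketch (hypothetical token values): add_rbac_roles() rewrites
# pre-RBAC entries in known_tokens.csv, so a line such as
#
#     abc123,admin,admin
#
# gains a groups column and becomes
#
#     abc123,admin,admin,"system:masters"
#
# while kube_proxy entries are renamed to the system:kube-proxy user.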
def rename_file_idempotent(source, destination):
if os.path.isfile(source):
os.rename(source, destination)
def migrate_from_pre_snaps():
# remove old states
remove_state('kubernetes.components.installed')
remove_state('kubernetes.dashboard.available')
remove_state('kube-dns.available')
remove_state('kubernetes-master.app_version.set')
# disable old services
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
host.service_stop(service)
# rename auth files
os.makedirs('/root/cdk', exist_ok=True)
rename_file_idempotent('/etc/kubernetes/serviceaccount.key',
'/root/cdk/serviceaccount.key')
rename_file_idempotent('/srv/kubernetes/basic_auth.csv',
'/root/cdk/basic_auth.csv')
rename_file_idempotent('/srv/kubernetes/known_tokens.csv',
'/root/cdk/known_tokens.csv')
# cleanup old files
files = [
"/lib/systemd/system/kube-apiserver.service",
"/lib/systemd/system/kube-controller-manager.service",
"/lib/systemd/system/kube-scheduler.service",
"/etc/default/kube-defaults",
"/etc/default/kube-apiserver.defaults",
"/etc/default/kube-controller-manager.defaults",
"/etc/default/kube-scheduler.defaults",
"/srv/kubernetes",
"/home/ubuntu/kubectl",
"/usr/local/bin/kubectl",
"/usr/local/bin/kube-apiserver",
"/usr/local/bin/kube-controller-manager",
"/usr/local/bin/kube-scheduler",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('kubernetes-master.upgrade-specified')
def do_upgrade():
install_snaps()
remove_state('kubernetes-master.upgrade-needed')
remove_state('kubernetes-master.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-apiserver snap')
snap.install('kube-apiserver', channel=channel)
hookenv.status_set('maintenance',
'Installing kube-controller-manager snap')
snap.install('kube-controller-manager', channel=channel)
hookenv.status_set('maintenance', 'Installing kube-scheduler snap')
snap.install('kube-scheduler', channel=channel)
hookenv.status_set('maintenance', 'Installing cdk-addons snap')
snap.install('cdk-addons', channel=channel)
calculate_and_store_resource_checksums()
db.set('snap.resources.fingerprint.initialised', True)
set_state('kubernetes-master.snaps.installed')
remove_state('kubernetes-master.components.started')
@when('config.changed.client_password', 'leadership.is_leader')
def password_changed():
"""Handle password change via the charms config."""
password = hookenv.config('client_password')
if password == "" and is_state('client.password.initialised'):
# password_changed is called during an upgrade. Nothing to do.
return
elif password == "":
# Password not initialised
password = token_generator()
setup_basic_auth(password, "admin", "admin", "system:masters")
set_state('reconfigure.authentication.setup')
remove_state('authentication.setup')
set_state('client.password.initialised')
@when('config.changed.storage-backend')
def storage_backend_changed():
remove_state('kubernetes-master.components.started')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set master configuration on the CNI relation. This lets the CNI
subordinate know that we're the master so it can respond accordingly. '''
cni.set_config(is_master=True, kubeconfig_path='')
@when('leadership.is_leader')
@when_not('authentication.setup')
def setup_leader_authentication():
'''Setup basic authentication and token access for the cluster.'''
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
hookenv.status_set('maintenance', 'Rendering authentication templates.')
keys = [service_key, basic_auth, known_tokens]
# Try first to fetch data from an old leadership broadcast.
if not get_keys_from_leader(keys) \
or is_state('reconfigure.authentication.setup'):
last_pass = get_password('basic_auth.csv', 'admin')
setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
if not os.path.isfile(known_tokens):
touch(known_tokens)
# Generate the default service account token key
os.makedirs('/root/cdk', exist_ok=True)
if not os.path.isfile(service_key):
cmd = ['openssl', 'genrsa', '-out', service_key,
'2048']
check_call(cmd)
remove_state('reconfigure.authentication.setup')
# read service account key for syndication
leader_data = {}
for f in [known_tokens, basic_auth, service_key]:
with open(f, 'r') as fp:
leader_data[f] = fp.read()
# this is slightly opaque, but we are sending file contents under its file
# path as a key.
# eg:
# {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
leader_set(leader_data)
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
@when_not('leadership.is_leader')
def setup_non_leader_authentication():
service_key = '/root/cdk/serviceaccount.key'
basic_auth = '/root/cdk/basic_auth.csv'
known_tokens = '/root/cdk/known_tokens.csv'
keys = [service_key, basic_auth, known_tokens]
# The source of truth for non-leaders is the leader.
# Therefore we overwrite_local with whatever the leader has.
if not get_keys_from_leader(keys, overwrite_local=True):
# the keys were not retrieved. Non-leaders have to retry.
return
if not any_file_changed(keys) and is_state('authentication.setup'):
# No change detected and we have already setup the authentication
return
hookenv.status_set('maintenance', 'Rendering authentication templates.')
remove_state('kubernetes-master.components.started')
set_state('authentication.setup')
def get_keys_from_leader(keys, overwrite_local=False):
"""
Gets the broadcasted keys from the leader and stores them in
the corresponding files.
Args:
keys: list of keys. Keys are actually files on the FS.
Returns: True if all keys were fetched, False if not.
"""
# This races with other codepaths, and seems to require being created first
# This block may be extracted later, but for now seems to work as intended
os.makedirs('/root/cdk', exist_ok=True)
for k in keys:
# If the path does not exist, assume we need it
if not os.path.exists(k) or overwrite_local:
# Fetch data from leadership broadcast
contents = leader_get(k)
# Default to logging the warning and wait for leader data to be set
if contents is None:
hookenv.log('Missing content for file {}'.format(k))
return False
# Write out the file and move on to the next item
with open(k, 'w+') as fp:
fp.write(contents)
fp.write('\n')
return True
@when('kubernetes-master.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
version = check_output(['kube-apiserver', '--version'])
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
@when('kubernetes-master.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
''' Set the snapd refresh timer on the leader so all cluster members
(present and future) will refresh near the same time. '''
# Get the current snapd refresh timer; we know layer-snap has set this
# when the 'snap.refresh.set' flag is present.
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
# The first time through, data_changed will be true. Subsequent calls
# should only update leader data if something changed.
if data_changed('master_snapd_refresh', timer):
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
leader_set({'snapd_refresh': timer})
@when('kubernetes-master.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
''' Set the snapd refresh.timer on non-leader cluster members. '''
# NB: This method should only be run when 'snap.refresh.set' is present.
# Layer-snap will always set a core refresh.timer, which may not be the
# same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
# has finished and we are free to set our config to the leader's timer.
timer = leader_get('snapd_refresh')
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
snap.set_refresh_timer(timer)
@hookenv.atexit
def set_final_status():
''' Set the final status of the charm as we leave hook execution '''
try:
goal_state = hookenv.goal_state()
except NotImplementedError:
goal_state = {}
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
cloud_blocked = is_state('kubernetes-master.cloud.blocked')
if vsphere_joined and cloud_blocked:
hookenv.status_set('blocked',
'vSphere integration requires K8s 1.12 or greater')
return
if azure_joined and cloud_blocked:
hookenv.status_set('blocked',
'Azure integration requires K8s 1.11 or greater')
return
if is_state('kubernetes-master.cloud.pending'):
hookenv.status_set('waiting', 'Waiting for cloud integration')
return
if not is_state('kube-api-endpoint.available'):
if 'kube-api-endpoint' in goal_state.get('relations', {}):
status = 'waiting'
else:
status = 'blocked'
hookenv.status_set(status, 'Waiting for kube-api-endpoint relation')
return
if not is_state('kube-control.connected'):
if 'kube-control' in goal_state.get('relations', {}):
status = 'waiting'
else:
status = 'blocked'
hookenv.status_set(status, 'Waiting for workers.')
return
upgrade_needed = is_state('kubernetes-master.upgrade-needed')
upgrade_specified = is_state('kubernetes-master.upgrade-specified')
if upgrade_needed and not upgrade_specified:
msg = 'Needs manual upgrade, run the upgrade action'
hookenv.status_set('blocked', msg)
return
if is_state('kubernetes-master.components.started'):
# All services should be up and running at this point. Double-check...
failing_services = master_services_down()
if len(failing_services) != 0:
msg = 'Stopped services: {}'.format(','.join(failing_services))
hookenv.status_set('blocked', msg)
return
is_leader = is_state('leadership.is_leader')
authentication_setup = is_state('authentication.setup')
if not is_leader and not authentication_setup:
hookenv.status_set('waiting', "Waiting on leader's crypto keys.")
return
components_started = is_state('kubernetes-master.components.started')
addons_configured = is_state('cdk-addons.configured')
if components_started and not addons_configured:
hookenv.status_set('waiting', 'Waiting to retry addon deployment')
return
if addons_configured and not all_kube_system_pods_running():
hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
return
if hookenv.config('service-cidr') != service_cidr():
msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
hookenv.status_set('active', msg)
return
gpu_available = is_state('kube-control.gpu.available')
gpu_enabled = is_state('kubernetes-master.gpu.enabled')
if gpu_available and not gpu_enabled:
msg = 'GPUs available. Set allow-privileged="auto" to enable.'
hookenv.status_set('active', msg)
return
hookenv.status_set('active', 'Kubernetes master running.')
def master_services_down():
"""Ensure master services are up and running.
Return: list of failing services"""
services = ['kube-apiserver',
'kube-controller-manager',
'kube-scheduler']
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not host.service_running(daemon):
failing_services.append(service)
return failing_services
@when('etcd.available', 'tls_client.server.certificate.saved',
'authentication.setup')
@when('leadership.set.auto_storage_backend')
@when_not('kubernetes-master.components.started',
'kubernetes-master.cloud.pending',
'kubernetes-master.cloud.blocked')
def start_master(etcd):
'''Run the Kubernetes master components.'''
hookenv.status_set('maintenance',
'Configuring the Kubernetes master services.')
freeze_service_cidr()
if not etcd.get_connection_string():
# etcd is not returning a connection string. This happens when
# the master unit disconnects from etcd and is ready to terminate.
# No point in trying to start master services and fail. Just return.
return
# TODO: Make sure below relation is handled on change
# https://github.com/kubernetes/kubernetes/issues/43461
handle_etcd_relation(etcd)
# Add CLI options to all components
configure_apiserver(etcd.get_connection_string())
configure_controller_manager()
configure_scheduler()
set_state('kubernetes-master.components.started')
hookenv.open_port(6443)
@when('etcd.available')
def etcd_data_change(etcd):
''' Etcd scale events block master reconfiguration due to the
kubernetes-master.components.started state. We need a way to
handle these events consistently only when the number of etcd
units has actually changed '''
# key off of the connection string
connection_string = etcd.get_connection_string()
# If the connection string changes, remove the started state to trigger
# handling of the master components
if data_changed('etcd-connect', connection_string):
remove_state('kubernetes-master.components.started')
# We are the leader and the auto_storage_backend is not set meaning
# this is the first time we connect to etcd.
auto_storage_backend = leader_get('auto_storage_backend')
is_leader = is_state('leadership.is_leader')
if is_leader and not auto_storage_backend:
if etcd.get_version().startswith('3.'):
leader_set(auto_storage_backend='etcd3')
else:
leader_set(auto_storage_backend='etcd2')
@when('kube-control.connected')
@when('cdk-addons.configured')
def send_cluster_dns_detail(kube_control):
''' Send cluster DNS info '''
enableKubeDNS = hookenv.config('enable-kube-dns')
dnsDomain = hookenv.config('dns_domain')
dns_ip = None
if enableKubeDNS:
try:
dns_ip = get_dns_ip()
except CalledProcessError:
hookenv.log("kubedns not ready yet")
return
kube_control.set_dns(53, dnsDomain, dns_ip, enableKubeDNS)
@when('kube-control.connected')
@when('snap.installed.kubectl')
@when('leadership.is_leader')
def create_service_configs(kube_control):
"""Create the users for kubelet"""
should_restart = False
# generate the username/pass for the requesting unit
proxy_token = get_token('system:kube-proxy')
if not proxy_token:
setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
proxy_token = get_token('system:kube-proxy')
should_restart = True
client_token = get_token('admin')
if not client_token:
setup_tokens(None, 'admin', 'admin', "system:masters")
client_token = get_token('admin')
should_restart = True
requests = kube_control.auth_user()
for request in requests:
username = request[1]['user']
group = request[1]['group']
kubelet_token = get_token(username)
if not kubelet_token and username and group:
# Usernames have to be in the form of system:node:<nodeName>
userid = "kubelet-{}".format(request[0].split('/')[1])
setup_tokens(None, username, userid, group)
kubelet_token = get_token(username)
kube_control.sign_auth_request(request[0], username,
kubelet_token, proxy_token,
client_token)
should_restart = True
if should_restart:
host.service_restart('snap.kube-apiserver.daemon')
remove_state('authentication.setup')
@when('kube-api-endpoint.available')
def push_service_data(kube_api):
''' Send configuration to the load balancer, and close access to the
public interface '''
kube_api.configure(port=6443)
def get_ingress_address(relation_name):
try:
network_info = hookenv.network_get(relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now; we could be more robust here
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-api-endpoint.available')
def send_data(tls, kube_api_endpoint):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
# Get the SDN gateway based on the cidr address.
kubernetes_service_ip = get_kubernetes_service_ip()
# Get ingress address
ingress_ip = get_ingress_address(kube_api_endpoint.relation_name)
domain = hookenv.config('dns_domain')
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
socket.gethostname(),
kubernetes_service_ip,
'kubernetes',
'kubernetes.{0}'.format(domain),
'kubernetes.default',
'kubernetes.default.svc',
'kubernetes.default.svc.{0}'.format(domain)
]
# maybe they have extra names they want as SANs
extra_sans = hookenv.config('extra_sans')
if extra_sans:
sans.extend(extra_sans.split())
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('config.changed.extra_sans', 'certificates.available',
'kube-api-endpoint.available')
def update_certificate(tls, kube_api_endpoint):
# Using the config.changed.extra_sans flag to catch changes.
# IP changes will take ~5 minutes or so to propagate, but
# it will update.
send_data(tls, kube_api_endpoint)
@when('certificates.server.cert.available',
'kubernetes-master.components.started',
'tls_client.server.certificate.written')
def kick_api_server(tls):
# need to be idempotent and don't want to kick the api server
# without need
if data_changed('cert', tls.get_server_cert()):
# certificate changed, so restart the api server
hookenv.log("Certificate information changed, restarting api server")
restart_apiserver()
tls_client.reset_certificate_write_flag('server')
@when_any('kubernetes-master.components.started', 'ceph-storage.configured')
@when('leadership.is_leader')
def configure_cdk_addons():
''' Configure CDK addons '''
remove_state('cdk-addons.configured')
load_gpu_plugin = hookenv.config('enable-nvidia-plugin').lower()
gpuEnable = (get_version('kube-apiserver') >= (1, 9) and
load_gpu_plugin == "auto" and
is_state('kubernetes-master.gpu.enabled'))
registry = hookenv.config('addons-registry')
dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()
dnsEnabled = str(hookenv.config('enable-kube-dns')).lower()
metricsEnabled = str(hookenv.config('enable-metrics')).lower()
if (is_state('ceph-storage.configured') and
get_version('kube-apiserver') >= (1, 10)):
cephEnabled = "true"
else:
cephEnabled = "false"
ceph_ep = endpoint_from_flag('ceph-storage.available')
ceph = {}
default_storage = ''
if ceph_ep:
b64_ceph_key = base64.b64encode(ceph_ep.key().encode('utf-8'))
ceph['admin_key'] = b64_ceph_key.decode('ascii')
ceph['kubernetes_key'] = b64_ceph_key.decode('ascii')
ceph['mon_hosts'] = ceph_ep.mon_hosts()
default_storage = hookenv.config('default-storage')
args = [
'arch=' + arch(),
'dns-ip=' + get_deprecated_dns_ip(),
'dns-domain=' + hookenv.config('dns_domain'),
'registry=' + registry,
'enable-dashboard=' + dbEnabled,
'enable-kube-dns=' + dnsEnabled,
'enable-metrics=' + metricsEnabled,
'enable-gpu=' + str(gpuEnable).lower(),
'enable-ceph=' + cephEnabled,
'ceph-admin-key=' + (ceph.get('admin_key', '')),
'ceph-kubernetes-key=' + (ceph.get('kubernetes_key', '')),
'ceph-mon-hosts="' + (ceph.get('mon_hosts', '')) + '"',
'default-storage=' + default_storage,
]
check_call(['snap', 'set', 'cdk-addons'] + args)
if not addons_ready():
remove_state('cdk-addons.configured')
return
set_state('cdk-addons.configured')
@retry(times=3, delay_secs=20)
def addons_ready():
"""
Test whether the addons were installed.
Returns: True if the addons were applied successfully.
"""
try:
check_call(['cdk-addons.apply'])
return True
except CalledProcessError:
hookenv.log("Addons are not ready yet.")
return False
@when('loadbalancer.available', 'certificates.ca.available',
'certificates.client.cert.available', 'authentication.setup')
def loadbalancer_kubeconfig(loadbalancer, ca, client):
# Get the potential list of loadbalancers from the relation object.
hosts = loadbalancer.get_addresses_ports()
# Get the public address of loadbalancers so users can access the cluster.
address = hosts[0].get('public-address')
# Get the port of the loadbalancer so users can access the cluster.
port = hosts[0].get('port')
server = 'https://{0}:{1}'.format(address, port)
build_kubeconfig(server)
@when('certificates.ca.available', 'certificates.client.cert.available',
'authentication.setup')
@when_not('loadbalancer.available')
def create_self_config(ca, client):
'''Create a kubernetes configuration for the master unit.'''
server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)
build_kubeconfig(server)
@when('ceph-storage.available')
def ceph_state_control(ceph_admin):
''' Determine if we should remove the state that controls the re-render
and execution of the ceph-relation-changed event because there
are changes in the relationship data, and we should re-render any
configs, keys, and/or service pre-reqs '''
ceph_relation_data = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'hostname': socket.gethostname(),
'key': ceph_admin.key()
}
# Re-execute the rendering if the data has changed.
if data_changed('ceph-config', ceph_relation_data):
remove_state('ceph-storage.configured')
@when('ceph-storage.available')
@when_not('ceph-storage.configured')
def ceph_storage(ceph_admin):
'''Ceph on kubernetes will require a few things - namely a ceph
configuration, and the ceph secret key file used for authentication.
This method will install the client package, and render the requisite files
in order to consume the ceph-storage relation.'''
# deprecated in 1.10 in favor of using CSI
if get_version('kube-apiserver') >= (1, 10):
# this is actually false, but by setting this flag we won't keep
# running this function for no reason. Also note that we watch this
# flag to run cdk-addons.apply.
set_state('ceph-storage.configured')
return
ceph_context = {
'mon_hosts': ceph_admin.mon_hosts(),
'fsid': ceph_admin.fsid(),
'auth_supported': ceph_admin.auth(),
'use_syslog': "true",
'ceph_public_network': '',
'ceph_cluster_network': '',
'loglevel': 1,
'hostname': socket.gethostname(),
}
# Install the ceph common utilities.
apt_install(['ceph-common'], fatal=True)
etc_ceph_directory = '/etc/ceph'
if not os.path.isdir(etc_ceph_directory):
os.makedirs(etc_ceph_directory)
charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')
# Render the ceph configuration from the ceph conf template
render('ceph.conf', charm_ceph_conf, ceph_context)
# The key can rotate independently of other ceph config, so validate it
admin_key = os.path.join(etc_ceph_directory,
'ceph.client.admin.keyring')
try:
with open(admin_key, 'w') as key_file:
key_file.write("[client.admin]\n\tkey = {}\n".format(
ceph_admin.key()))
except IOError as err:
hookenv.log("IOError writing admin.keyring: {}".format(err))
# Enlist the ceph-admin key as a kubernetes secret
if ceph_admin.key():
encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))
else:
# We didn't have a key, and cannot proceed. Do not set state and
# allow this method to re-execute
return
context = {'secret': encoded_key.decode('ascii')}
render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)
try:
# At first glance this is deceptive. The apply stanza will create if
# it doesn't exist, otherwise it will update the entry, ensuring our
# ceph-secret is always reflective of what we have in /etc/ceph
# assuming we have invoked this anytime that file would change.
cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']
check_call(cmd)
os.remove('/tmp/ceph-secret.yaml')
except: # NOQA
# the enlistment in kubernetes failed, return and prepare for re-exec
return
# when complete, set a state relating to configuration of the storage
# backend that will allow other modules to hook into this and verify we
# have performed the necessary pre-req steps to interface with a ceph
# deployment.
set_state('ceph-storage.configured')
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('config.changed.authorization-mode',
'kubernetes-master.components.started')
def switch_auth_mode():
config = hookenv.config()
mode = config.get('authorization-mode')
if data_changed('auth-mode', mode):
remove_state('kubernetes-master.components.started')
@when('kubernetes-master.components.started')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = (
'snap.kube-apiserver.daemon',
'snap.kube-controller-manager.daemon',
'snap.kube-scheduler.daemon'
)
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def is_privileged():
"""Return boolean indicating whether or not to set allow-privileged=true.
"""
privileged = hookenv.config('allow-privileged').lower()
if privileged == 'auto':
return is_state('kubernetes-master.gpu.enabled')
else:
return privileged == 'true'
@when('config.changed.allow-privileged')
@when('kubernetes-master.components.started')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
remove_state('kubernetes-master.components.started')
remove_state('config.changed.allow-privileged')
@when_any('config.changed.api-extra-args',
'config.changed.audit-policy',
'config.changed.audit-webhook-config')
@when('kubernetes-master.components.started')
@when('leadership.set.auto_storage_backend')
@when('etcd.available')
def reconfigure_apiserver(etcd):
configure_apiserver(etcd.get_connection_string())
@when('config.changed.controller-manager-extra-args')
@when('kubernetes-master.components.started')
def on_config_controller_manager_extra_args_change():
configure_controller_manager()
@when('config.changed.scheduler-extra-args')
@when('kubernetes-master.components.started')
def on_config_scheduler_extra_args_change():
configure_scheduler()
@when('kube-control.gpu.available')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.gpu.enabled')
def on_gpu_available(kube_control):
"""The remote side (kubernetes-worker) is gpu-enabled.
We need to run in privileged mode.
"""
kube_version = get_version('kube-apiserver')
config = hookenv.config()
if (config['allow-privileged'].lower() == "false" and
kube_version < (1, 9)):
return
remove_state('kubernetes-master.components.started')
set_state('kubernetes-master.gpu.enabled')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
@when_not('kubernetes-master.privileged')
def gpu_with_no_privileged():
"""We were in gpu mode, but the operator has set allow-privileged="false",
so we can't run in gpu mode anymore.
"""
if get_version('kube-apiserver') < (1, 9):
remove_state('kubernetes-master.gpu.enabled')
@when('kube-control.connected')
@when_not('kube-control.gpu.available')
@when('kubernetes-master.gpu.enabled')
@when('kubernetes-master.components.started')
def gpu_departed(kube_control):
"""We were in gpu mode, but the workers informed us there is
no gpu support anymore.
"""
remove_state('kubernetes-master.gpu.enabled')
@hook('stop')
def shutdown():
""" Stop the kubernetes master services
"""
service_stop('snap.kube-apiserver.daemon')
service_stop('snap.kube-controller-manager.daemon')
service_stop('snap.kube-scheduler.daemon')
def restart_apiserver():
hookenv.status_set('maintenance', 'Restarting kube-apiserver')
host.service_restart('snap.kube-apiserver.daemon')
def restart_controller_manager():
hookenv.status_set('maintenance', 'Restarting kube-controller-manager')
host.service_restart('snap.kube-controller-manager.daemon')
def restart_scheduler():
hookenv.status_set('maintenance', 'Restarting kube-scheduler')
host.service_restart('snap.kube-scheduler.daemon')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def build_kubeconfig(server):
'''Gather the relevant data for Kubernetes configuration objects and create
a config object with that information.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
ca_exists = ca and os.path.isfile(ca)
client_pass = get_password('basic_auth.csv', 'admin')
# Do we have everything we need?
if ca_exists and client_pass:
# Create an absolute path for the kubeconfig file.
kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')
# Create the kubeconfig on this system so users can access the cluster.
create_kubeconfig(kubeconfig_path, server, ca,
user='admin', password=client_pass)
# Make the config file readable by the ubuntu users so juju scp works.
cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]
check_call(cmd)
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
arguments for values of the Kubernetes server, CA, key, certificate, user
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
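# Illustrative usage sketch (hypothetical paths and values): build_kubeconfig()
# above invokes this helper with basic auth; a token-based variant of the same
# call would look like
#
#     create_kubeconfig('/home/ubuntu/config', 'https://10.0.0.1:6443',
#                       '/root/cdk/ca.crt', user='admin', token='abc123')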
def get_dns_ip():
cmd = "kubectl get service --namespace kube-system kube-dns --output json"
output = check_output(cmd, shell=True).decode()
svc = json.loads(output)
return svc['spec']['clusterIP']
def get_deprecated_dns_ip():
'''We previously hardcoded the dns ip. This function returns the old
hardcoded value for use with older versions of cdk_addons.'''
interface = ipaddress.IPv4Interface(service_cidr())
ip = interface.network.network_address + 10
return ip.exploded
def get_kubernetes_service_ip():
'''Get the IP address for the kubernetes service based on the cidr.'''
interface = ipaddress.IPv4Interface(service_cidr())
# Add .1 at the end of the network
ip = interface.network.network_address + 1
return ip.exploded
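# Illustrative note: with a service-cidr of 10.152.183.0/24 (a hypothetical
# value; the operator sets it via charm config), the two derivations above
# yield
#
#     get_kubernetes_service_ip()  # '10.152.183.1'  (network address + 1)
#     get_deprecated_dns_ip()      # '10.152.183.10' (network address + 10)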
def handle_etcd_relation(reldata):
''' Save the client credentials and set appropriate daemon flags when
etcd declares itself as available'''
# Define where the etcd tls files will be kept.
etcd_dir = '/root/cdk/etcd'
# Create paths to the etcd client ca, key, and cert file locations.
ca = os.path.join(etcd_dir, 'client-ca.pem')
key = os.path.join(etcd_dir, 'client-key.pem')
cert = os.path.join(etcd_dir, 'client-cert.pem')
# Save the client credentials (in relation data) to the paths provided.
reldata.save_client_credentials(key, cert, ca)
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
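# Illustrative note (hypothetical config value): parse_extra_args() maps a
# space-separated option string to a flag dict, treating bare flags as
# booleans, so 'v=4 profiling' becomes {'v': '4', 'profiling': 'true'}.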
def configure_kubernetes_service(service, base_args, extra_args_key):
prev_args_key = 'kubernetes-master.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
# note this is so we remove them from the snap's config
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
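# Illustrative note (hypothetical args): for args {'v': '4', 'stale-flag': 'null'}
# and service 'kube-apiserver', the command built above is
#
#     snap set kube-apiserver v=4 stale-flag=null
#
# where, per the comment in the loop above, assigning 'null' removes a key
# from the snap's configuration.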
def remove_if_exists(path):
try:
os.remove(path)
except FileNotFoundError:
pass
def write_audit_config_file(path, contents):
with open(path, 'w') as f:
header = '# Autogenerated by kubernetes-master charm'
f.write(header + '\n' + contents)
def configure_apiserver(etcd_connection_string):
api_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
client_cert_path = layer_options.get('client_certificate_path')
client_key_path = layer_options.get('client_key_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
# at one point in time, this code would set client-ca-file,
# but that was removed. This was before configure_kubernetes_service
# kept track of old arguments and removed them, so client-ca-file
# was able to hang around forever stored in the snap configuration.
# This removes that stale configuration from the snap if it still
# exists.
api_opts['client-ca-file'] = 'null'
if is_privileged():
api_opts['allow-privileged'] = 'true'
set_state('kubernetes-master.privileged')
else:
api_opts['allow-privileged'] = 'false'
remove_state('kubernetes-master.privileged')
# Handle static options for now
api_opts['service-cluster-ip-range'] = service_cidr()
api_opts['min-request-timeout'] = '300'
api_opts['v'] = '4'
api_opts['tls-cert-file'] = server_cert_path
api_opts['tls-private-key-file'] = server_key_path
api_opts['kubelet-certificate-authority'] = ca_cert_path
api_opts['kubelet-client-certificate'] = client_cert_path
api_opts['kubelet-client-key'] = client_key_path
api_opts['logtostderr'] = 'true'
api_opts['insecure-bind-address'] = '127.0.0.1'
api_opts['insecure-port'] = '8080'
api_opts['storage-backend'] = getStorageBackend()
api_opts['basic-auth-file'] = '/root/cdk/basic_auth.csv'
api_opts['token-auth-file'] = '/root/cdk/known_tokens.csv'
api_opts['service-account-key-file'] = '/root/cdk/serviceaccount.key'
api_opts['kubelet-preferred-address-types'] = \
'[InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP]'
api_opts['advertise-address'] = get_ingress_address('kube-control')
etcd_dir = '/root/cdk/etcd'
etcd_ca = os.path.join(etcd_dir, 'client-ca.pem')
etcd_key = os.path.join(etcd_dir, 'client-key.pem')
etcd_cert = os.path.join(etcd_dir, 'client-cert.pem')
api_opts['etcd-cafile'] = etcd_ca
api_opts['etcd-keyfile'] = etcd_key
api_opts['etcd-certfile'] = etcd_cert
api_opts['etcd-servers'] = etcd_connection_string
admission_control_pre_1_9 = [
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'ResourceQuota',
'DefaultTolerationSeconds'
]
admission_control = [
'NamespaceLifecycle',
'LimitRanger',
'ServiceAccount',
'PersistentVolumeLabel',
'DefaultStorageClass',
'DefaultTolerationSeconds',
'MutatingAdmissionWebhook',
'ValidatingAdmissionWebhook',
'ResourceQuota'
]
auth_mode = hookenv.config('authorization-mode')
if 'Node' in auth_mode:
admission_control.append('NodeRestriction')
api_opts['authorization-mode'] = auth_mode
kube_version = get_version('kube-apiserver')
if kube_version < (1, 6):
hookenv.log('Removing DefaultTolerationSeconds from admission-control')
admission_control_pre_1_9.remove('DefaultTolerationSeconds')
if kube_version < (1, 9):
api_opts['admission-control'] = ','.join(admission_control_pre_1_9)
else:
api_opts['admission-control'] = ','.join(admission_control)
if kube_version > (1, 6) and \
hookenv.config('enable-metrics'):
api_opts['requestheader-client-ca-file'] = ca_cert_path
api_opts['requestheader-allowed-names'] = 'client'
api_opts['requestheader-extra-headers-prefix'] = 'X-Remote-Extra-'
api_opts['requestheader-group-headers'] = 'X-Remote-Group'
api_opts['requestheader-username-headers'] = 'X-Remote-User'
api_opts['proxy-client-cert-file'] = client_cert_path
api_opts['proxy-client-key-file'] = client_key_path
api_opts['enable-aggregator-routing'] = 'true'
api_opts['client-ca-file'] = ca_cert_path
if is_state('endpoint.aws.ready'):
api_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kube-apiserver')
api_opts['cloud-provider'] = 'gce'
api_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.openstack.ready'):
cloud_config_path = _cloud_config_path('kube-apiserver')
api_opts['cloud-provider'] = 'openstack'
api_opts['cloud-config'] = str(cloud_config_path)
elif (is_state('endpoint.vsphere.ready') and
get_version('kube-apiserver') >= (1, 12)):
cloud_config_path = _cloud_config_path('kube-apiserver')
api_opts['cloud-provider'] = 'vsphere'
api_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.azure.ready'):
cloud_config_path = _cloud_config_path('kube-apiserver')
api_opts['cloud-provider'] = 'azure'
api_opts['cloud-config'] = str(cloud_config_path)
audit_root = '/root/cdk/audit'
os.makedirs(audit_root, exist_ok=True)
audit_log_path = audit_root + '/audit.log'
api_opts['audit-log-path'] = audit_log_path
api_opts['audit-log-maxsize'] = '100'
api_opts['audit-log-maxbackup'] = '9'
audit_policy_path = audit_root + '/audit-policy.yaml'
audit_policy = hookenv.config('audit-policy')
if audit_policy:
write_audit_config_file(audit_policy_path, audit_policy)
api_opts['audit-policy-file'] = audit_policy_path
else:
remove_if_exists(audit_policy_path)
audit_webhook_config_path = audit_root + '/audit-webhook-config.yaml'
audit_webhook_config = hookenv.config('audit-webhook-config')
if audit_webhook_config:
write_audit_config_file(audit_webhook_config_path,
audit_webhook_config)
api_opts['audit-webhook-config-file'] = audit_webhook_config_path
else:
remove_if_exists(audit_webhook_config_path)
configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
restart_apiserver()
def configure_controller_manager():
controller_opts = {}
# Get the tls paths from the layer data.
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
# Default to 3 minute resync. TODO: Make this configurable?
controller_opts['min-resync-period'] = '3m'
controller_opts['v'] = '2'
controller_opts['root-ca-file'] = ca_cert_path
controller_opts['logtostderr'] = 'true'
controller_opts['master'] = 'http://127.0.0.1:8080'
controller_opts['service-account-private-key-file'] = \
'/root/cdk/serviceaccount.key'
if is_state('endpoint.aws.ready'):
controller_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kube-controller-manager')
controller_opts['cloud-provider'] = 'gce'
controller_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.openstack.ready'):
cloud_config_path = _cloud_config_path('kube-controller-manager')
controller_opts['cloud-provider'] = 'openstack'
controller_opts['cloud-config'] = str(cloud_config_path)
elif (is_state('endpoint.vsphere.ready') and
get_version('kube-apiserver') >= (1, 12)):
cloud_config_path = _cloud_config_path('kube-controller-manager')
controller_opts['cloud-provider'] = 'vsphere'
controller_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.azure.ready'):
cloud_config_path = _cloud_config_path('kube-controller-manager')
controller_opts['cloud-provider'] = 'azure'
controller_opts['cloud-config'] = str(cloud_config_path)
configure_kubernetes_service('kube-controller-manager', controller_opts,
'controller-manager-extra-args')
restart_controller_manager()
def configure_scheduler():
scheduler_opts = {}
scheduler_opts['v'] = '2'
scheduler_opts['logtostderr'] = 'true'
scheduler_opts['master'] = 'http://127.0.0.1:8080'
configure_kubernetes_service('kube-scheduler', scheduler_opts,
'scheduler-extra-args')
restart_scheduler()
def setup_basic_auth(password=None, username='admin', uid='admin',
groups=None):
'''Create the htaccess file and the tokens.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
htaccess = os.path.join(root_cdk, 'basic_auth.csv')
if not password:
password = token_generator()
with open(htaccess, 'w') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"'.format(password,
username, uid, groups))
else:
stream.write('{0},{1},{2}'.format(password, username, uid))
def setup_tokens(token, username, user, groups=None):
'''Create a token file for kubernetes authentication.'''
root_cdk = '/root/cdk'
if not os.path.isdir(root_cdk):
os.makedirs(root_cdk)
known_tokens = os.path.join(root_cdk, 'known_tokens.csv')
if not token:
token = token_generator()
with open(known_tokens, 'a') as stream:
if groups:
stream.write('{0},{1},{2},"{3}"\n'.format(token,
username,
user,
groups))
else:
stream.write('{0},{1},{2}\n'.format(token, username, user))
def get_password(csv_fname, user):
'''Get the password of user within the csv file provided.'''
root_cdk = '/root/cdk'
tokens_fname = os.path.join(root_cdk, csv_fname)
if not os.path.isfile(tokens_fname):
return None
with open(tokens_fname, 'r') as stream:
for line in stream:
record = line.split(',')
if record[1] == user:
return record[0]
return None
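# The rows written by setup_basic_auth/setup_tokens above have the form
# 'secret,username,uid' or 'secret,username,uid,"group1,group2"', so
# record[0] is the password/token and record[1] the username matched here.
# The naive line.split(',') also splits inside the quoted groups field,
# which is harmless because only the first two columns are used.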
def get_token(username):
"""Grab a token from the static file if present. """
return get_password('known_tokens.csv', username)
def set_token(password, save_salt):
''' Store a token so it can be recalled later by token_generator.
param: password - the password to be stored
param: save_salt - the key to store the value of the token.'''
db.set(save_salt, password)
return db.get(save_salt)
def token_generator(length=32):
''' Generate a random token for use in passwords and account tokens.
param: length - the length of the token to generate'''
alpha = string.ascii_letters + string.digits
token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))
return token
@retry(times=3, delay_secs=10)
def all_kube_system_pods_running():
''' Check pod status in the kube-system namespace. Returns True if all
pods are running, False otherwise. '''
cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
result = json.loads(output)
except CalledProcessError:
hookenv.log('failed to get kube-system pod status')
return False
hookenv.log('Checking system pods status: {}'.format(', '.join(
'='.join([pod['metadata']['name'], pod['status']['phase']])
for pod in result['items'])))
all_pending = all(pod['status']['phase'] == 'Pending'
for pod in result['items'])
if is_state('endpoint.gcp.ready') and all_pending:
poke_network_unavailable()
return False
# All pods must be Running or Evicted (which should re-spawn)
all_running = all(pod['status']['phase'] == 'Running' or
pod['status'].get('reason', '') == 'Evicted'
for pod in result['items'])
return all_running
def poke_network_unavailable():
"""
Work around https://github.com/kubernetes/kubernetes/issues/44254 by
manually poking the status into the API server to tell the nodes they have
a network route.
This is needed because kubelet sets the NetworkUnavailable flag and expects
the network plugin to clear it, which only kubenet does. There is some
discussion about refactoring the affected code but nothing has happened
in a while.
"""
cmd = ['kubectl', 'get', 'nodes', '-o', 'json']
try:
output = check_output(cmd).decode('utf-8')
nodes = json.loads(output)['items']
except CalledProcessError:
hookenv.log('failed to get kube-system nodes')
return
except (KeyError, json.JSONDecodeError) as e:
hookenv.log('failed to parse kube-system node status '
'({}): {}'.format(e, output), hookenv.ERROR)
return
for node in nodes:
node_name = node['metadata']['name']
url = 'http://localhost:8080/api/v1/nodes/{}/status'.format(node_name)
with urlopen(url) as response:
code = response.getcode()
body = response.read().decode('utf8')
if code != 200:
hookenv.log('failed to get node status from {} [{}]: {}'.format(
url, code, body), hookenv.ERROR)
return
try:
node_info = json.loads(body)
conditions = node_info['status']['conditions']
i = [c['type'] for c in conditions].index('NetworkUnavailable')
if conditions[i]['status'] == 'True':
hookenv.log('Clearing NetworkUnavailable from {}'.format(
node_name))
conditions[i] = {
"type": "NetworkUnavailable",
"status": "False",
"reason": "RouteCreated",
"message": "Manually set through k8s api",
}
req = Request(url, method='PUT',
data=json.dumps(node_info).encode('utf8'),
headers={'Content-Type': 'application/json'})
with urlopen(req) as response:
code = response.getcode()
body = response.read().decode('utf8')
if code not in (200, 201, 202):
hookenv.log('failed to update node status [{}]: {}'.format(
code, body), hookenv.ERROR)
return
except (json.JSONDecodeError, KeyError):
hookenv.log('failed to parse node status: {}'.format(body),
hookenv.ERROR)
return
def apiserverVersion():
cmd = 'kube-apiserver --version'.split()
version_string = check_output(cmd).decode('utf-8')
return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
def touch(fname):
try:
os.utime(fname, None)
except OSError:
open(fname, 'a').close()
def getStorageBackend():
storage_backend = hookenv.config('storage-backend')
if storage_backend == 'auto':
storage_backend = leader_get('auto_storage_backend')
return storage_backend
@when('leadership.is_leader')
@when_not('leadership.set.cluster_tag')
def create_cluster_tag():
cluster_tag = 'kubernetes-{}'.format(token_generator().lower())
leader_set(cluster_tag=cluster_tag)
@when('leadership.set.cluster_tag',
'kube-control.connected')
@when_not('kubernetes-master.cluster-tag-sent')
def send_cluster_tag():
cluster_tag = leader_get('cluster_tag')
kube_control = endpoint_from_flag('kube-control.connected')
kube_control.set_cluster_tag(cluster_tag)
set_state('kubernetes-master.cluster-tag-sent')
@when_not('kube-control.connected')
def clear_cluster_tag_sent():
remove_state('kubernetes-master.cluster-tag-sent')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
@when_not('kubernetes-master.cloud.ready')
def set_cloud_pending():
k8s_version = get_version('kube-apiserver')
k8s_1_11 = k8s_version >= (1, 11)
k8s_1_12 = k8s_version >= (1, 12)
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
set_state('kubernetes-master.cloud.blocked')
else:
remove_state('kubernetes-master.cloud.blocked')
set_state('kubernetes-master.cloud.pending')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.azure.joined')
@when('leadership.set.cluster_tag')
@when_not('kubernetes-master.cloud.request-sent')
def request_integration():
hookenv.status_set('maintenance', 'requesting cloud integration')
cluster_tag = leader_get('cluster_tag')
if is_state('endpoint.aws.joined'):
cloud = endpoint_from_flag('endpoint.aws.joined')
cloud.tag_instance({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
'k8s.io/role/master': 'true',
})
cloud.tag_instance_security_group({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_subnet({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.enable_object_storage_management(['kubernetes-*'])
cloud.enable_load_balancer_management()
elif is_state('endpoint.gcp.joined'):
cloud = endpoint_from_flag('endpoint.gcp.joined')
cloud.label_instance({
'k8s-io-cluster-name': cluster_tag,
'k8s-io-role-master': 'master',
})
cloud.enable_object_storage_management()
cloud.enable_security_management()
elif is_state('endpoint.azure.joined'):
cloud = endpoint_from_flag('endpoint.azure.joined')
cloud.tag_instance({
'k8s-io-cluster-name': cluster_tag,
'k8s-io-role-master': 'master',
})
cloud.enable_object_storage_management()
cloud.enable_security_management()
cloud.enable_instance_inspection()
cloud.enable_network_management()
cloud.enable_dns_management()
cloud.enable_block_storage_management()
set_state('kubernetes-master.cloud.request-sent')
@when_none('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
def clear_cloud_flags():
remove_state('kubernetes-master.cloud.pending')
remove_state('kubernetes-master.cloud.request-sent')
remove_state('kubernetes-master.cloud.blocked')
remove_state('kubernetes-master.cloud.ready')
@when_any('endpoint.aws.ready',
'endpoint.gcp.ready',
'endpoint.openstack.ready',
'endpoint.vsphere.ready',
'endpoint.azure.ready')
@when_not('kubernetes-master.cloud.blocked',
'kubernetes-master.cloud.ready')
def cloud_ready():
if is_state('endpoint.gcp.ready'):
_write_gcp_snap_config('kube-apiserver')
_write_gcp_snap_config('kube-controller-manager')
elif is_state('endpoint.openstack.ready'):
_write_openstack_snap_config('kube-apiserver')
_write_openstack_snap_config('kube-controller-manager')
elif is_state('endpoint.vsphere.ready'):
_write_vsphere_snap_config('kube-apiserver')
_write_vsphere_snap_config('kube-controller-manager')
elif is_state('endpoint.azure.ready'):
_write_azure_snap_config('kube-apiserver')
_write_azure_snap_config('kube-controller-manager')
remove_state('kubernetes-master.cloud.pending')
set_state('kubernetes-master.cloud.ready')
remove_state('kubernetes-master.components.started') # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _cdk_addons_template_path():
return Path('/snap/cdk-addons/current/templates')
def _write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
# openstack requires additional credentials setup
openstack = endpoint_from_flag('endpoint.openstack.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('\n'.join([
'[Global]',
'auth-url = {}'.format(openstack.auth_url),
'username = {}'.format(openstack.username),
'password = {}'.format(openstack.password),
'tenant-name = {}'.format(openstack.project_name),
'domain-name = {}'.format(openstack.user_domain_name),
]))
def _write_vsphere_snap_config(component):
# vsphere requires additional cloud config
vsphere = endpoint_from_flag('endpoint.vsphere.ready')
# NB: vsphere provider will ask kube-apiserver and -controller-manager to
# find a uuid from sysfs unless a global config value is set. Our strict
# snaps cannot read sysfs, so let's do it in the charm. An invalid uuid is
# not fatal for storage, but it will muddy the logs; try to get it right.
uuid_file = '/sys/class/dmi/id/product_uuid'
try:
with open(uuid_file, 'r') as f:
uuid = f.read().strip()
except IOError as err:
hookenv.log("Unable to read UUID from sysfs: {}".format(err))
uuid = 'UNKNOWN'
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('\n'.join([
'[Global]',
'insecure-flag = true',
'datacenters = "{}"'.format(vsphere.datacenter),
'vm-uuid = "VMware-{}"'.format(uuid),
'[VirtualCenter "{}"]'.format(vsphere.vsphere_ip),
'user = {}'.format(vsphere.user),
'password = {}'.format(vsphere.password),
'[Workspace]',
'server = {}'.format(vsphere.vsphere_ip),
'datacenter = "{}"'.format(vsphere.datacenter),
'default-datastore = "{}"'.format(vsphere.datastore),
'folder = "kubernetes"',
'resourcepool-path = ""',
'[Disk]',
'scsicontrollertype = "pvscsi"',
]))
def _write_azure_snap_config(component):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text(json.dumps({
'useInstanceMetadata': True,
'useManagedIdentityExtension': True,
'subscriptionId': azure.subscription_id,
'resourceGroup': azure.resource_group,
'location': azure.resource_group_location,
'vnetName': azure.vnet_name,
'vnetResourceGroup': azure.vnet_resource_group,
'subnetName': azure.subnet_name,
'securityGroupName': azure.security_group_name,
}))
| apache-2.0 |
jmighion/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py | 7 | 25742 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: ec2_vpc_route_table
short_description: Manage route tables for AWS virtual private clouds
description:
- Manage route tables for AWS virtual private clouds
version_added: "2.0"
author: Robert Estelle (@erydo), Rob White (@wimnat)
options:
lookup:
description:
- "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail.
If no tags are specified then no lookup for an existing route table is performed and a new
route table will be created. To change tags of a route table or delete a route table,
you must look up by id."
required: false
default: tag
choices: [ 'tag', 'id' ]
propagating_vgw_ids:
description:
- "Enable route propagation from virtual gateways specified by ID."
default: None
required: false
purge_routes:
version_added: "2.3"
description:
- "Purge existing routes that are not found in routes."
required: false
default: 'true'
aliases: []
purge_subnets:
version_added: "2.3"
description:
- "Purge existing subnets that are not found in subnets."
required: false
default: 'true'
aliases: []
route_table_id:
description:
- "The ID of the route table to update or delete."
required: false
default: null
routes:
description:
- "List of routes in the route table.
Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
'instance_id', 'interface_id', or 'vpc_peering_connection_id'.
If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required for present states."
required: false
default: None
state:
description:
- "Create or destroy the VPC route table"
required: false
default: present
choices: [ 'present', 'absent' ]
subnets:
description:
- "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'."
required: false
tags:
description:
- "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are
used to uniquely identify route tables within a VPC when the route_table_id is not supplied."
required: false
default: null
aliases: [ "resource_tags" ]
vpc_id:
description:
- "VPC ID of the VPC in which to create the route table."
required: true
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Basic creation example:
- name: Set up public subnet route table
ec2_vpc_route_table:
vpc_id: vpc-1245678
region: us-west-1
tags:
Name: Public
subnets:
- "{{ jumpbox_subnet.subnet.id }}"
- "{{ frontend_subnet.subnet.id }}"
- "{{ vpn_subnet.subnet_id }}"
routes:
- dest: 0.0.0.0/0
gateway_id: "{{ igw.gateway_id }}"
register: public_route_table
- name: Set up NAT-protected route table
ec2_vpc_route_table:
vpc_id: vpc-1245678
region: us-west-1
tags:
Name: Internal
subnets:
- "{{ application_subnet.subnet.id }}"
- 'Database Subnet'
- '10.0.0.0/8'
routes:
- dest: 0.0.0.0/0
instance_id: "{{ nat.instance_id }}"
register: nat_route_table
'''
import re
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
try:
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
class AnsibleRouteTableException(Exception):
def __init__(self, message, error_traceback=None):
self.message = message
self.error_traceback = error_traceback
class AnsibleIgwSearchException(AnsibleRouteTableException):
pass
class AnsibleTagCreationException(AnsibleRouteTableException):
pass
class AnsibleSubnetSearchException(AnsibleRouteTableException):
pass
CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
def find_subnets(vpc_conn, vpc_id, identified_subnets):
"""
Finds a list of subnets, each identified either by a raw ID, a unique
'Name' tag, or a CIDR such as 10.0.0.0/8.
Note that this function is duplicated in other ec2 modules, and should
potentially be moved into a shared module_utils
"""
subnet_ids = []
subnet_names = []
subnet_cidrs = []
for subnet in (identified_subnets or []):
if re.match(SUBNET_RE, subnet):
subnet_ids.append(subnet)
elif re.match(CIDR_RE, subnet):
subnet_cidrs.append(subnet)
else:
subnet_names.append(subnet)
subnets_by_id = []
if subnet_ids:
subnets_by_id = vpc_conn.get_all_subnets(
subnet_ids, filters={'vpc_id': vpc_id})
for subnet_id in subnet_ids:
if not any(s.id == subnet_id for s in subnets_by_id):
raise AnsibleSubnetSearchException(
'Subnet ID "{0}" does not exist'.format(subnet_id))
subnets_by_cidr = []
if subnet_cidrs:
subnets_by_cidr = vpc_conn.get_all_subnets(
filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs})
for cidr in subnet_cidrs:
if not any(s.cidr_block == cidr for s in subnets_by_cidr):
raise AnsibleSubnetSearchException(
'Subnet CIDR "{0}" does not exist'.format(cidr))
subnets_by_name = []
if subnet_names:
subnets_by_name = vpc_conn.get_all_subnets(
filters={'vpc_id': vpc_id, 'tag:Name': subnet_names})
for name in subnet_names:
matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name])
if matching_count == 0:
raise AnsibleSubnetSearchException(
'Subnet named "{0}" does not exist'.format(name))
elif matching_count > 1:
raise AnsibleSubnetSearchException(
'Multiple subnets named "{0}"'.format(name))
return subnets_by_id + subnets_by_cidr + subnets_by_name
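# Illustrative usage (identifiers hypothetical):
# find_subnets(vpc_conn, 'vpc-12345678',
# ['subnet-abc12345', 'Database Subnet', '10.0.0.0/24'])
# resolves the raw ID, the Name tag and the CIDR to boto Subnet objects,
# raising AnsibleSubnetSearchException for anything missing or ambiguous.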
def find_igw(vpc_conn, vpc_id):
"""
Finds the Internet gateway for the given VPC ID.
Raises an AnsibleIgwSearchException if either no IGW can be found, or more
than one found for the given VPC.
Note that this function is duplicated in other ec2 modules, and should
potentially be moved into a shared module_utils
"""
igw = vpc_conn.get_all_internet_gateways(
filters={'attachment.vpc-id': vpc_id})
if not igw:
raise AnsibleIgwSearchException('No IGW found for VPC {0}'.
format(vpc_id))
elif len(igw) == 1:
return igw[0].id
else:
raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'.
format(vpc_id))
def get_resource_tags(vpc_conn, resource_id):
return dict((t.name, t.value) for t in
vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
def tags_match(match_tags, candidate_tags):
return all((k in candidate_tags and candidate_tags[k] == v
for k, v in match_tags.items()))
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
try:
cur_tags = get_resource_tags(vpc_conn, resource_id)
if tags == cur_tags:
return {'changed': False, 'tags': cur_tags}
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only:
vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
if to_add:
vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
latest_tags = get_resource_tags(vpc_conn, resource_id)
return {'changed': True, 'tags': latest_tags}
except EC2ResponseError as e:
raise AnsibleTagCreationException(
message='Unable to update tags for {0}, error: {1}'.format(resource_id, e),
error_traceback=traceback.format_exc())
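# Sketch of the diff semantics above (tags hypothetical): with
# cur_tags={'Name': 'a', 'Env': 'x'} and tags={'Name': 'b', 'Team': 'y'},
# to_delete={'Env': 'x'} (skipped when add_only) and to_add={'Team': 'y'};
# note that 'Name' is neither deleted nor re-added, so an existing key's
# value is never updated by this function.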
def get_route_table_by_id(vpc_conn, vpc_id, route_table_id):
route_table = None
route_tables = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id], filters={'vpc_id': vpc_id})
if route_tables:
route_table = route_tables[0]
return route_table
def get_route_table_by_tags(vpc_conn, vpc_id, tags):
count = 0
route_table = None
route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id})
for table in route_tables:
this_tags = get_resource_tags(vpc_conn, table.id)
if tags_match(tags, this_tags):
route_table = table
count += 1
if count > 1:
raise RuntimeError("Tags provided do not identify a unique route table")
else:
return route_table
def route_spec_matches_route(route_spec, route):
key_attr_map = {
'destination_cidr_block': 'destination_cidr_block',
'gateway_id': 'gateway_id',
'instance_id': 'instance_id',
'interface_id': 'interface_id',
'vpc_peering_connection_id': 'vpc_peering_connection_id',
}
# This is a workaround to catch managed NAT gateways as they do not show
# up in any of the returned values when describing route tables.
# The caveat of doing it this way is that if there is an existing route
# for another NAT gateway in this route table, there is no way to change
# it to a different NAT gateway ID. The long-term solution would be to
# move to boto3 (a very big task for this module) or to update boto.
if route_spec.get('gateway_id') and 'nat-' in route_spec['gateway_id']:
if route.destination_cidr_block == route_spec['destination_cidr_block']:
if all((not route.gateway_id, not route.instance_id, not route.interface_id, not route.vpc_peering_connection_id)):
return True
for k in key_attr_map:
if k in route_spec:
if route_spec[k] != getattr(route, k):
return False
return True
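# Example of the managed-NAT workaround above (IDs hypothetical): a spec
# {'destination_cidr_block': '0.0.0.0/0', 'gateway_id': 'nat-0abc123'}
# matches an existing route to 0.0.0.0/0 whose gateway_id, instance_id,
# interface_id and vpc_peering_connection_id are all empty, because boto
# does not report managed NAT gateways when describing route tables.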
def route_spec_matches_route_cidr(route_spec, route):
cidr_attr = 'destination_cidr_block'
return route_spec[cidr_attr] == getattr(route, cidr_attr)
def rename_key(d, old_key, new_key):
d[new_key] = d[old_key]
del d[old_key]
def index_of_matching_route(route_spec, routes_to_match):
for i, route in enumerate(routes_to_match):
if route_spec_matches_route(route_spec, route):
return i
elif route_spec_matches_route_cidr(route_spec, route):
return "replace"
def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids,
check_mode, purge_routes):
routes_to_match = list(route_table.routes)
route_specs_to_create = []
route_specs_to_recreate = []
for route_spec in route_specs:
i = index_of_matching_route(route_spec, routes_to_match)
if i is None:
route_specs_to_create.append(route_spec)
elif i == "replace":
route_specs_to_recreate.append(route_spec)
else:
del routes_to_match[i]
# NOTE: As of boto==2.38.0, the origin of a route is not available
# (for example, whether it came from a gateway with route propagation
# enabled). Testing for origin == 'EnableVgwRoutePropagation' is more
# correct than checking whether the route uses a propagating VGW.
# The current logic will leave non-propagated routes using propagating
# VGWs in place.
routes_to_delete = []
if purge_routes:
for r in routes_to_match:
if r.gateway_id:
if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'):
if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids:
routes_to_delete.append(r)
else:
routes_to_delete.append(r)
changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate)
if changed:
for route in routes_to_delete:
try:
vpc_conn.delete_route(route_table.id,
route.destination_cidr_block,
dry_run=check_mode)
except EC2ResponseError as e:
if e.error_code == 'DryRunOperation':
pass
for route_spec in route_specs_to_create:
try:
vpc_conn.create_route(route_table.id,
dry_run=check_mode,
**route_spec)
except EC2ResponseError as e:
if e.error_code == 'DryRunOperation':
pass
for route_spec in route_specs_to_recreate:
if not check_mode:
vpc_conn.replace_route(route_table.id,
**route_spec)
return {'changed': bool(changed)}
def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id,
check_mode):
route_tables = vpc_conn.get_all_route_tables(
filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id}
)
for route_table in route_tables:
if route_table.id is None:
continue
for a in route_table.associations:
if a.main:
continue
if a.subnet_id == subnet_id:
if route_table.id == route_table_id:
return {'changed': False, 'association_id': a.id}
else:
if check_mode:
return {'changed': True}
vpc_conn.disassociate_route_table(a.id)
association_id = vpc_conn.associate_route_table(route_table_id, subnet_id)
return {'changed': True, 'association_id': association_id}
def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets,
check_mode, purge_subnets):
current_association_ids = [a.id for a in route_table.associations if not a.main]
new_association_ids = []
changed = False
for subnet in subnets:
result = ensure_subnet_association(
vpc_conn, vpc_id, route_table.id, subnet.id, check_mode)
changed = changed or result['changed']
if changed and check_mode:
return {'changed': True}
new_association_ids.append(result['association_id'])
if purge_subnets:
to_delete = [a_id for a_id in current_association_ids
if a_id not in new_association_ids]
for a_id in to_delete:
changed = True
vpc_conn.disassociate_route_table(a_id, dry_run=check_mode)
return {'changed': changed}
def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids,
check_mode):
# NOTE: As of boto==2.38.0, it is not yet possible to query the existing
# propagating gateways. However, EC2 does support this as shown in its API
# documentation. For now, a reasonable proxy for this is the presence of
# propagated routes using the gateway in the route table. If such a route
# is found, propagation is almost certainly enabled.
changed = False
for vgw_id in propagating_vgw_ids:
for r in list(route_table.routes):
if r.gateway_id == vgw_id:
return {'changed': False}
changed = True
vpc_conn.enable_vgw_route_propagation(route_table.id,
vgw_id,
dry_run=check_mode)
return {'changed': changed}
def ensure_route_table_absent(connection, module):
lookup = module.params.get('lookup')
route_table_id = module.params.get('route_table_id')
tags = module.params.get('tags')
vpc_id = module.params.get('vpc_id')
purge_subnets = module.params.get('purge_subnets')
if lookup == 'tag':
if tags is not None:
try:
route_table = get_route_table_by_tags(connection, vpc_id, tags)
except EC2ResponseError as e:
module.fail_json(msg="Error finding route table with lookup 'tag': {0}".format(e.message),
exception=traceback.format_exc())
except RuntimeError as e:
module.fail_json(msg=e.args[0], exception=traceback.format_exc())
else:
route_table = None
elif lookup == 'id':
try:
route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
except EC2ResponseError as e:
module.fail_json(msg="Error finding route table with lookup 'id': {0}".format(e.message),
exception=traceback.format_exc())
if route_table is None:
return {'changed': False}
# disassociate subnets before deleting route table
ensure_subnet_associations(connection, vpc_id, route_table, [], module.check_mode, purge_subnets)
try:
connection.delete_route_table(route_table.id, dry_run=module.check_mode)
except EC2ResponseError as e:
if e.error_code == 'DryRunOperation':
pass
else:
module.fail_json(msg="Error deleting route table: {0}".format(e.message),
exception=traceback.format_exc())
return {'changed': True}
def get_route_table_info(route_table):
# Add any routes to array
routes = []
for route in route_table.routes:
routes.append(route.__dict__)
route_table_info = {'id': route_table.id,
'routes': routes,
'tags': route_table.tags,
'vpc_id': route_table.vpc_id}
return route_table_info
def create_route_spec(connection, module, vpc_id):
routes = module.params.get('routes')
for route_spec in routes:
rename_key(route_spec, 'dest', 'destination_cidr_block')
if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
igw = find_igw(connection, vpc_id)
route_spec['gateway_id'] = igw
return routes
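# Example (IDs hypothetical): a route spec {'dest': '10.0.0.0/16',
# 'gateway_id': 'igw'} becomes
# {'destination_cidr_block': '10.0.0.0/16', 'gateway_id': 'igw-0abc123'}
# after the key rename and the IGW lookup for the VPC.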
def ensure_route_table_present(connection, module):
lookup = module.params.get('lookup')
propagating_vgw_ids = module.params.get('propagating_vgw_ids')
purge_routes = module.params.get('purge_routes')
purge_subnets = module.params.get('purge_subnets')
route_table_id = module.params.get('route_table_id')
subnets = module.params.get('subnets')
tags = module.params.get('tags')
vpc_id = module.params.get('vpc_id')
try:
routes = create_route_spec(connection, module, vpc_id)
except AnsibleIgwSearchException as e:
module.fail_json(msg="Failed to find the Internet gateway for the given VPC ID {0}: {1}".format(vpc_id, e[0]),
exception=traceback.format_exc())
changed = False
tags_valid = False
if lookup == 'tag':
if tags is not None:
try:
route_table = get_route_table_by_tags(connection, vpc_id, tags)
except EC2ResponseError as e:
module.fail_json(msg="Error finding route table with lookup 'tag': {0}".format(e.message),
exception=traceback.format_exc())
except RuntimeError as e:
module.fail_json(msg=e.args[0], exception=traceback.format_exc())
else:
route_table = None
elif lookup == 'id':
try:
route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
except EC2ResponseError as e:
module.fail_json(msg="Error finding route table with lookup 'id': {0}".format(e.message),
exception=traceback.format_exc())
# If no route table returned then create new route table
if route_table is None:
try:
route_table = connection.create_route_table(vpc_id, module.check_mode)
changed = True
except EC2ResponseError as e:
if e.error_code == 'DryRunOperation':
module.exit_json(changed=True)
module.fail_json(msg="Failed to create route table: {0}".format(e.message),
exception=traceback.format_exc())
if routes is not None:
try:
result = ensure_routes(connection, route_table, routes,
propagating_vgw_ids, module.check_mode,
purge_routes)
changed = changed or result['changed']
except EC2ResponseError as e:
module.fail_json(msg="Error while updating routes: {0}".format(e.message),
exception=traceback.format_exc())
if propagating_vgw_ids is not None:
result = ensure_propagation(connection, route_table,
propagating_vgw_ids,
check_mode=module.check_mode)
changed = changed or result['changed']
if not tags_valid and tags is not None:
result = ensure_tags(connection, route_table.id, tags,
add_only=True, check_mode=module.check_mode)
route_table.tags = result['tags']
changed = changed or result['changed']
if subnets:
associated_subnets = []
try:
associated_subnets = find_subnets(connection, vpc_id, subnets)
except EC2ResponseError as e:
raise AnsibleRouteTableException(
message='Unable to find subnets for route table {0}, error: {1}'
.format(route_table, e),
error_traceback=traceback.format_exc()
)
try:
result = ensure_subnet_associations(connection, vpc_id, route_table,
associated_subnets,
module.check_mode,
purge_subnets)
changed = changed or result['changed']
except EC2ResponseError as e:
raise AnsibleRouteTableException(
message='Unable to associate subnets for route table {0}, error: {1}'
.format(route_table, e),
error_traceback=traceback.format_exc()
)
module.exit_json(changed=changed, route_table=get_route_table_info(route_table))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
lookup=dict(default='tag', required=False, choices=['tag', 'id']),
propagating_vgw_ids=dict(default=None, required=False, type='list'),
purge_routes=dict(default=True, type='bool'),
purge_subnets=dict(default=True, type='bool'),
route_table_id=dict(default=None, required=False),
routes=dict(default=[], required=False, type='list'),
state=dict(default='present', choices=['present', 'absent']),
subnets=dict(default=None, required=False, type='list'),
tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
vpc_id=dict(default=None, required=True)
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
lookup = module.params.get('lookup')
route_table_id = module.params.get('route_table_id')
state = module.params.get('state', 'present')
if lookup == 'id' and route_table_id is None:
module.fail_json(msg="You must specify route_table_id if lookup is set to id")
try:
if state == 'present':
result = ensure_route_table_present(connection, module)
elif state == 'absent':
result = ensure_route_table_absent(connection, module)
except AnsibleRouteTableException as e:
if e.error_traceback:
module.fail_json(msg=e.message, exception=e.error_traceback)
module.fail_json(msg=e.message)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
marcioreyes/bgui | bgui/text_input.py | 6 | 15841 | """
This module defines the following constants:
*InputText options*
* BGUI_INPUT_NONE = 0
* BGUI_INPUT_SELECT_ALL = 1
* BGUI_INPUT_DEFAULT = BGUI_INPUT_NONE
"""
from .widget import Widget, WeakMethod, BGUI_DEFAULT, BGUI_CENTERY, \
BGUI_NO_FOCUS, BGUI_MOUSE_ACTIVE, BGUI_MOUSE_CLICK, BGUI_MOUSE_RELEASE, \
BGUI_NO_NORMALIZE
from .key_defs import *
from .label import Label
from .frame import Frame
import time
# InputText options
BGUI_INPUT_NONE = 0
BGUI_INPUT_SELECT_ALL = 1
BGUI_INPUT_DEFAULT = BGUI_INPUT_NONE
class TextInput(Widget):
"""Widget for getting text input"""
theme_section = 'TextInput'
theme_options = {
'TextColor': (1, 1, 1, 1),
'FrameColor': (0, 0, 0, 0),
'BorderSize': 0,
'BorderColor': (0, 0, 0, 0),
'HighlightColor': (0.6, 0.6, 0.6, 0.5),
'InactiveTextColor': (1, 1, 1, 1),
'InactiveFrameColor': (0, 0, 0, 0),
'InactiveBorderSize': 0,
'InactiveBorderColor': (0, 0, 0, 0),
'InactiveHighlightColor': (0.6, 0.6, 0.6, 0.5),
'LabelSubTheme': '',
}
def __init__(self, parent, name=None, text="", prefix="", font=None, pt_size=None, color=None,
aspect=None, size=[1, 1], pos=[0, 0], sub_theme='', input_options=BGUI_INPUT_DEFAULT, options=BGUI_DEFAULT):
"""
:param parent: the widget's parent
:param name: the name of the widget
:param text: the text to display (this can be changed later via the text property)
:param prefix: prefix text displayed before user input, cannot be edited by user (this can be changed later via the prefix property)
:param font: the font to use
:param pt_size: the point size of the text to draw
:param color: color of the font for this widget
:param aspect: constrain the widget size to a specified aspect ratio
:param size: a tuple containing the width and height
:param pos: a tuple containing the x and y position
:param sub_theme: name of a sub_theme defined in the theme file (similar to CSS classes)
:param options: various other options
"""
Widget.__init__(self, parent, name, aspect, size, pos, sub_theme, options)
self.text_prefix = prefix
self.pos = len(text)
self.input_options = input_options
self.colors = {}
#create widgets
self.frame = Frame(self, size=[1, 1], options=BGUI_NO_FOCUS | BGUI_DEFAULT | BGUI_CENTERY)
self.highlight = Frame(self, size=self.frame.size, border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.cursor = Frame(self, size=[1, 1], border=0, options=BGUI_NO_FOCUS | BGUI_CENTERY | BGUI_NO_NORMALIZE)
self.label = Label(self, text=text, font=font, pt_size=pt_size, sub_theme=self.theme['LabelSubTheme'], options=BGUI_NO_FOCUS | BGUI_DEFAULT)
#Color and setting initialization
self.colormode = 0
theme = self.theme
self.colors["text"] = [None, None]
self.colors["text"][0] = theme['InactiveTextColor']
self.colors["text"][1] = theme['TextColor']
self.colors["frame"] = [None, None]
self.colors["frame"][0] = theme['InactiveFrameColor']
self.colors["frame"][1] = theme['FrameColor']
self.colors["border"] = [None, None]
self.colors["border"][0] = theme['InactiveBorderColor']
self.colors["border"][1] = theme['BorderColor']
self.colors["highlight"] = [None, None]
self.colors["highlight"][0] = theme['HighlightColor']
self.colors["highlight"][1] = theme['HighlightColor']
self.border_size = [None, None]
self.border_size[0] = theme['InactiveBorderSize']
self.border_size[1] = theme['BorderSize']
self.swapcolors(0)
#gauge height of the drawn font
fd = self.system.textlib.dimensions(self.label.fontid, "Egj/}|^,")
py = .5 - (fd[1] / self.size[1] / 2)
px = fd[1] / self.size[0] - fd[1] / 1.5 / self.size[0]
self.label.position = [px, py]
self.fd = self.system.textlib.dimensions(self.label.fontid, self.text_prefix)[0] + fd[1] / 3.2
self.frame.size = [1, 1]
self.frame.position = [0, 0]
self.slice = [len(text), len(text)]
self.slice_direction = 0
self.mouse_slice_start = 0
self.mouse_slice_end = 0
#create the char width list
self._update_char_widths()
#initial call to update_selection
self.selection_refresh = 1
self.just_activated = 0
self._active = 0 # internal active state to avoid confusion from parent active chain
#blinking cursor
self.time = time.time()
#double/triple click functionality
self.click_counter = 0
self.single_click_time = 0.0
self.double_click_time = 0.0
# On Enter callback
self._on_enter_key = None
@property
def text(self):
return self.label.text
@text.setter
def text(self, value):
#setter intended for external access, internal changes can just change self.label.text
self.label.text = value
self._update_char_widths()
self.slice = [0, 0]
self.update_selection()
@property
def prefix(self):
return self.text_prefix
@prefix.setter
def prefix(self, value):
# probe the font height with the same reference string as __init__
fd = self.system.textlib.dimensions(self.label.fontid, "Egj/}|^,")
self.fd = self.system.textlib.dimensions(self.label.fontid, value)[0] + fd[1] / 3.2
self.text_prefix = value
@property
def on_enter_key(self):
"""A callback for when the enter key is pressed while the TextInput has focus"""
return self._on_enter_key
@on_enter_key.setter
def on_enter_key(self, value):
self._on_enter_key = WeakMethod(value)
#utility functions
def _update_char_widths(self):
self.char_widths = []
for char in self.text:
self.char_widths.append(self.system.textlib.dimensions(self.label.fontid, char * 20)[0] / 20)
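# Measuring char * 20 and dividing by 20 presumably averages out rounding
# in the width query: e.g. if dimensions() reported 61 units for 'a' * 20,
# the stored per-char width is 3.05 rather than a rounded 3.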
def select_all(self):
"""Change the selection to include all of the text"""
self.slice = [0, len(self.text)]
self.update_selection()
def select_none(self):
"""Change the selection to include none of the text"""
self.slice = [0, 0]
self.update_selection()
#Activation Code
def activate(self):
if self.frozen:
return
self.system.focused_widget = self
self.swapcolors(1)
self.colormode = 1
if self.input_options & BGUI_INPUT_SELECT_ALL:
self.slice = [0, len(self.text)]
self.slice_direction = -1
self.just_activated = 1
self._active = 1
def deactivate(self):
self.system.focused_widget = self.system
self.swapcolors(0)
self.colormode = 0
self.just_activated = 0
self._active = 0
def swapcolors(self, state=0): # 0 inactive 1 active
self.frame.colors = [self.colors["frame"][state]] * 4
self.frame.border = self.border_size[state]
self.frame.border_color = self.colors["border"][state]
self.highlight.colors = [self.colors["highlight"][state]] * 4
self.label.color = self.colors["text"][state]
if state == 0:
self.cursor.colors = [[0.0, 0.0, 0.0, 0.0]] * 4
else:
self.cursor.colors = [self.colors["text"][state]] * 4
#Selection Code
def update_selection(self):
left = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[0]])[0]
right = self.fd + self.system.textlib.dimensions(self.label.fontid, self.text[:self.slice[1]])[0]
self.highlight.position = [left, 1]
self.highlight.size = [right - left, self.frame.size[1] * .8]
if self.slice_direction in [0, -1]:
self.cursor.position = [left, 1]
else:
self.cursor.position = [right, 1]
self.cursor.size = [2, self.frame.size[1] * .8]
def find_mouse_slice(self, pos):
cmc = self.calc_mouse_cursor(pos)
mss = self.mouse_slice_start
self.mouse_slice_end = cmc
if cmc < mss:
self.slice_direction = -1
self.slice = [self.mouse_slice_end, self.mouse_slice_start]
elif cmc > mss:
self.slice_direction = 1
self.slice = [self.mouse_slice_start, self.mouse_slice_end]
else:
self.slice_direction = 0
self.slice = [self.mouse_slice_start, self.mouse_slice_start]
self.selection_refresh = 1
def calc_mouse_cursor(self, pos):
adj_pos = pos[0] - (self.position[0] + self.fd)
find_slice = 0
i = 0
for entry in self.char_widths:
if find_slice + entry > adj_pos:
if abs((find_slice + entry) - adj_pos) >= abs(adj_pos - find_slice):
return i
else:
return i + 1
else:
find_slice += entry
i += 1
self.time = time.time() - 0.501
return i
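# Worked example (widths hypothetical): with char_widths=[4, 6] and
# adj_pos=8, the first char spans [0, 4) and the second [4, 10); since
# abs(10 - 8) < abs(8 - 4), the click is past the midpoint of the second
# char and the function returns 2, placing the cursor after it.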
def _handle_mouse(self, pos, event):
"""Extend function's behaviour by providing focus to unfrozen inactive TextInput,
swapping out colors.
"""
if self.frozen:
return
if event == BGUI_MOUSE_CLICK:
self.mouse_slice_start = self.calc_mouse_cursor(pos)
if not self._active:
self.activate()
if not self.input_options & BGUI_INPUT_SELECT_ALL:
self.find_mouse_slice(pos)
elif event == BGUI_MOUSE_ACTIVE:
if not self.just_activated or self.just_activated and not self.input_options & BGUI_INPUT_SELECT_ALL:
self.find_mouse_slice(pos)
if event == BGUI_MOUSE_RELEASE:
self.selection_refresh = 1
if self.slice[0] == self.slice[1]:
self.slice_direction = 0
self.just_activated = 0
#work out single / double / triple clicks
if self.click_counter == 0:
self.single_click_time = time.time()
self.click_counter = 1
elif self.click_counter == 1:
if time.time() - self.single_click_time < .2:
self.click_counter = 2
self.double_click_time = time.time()
words = self.text.split(" ")
i = 0
for entry in words:
if self.slice[0] < i + len(entry):
self.slice = [i, i + len(entry) + 1]
break
i += len(entry) + 1
else:
self.click_counter = 1
self.single_click_time = time.time()
elif self.click_counter == 2:
if time.time() - self.double_click_time < .2:
self.click_counter = 3
self.slice = [0, len(self.text)]
self.slice_direction = -1
else:
self.click_counter = 1
self.single_click_time = time.time()
elif self.click_counter == 3:
self.single_click_time = time.time()
self.click_counter = 1
self.time = time.time()
Widget._handle_mouse(self, pos, event)
def _handle_key(self, key, is_shifted):
"""Handle any keyboard input"""
if self != self.system.focused_widget:
return
# Try char to int conversion for alphanumeric keys... kinda hacky though
try:
key = ord(key)
except:
pass
if is_shifted:
sh = 0 #used for slicing
else:
sh = 1
slice_len = abs(self.slice[0] - self.slice[1])
x, y = 0, 0
if key == BACKSPACEKEY:
if slice_len != 0:
self.label.text = self.text[:self.slice[0]] + self.text[self.slice[1]:]
self.char_widths = self.char_widths[:self.slice[0]] + self.char_widths[self.slice[1]:]
self.slice = [self.slice[0], self.slice[0]]
#handle char length list
elif self.slice[0] > 0:
self.label.text = self.text[:self.slice[0] - 1] + self.text[self.slice[1]:]
self.slice = [self.slice[0] - 1, self.slice[1] - 1]
elif key == DELKEY:
if slice_len != 0:
self.label.text = self.text[:self.slice[0]] + self.text[self.slice[1]:]
self.char_widths = self.char_widths[:self.slice[0]] + self.char_widths[self.slice[1]:]
self.slice = [self.slice[0], self.slice[0]]
elif self.slice[1] < len(self.text):
self.label.text = self.text[:self.slice[0]] + self.text[self.slice[1] + 1:]
elif key == LEFTARROWKEY:
slice_len = abs(self.slice[0] - self.slice[1])
if (self.slice_direction in [-1, 0]):
if is_shifted and self.slice[0] > 0:
self.slice = [self.slice[0] - 1, self.slice[1]]
self.slice_direction = -1
elif is_shifted:
pass
else:
if slice_len > 0:
self.slice = [self.slice[0], self.slice[0]]
elif self.slice[0] > 0:
self.slice = [self.slice[0] - 1, self.slice[0] - 1]
self.slice_direction = 0
elif self.slice_direction == 1:
if is_shifted:
self.slice = [self.slice[0], self.slice[1] - 1]
else:
self.slice = [self.slice[0], self.slice[0]]
if self.slice[0] - self.slice[1] == 0:
self.slice_direction = 0
elif key == RIGHTARROWKEY:
slice_len = abs(self.slice[0] - self.slice[1])
if (self.slice_direction in [1, 0]):
if is_shifted and self.slice[1] < len(self.text):
self.slice = [self.slice[0], self.slice[1] + 1]
self.slice_direction = 1
elif is_shifted:
pass
else:
if slice_len > 0:
self.slice = [self.slice[1], self.slice[1]]
elif self.slice[1] < len(self.text):
self.slice = [self.slice[1] + 1, self.slice[1] + 1]
self.slice_direction = 0
elif self.slice_direction == -1:
if is_shifted:
self.slice = [self.slice[0] + 1, self.slice[1]]
else:
self.slice = [self.slice[1], self.slice[1]]
if self.slice[0] - self.slice[1] == 0:
self.slice_direction = 0
else:
char = None
if ord(AKEY) <= key <= ord(ZKEY):
if is_shifted: char = chr(key - 32)
else: char = chr(key)
elif ord(ZEROKEY) <= key <= ord(NINEKEY):
if not is_shifted: char = chr(key)
else:
key = chr(key)
if key == ZEROKEY: char = ")"
elif key == ONEKEY: char = "!"
elif key == TWOKEY: char = "@"
elif key == THREEKEY: char = "#"
elif key == FOURKEY: char = "$"
elif key == FIVEKEY: char = "%"
elif key == SIXKEY: char = "^"
elif key == SEVENKEY: char = "&"
elif key == EIGHTKEY: char = "*"
elif key == NINEKEY: char = "("
elif PAD0 <= key <= PAD9:
char = str(key - PAD0)
elif key == PADPERIOD: char = "."
elif key == PADSLASHKEY: char = "/"
elif key == PADASTERKEY: char = "*"
elif key == PADMINUS: char = "-"
elif key == PADPLUSKEY: char = "+"
elif key == SPACEKEY: char = " "
#elif key == TABKEY: char = "\t"
elif key in (ENTERKEY, PADENTER):
if self.on_enter_key:
self.on_enter_key(self)
elif not is_shifted:
if key == ACCENTGRAVEKEY: char = "`"
elif key == MINUSKEY: char = "-"
elif key == EQUALKEY: char = "="
elif key == LEFTBRACKETKEY: char = "["
elif key == RIGHTBRACKETKEY: char = "]"
elif key == BACKSLASHKEY: char = "\\"
elif key == SEMICOLONKEY: char = ";"
elif key == QUOTEKEY: char = "'"
elif key == COMMAKEY: char = ","
elif key == PERIODKEY: char = "."
elif key == SLASHKEY: char = "/"
else:
if key == ACCENTGRAVEKEY: char = "~"
elif key == MINUSKEY: char = "_"
elif key == EQUALKEY: char = "+"
elif key == LEFTBRACKETKEY: char = "{"
elif key == RIGHTBRACKETKEY: char = "}"
elif key == BACKSLASHKEY: char = "|"
elif key == SEMICOLONKEY: char = ":"
elif key == QUOTEKEY: char = '"'
elif key == COMMAKEY: char = "<"
elif key == PERIODKEY: char = ">"
elif key == SLASHKEY: char = "?"
if char:
#need option to limit text to length of box
#need to replace all selected text with new char
#need copy place somewhere
self.label.text = self.text[:self.slice[0]] + char + self.text[self.slice[1]:]
self.char_widths = self.char_widths[:self.slice[0]] + [self.system.textlib.dimensions(self.label.fontid, char * 20)[0] / 20] + self.char_widths[self.slice[1]:]
self.slice = [self.slice[0] + 1, self.slice[0] + 1]
self.slice_direction = 0
#update selection widgets after next draw call
self.selection_refresh = 1
#ensure cursor is not hidden
self.time = time.time()
def _draw(self):
temp = self.text
self.label.text = self.text_prefix + temp
if self == self.system.focused_widget and self._active == 0:
self.activate()
# Now draw the children
Widget._draw(self)
self.label.text = temp
if self.colormode == 1 and self.system.focused_widget != self:
self._active = 0
self.swapcolors(0)
self.virgin = 1
self.colormode = 0
#selection code needs to be called after draw, which is tracked internally to TextInput
if self.selection_refresh == 1:
self.update_selection()
self.selection_refresh = 0
#handle blinking cursor
if self.slice[0] - self.slice[1] == 0 and self._active:
if time.time() - self.time > 1.0:
self.time = time.time()
elif time.time() - self.time > 0.5:
self.cursor.colors = [[0.0, 0.0, 0.0, 0.0]] * 4
else:
self.cursor.colors = [self.colors["text"][1]] * 4
else:
self.cursor.colors = [[0.0, 0.0, 0.0, 0.0]] * 4
| mit |
jrha/aquilon | lib/python2.6/aquilon/worker/commands/del_room.py | 4 | 1168 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq del room`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.del_location import CommandDelLocation
class CommandDelRoom(CommandDelLocation):
required_parameters = ["room"]
def render(self, session, room, **arguments):
return CommandDelLocation.render(self, session=session, name=room,
type='room', **arguments)
| apache-2.0 |
hongzhouye/frankenstein | pyscf_be/pysd_atom.py | 1 | 33331 | """
PySCF-based Schmidt decomposition for Atom-based fragments
"""
import os
import time
import numpy as np
import scipy.linalg as slg
import h5py
import multiprocessing as mtproc
from pyscf import scf
from frankenstein.pyscf_be.fragpart import PART
from frankenstein.pyscf_be.pysd import (schmidt_decomposition_pyrhf, pySD,
get_lao)
from frankenstein.tools.io_utils import prtvar, prthd
from frankenstein import molecule
from frankenstein.pyscf_be.ao2mo_nibath import ao2mo_nibath_fast_h5
from frankenstein.tools.tensor_utils import (get_cano_orth_mat, symm_orth,
dot_gen, gs_orth)
from frankenstein.pyscf_be.fast_ao2mo import ao2mo_be, veff_build_be
class NotFoundError(Exception):
pass
def get_atom_SO(pymf, msites, Clao, S):
return schmidt_decomposition_pyrhf(pymf, msites, Clao, S=S,
skip_TE=True)[:2]
def get_atom_SOs(pymf, part, S):
nf_by_atom = [None] * part.nmotif
Ts_atom = [None] * part.nmotif
for m in range(part.nmotif):
msites = part.get_motifsites(m)
nf_by_atom[m], Ts_atom[m] = get_atom_SO(pymf, msites, part.Clao, S)
return nf_by_atom, Ts_atom
def get_atom_SO_(m):
pass
def get_atom_SOs_mp(nproc, pymf, part, S):
global get_atom_SO_
def get_atom_SO_(m):
msites = part.get_motifsites(m)
nf, T = get_atom_SO(pymf, msites, part.Clao, S)
return nf, T
with mtproc.Pool(nproc) as pool:
res = pool.map(get_atom_SO_, range(part.nmotif))
nf_by_atom = [r[0] for r in res]
Ts_atom = [r[1] for r in res]
return nf_by_atom, Ts_atom
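# NOTE on the pattern above: get_atom_SO_ is declared at module scope and
# rebound via `global` before the pool starts, so that forked workers can
# resolve the worker by name while pymf/part/S are captured beforehand.
# This relies on the fork start method. A minimal sketch of a fork-safe
# variant of the same idiom (names illustrative only) keeps the worker
# itself at module scope and passes state through a module global:
#
# _xs = None
# def _sq(i): # module-level, picklable by name
#     return _xs[i] ** 2
# def pmap_squares(nproc, xs):
#     global _xs
#     _xs = xs # inherited by forked children
#     with mtproc.Pool(nproc) as pool:
#         return pool.map(_sq, range(len(xs)))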
def get_frag_SO(pymf, fsites, Clao, S, Ttil, nfs, nbs, orb_idx_by_atom,
atom_idx_F, thr_bath, verbose=0):
nao = S.shape[0]
rets = schmidt_decomposition_pyrhf(pymf, fsites, Clao, S=S,
skip_TE=True, ret_Cs=True)
nf_F, T_F = rets[:2]
Cs = rets[-1]
Stil = Ttil.T @ S @ Ttil
etil, util = np.linalg.eigh(Stil)
if verbose > 2:
prtvar("Stil eigvals", etil[:5], fmt="{:.3E}")
if np.min(etil) > thr_bath:
if verbose > 2:
prtvar("Orth type", "symm", "{:s}")
Util_F = (util*(1./etil)) @ util.T @ Ttil.T @ \
S @ T_F
C_F = T_F.T @ S @ Cs
else:
if verbose > 2:
prtvar("Orth type", "cano", "{:s}")
nf_tot = sum(nfs)
nb_tot = sum(nbs)
idxf = [i for a in atom_idx_F for i in orb_idx_by_atom[a]]
idxb = [i for i in range(nao) if not i in idxf]
# re-order columns --> group Tf and Tb
Rmat = get_Rmat(nfs, nbs)
# zero out the frag-bath block
Ttil_ = Clao.T @ S @ Ttil @ Rmat
Smat = np.eye(nf_tot+nb_tot)
Smat[:nf_tot,nf_tot:] = -Ttil_[idxf,nf_tot:]
Ttil_ = Ttil_ @ Smat
# cano-orth bath orbs
Tbtil_ = Ttil_[:,nf_tot:]
Tmat0 = get_cano_orth_mat(Tbtil_, thr=thr_bath)
Tbtil_ = Tbtil_ @ Tmat0
Tmat = slg.block_diag(np.eye(nf_tot), Tmat0)
nb_ent = Tmat0.shape[1]
if verbose > 2:
prtvar("rank", "%d/%d" % (nb_ent, nb_tot), "{:s}")
# relate atom bath orbs to frag bath orbs
Tb_ = Clao.T @ S @ T_F[:,nf_tot:]
if nb_ent == nb_tot:
Umat = slg.block_diag(np.eye(nf_tot), Tbtil_.T@Tb_)
else:
Sbtil_b_ = Tbtil_.T @ Tb_
ubtil_b_, lbtil_b_, vbtil_b_T = np.linalg.svd(
Sbtil_b_, full_matrices=True)
if not np.allclose(lbtil_b_, np.ones_like(lbtil_b_)):
raise RuntimeError("Atom bath orbs do not fully span frag bath orbs.\n%s"%(str(lbtil_b_)))
Umat = slg.block_diag(np.eye(nf_tot), ubtil_b_)
T_F[:,nf_tot:] = T_F[:,nf_tot:] @ vbtil_b_T.T
Util_F = Rmat @ Smat @ Tmat @ Umat
C_F = T_F.T @ S @ Cs
return nf_F, T_F, Util_F, C_F
def get_frag_SOs(pymf, part, nf_by_atom, T_by_atom, S, thr_bath, verbose):
atom_idx_by_frag = [frag.indlist for frag in part.fraglist]
orb_idx_by_atom = [part.get_motifsites(m) for m in range(part.nmotif)]
nf_by_frag = [None] * part.nfrag
T_by_frag = [None] * part.nfrag
Util_by_frag = [None] * part.nfrag
C_by_frag = [None] * part.nfrag
for F in range(part.nfrag):
if verbose > 2:
prtvar("Fragment", F, "{:d}")
fsites = part.get_fragsites(F)
atom_idx_F = atom_idx_by_frag[F]
Ttil_F = np.hstack([T_by_atom[a] for a in atom_idx_F])
nfs_F = [nf_by_atom[a] for a in atom_idx_F]
nbs_F = [T_by_atom[a].shape[1]-nf_by_atom[a] for a in atom_idx_F]
nf_by_frag[F], T_by_frag[F], Util_by_frag[F], C_by_frag[F] = \
get_frag_SO(pymf, fsites, part.Clao, S, Ttil_F, nfs_F, nbs_F,
orb_idx_by_atom, atom_idx_F, thr_bath, verbose)
if verbose > 2:
print(flush=True)
return nf_by_frag, T_by_frag, Util_by_frag, C_by_frag
def get_frag_SO_(F):
pass
def get_frag_SOs_mp(nproc, pymf, part, nf_by_atom, T_by_atom, S, thr_bath,
verbose):
with mtproc.Manager() as manager:
nf_by_atom = manager.list(nf_by_atom)
atom_idx_by_frag_shared = manager.list(
[frag.indlist for frag in part.fraglist])
orb_idx_by_atom_shared = manager.list(
[part.get_motifsites(m) for m in range(part.nmotif)])
T_by_atom_shared = manager.list(T_by_atom)
Clao_shared = mtproc.Array("d", part.Clao.ravel())
Clao_shape = part.Clao.shape
global get_frag_SO_
def get_frag_SO_(F):
fsites = part.get_fragsites(F)
atom_idx_F = atom_idx_by_frag_shared[F]
nfs_F = [nf_by_atom[a] for a in atom_idx_F]
Ttil_F = np.hstack([T_by_atom_shared[a] for a in atom_idx_F])
nbs_F = [T_by_atom_shared[a].shape[1]-nf_by_atom[a]
for a in atom_idx_F]
Clao = np.frombuffer(Clao_shared.get_obj()).reshape(*Clao_shape)
nf_F, T_F, Util_F, C_F = get_frag_SO(pymf, fsites, Clao, S,
Ttil_F, nfs_F, nbs_F, orb_idx_by_atom_shared, atom_idx_F,
thr_bath, verbose=0)
print("frag %d/%d done" % (F, part.nfrag), flush=True)
return nf_F, T_F, Util_F, C_F
with mtproc.Pool(nproc) as pool:
res = pool.map(get_frag_SO_, range(part.nfrag))
nf_by_frag = [r[0] for r in res]
T_by_frag = [r[1] for r in res]
Util_by_frag = [r[2] for r in res]
C_by_frag = [r[3] for r in res]
return nf_by_frag, T_by_frag, Util_by_frag, C_by_frag
def get_atfg_SOs(pymf, part, thr_bath, S=None, nproc=1, verbose=0):
if verbose > 2:
hstr = "Get atom & fragment Schmidt orbitals"
prthd(hstr)
if S is None: S = pymf.get_ovlp()
if nproc == 1:
nf_by_atom, T_by_atom = get_atom_SOs(pymf, part, S)
nf_by_frag, T_by_frag, Util_by_frag, C_by_frag = get_frag_SOs(
pymf, part, nf_by_atom, T_by_atom, S, thr_bath, verbose)
else:
nf_by_atom, T_by_atom = get_atom_SOs_mp(nproc, pymf, part, S)
nf_by_frag, T_by_frag, Util_by_frag, C_by_frag = get_frag_SOs_mp(
nproc, pymf, part, nf_by_atom, T_by_atom, S, thr_bath, verbose)
if verbose > 2:
print("-"*(len(hstr)+4)+"\n", flush=True)
return (nf_by_atom, T_by_atom, nf_by_frag, T_by_frag, Util_by_frag,
C_by_frag)
def get_Rmat(nfs, nbs):
nf_all = sum(nfs)
nb_all = sum(nbs)
natom_F = len(nfs)
aa = np.zeros([nf_all+nb_all,nf_all])
row_shift = 0
col_shift = 0
for a in range(natom_F):
aa[row_shift:row_shift+nfs[a],col_shift:col_shift+nfs[a]] = \
np.eye(nfs[a])
row_shift += nfs[a] + nbs[a]
col_shift += nfs[a]
bb = np.zeros([nf_all+nb_all,nb_all])
row_shift = 0
col_shift = 0
for a in range(natom_F):
row_shift += nfs[a]
bb[row_shift:row_shift+nbs[a],col_shift:col_shift+nbs[a]] = \
np.eye(nbs[a])
row_shift += nbs[a]
col_shift += nbs[a]
return np.hstack([aa,bb])
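# Illustration with assumed sizes (not part of the original code): for two
# atoms with nfs = [1, 2] fragment and nbs = [2, 1] bath orbitals, the
# columns arrive atom-by-atom as (f0, b0, b0, f1, f1, b1); right-multiplying
# by the 6x6 permutation matrix returned here regroups them as
# (f0, f1, f1, b0, b0, b1), i.e. all fragment orbitals first:
#
#     Rmat = get_Rmat([1, 2], [2, 1])    # shape (6, 6)
#     Ttil_grouped = Ttil @ Rmat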
def kernel_mos_frag_(F):
pass
class pySDATOM:
"""
"""
def __init__(self, pymf, natom_per_frag, Clao=None, Ccore=None, **kwargs):
"""Constructor of pySDATOM
"""
if not isinstance(pymf, scf.hf.RHF):
raise ValueError("pymf must be a PySCF RHF instance.")
# PySCF caches the ERIs automatically when there is enough memory.
# We do not want that here, since the cache would be copied into every
# multiprocessing worker.
if not pymf._eri is None:
pymf._eri = None
self.pymf = pymf
self.natom_per_frag = natom_per_frag
self.Clao = Clao
self.Ccore = Ccore
# these properties will be set value by "kernel"
self.part = None
self._msds = []
# these properties can be set at initialization
# for partition
self.matchtypelist = [["intra", "1epop"]]
self.bL_dict = None
self.maxent_lao = False
self.democratic = True
self.democratic_tol = None
self.frzcore = True
self.frzcore_mo = False
self.nonovlp = False
self.motif_conn_base_inp = None
self.atomlist_by_motif_inp = None
self.badlist_inp = None
self.goodlist_inp = None
self.regular = False
self.by_dist = False
self.by_bond = False
self.keep_symm = False
self.keep_even = False
self.intermatch_by_score = True
# for Schmidt decomposition
self.cutoff = 1.E-5 # thresh for unentangled bath
self.frag_eye = True # set coeff of frag-frag block to identity?
self.addvirt = True # complete entangled bath from virtual space?
self.balance_fb = True # force Nf == Nb?
self.incore = True # hold integral incore when solving impurity?
self.nibath = False # using non-interacting bath?
self.scr_path = None # path to scratch dir for storing ERIs
self._Vsfile = None # name of file for storing ERIs (type: h5py)
self._Vsinter = None # name of file for storing intermediates
self.Vsnames = [] # dataname for each fragment in the ERI file
self._swpfile = None
# for fast ao2mo
self.fast_ao2mo = False
self.thr_bath = 1.E-6
self.use_schwz_shlpr = False
self.thr_shlpr = 1.E-10
self.thr_shlpr_core = None
self.thr_schwz = 1.E-10
self.max_disk = 50000 # in MB
self.max_mem = 4000 # in MB
self.auxbasis = None # auxiliary basis
self.nproc = 1 # multiprocessing
self.verbose = pymf.verbose
self.__dict__.update(kwargs)
if self.thr_shlpr_core is None:
# a relatively conservative one
self.thr_shlpr_core = min(self.thr_shlpr, 1.E-6)
# initialize
self.initialize()
# build
self.kernel()
# print SD info
self.__str__()
def __str__(self):
if self.verbose > 0:
hstr = "pySDATOM : A class for atom-based Schmidt decomposition"
hlen = len(hstr) + 4
print("=" * hlen, flush=True)
print(" {:s}".format(hstr), flush=True)
print("-" * (hlen), flush=True)
prtvar("cutoff", self.cutoff, "{:.2E}")
prtvar("frag_eye", self.frag_eye, None)
prtvar("addvirt", self.addvirt, None)
prtvar("balance_fb", self.balance_fb, None)
prtvar("incore", self.incore, None)
prtvar("nibath", self.nibath, None)
print("", flush=True)
prtvar("# of frag orbs", self.nf_by_frag, "{:d}")
prtvar("# of bath orbs", self.nb_by_frag, "{:d}")
prtvar("# of env orbs", self.nenv_by_frag, "{:d}")
prtvar("# of Schmidt orbs", self.nsao_by_frag, "{:d}")
prtvar("# of active SOs", self.nact_by_frag, "{:d}")
prtvar("Vsfile", self.Vsfile, "{:s}")
prtvar("Vsnames", self.Vsnames, "{:s}")
prtvar("required disk [GB]", self.disk_space, "{:.3f}")
print("", flush=True)
t_sd = self.t_sd if hasattr(self, "t_sd") \
else np.sum([self[i].t_sd for i in range(self.nfrag)])
t_core = self.t_core if hasattr(self, "t_core") \
else np.sum([self[i].t_core for i in range(self.nfrag)])
t_mos = self.t_mos if hasattr(self, "t_mos") \
else np.sum([self[i].t_mos for i in range(self.nfrag)])
t_xform = self.t_xform if hasattr(self, "t_xform") \
else np.sum([self[i].t_xform for i in range(self.nfrag)])
t_tot = t_sd + t_core + t_mos + t_xform
prtvar("t_wall (sd)", t_sd, "{:.6f}")
prtvar("t_wall (core)", t_core, "{:.6f}")
prtvar("t_wall (mos)", t_mos, "{:.6f}")
prtvar("t_wall (xform)", t_xform, "{:.6f}")
prtvar("t_wall (sd-all)", t_tot, "{:.6f}")
print("-" * (hlen), flush=True)
print("", flush=True)
return ""
# properties
@property
def ncore(self):
return 0 if self.Ccore is None else self.Ccore.shape[1]
@property
def nlao(self):
return self.Clao.shape[1]
@property
def nocc(self):
return self.pymf.mol.nelectron//2 - self.ncore
@property
def nfrag(self):
return self.part.nfrag
@property
def Vsfile(self):
return "/".join([self.scr_path, self._Vsfile])
@property
def swpfile(self):
return "/".join([self.scr_path, self._swpfile])
@property
def nf_by_frag(self):
return [self[i].nf for i in range(self.nfrag)]
@property
def nb_by_frag(self):
return [self[i].nb for i in range(self.nfrag)]
@property
def nenv_by_frag(self):
return [self[i].nenv for i in range(self.nfrag)]
@property
def nsao_by_frag(self):
return (np.asarray(self.nf_by_frag) +
np.asarray(self.nb_by_frag)).tolist()
@property
def nact_by_frag(self):
return [self[i].nact for i in range(self.nfrag)]
@property
def disk_space(self):
return np.sum([(n*(n+1)/2)**2.*8/1024**3. for n in self.nact_by_frag])
@property
def max_mem_percore(self):
return self.max_mem / float(self.nproc)
@property
def max_mem_percore_safe(self):
return self.max_mem_percore * 0.8
def __getitem__(self, key):
return self._msds[key]
def delete_erifile(self):
if os.path.isfile(self.Vsfile):
if self.verbose > 0:
print("""Removing file "{:s}".""".format(self.Vsfile))
os.remove(self.Vsfile)
def initialize(self):
if self.scr_path is None:
try:
from frankenstein.default_params import scr_path
except ImportError:
scr_path = None
if scr_path is None:
scr_path = os.getcwd()
self.scr_path = scr_path
def random_char(n):
import random
import string
return ''.join(random.choice(string.ascii_letters)
for x in range(n))
tmplabel = random_char(5)
if self._Vsfile is None:
self._Vsfile = "__eris{:s}.h5".format(tmplabel)
if self._Vsinter is None:
self._Vsinter = "__eris_inter{:s}.h5".format(tmplabel)
if self._swpfile is None:
self._swpfile = "__swp{:s}.h5".format(tmplabel)
def kernel(self):
# partition LAOs into overlapping fragments of given size
self.part = PART(self.pymf.mol, self.natom_per_frag, self.matchtypelist,
Clao=self.Clao,
Chf=self.pymf.mo_coeff,
maxent_lao=self.maxent_lao,
bL_dict=self.bL_dict,
democratic=self.democratic,
democratic_tol=self.democratic_tol,
nonovlp=self.nonovlp,
frzcore=self.frzcore,
frzcore_mo=self.frzcore_mo,
regular=self.regular,
by_dist=self.by_dist,
by_bond=self.by_bond,
keep_symm=self.keep_symm,
keep_even=self.keep_even,
intermatch_by_score=self.intermatch_by_score,
motif_conn_base_inp=self.motif_conn_base_inp,
atomlist_by_motif_inp=self.atomlist_by_motif_inp,
badlist_inp=self.badlist_inp,
goodlist_inp=self.goodlist_inp)
self.part.__str__(verbose=self.verbose)
# Clao gets reordered in PART. Thus we update it here.
self.Clao = self.part.Clao.copy()
if self.Ccore is None and not self.part.Ccore is None:
self.Ccore = self.part.Ccore.copy()
if self.nlao + self.ncore != self.pymf.mol.nao_nr():
raise ValueError("nlao (%d) + ncore (%d) != nao (%d)" % (
self.nlao, self.ncore, self.pymf.mol.nao_nr()
))
# perform Schmidt decomposition for each fragment
for A in range(self.part.nfrag):
fragsites = self.part.get_fragsites(A)
msd = pySD(self.pymf, fragsites,
Clao=self.Clao,
build=False,
cutoff=self.cutoff,
frag_eye=self.frag_eye,
addvirt=self.addvirt,
balance_fb=self.balance_fb,
incore=self.incore,
nibath=self.nibath,
fraglabel="{:d}".format(A),
Vsfile=self.Vsfile,
swpfile=self.swpfile,
max_mem=self.max_mem_percore_safe,
verbose=self.verbose)
if not self.nibath and self.fast_ao2mo:
msd.Clao = None # free memory
self.Vsnames.append(msd.Vsname)
self._msds.append(msd)
if self.nibath:
self.kernel_nibath()
else:
if self.fast_ao2mo:
self.kernel_ibath_fast()
else:
self.kernel_ibath()
def kernel_nibath(self):
""" For non-interacting bath, there is plenty of room for optimization.
Thus, we write a separate kernel function for it.
"""
assert(self.nibath)
# Schmidt decomposition
for msd in self._msds:
msd.kernel_sd()
# build core and xform ao integrals
start = time.time()
Pbath = self.pymf.make_rdm1()
Gcore = self.pymf.get_veff(dm=Pbath)
for m in self._msds:
m.Gcore = Gcore
m.Gcores = m.T.T @ Gcore @ m.T
m.Ebath = 0. # this is wrong, but fine
end = time.time()
self.t_core = end - start
start = time.time()
orb_idx = [[s for sl in motif.sitelist for s in sl]
for motif in self.part.motiflist]
atom_idx = [frag.indlist for frag in self.part.fraglist]
mol = molecule.MOL(self.pymf.mol.atom, self.pymf.mol.basis)
ao2mo_nibath_fast_h5(mol, self.Clao, atom_idx, orb_idx,
self.Vsfile, self.Vsnames, self.scr_path,
mode=2, # sparse
max_disk=self.max_disk,
max_mem=self.max_mem)
end = time.time()
self.t_xform = end - start
for m in self._msds:
m.remove_dc_Gcore()
m.hs = m.T.T @ m.h @ m.T + m.Gcores
m.kernel_mos()
m.built = True
def kernel_ibath(self):
assert(not self.nibath)
# Schmidt decomposition
for msd in self._msds:
msd.kernel_sd()
# build core and xform ao integrals
start = time.time()
if self.Ccore is None:
Gcore0 = 0.
Ebath0 = 0.
else:
Pcore0 = self.Ccore @ self.Ccore.T * 2.
Gcore0 = self.pymf.get_veff(dm=Pcore0)
Ebath0 = np.einsum("ij,ij->",
Pcore0, 2*self.pymf.get_hcore()+Gcore0) * 0.5
self.Ecore = Ebath0
for m in self._msds:
m.Gcore0 = Gcore0
m.Gcores0 = np.zeros([m.nsao]*2) if isinstance(Gcore0,float) \
else m.T.T @ m.Gcore0 @ m.T
m.Ecore0 = Ebath0
hasbath = np.where([not m.TE is None for m in self._msds])[0]
print("Build pure bath Hamiltonian for the following fragments...\n", flush=True)
print(hasbath, flush=True)
nhasbath = len(hasbath)
if nhasbath == 0:
for m in self._msds:
m.Gcore = 0.
m.Gcores = np.zeros([m.nsao]*2)
m.Ebath = 0.
else:
nao = self.pymf.mol.nao_nr()
Pbath_list = np.empty([nhasbath,nao,nao])
for A,im in enumerate(hasbath):
Pbath_list[A] = self._msds[im].PE*2.
Gcore_list = self.pymf.get_veff(dm=Pbath_list)
A = 0
for im,m in enumerate(self._msds):
if im in hasbath:
m.Gcore = Gcore_list[A]
m.Gcores = m.T.T @ m.Gcore @ m.T
m.Ebath = 0.5 * np.einsum("ij,ji",
2*m.h+m.Gcore, Pbath_list[A])
A += 1
else:
m.Gcore = 0.
m.Gcores = np.zeros([m.nsao]*2)
m.Ebath = 0.
end = time.time()
self.t_core = end - start
start = time.time()
for im,m in enumerate(self._msds):
print("ERI xform for fragment %d..."%im, flush=True)
m.xform_hV()
end = time.time()
self.t_xform = end - start
# determine mo and set built to True
for im,m in enumerate(self._msds):
print("HF for fragment %d..."%im, flush=True)
m.kernel_mos()
m.built = True
def kernel_ibath_fast(self):
assert(not self.nibath)
from frankenstein.tools.perf_utils import current_memory
S = self.pymf.get_ovlp()
h = self.pymf.get_hcore()
# Obtain atom and fragment SOs
# @@HY: note that the pure env orbitals TEs_frag are not required at all,
# so we do not bother computing them, which saves a huge amount of memory
# for large molecules!
print(" Getting atom & fragment Schmidt orbitals...\n", flush=True)
start = time.time()
# check if exists
try:
nf_by_atom, Ts_atom, nf_by_frag, Ts_frag, Utils_frag, Cs_frag = \
self.read_SOs()
print(" Valid atom & fragment SOs are found. Skip SD.\n",
flush=True)
except NotFoundError:
nf_by_atom, Ts_atom, nf_by_frag, Ts_frag, Utils_frag, Cs_frag = \
get_atfg_SOs(self.pymf, self.part, self.thr_bath, S=S,
nproc=self.nproc, verbose=self.verbose)
print(" Saving atom & fragment SOs to file...\n", flush=True)
self.save_SOs(nf_by_atom, Ts_atom, nf_by_frag, Ts_frag,
Utils_frag, Cs_frag)
end = time.time()
self.t_sd = end - start
# fake Schmidt decomposition for each pySD object
atom_idx_by_frag = [frag.indlist for frag in self.part.fraglist]
for F in range(self.nfrag):
m = self._msds[F]
m.nf = nf_by_frag[F]
m._nsao = Ts_frag[F].shape[1]
m._nenv = max(0, self.nocc-m.nf)
m.ssv = None
m.mo_coeff = None
m.mo_coeffs = Cs_frag[F]
m.nact = Utils_frag[F].shape[1]
m._nenv += m._nsao - m.nact
prtvar("Before ERI xform", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
# xform ERI
start = time.time()
# check if exists
if self.check_Vsfile():
if self.verbose > 0:
print(" Valid ERI file is found. Skip AO2MO xform.\n",
flush=True)
else:
feri = self._Vsfile
finter = self._Vsinter
scr_path = self.scr_path
pymol = self.pymf.mol
mol = molecule.MOL_fast(pymol.atom, pymol.basis)
part = self.part
fragnames = self.Vsnames
use_schwz_shlpr = self.use_schwz_shlpr
thr_shlpr = self.thr_shlpr
thr_schwz = self.thr_schwz
max_mem = self.max_mem
max_disk = self.max_disk
auxbasis = self.auxbasis
Ts_atom_ = self.reorder_ao(Ts_atom)
ao2mo_be(
feri, finter, scr_path,
mol, part,
Ts_atom_, Utils_frag, fragnames,
use_schwz_shlpr, thr_shlpr, thr_schwz,
max_mem, max_disk,
auxbasis)
end = time.time()
self.t_xform = end - start
prtvar("After ERI xform", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
# build core
start = time.time()
# check if any fragments have pure env
envlist = [im for im,m in enumerate(self._msds) if m.nenv > 0]
if len(envlist) == 0:
flag_env = False
else:
mo_act = self.pymf.mo_coeff[:,self.ncore:self.ncore+self.nocc]
Pact = mo_act @ mo_act.T
flag_env = True
# check if frozen core MOs
if self.Ccore is None:
flag_core = False
else:
Pcore0 = self.Ccore @ self.Ccore.T
flag_core = True
prtvar("Before building Gs", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
# calculate core + env Fock matrix (this saves some CPU time...)
Gact = Gcore0 = None
has_env, has_core = self.check_Gs()
if (has_env == flag_env) and (has_core == flag_core):
print(" Valid core file is found. Skip core build.\n",
flush=True)
with h5py.File(self.Vsfile, "r") as f:
if flag_env:
Gact = f["Gact"][()]
if flag_core:
Gcore0 = f["Gcore0"][()]
else:
print(" Computing contributions from frozen core and pure env...\n", flush=True)
if not "mol" in locals():
mol = molecule.MOL_fast(self.pymf.mol.atom, self.pymf.mol.basis)
mol.S = S
params_ = (mol, self.use_schwz_shlpr, self.thr_shlpr_core,
self.thr_schwz,)
if flag_env and flag_core:
Gact, Gcore0 = self.pymf.get_veff(
dm=np.asarray([Pact,Pcore0])*2.)
elif flag_env and not flag_core:
Gact = self.pymf.get_veff(dm=Pact*2.)
elif not flag_env and flag_core:
Gcore0 = self.pymf.get_veff(dm=Pcore0*2.)
else:
pass
self.pymf._eri = None
# write to file for restart
self.save_Gs(Gact, Gcore0)
prtvar("After building Gs", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
# core
if flag_core:
Ecore0 = np.einsum("ij,ij->", Pcore0, 2*h+Gcore0)
else:
Gcore0 = None
Ecore0 = 0.
self.Ecore = Ecore0
for im,m in enumerate(self._msds):
T = Ts_frag[im]
m.Gcores0 = np.zeros([m.nsao]*2) if Gcore0 is None \
else T.T @ Gcore0 @ T
m.Ecore0 = Ecore0
# pure env
for im,m in enumerate(self._msds):
if im in envlist:
T = Ts_frag[im]
m.Gcores = T.T @ Gact @ T
Tact = T[:,:m.nact]
Csf = Tact.T @ S @ mo_act
Psf = Csf @ Csf.T
Vsf = m.restore_eri(1, pad0=False)
Gsf = 2.*np.einsum("pqrs,rs->pq", Vsf, Psf) - \
np.einsum("psrq,sr->pq", Vsf, Psf)
m.Gcores[:m.nact,:m.nact] -= Gsf
else:
m.Gcores = np.zeros([m.nsao]*2)
m.Ebath = 0.
prtvar("After building Gss", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
# finish the xform of 1e
for im,m in enumerate(self._msds):
T = Ts_frag[im]
m.hs = T.T @ h @ T + m.Gcores + m.Gcores0
prtvar("After building hs", current_memory(), "{:.2f}", lspace=0)
print(flush=True)
end = time.time()
self.t_core = end - start
# determine mo and set built to True
print(" Solving HF for all fragments...\n", flush=True)
start = time.time()
if self.nproc == 1:
for im,m in enumerate(self._msds):
m.kernel_mos(S=S)
m.built = True
print("frag %d/%d done" % (im, self.nfrag), flush=True)
else:
global kernel_mos_frag_
from frankenstein.pyscf_be.pysd import get_pymfs
def kernel_mos_frag_(F):
m = self._msds[F]
pymfs = get_pymfs(m)
pymfs._eri = None
mo_coeffs = pymfs.mo_coeff.copy()
mo_energy = pymfs.mo_energy.copy()
Ps_rhf = (mo_coeffs*m.mo_occ) @ mo_coeffs.T
Es_rhf = pymfs.e_tot - pymfs.energy_nuc()
pymfs = None
print("frag %d/%d done" % (F, self.nfrag), flush=True)
return mo_coeffs, mo_energy, Ps_rhf, Es_rhf
with mtproc.Pool(self.nproc) as pool:
res = pool.map(kernel_mos_frag_, range(self.part.nfrag))
for im,m in enumerate(self._msds):
m.mo_coeffs = res[im][0]
m.mo_energy = res[im][1]
m.Ps_rhf = res[im][2]
m.Es_rhf = res[im][3]
m.built = True
end = time.time()
self.t_mos = end - start
print(flush=True)
def check_Vsfile(self):
if not os.path.isfile(self.Vsfile):
return False
valid_Vsfile = True
with h5py.File(self.Vsfile, "r") as f:
for F, Vsname in enumerate(self.Vsnames):
if not Vsname in f:
valid_Vsfile = False
break
if f[Vsname].size != self._msds[F].Vs_size_expt:
valid_Vsfile = False
break
return valid_Vsfile
def check_Gs(self):
valid_Gs = [False] * 2
if not os.path.isfile(self.Vsfile):
return valid_Gs
nao = self.pymf.mol.nao_nr()
with h5py.File(self.Vsfile, "r") as f:
if "Gact" in f:
if f["Gact"].size == nao**2:
valid_Gs[0] = True
if "Gcore0" in f:
if f["Gcore0"].size == nao**2:
valid_Gs[1] = True
return valid_Gs
def save_Gs(self, Gact, Gcore0):
with h5py.File(self.Vsfile, "a") as f:
if not Gact is None:
if "Gact" in f:
del f["Gact"]
f.create_dataset("Gact", data=Gact)
if not Gcore0 is None:
if "Gcore0" in f:
del f["Gcore0"]
f.create_dataset("Gcore0", data=Gcore0)
def read_SOs(self):
try:
with h5py.File(self.Vsfile, "r") as f:
nf_by_atom = f["SOs/atom/nf"][()].tolist()
natom = len(nf_by_atom)
Ts_atom = [f["SOs/atom/T%d"%a][()] for a in range(natom)]
nf_by_frag = f["SOs/frag/nf"][()].tolist()
nfrag = len(nf_by_frag)
Ts_frag = [f["SOs/frag/T%d"%F][()] for F in range(nfrag)]
Utils_frag = [f["SOs/frag/U%d"%F][()] for F in range(nfrag)]
Cs_frag = [f["SOs/frag/C%d"%F][()] for F in range(nfrag)]
return (nf_by_atom, Ts_atom, nf_by_frag, Ts_frag, Utils_frag,
Cs_frag)
except:
raise NotFoundError
def save_SOs(self, nf_by_atom, Ts_atom, nf_by_frag, Ts_frag, Utils_frag,
Cs_frag):
with h5py.File(self.Vsfile, "a") as f:
if "SOs" in f:
pass
g = f.create_group("SOs")
ga = g.create_group("atom")
ga.create_dataset("nf", data=nf_by_atom)
for a in range(len(Ts_atom)):
ga.create_dataset("T%d"%a, data=Ts_atom[a])
gf = g.create_group("frag")
gf.create_dataset("nf", data=nf_by_frag)
for F in range(len(Ts_frag)):
gf.create_dataset("T%d"%F, data=Ts_frag[F])
gf.create_dataset("U%d"%F, data=Utils_frag[F])
gf.create_dataset("C%d"%F, data=Cs_frag[F])
def reorder_ao(self, Ts_atom):
pymol = self.pymf.mol
Zs = pymol.atom_charges()
idx = np.where(np.logical_and(Zs>10.5,Zs<18.5))[0]
if len(idx) == 0:
return Ts_atom
from frankenstein.tools.pyscf_utils import reorder_pyscf_ao
U = reorder_pyscf_ao(pymol, None, "U")
return [U.T @ T for T in Ts_atom]
if __name__ == "__main__":
from frankenstein.tools.pyscf_utils import get_pymol
from pyscf import scf
# geom = "../tests/geom/c2h4.zmat"
geom = "../../../../geom/benchmark/polyacene/geom/2.xyz"
basis = "sto-3g"
pymol = get_pymol(geom, basis)
pymf = scf.RHF(pymol)
pymf.kernel()
msdatm = pySDATOM(pymf, 3)
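# The second positional argument is natom_per_frag: each overlapping fragment
# is built from 3 atoms here. Everything else (thr_bath, nproc, fast_ao2mo,
# ...) keeps the defaults assigned in __init__, and the constructor runs
# initialize() and kernel() itself, so msdatm is fully built on return.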
| bsd-3-clause |
vipmike007/tp-qemu | qemu/tests/rv_video.py | 9 | 3710 | """
rv_video.py - Starts video player
Video is played in a loop, usually kill_app
test should be called later to close totem.
Requires: binaries Xorg, totem, gnome-session
Test starts video player
"""
import logging
import os
import time
import re
from autotest.client.shared import error
from virttest import utils_misc
from virttest import remote
def launch_totem(guest_session, params):
"""
Launch Totem player
:param guest_session: session to the guest VM
:param params: Dictionary with the test parameters.
"""
totem_version = guest_session.cmd_output("totem --version")
logging.info("Totem version: %s" % totem_version)
# repeat parameters for totem
logging.info("Set up video repeat to '%s' to the Totem.",
params.get("repeat_video"))
# Check for RHEL6 or RHEL7
# RHEL7 uses gsettings and RHEL6 uses gconftool-2
try:
release = guest_session.cmd("cat /etc/redhat-release")
logging.info("Redhat Release: %s" % release)
except:
raise error.TestNAError("Test is only currently supported on "
"RHEL and Fedora operating systems")
cmd = "export DISPLAY=:0.0"
guest_session.cmd(cmd)
if "release 6." in release:
cmd = "gconftool-2 --set /apps/totem/repeat -t bool"
totem_params = "--display=:0.0 --play"
else:
cmd = "gsettings set org.gnome.totem repeat"
totem_params = ""
if params.get("repeat_video", "no") == "yes":
cmd += " true"
else:
cmd += " false"
guest_session.cmd(cmd)
cmd = "export DISPLAY=:0.0"
guest_session.cmd(cmd)
# Fullscreen parameters for totem
if params.get("fullscreen", "no") == "yes":
fullscreen = " --fullscreen "
else:
fullscreen = ""
cmd = "nohup totem %s %s --display=:0.0 &> /dev/null &" \
% (fullscreen, params.get("destination_video_file_path"))
guest_session.cmd(cmd)
time.sleep(10)
cmd = "pgrep totem"
pid = guest_session.cmd_output(cmd)
if pid:
logging.info("PID: %s" % pid)
if not re.search("^(\d+)", pid):
logging.info("Could not find Totem running! Try starting again!")
# Sometimes totem doesn't start properly; try again
cmd = "nohup totem %s %s --display=:0.0 &> /dev/null &" \
% (fullscreen, params.get("destination_video_file_path"))
guest_session.cmd(cmd)
cmd = "pgrep totem"
pid = guest_session.cmd_output(cmd)
logging.info("PID: %s" % pid)
def deploy_video_file(test, vm_obj, params):
"""
Deploy video file into destination on vm
:param vm_obj - vm object
:param params: Dictionary with the test parameters.
"""
source_video_file = params.get("source_video_file")
video_dir = os.path.join("deps", source_video_file)
video_path = utils_misc.get_path(test.virtdir, video_dir)
remote.copy_files_to(vm_obj.get_address(), 'scp',
params.get("username"),
params.get("password"),
params.get("shell_port"),
video_path,
params.get("destination_video_file_path"))
def run(test, params, env):
"""
Test of video through spice
:param test: KVM test object.
:param params: Dictionary with the test parameters.
:param env: Dictionary with test environment.
"""
guest_vm = env.get_vm(params["guest_vm"])
guest_vm.verify_alive()
guest_session = guest_vm.wait_for_login(
timeout=int(params.get("login_timeout", 360)))
deploy_video_file(test, guest_vm, params)
launch_totem(guest_session, params)
guest_session.close()
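# Sketch of the cfg parameters this test consumes (values are illustrative,
# not taken from any shipped configuration):
#
#     guest_vm = guest
#     login_timeout = 360
#     source_video_file = video_sample.ogv
#     destination_video_file_path = /tmp/video_sample.ogv
#     repeat_video = yes
#     fullscreen = no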
| gpl-2.0 |
yukchou/linux-sunxi-1 | modules/wifi/ar6003/AR6kSDK.build_3.1_RC.514/host/tools/athbtfilter/bluez/testscripts/bthstereoheadset.py | 185 | 1575 | #!/usr/bin/python
import dbus
import os
import sys
def printusage():
print 'bthstereoheadset.py <options>'
print ' create - create a stereo headset'
print ' start - connect sink'
print ' stop - disconnect sink'
print ' delete - remove the default device'
return
headsetAddress = os.getenv("BTSTEREO_HEADSET")
print 'BT Stereo Headset Is : => %s' % headsetAddress
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object('org.bluez', '/org/bluez'), 'org.bluez.Manager')
bus_id = manager.ActivateService('audio')
audio = dbus.Interface(bus.get_object(bus_id, '/org/bluez/audio'), 'org.bluez.audio.Manager')
if len(sys.argv) == 1 :
printusage()
elif len(sys.argv) > 1 and sys.argv[1] == 'create' :
path = audio.CreateDevice(headsetAddress)
audio.ChangeDefaultDevice(path)
sink = dbus.Interface (bus.get_object(bus_id, path), 'org.bluez.audio.Sink')
sink.Connect()
print 'Stereo Headset Connect Done'
elif len(sys.argv) > 1 and sys.argv[1] == 'start' :
path = audio.DefaultDevice()
sink = dbus.Interface (bus.get_object(bus_id, path), 'org.bluez.audio.Sink')
if not sink.IsConnected() :
sink.Connect()
print 'Audio connected'
elif len(sys.argv) > 1 and sys.argv[1] == 'stop' :
path = audio.DefaultDevice()
sink = dbus.Interface (bus.get_object(bus_id, path), 'org.bluez.audio.Sink')
try :
sink.Disconnect()
except dbus.exceptions.DBusException:
print 'Disconnect Failed'
print 'Stereo headset disconnect complete'
elif len(sys.argv) > 1 and sys.argv[1] == 'delete' :
path = audio.DefaultDevice()
print 'Deleting: %s ' % path
audio.RemoveDevice(path)
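# Example session (assumes the headset address is exported beforehand):
#
#     export BTSTEREO_HEADSET=00:11:22:33:44:55
#     python bthstereoheadset.py create   # register device and connect sink
#     python bthstereoheadset.py start    # reconnect audio if needed
#     python bthstereoheadset.py stop     # disconnect the sink
#     python bthstereoheadset.py delete   # remove the default device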
| gpl-2.0 |
timduru/platform-external-chromium_org | third_party/bintrees/bintrees/avltree.py | 156 | 10803 | #!/usr/bin/env python
#coding:utf-8
# Author: mozman (python version)
# Purpose: avl tree module (Julienne Walker's unbounded non-recursive algorithm)
# source: http://eternallyconfuzzled.com/tuts/datastructures/jsw_tut_avl.aspx
# Created: 01.05.2010
# Copyright (c) 2010-2013 by Manfred Moitzi
# License: MIT License
# Conclusion of Julienne Walker
# AVL trees are about as close to optimal as balanced binary search trees can
# get without eating up resources. You can rest assured that the O(log N)
# performance of binary search trees is guaranteed with AVL trees, but the extra
# bookkeeping required to maintain an AVL tree can be prohibitive, especially
# if deletions are common. Insertion into an AVL tree only requires one single
# or double rotation, but deletion could perform up to O(log N) rotations, as
# in the example of a worst case AVL (ie. Fibonacci) tree. However, those cases
# are rare, and still very fast.
# AVL trees are best used when degenerate sequences are common, and there is
# little or no locality of reference in nodes. That basically means that
# searches are fairly random. If degenerate sequences are not common, but still
# possible, and searches are random then a less rigid balanced tree such as red
# black trees or Andersson trees are a better solution. If there is a significant
# amount of locality to searches, such as a small cluster of commonly searched
# items, a splay tree is theoretically better than all of the balanced trees
# because of its move-to-front design.
from __future__ import absolute_import
from .treemixin import TreeMixin
from array import array
__all__ = ['AVLTree']
MAXSTACK = 32
class Node(object):
""" Internal object, represents a treenode """
__slots__ = ['left', 'right', 'balance', 'key', 'value']
def __init__(self, key=None, value=None):
self.left = None
self.right = None
self.key = key
self.value = value
self.balance = 0
def __getitem__(self, key):
""" x.__getitem__(key) <==> x[key], where key is 0 (left) or 1 (right) """
return self.left if key == 0 else self.right
def __setitem__(self, key, value):
""" x.__setitem__(key, value) <==> x[key]=value, where key is 0 (left) or 1 (right) """
if key == 0:
self.left = value
else:
self.right = value
def free(self):
"""Remove all references."""
self.left = None
self.right = None
self.key = None
self.value = None
def height(node):
return node.balance if node is not None else -1
def jsw_single(root, direction):
other_side = 1 - direction
save = root[other_side]
root[other_side] = save[direction]
save[direction] = root
rlh = height(root.left)
rrh = height(root.right)
slh = height(save[other_side])
root.balance = max(rlh, rrh) + 1
save.balance = max(slh, root.balance) + 1
return save
def jsw_double(root, direction):
other_side = 1 - direction
root[other_side] = jsw_single(root[other_side], other_side)
return jsw_single(root, direction)
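# Rotation notes: with direction 0 meaning "left", jsw_single(root, 0)
# performs a left rotation (the right child becomes the new subtree root)
# and jsw_single(root, 1) its mirror image; jsw_double first rotates the
# heavy child in the opposite sense, then rotates root. Both return the new
# subtree root and refresh the subtree heights cached in .balance.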
class AVLTree(TreeMixin):
"""
AVLTree implements a balanced binary tree with a dict-like interface.
see: http://en.wikipedia.org/wiki/AVL_tree
In computer science, an AVL tree is a self-balancing binary search tree, and
it is the first such data structure to be invented. In an AVL tree, the
heights of the two child subtrees of any node differ by at most one;
therefore, it is also said to be height-balanced. Lookup, insertion, and
deletion all take O(log n) time in both the average and worst cases, where n
is the number of nodes in the tree prior to the operation. Insertions and
deletions may require the tree to be rebalanced by one or more tree rotations.
The AVL tree is named after its two inventors, G.M. Adelson-Velskii and E.M.
Landis, who published it in their 1962 paper "An algorithm for the
organization of information."
AVLTree() -> new empty tree.
AVLTree(mapping) -> new tree initialized from a mapping
AVLTree(seq) -> new tree initialized from seq [(k1, v1), (k2, v2), ... (kn, vn)]
see also TreeMixin() class.
"""
def __init__(self, items=None):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
self._root = None
self._count = 0
if items is not None:
self.update(items)
def clear(self):
""" T.clear() -> None. Remove all items from T. """
def _clear(node):
if node is not None:
_clear(node.left)
_clear(node.right)
node.free()
_clear(self._root)
self._count = 0
self._root = None
@property
def count(self):
""" count of items """
return self._count
@property
def root(self):
""" root node of T """
return self._root
def _new_node(self, key, value):
""" Create a new treenode """
self._count += 1
return Node(key, value)
def insert(self, key, value):
""" T.insert(key, value) <==> T[key] = value, insert key, value into Tree """
if self._root is None:
self._root = self._new_node(key, value)
else:
node_stack = [] # node stack
dir_stack = array('I') # direction stack
done = False
top = 0
node = self._root
# search for an empty link, save path
while True:
if key == node.key: # update existing item
node.value = value
return
direction = 1 if key > node.key else 0
dir_stack.append(direction)
node_stack.append(node)
if node[direction] is None:
break
node = node[direction]
# Insert a new node at the bottom of the tree
node[direction] = self._new_node(key, value)
# Walk back up the search path
top = len(node_stack) - 1
while (top >= 0) and not done:
direction = dir_stack[top]
other_side = 1 - direction
topnode = node_stack[top]
left_height = height(topnode[direction])
right_height = height(topnode[other_side])
# Terminate or rebalance as necessary
if left_height - right_height == 0:
done = True
if left_height - right_height >= 2:
a = topnode[direction][direction]
b = topnode[direction][other_side]
if height(a) >= height(b):
node_stack[top] = jsw_single(topnode, other_side)
else:
node_stack[top] = jsw_double(topnode, other_side)
# Fix parent
if top != 0:
node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
else:
self._root = node_stack[0]
done = True
# Update balance factors
topnode = node_stack[top]
left_height = height(topnode[direction])
right_height = height(topnode[other_side])
topnode.balance = max(left_height, right_height) + 1
top -= 1
def remove(self, key):
""" T.remove(key) <==> del T[key], remove item <key> from tree """
if self._root is None:
raise KeyError(str(key))
else:
node_stack = [None] * MAXSTACK # node stack
dir_stack = array('I', [0] * MAXSTACK) # direction stack
top = 0
node = self._root
while True:
# Terminate if not found
if node is None:
raise KeyError(str(key))
elif node.key == key:
break
# Push direction and node onto stack
direction = 1 if key > node.key else 0
dir_stack[top] = direction
node_stack[top] = node
node = node[direction]
top += 1
# Remove the node
if (node.left is None) or (node.right is None):
# Which child is not null?
direction = 1 if node.left is None else 0
# Fix parent
if top != 0:
node_stack[top - 1][dir_stack[top - 1]] = node[direction]
else:
self._root = node[direction]
node.free()
self._count -= 1
else:
# Find the inorder successor
heir = node.right
# Save the path
dir_stack[top] = 1
node_stack[top] = node
top += 1
while heir.left is not None:
dir_stack[top] = 0
node_stack[top] = heir
top += 1
heir = heir.left
# Swap data
node.key = heir.key
node.value = heir.value
# Unlink successor and fix parent
xdir = 1 if node_stack[top - 1].key == node.key else 0
node_stack[top - 1][xdir] = heir.right
heir.free()
self._count -= 1
# Walk back up the search path
top -= 1
while top >= 0:
direction = dir_stack[top]
other_side = 1 - direction
topnode = node_stack[top]
left_height = height(topnode[direction])
right_height = height(topnode[other_side])
b_max = max(left_height, right_height)
# Update balance factors
topnode.balance = b_max + 1
# Terminate or rebalance as necessary
if (left_height - right_height) == -1:
break
if (left_height - right_height) <= -2:
a = topnode[other_side][direction]
b = topnode[other_side][other_side]
if height(a) <= height(b):
node_stack[top] = jsw_single(topnode, direction)
else:
node_stack[top] = jsw_double(topnode, direction)
# Fix parent
if top != 0:
node_stack[top - 1][dir_stack[top - 1]] = node_stack[top]
else:
self._root = node_stack[0]
top -= 1
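# Minimal usage sketch (TreeMixin supplies the dict-like surface):
#
#     tree = AVLTree([(2, 'b'), (1, 'a'), (3, 'c')])
#     tree.insert(4, 'd')        # tree[4] = 'd' works as well
#     tree.remove(1)             # del tree[1] works as well
#     assert tree.count == 3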
| bsd-3-clause |
nidy86/rpy | Raspbian/sensors/ObservedDistanceSensor.py | 1 | 2113 | # Import libraries
import sys
sys.path += ['../util']
from Observer import Observer, Observable
import RPi.GPIO as GPIO
import time
class ObservedDistanceSensor:
def measure(self):
# set trigger to HIGH
GPIO.output(self.GPIO_TRIGGER, True)
# set trigger back to LOW after 0.01 ms
time.sleep(0.00001)
GPIO.output(self.GPIO_TRIGGER, False)
StartZeit = time.time()
StopZeit = time.time()
# store the start time
while GPIO.input(self.GPIO_ECHO) == 0:
StartZeit = time.time()
# store the arrival time
while GPIO.input(self.GPIO_ECHO) == 1:
StopZeit = time.time()
# time difference between start and arrival
TimeElapsed = StopZeit - StartZeit
# multiply by the speed of sound (34300 cm/s)
# and divide by 2, since the pulse travels there and back
distance = (TimeElapsed * 34300) / 2
return distance
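# Worked example: an echo delay of 2.9 ms gives
# (0.0029 s * 34300 cm/s) / 2 ≈ 49.7 cm.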
def close(self):
self.running = 0
GPIO.cleanup()
def __init__(self,name,trigger,echo):
self.name = name
self.GPIO_TRIGGER = trigger
self.GPIO_ECHO = echo
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(self.GPIO_ECHO, GPIO.IN)
self.running = 0
self.openObserver = ObservedDistanceSensor.OpenObserver(self)
self.closeObserver = ObservedDistanceSensor.CloseObserver(self)
# An inner class for observing openings:
class OpenObserver(Observer):
def __init__(self, outer):
self.outer = outer
self.outer.running = 1
def update(self, observable, arg):
        try:
            while self.outer.running == 1:
                dist = self.outer.measure()
                print (self.outer.name + ": measured distance = %.1f cm" % dist)
                time.sleep(1)
        except KeyboardInterrupt:
            # assumed handler: the original except clause is missing,
            # which left the try block syntactically incomplete;
            # stop measuring cleanly on interrupt
            self.outer.close()
# Another inner class for closings:
class CloseObserver(Observer):
def __init__(self, outer):
self.outer = outer
def update(self, observable, arg):
print("Abstandsmessung '"+self.outer.name + "' wird abgeschaltet.")
self.outer.close()
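# Usage sketch (hypothetical wiring; the Observer/Observable method names
# are assumed from the conventional interface in ../util/Observer):
#
#     sensor = ObservedDistanceSensor("front", trigger=18, echo=24)
#     door = Observable()
#     door.addObserver(sensor.openObserver)    # update() starts the loop
#     door.notifyObservers()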
| gpl-3.0 |
Vajnar/linux-stable-hx4700 | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
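# Worked example: for the "blocked" test opcode, top == ["M", "eq", 2] and
# arg selects the digit, so analyse("4321", ["M", "eq", 2], "1") computes
# 4321 / 10**1 % 10 == 2 and returns 1 (match).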
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
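# Test scripts are colon-separated "cmd: opcode: threadid: data" records;
# "C" issues a command, "W" waits for a status, "T" tests it once.
# Illustrative fragment (not a shipped test case):
#
#     C: schedfifo: 0: 80
#     C: locknowait: 0: 0
#     W: locked: 0: 0
#     C: unlock: 0: 0
#     W: unlocked: 0: 0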
| gpl-2.0 |
alsrgv/tensorflow | tensorflow/contrib/slim/python/slim/data/parallel_reader.py | 2 | 11673 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements a parallel data reader with queues and optional shuffling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import gfile
from tensorflow.python.summary import summary
from tensorflow.python.training import input as tf_input
from tensorflow.python.training import queue_runner
class ParallelReader(io_ops.ReaderBase):
"""Reader class that uses multiple readers in parallel to improve speed.
See ReaderBase for supported methods.
"""
def __init__(self,
reader_class,
common_queue,
num_readers=4,
reader_kwargs=None):
"""ParallelReader creates num_readers instances of the reader_class.
Each instance is created by calling the `reader_class` function passing
the arguments specified in `reader_kwargs` as in:
reader_class(**read_kwargs)
When you read from a ParallelReader, with its `read()` method,
you just dequeue examples from the `common_queue`.
The readers will read different files in parallel, asynchronously enqueueing
their output into `common_queue`. The `common_queue.dtypes` must be
[tf.string, tf.string]
Because each reader can read from a different file, the examples in the
`common_queue` could be from different files. Due to the asynchronous
reading there is no guarantee that all the readers will read the same
number of examples.
If the `common_queue` is a shuffling queue, then the examples are shuffled.
Usage:
common_queue = tf.queue.RandomShuffleQueue(
capacity=256,
min_after_dequeue=128,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(tf.compat.v1.TFRecordReader, common_queue)
common_queue = tf.queue.FIFOQueue(
capacity=256,
dtypes=[tf.string, tf.string])
p_reader = ParallelReader(readers, common_queue, num_readers=2)
Args:
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to
[tf.string, tf.string]. Must be one of the data_flow_ops.Queues
instances, ex. `tf.queue.FIFOQueue()`, `tf.queue.RandomShuffleQueue()`,
...
num_readers: an integer, number of instances of reader_class to create.
reader_kwargs: an optional dict of kwargs to create the readers.
Raises:
TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].
"""
if len(common_queue.dtypes) != 2:
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
for dtype in common_queue.dtypes:
if not dtype.is_compatible_with(tf_dtypes.string):
raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')
reader_kwargs = reader_kwargs or {}
self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]
self._common_queue = common_queue
@property
def num_readers(self):
return len(self._readers)
@property
def common_queue(self):
return self._common_queue
def read(self, queue, name=None):
"""Returns the next record (key, value pair) produced by the reader.
The multiple reader instances are all configured to `read()` from the
filenames listed in `queue` and enqueue their output into the `common_queue`
passed to the constructor, and this method returns the next record dequeued
from that `common_queue`.
Readers dequeue a work unit from `queue` if necessary (e.g. when a
reader needs to start reading from a new file since it has finished with
the previous file).
A queue runner for enqueuing in the `common_queue` is automatically added
to the TF QueueRunners collection.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
name: A name for the operation (optional).
Returns:
The next record (i.e. (key, value pair)) from the common_queue.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue(name=name)
def read_up_to(self, queue, num_records, name=None):
"""Returns up to num_records (key, value pairs) produced by a reader.
Will dequeue a work unit from queue if necessary (e.g., when the
Reader needs to start reading from a new file since it has
finished with the previous file).
It may return less than num_records even before the last batch.
**Note** This operation is not supported by all types of `common_queue`s.
If a `common_queue` does not support `dequeue_up_to()`, then a
`tf.errors.UnimplementedError` is raised.
Args:
queue: A Queue or a mutable string Tensor representing a handle to a
Queue, with string work items.
num_records: Number of records to read.
name: A name for the operation (optional).
Returns:
A tuple of Tensors (keys, values) from common_queue.
keys: A 1-D string Tensor.
values: A 1-D string Tensor.
"""
self._configure_readers_by(queue)
return self._common_queue.dequeue_up_to(num_records, name)
def _configure_readers_by(self, queue):
enqueue_ops = []
for reader in self._readers:
enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(self._common_queue, enqueue_ops))
def num_records_produced(self, name=None):
"""Returns the number of records this reader has produced.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_records = [r.num_records_produced() for r in self._readers]
return math_ops.add_n(num_records, name=name)
def num_work_units_completed(self, name=None):
"""Returns the number of work units this reader has finished processing.
Args:
name: A name for the operation (optional).
Returns:
An int64 Tensor.
"""
num_work_units = [r.num_work_units_completed() for r in self._readers]
return math_ops.add_n(num_work_units, name=name)
def parallel_read(data_sources,
reader_class,
num_epochs=None,
num_readers=4,
reader_kwargs=None,
shuffle=True,
dtypes=None,
capacity=256,
min_after_dequeue=128,
seed=None,
scope=None):
"""Reads multiple records in parallel from data_sources using n readers.
It uses a ParallelReader to read from multiple files in parallel using
multiple readers created using `reader_class` with `reader_kwargs'.
If shuffle is True the common_queue would be a RandomShuffleQueue otherwise
it would be a FIFOQueue.
Usage:
data_sources = ['path_to/train*']
key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader
num_epochs: The number of times each data source is read. If left as None,
the data will be cycled through indefinitely.
num_readers: an integer, number of Readers to create.
reader_kwargs: an optional dict, of kwargs for the reader.
shuffle: boolean, whether should shuffle the files and the records by using
RandomShuffleQueue as common_queue.
dtypes: A list of types. The length of dtypes must equal the number of
elements in each record. If it is None it will default to [tf.string,
tf.string] for (key, value).
capacity: integer, capacity of the common_queue.
min_after_dequeue: integer, minimum number of records in the common_queue
after dequeue. Needed for a good shuffle.
seed: A seed for RandomShuffleQueue.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'parallel_read'):
filename_queue = tf_input.string_input_producer(
data_files,
num_epochs=num_epochs,
shuffle=shuffle,
seed=seed,
name='filenames')
dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]
if shuffle:
common_queue = data_flow_ops.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_after_dequeue,
dtypes=dtypes,
seed=seed,
name='common_queue')
else:
common_queue = data_flow_ops.FIFOQueue(
capacity=capacity, dtypes=dtypes, name='common_queue')
summary.scalar(
'fraction_of_%d_full' % capacity,
math_ops.cast(common_queue.size(), tf_dtypes.float32) * (1. / capacity))
return ParallelReader(
reader_class,
common_queue,
num_readers=num_readers,
reader_kwargs=reader_kwargs).read(filename_queue)
def single_pass_read(data_sources, reader_class, reader_kwargs=None,
scope=None):
"""Reads sequentially the data_sources using the reader, doing a single pass.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.
reader_kwargs: an optional dict, of kwargs for the reader.
scope: Optional name scope for the ops.
Returns:
key, value: a tuple of keys and values from the data_source.
"""
data_files = get_data_files(data_sources)
with ops.name_scope(scope, 'single_pass_read'):
filename_queue = tf_input.string_input_producer(
data_files, num_epochs=1, shuffle=False, capacity=1, name='filenames')
reader_kwargs = reader_kwargs or {}
return reader_class(**reader_kwargs).read(filename_queue)
def get_data_files(data_sources):
"""Get data_files from data_sources.
Args:
data_sources: a list/tuple of files or the location of the data, i.e.
/path/to/train@128, /path/to/train* or /tmp/.../train*
Returns:
a list of data_files.
Raises:
ValueError: if data files are not found
"""
if isinstance(data_sources, (list, tuple)):
data_files = []
for source in data_sources:
data_files += get_data_files(source)
else:
if '*' in data_sources or '?' in data_sources or '[' in data_sources:
data_files = gfile.Glob(data_sources)
else:
data_files = [data_sources]
if not data_files:
raise ValueError('No data files found in %s' % (data_sources,))
return data_files
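# Example (illustrative paths): get_data_files(['/data/train-*',
# '/data/extra.tfrecord']) expands the glob through gfile.Glob, keeps the
# literal filename as-is, and returns the flat list of paths; a source that
# matches nothing raises ValueError.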
| apache-2.0 |
acsone/odoo | addons/document/report/__init__.py | 444 | 1068 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import document_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Pikecillo/genna | external/PyXML-0.8.4/xml/sax/writer.py | 10 | 18860 | """SAX document handlers that support output generation of XML, SGML,
and XHTML.
This module provides three different groups of objects: the actual SAX
document handlers that drive the output, DTD information containers,
and syntax descriptors (of limited public use in most cases).
Output Drivers
--------------
The output drivers conform to the SAX C<DocumentHandler> protocol.
They can be used anywhere a C<DocumentHandler> is used. Two drivers
are provided: a `basic' driver which creates a fairly minimal output
without much intelligence, and a `pretty-printing' driver that
performs pretty-printing with nice indentation and the like. Both can
optionally make use of DTD information and syntax objects.
DTD Information Containers
--------------------------
Each DTD information object provides an attribute C<syntax> which
describes the expected output syntax; an alternate can be provided to
the output drivers if desired.
Syntax Descriptors
------------------
Syntax descriptor objects provide several attributes which describe
the various lexical components of XML & SGML markup. The attributes
have names that reflect the shorthand notation from the SGML world,
but the values are strings which give the appropriate characters for
the markup language being described. The one addition is the
C<empty_stagc> attribute which should be used to end the start tag of
elements which have no content. This is needed to properly support
XML and XHTML.
"""
__version__ = '$Revision: 1.9 $'
import string
import xml.parsers.xmlproc.dtdparser
import xml.parsers.xmlproc.xmlapp
from xml.sax.saxutils import escape
DEFAULT_LINELENGTH = 74
class Syntax:
com = "--" # comment start or end
cro = "&#" # character reference open
refc = ";" # reference close
dso = "[" # declaration subset open
dsc = "]" # declaration subset close
ero = "&" # entity reference open
lit = '"' # literal start or end
lit_quoted = '"' # quoted literal
lita = "'" # literal start or end (alternative)
mdo = "<!" # markup declaration open
mdc = ">" # markup declaration close
msc = "]]" # marked section close
pio = "<?" # processing instruciton open
stago = "<" # start tag open
etago = "</" # end tag open
tagc = ">" # tag close
vi = "=" # value indicator
def __init__(self):
if self.__class__ is Syntax:
raise RuntimeError, "Syntax must be subclassed to be used!"
class SGMLSyntax(Syntax):
empty_stagc = ">"
pic = ">" # processing instruction close
net = "/" # null end tag
class XMLSyntax(Syntax):
empty_stagc = "/>"
pic = "?>" # processing instruction close
net = None # null end tag not supported
class XHTMLSyntax(XMLSyntax):
empty_stagc = " />"
class DoctypeInfo:
syntax = XMLSyntax()
fpi = None
sysid = None
def __init__(self):
self.__empties = {}
self.__elements_only = {}
self.__attribs = {}
def is_empty(self, gi):
return self.__empties.has_key(gi)
def get_empties_list(self):
return self.__empties.keys()
def has_element_content(self, gi):
return self.__elements_only.has_key(gi)
def get_element_containers_list(self):
return self.__elements_only.keys()
def get_attributes_list(self, gi):
return self.__attribs.get(gi, {}).keys()
def get_attribute_info(self, gi, attr):
return self.__attribs[gi][attr]
def add_empty(self, gi):
self.__empties[gi] = 1
def add_element_container(self, gi):
self.__elements_only[gi] = gi
def add_attribute_defn(self, gi, attr, type, decl, default):
try:
d = self.__attribs[gi]
except KeyError:
d = self.__attribs[gi] = {}
if not d.has_key(attr):
d[attr] = (type, decl, default)
else:
print "<%s> attribute %s already defined" % (gi, attr)
def load_pubtext(self, pubtext):
raise NotImplementedError, "sublasses must implement load_pubtext()"
class _XMLDTDLoader(xml.parsers.xmlproc.xmlapp.DTDConsumer):
def __init__(self, info, parser):
self.info = info
xml.parsers.xmlproc.xmlapp.DTDConsumer.__init__(self, parser)
self.new_attribute = info.add_attribute_defn
def new_element_type(self, gi, model):
if model[0] == "|" and model[1][0] == ("#PCDATA", ""):
# no action required
pass
elif model == ("", [], ""):
self.info.add_empty(gi)
else:
self.info.add_element_container(gi)
class XMLDoctypeInfo(DoctypeInfo):
def load_pubtext(self, sysid):
parser = xml.parsers.xmlproc.dtdparser.DTDParser()
loader = _XMLDTDLoader(self, parser)
parser.set_dtd_consumer(loader)
parser.parse_resource(sysid)
class XHTMLDoctypeInfo(XMLDoctypeInfo):
# Bogus W3C cruft requires the extra space when terminating empty elements.
syntax = XHTMLSyntax()
class SGMLDoctypeInfo(DoctypeInfo):
syntax = SGMLSyntax()
import re
__element_prefix_search = re.compile("<!ELEMENT", re.IGNORECASE).search
__element_prefix_len = len("<!ELEMENT")
del re
def load_pubtext(self, sysid):
#
# Really should build a proper SGML DTD parser!
#
pubtext = open(sysid).read()
m = self.__element_prefix_search(pubtext)
while m:
pubtext = pubtext[m.end():]
if pubtext and pubtext[0] in string.whitespace:
pubtext = string.lstrip(pubtext)
else:
continue
gi, pubtext = string.split(pubtext, None, 1)
pubtext = string.lstrip(pubtext)
# maybe need to remove/collect tag occurrence specifiers
# ...
raise NotImplementedError, "implementation incomplete"
#
m = self.__element_prefix_search(pubtext)
class XmlWriter:
"""Basic XML output handler."""
def __init__(self, fp, standalone=None, dtdinfo=None,
syntax=None, linelength=None, encoding='iso-8859-1'):
self._offset = 0
self._packing = 1
self._flowing = 1
self._write = fp.write
self._dtdflowing = None
self._prefix = ''
self._encoding = encoding
self.__stack = []
self.__lang = None
self.__pending_content = 0
self.__pending_doctype = 1
self.__standalone = standalone
self.__dtdinfo = dtdinfo
if syntax is None:
if dtdinfo:
syntax = dtdinfo.syntax
else:
syntax = XMLSyntax()
self.__syntax = syntax
self.indentation = 0
self.indentEndTags = 0
if linelength is None:
self.lineLength = DEFAULT_LINELENGTH
else:
self.lineLength = linelength
def setDocumentLocator(self, locator):
self.locator = locator
def startDocument(self):
if self.__syntax.pic == "?>":
lit = self.__syntax.lit
s = '%sxml version=%s1.0%s encoding%s%s%s%s' % (
self.__syntax.pio, lit, lit, self.__syntax.vi, lit,
self._encoding, lit)
if self.__standalone:
s = '%s standalone%s%s%s%s' % (
s, self.__syntax.vi, lit, self.__standalone, lit)
self._write("%s%s\n" % (s, self.__syntax.pic))
def endDocument(self):
if self.__stack:
raise RuntimeError, "open element stack cannot be empty on close"
def startElement(self, tag, attrs={}):
if self.__pending_doctype:
self.handle_doctype(tag)
self._check_pending_content()
self.__pushtag(tag)
self.__check_flowing(tag, attrs)
if attrs.has_key("xml:lang"):
self.__lang = attrs["xml:lang"]
del attrs["xml:lang"]
if self._packing:
prefix = ""
elif self._flowing:
prefix = self._prefix[:-self.indentation]
else:
prefix = ""
stag = "%s%s%s" % (prefix, self.__syntax.stago, tag)
prefix = "%s %s" % (prefix, (len(tag) * " "))
lit = self.__syntax.lit
lita = self.__syntax.lita
vi = self.__syntax.vi
a = ''
if self._flowing != self.__stack[-1][0]:
if self._dtdflowing is not None \
and self._flowing == self._dtdflowing:
pass
else:
a = ' xml:space%s%s%s%s' \
% (vi, lit, ["default", "preserve"][self._flowing], lit)
if self.__lang != self.__stack[-1][1]:
a = '%s xml:lang%s%s%s%s' % (a, vi, lit, self.__lang, lit)
line = stag + a
self._offset = self._offset + len(line)
a = ''
for k, v in attrs.items():
if v is None:
continue
v = str(v)
if string.find(v, lit) == -1:
a = ' %s%s%s%s%s' % (k, vi, lit, escape(str(v)), lit)
elif string.find(v, lita) == -1:
a = ' %s%s%s%s%s' % (k, vi, lita, escape(str(v)), lita)
else:
a = ' %s%s%s%s%s' % (k, vi, lit,
escape(str(v), {lit:self.__syntax.lit_quoted}),
lita)
if (self._offset + len(a)) > self.lineLength:
self._write(line + "\n")
line = prefix + a
self._offset = len(line)
else:
line = line + a
self._offset = self._offset + len(a)
self._write(line)
self.__pending_content = 1
if ( self.__dtdinfo and not
(self.__dtdinfo.has_element_content(tag)
or self.__dtdinfo.is_empty(tag))):
self._packing = 1
def endElement(self, tag):
if self.__pending_content:
if self._flowing:
self._write(self.__syntax.empty_stagc)
if self._packing:
self._offset = self._offset \
+ len(self.__syntax.empty_stagc)
else:
self._write("\n")
self._offset = 0
else:
self._write(self.__syntax.empty_stagc)
self._offset = self._offset + len(self.__syntax.empty_stagc)
self.__pending_content = 0
self.__poptag(tag)
return
depth = len(self.__stack)
if depth == 1 or self._packing or not self._flowing:
prefix = ''
else:
prefix = self._prefix[:-self.indentation] \
+ (" " * self.indentEndTags)
self.__poptag(tag)
self._write("%s%s%s%s" % (
prefix, self.__syntax.etago, tag, self.__syntax.tagc))
if self._packing:
self._offset = self._offset + len(tag) + 3
else:
self._write("\n")
self._offset = 0
def characters(self, data, start, length):
data = data[start: start+length]
if data:
self._check_pending_content()
data = escape(data)
if "\n" in data:
p = string.find(data, "\n")
self._offset = len(data) - (p + 1)
else:
self._offset = self._offset + len(data)
self._check_pending_content()
self._write(data)
def comment(self, data, start, length):
data = data[start: start+length]
self._check_pending_content()
s = "%s%s%s%s%s" % (self.__syntax.mdo, self.__syntax.com,
data, self.__syntax.com, self.__syntax.mdc)
p = string.rfind(s, "\n")
if self._packing:
if p >= 0:
self._offset = len(s) - (p + 1)
else:
self._offset = self._offset + len(s)
else:
self._write("%s%s\n" % (self._prefix, s))
self._offset = 0
def ignorableWhitespace(self, data, start, length):
pass
def processingInstruction(self, target, data):
self._check_pending_content()
s = "%s%s %s%s" % (self.__syntax.pio, target, data, self.__syntax.pic)
prefix = self._prefix[:-self.indentation] \
+ (" " * self.indentEndTags)
if "\n" in s:
p = string.rfind(s, "\n")
if self._flowing and not self._packing:
self._write(prefix + s + "\n")
self._offset = 0
else:
self._write(s)
self._offset = len(s) - (p + 1)
elif self._flowing and not self._packing:
self._write(prefix + s + "\n")
self._offset = 0
else:
self._write(s)
self._offset = self._offset + len(s)
# This doesn't actually have a SAX equivalent, so we'll use it as
# an internal helper.
def handle_doctype(self, root):
self.__pending_doctype = 0
if self.__dtdinfo:
fpi = self.__dtdinfo.fpi
sysid = self.__dtdinfo.sysid
else:
fpi = sysid = None
lit = self.__syntax.lit
isxml = self.__syntax.pic == "?>"
if isxml and sysid:
s = '%sDOCTYPE %s\n' % (self.__syntax.mdo, root)
if fpi:
s = s + ' PUBLIC %s%s%s\n' % (lit, fpi, lit)
s = s + ' %s%s%s>\n' % (lit, sysid, lit)
else:
s = s + ' SYSTEM %s%s%s>\n' % (lit, sysid, lit)
self._write(s)
self._offset = 0
elif not isxml:
s = "%sDOCTYPE %s" % (self.__syntax.mdo, root)
if fpi:
s = '%s\n PUBLIC %s%s%s' % (s, lit, fpi, lit)
if sysid:
s = '%s\n SYSTEM %s%s%s' % (s, lit, sysid, lit)
self._write("%s%s\n" % (s, self.__syntax.mdc))
self._offset = 0
def handle_cdata(self, data):
self._check_pending_content()
# There should be a better way to generate '[CDATA['
start = self.__syntax.mdo + "[CDATA["
end = self.__syntax.msc + self.__syntax.mdc
s = "%s%s%s" % (start, escape(data), end)
if self._packing:
if "\n" in s:
rpos = string.rfind(s, "\n")
self._offset = len(s) - (rpos + 1) + len(end)
else:
self._offset = self._offset + len(s) + len(start + end)
self._write(s)
else:
self._offset = 0
self._write(s + "\n")
# Internal helper methods.
def __poptag(self, tag):
state = self.__stack.pop()
self._flowing, self.__lang, expected_tag, \
self._packing, self._dtdflowing = state
if tag != expected_tag:
raise RuntimeError, \
"expected </%s>, got </%s>" % (expected_tag, tag)
self._prefix = self._prefix[:-self.indentation]
def __pushtag(self, tag):
self.__stack.append((self._flowing, self.__lang, tag,
self._packing, self._dtdflowing))
self._prefix = self._prefix + " " * self.indentation
def __check_flowing(self, tag, attrs):
"""Check the contents of attrs and the DTD information to determine
whether the following content should be flowed.
tag -- general identifier of the element being opened
attrs -- attributes dictionary as reported by the parser or
application
This sets up both the _flowing and _dtdflowing (object) attributes.
"""
docspec = dtdspec = None
if self.__dtdinfo:
try:
info = self.__dtdinfo.get_attribute_info(tag, "xml:space")
except KeyError:
info = None
if info is not None:
self._flowing = info[2] != "preserve"
self._dtdflowing = self._flowing
if attrs.has_key("xml:space"):
self._flowing = attrs["xml:space"] != "preserve"
del attrs["xml:space"]
def _check_pending_content(self):
if self.__pending_content:
s = self.__syntax.tagc
if self._flowing and not self._packing:
s = s + "\n"
self._offset = 0
else:
self._offset = self._offset + len(s)
self._write(s)
self.__pending_content = 0
class PrettyPrinter(XmlWriter):
"""Pretty-printing XML output handler."""
def __init__(self, fp, standalone=None, dtdinfo=None,
syntax=None, linelength=None,
indentation=2, endtagindentation=None):
XmlWriter.__init__(self, fp, standalone=standalone, dtdinfo=dtdinfo,
syntax=syntax, linelength=linelength)
self.indentation = indentation
if endtagindentation is not None:
self.indentEndTags = endtagindentation
else:
self.indentEndTags = indentation
def characters(self, data, start, length):
data = data[start: start + length]
if not data:
return
self._check_pending_content()
data = escape(data)
if not self._flowing:
self._write(data)
return
words = string.split(data)
begspace = data[0] in string.whitespace
endspace = words and (data[-1] in string.whitespace)
prefix = self._prefix
if len(prefix) > 40:
prefix = " "
offset = self._offset
L = []
append = L.append
if begspace:
append(" ")
offset = offset + 1
ws = ""
ws_len = 0
while words:
w = words[0]
del words[0]
if (offset + ws_len + len(w)) > self.lineLength:
append("\n")
append(prefix)
append(w)
offset = len(prefix) + len(w)
else:
append(ws)
ws, ws_len = " ", 1
append(w)
offset = offset + 1 + len(w)
if endspace:
append(" ")
offset = offset + 1
self._offset = offset
self._write(string.join(L, ""))
| gpl-2.0 |
EdLogan18/logan-repository | plugin.video.SportsDevil/lib/utils/pyDes.py | 54 | 32294 | #############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version: 2.0.0
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
to use during all encrypt/decrypt operations done with this instance.
I recommend using PAD_PKCS5 padding, as you then never need to worry about any
padding issues, as the padding can be removed unambiguously upon decrypting
data that was encrypted using PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only when using padmode of PAD_NORMAL. For
encryption, adds this characters to the end of the data block when
data is not a multiple of 8 bytes. For decryption, will remove the
trailing characters that match this pad character from the last 8
bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
or PAD_PKCS5. Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without any arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather as a handy, portable solution for light usage.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5: is a method that will unambiguously remove all padding
# characters after decryption, when originally encrypted with
# this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
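# Worked PKCS5 example (illustrative only): with the 8 byte block size used
# here, pad_len = 8 - (len(data) % 8) is always between 1 and 8, and every
# pad byte holds pad_len itself. For b"HELLO" (5 bytes), pad_len is 3 and
# the padded block is b"HELLO\x03\x03\x03"; unpadding reads the final byte
# (0x03) and strips that many bytes, which is unambiguous even when the
# plaintext itself ends in pad-like bytes.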
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
"""DES encryption/decrytpion class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
# number left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23, 13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
def __String_to_BitList(self, data):
"""Turn the string data, into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
def __BitList_to_String(self, data):
"""Turn the list of bits -> data, into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
# Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
# Split the data into blocks, crypting each one separately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
the padmode is set to PAD_PKCS5, as bytes will then be added to
ensure the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be decrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.triple_des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
the padmode is set to PAD_PKCS5, as bytes will then be added to
ensure the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting; no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
data = bytes.fromhex('').join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode)
| gpl-2.0 |
IRSO/irsosav | nodejs.git/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
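# Illustrative sketch of the layout described above (not part of the
# recipe): after inserting 'a' and then 'b', the links form a ring
#
#     root   = [link_b, link_a, None]   # [PREV, NEXT, KEY] sentinel
#     link_a = [root, link_b, 'a']
#     link_b = [link_a, root, 'b']
#
# so __iter__ walks NEXT pointers from root, __reversed__ walks PREV, and
# self.__map gives O(1) access to any link by key.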
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
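# Hedged usage example for popitem() (the values are made up):
#
#     od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
#     od.popitem()            # ('c', 3) -- LIFO by default
#     od.popitem(last=False)  # ('a', 1) -- FIFO when last is false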
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
# Suppress 'OrderedDict.update: Method has no argument':
# pylint: disable=E0211
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| gpl-3.0 |
zahanm/foodpedia | django/db/models/fields/subclassing.py | 229 | 4356 | """
Convenience routines for creating non-trivial Field subclasses, as well as
backwards compatibility utilities.
Add SubfieldBase as the __metaclass__ for your Field subclass, implement
to_python() and the other necessary methods and everything will work seamlessly.
"""
from inspect import getargspec
from warnings import warn
def call_with_connection(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = ('connection' in arg_names or varkwargs)
if not updated:
warn("A Field class whose %s method hasn't been updated to take a "
"`connection` argument." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
return func(*args, **kwargs)
return inner
def call_with_connection_and_prepared(func):
arg_names, varargs, varkwargs, defaults = getargspec(func)
updated = (
('connection' in arg_names or varkwargs) and
('prepared' in arg_names or varkwargs)
)
if not updated:
warn("A Field class whose %s method hasn't been updated to take "
"`connection` and `prepared` arguments." % func.__name__,
DeprecationWarning, stacklevel=3)
def inner(*args, **kwargs):
if 'connection' not in kwargs:
from django.db import connection
kwargs['connection'] = connection
warn("%s has been called without providing a connection argument. " %
func.__name__, DeprecationWarning,
stacklevel=2)
if updated:
return func(*args, **kwargs)
if 'connection' in kwargs:
del kwargs['connection']
if 'prepared' in kwargs:
del kwargs['prepared']
return func(*args, **kwargs)
return inner
class LegacyConnection(type):
"""
A metaclass to normalize arguments give to the get_db_prep_* and db_type
methods on fields.
"""
def __new__(cls, name, bases, attrs):
new_cls = super(LegacyConnection, cls).__new__(cls, name, bases, attrs)
for attr in ('db_type', 'get_db_prep_save'):
setattr(new_cls, attr, call_with_connection(getattr(new_cls, attr)))
for attr in ('get_db_prep_lookup', 'get_db_prep_value'):
setattr(new_cls, attr, call_with_connection_and_prepared(getattr(new_cls, attr)))
return new_cls
class SubfieldBase(LegacyConnection):
"""
A metaclass for custom Field subclasses. This ensures the model's attribute
has the descriptor protocol attached to it.
"""
def __new__(cls, name, bases, attrs):
new_class = super(SubfieldBase, cls).__new__(cls, name, bases, attrs)
new_class.contribute_to_class = make_contrib(
new_class, attrs.get('contribute_to_class')
)
return new_class
class Creator(object):
"""
A placeholder class that provides a way to set the attribute on the model.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance.')
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
def make_contrib(superclass, func=None):
"""
Returns a suitable contribute_to_class() method for the Field subclass.
If 'func' is passed in, it is the existing contribute_to_class() method on
the subclass and it is called before anything else. It is assumed in this
case that the existing contribute_to_class() calls all the necessary
superclass methods.
"""
def contribute_to_class(self, cls, name):
if func:
func(self, cls, name)
else:
super(superclass, self).contribute_to_class(cls, name)
setattr(cls, self.name, Creator(self))
return contribute_to_class
| bsd-3-clause |
40223142/cda11 | static/Brython3.1.0-20150301-090019/Lib/pprint.py | 634 | 12757 | # Author: Fred L. Drake, Jr.
# fdrake@acm.org
#
# This is a simple little module I wrote to make life easier. I didn't
# see anything quite like it in the library, though I may have overlooked
# something. I wrote this when I was trying to read some heavily nested
# tuples with fairly non-descriptive content. This is modeled very much
# after Lisp/Scheme - style pretty-printing of lists. If you find it
# useful, thank small children who sleep at night.
"""Support to pretty-print lists, tuples, & dictionaries recursively.
Very simple, but useful, especially in debugging data structures.
Classes
-------
PrettyPrinter()
Handle pretty-printing operations onto a stream using a configured
set of formatting parameters.
Functions
---------
pformat()
Format a Python object into a pretty-printed representation.
pprint()
Pretty-print a Python object to a stream [default is sys.stdout].
saferepr()
Generate a 'standard' repr()-like value, but protect against recursive
data structures.
"""
import sys as _sys
from collections import OrderedDict as _OrderedDict
from io import StringIO as _StringIO
__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
"PrettyPrinter"]
# cache these for faster access:
_commajoin = ", ".join
_id = id
_len = len
_type = type
def pprint(object, stream=None, indent=1, width=80, depth=None):
"""Pretty-print a Python object to a stream [default is sys.stdout]."""
printer = PrettyPrinter(
stream=stream, indent=indent, width=width, depth=depth)
printer.pprint(object)
def pformat(object, indent=1, width=80, depth=None):
"""Format a Python object into a pretty-printed representation."""
return PrettyPrinter(indent=indent, width=width, depth=depth).pformat(object)
def saferepr(object):
"""Version of repr() which can handle recursive data structures."""
return _safe_repr(object, {}, None, 0)[0]
def isreadable(object):
"""Determine if saferepr(object) is readable by eval()."""
return _safe_repr(object, {}, None, 0)[1]
def isrecursive(object):
"""Determine if object requires a recursive representation."""
return _safe_repr(object, {}, None, 0)[2]
class _safe_key:
"""Helper function for key functions when sorting unorderable objects.
The wrapped object will fall back to a Py2.x style comparison for
unorderable types (sorting first by type name and then by object
id). Does not work recursively, so dict.items() must have
_safe_key applied to both the key and the value.
"""
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
try:
rv = self.obj.__lt__(other.obj)
except TypeError:
rv = NotImplemented
if rv is NotImplemented:
rv = (str(type(self.obj)), id(self.obj)) < \
(str(type(other.obj)), id(other.obj))
return rv
def _safe_tuple(t):
"Helper function for comparing 2-tuples"
return _safe_key(t[0]), _safe_key(t[1])
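# Illustrative example (assumed inputs): Python 3 refuses to order mixed
# types, but wrapping each key restores a Py2.x-style total order:
#
#     sorted([3, 'a', None])                 # TypeError on Python 3
#     sorted([3, 'a', None], key=_safe_key)  # (type name, id) fallback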
class PrettyPrinter:
def __init__(self, indent=1, width=80, depth=None, stream=None):
"""Handle pretty printing operations onto a stream using a set of
configured parameters.
indent
Number of spaces to indent for each level of nesting.
width
Attempted maximum number of columns in the output.
depth
The maximum depth to print out nested structures.
stream
The desired output stream. If omitted (or false), the standard
output stream available at construction will be used.
"""
indent = int(indent)
width = int(width)
assert indent >= 0, "indent must be >= 0"
assert depth is None or depth > 0, "depth must be > 0"
assert width, "width must be != 0"
self._depth = depth
self._indent_per_level = indent
self._width = width
if stream is not None:
self._stream = stream
else:
self._stream = _sys.stdout
def pprint(self, object):
self._format(object, self._stream, 0, 0, {}, 0)
self._stream.write("\n")
def pformat(self, object):
sio = _StringIO()
self._format(object, sio, 0, 0, {}, 0)
return sio.getvalue()
def isrecursive(self, object):
return self.format(object, {}, 0, 0)[2]
def isreadable(self, object):
s, readable, recursive = self.format(object, {}, 0, 0)
return readable and not recursive
def _format(self, object, stream, indent, allowance, context, level):
level = level + 1
objid = _id(object)
if objid in context:
stream.write(_recursion(object))
self._recursive = True
self._readable = False
return
rep = self._repr(object, context, level - 1)
typ = _type(object)
sepLines = _len(rep) > (self._width - 1 - indent - allowance)
write = stream.write
if self._depth and level > self._depth:
write(rep)
return
if sepLines:
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict):
write('{')
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
length = _len(object)
if length:
context[objid] = 1
indent = indent + self._indent_per_level
if issubclass(typ, _OrderedDict):
items = list(object.items())
else:
items = sorted(object.items(), key=_safe_tuple)
key, ent = items[0]
rep = self._repr(key, context, level)
write(rep)
write(': ')
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
if length > 1:
for key, ent in items[1:]:
rep = self._repr(key, context, level)
write(',\n%s%s: ' % (' '*indent, rep))
self._format(ent, stream, indent + _len(rep) + 2,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
write('}')
return
if ((issubclass(typ, list) and r is list.__repr__) or
(issubclass(typ, tuple) and r is tuple.__repr__) or
(issubclass(typ, set) and r is set.__repr__) or
(issubclass(typ, frozenset) and r is frozenset.__repr__)
):
length = _len(object)
if issubclass(typ, list):
write('[')
endchar = ']'
elif issubclass(typ, tuple):
write('(')
endchar = ')'
else:
if not length:
write(rep)
return
if typ is set:
write('{')
endchar = '}'
else:
write(typ.__name__)
write('({')
endchar = '})'
indent += len(typ.__name__) + 1
object = sorted(object, key=_safe_key)
if self._indent_per_level > 1:
write((self._indent_per_level - 1) * ' ')
if length:
context[objid] = 1
indent = indent + self._indent_per_level
self._format(object[0], stream, indent, allowance + 1,
context, level)
if length > 1:
for ent in object[1:]:
write(',\n' + ' '*indent)
self._format(ent, stream, indent,
allowance + 1, context, level)
indent = indent - self._indent_per_level
del context[objid]
if issubclass(typ, tuple) and length == 1:
write(',')
write(endchar)
return
write(rep)
def _repr(self, object, context, level):
repr, readable, recursive = self.format(object, context.copy(),
self._depth, level)
if not readable:
self._readable = False
if recursive:
self._recursive = True
return repr
def format(self, object, context, maxlevels, level):
"""Format object for a specific context, returning a string
and flags indicating whether the representation is 'readable'
and whether the object represents a recursive construct.
"""
return _safe_repr(object, context, maxlevels, level)
# Return triple (repr_string, isreadable, isrecursive).
def _safe_repr(object, context, maxlevels, level):
typ = _type(object)
if typ is str:
if 'locale' not in _sys.modules:
return repr(object), True, False
if "'" in object and '"' not in object:
closure = '"'
quotes = {'"': '\\"'}
else:
closure = "'"
quotes = {"'": "\\'"}
qget = quotes.get
sio = _StringIO()
write = sio.write
for char in object:
if char.isalpha():
write(char)
else:
write(qget(char, repr(char)[1:-1]))
return ("%s%s%s" % (closure, sio.getvalue(), closure)), True, False
r = getattr(typ, "__repr__", None)
if issubclass(typ, dict) and r is dict.__repr__:
if not object:
return "{}", True, False
objid = _id(object)
if maxlevels and level >= maxlevels:
return "{...}", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
saferepr = _safe_repr
items = sorted(object.items(), key=_safe_tuple)
for k, v in items:
krepr, kreadable, krecur = saferepr(k, context, maxlevels, level)
vrepr, vreadable, vrecur = saferepr(v, context, maxlevels, level)
append("%s: %s" % (krepr, vrepr))
readable = readable and kreadable and vreadable
if krecur or vrecur:
recursive = True
del context[objid]
return "{%s}" % _commajoin(components), readable, recursive
if (issubclass(typ, list) and r is list.__repr__) or \
(issubclass(typ, tuple) and r is tuple.__repr__):
if issubclass(typ, list):
if not object:
return "[]", True, False
format = "[%s]"
elif _len(object) == 1:
format = "(%s,)"
else:
if not object:
return "()", True, False
format = "(%s)"
objid = _id(object)
if maxlevels and level >= maxlevels:
return format % "...", False, objid in context
if objid in context:
return _recursion(object), False, True
context[objid] = 1
readable = True
recursive = False
components = []
append = components.append
level += 1
for o in object:
orepr, oreadable, orecur = _safe_repr(o, context, maxlevels, level)
append(orepr)
if not oreadable:
readable = False
if orecur:
recursive = True
del context[objid]
return format % _commajoin(components), readable, recursive
rep = repr(object)
return rep, (rep and not rep.startswith('<')), False
def _recursion(object):
return ("<Recursion on %s with id=%s>"
% (_type(object).__name__, _id(object)))
def _perfcheck(object=None):
import time
if object is None:
object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
p = PrettyPrinter()
t1 = time.time()
_safe_repr(object, {}, None, 0)
t2 = time.time()
p.pformat(object)
t3 = time.time()
print("_safe_repr:", t2 - t1)
print("pformat:", t3 - t2)
if __name__ == "__main__":
_perfcheck()
| gpl-3.0 |
dfc/beets | test/test_datequery.py | 25 | 4999 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test for dbcore's date-based queries.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
from test import _common
from test._common import unittest
from datetime import datetime
import time
from beets.dbcore.query import _parse_periods, DateInterval, DateQuery
def _date(string):
return datetime.strptime(string, '%Y-%m-%dT%H:%M:%S')
class DateIntervalTest(unittest.TestCase):
def test_year_precision_intervals(self):
self.assertContains('2000..2001', '2000-01-01T00:00:00')
self.assertContains('2000..2001', '2001-06-20T14:15:16')
self.assertContains('2000..2001', '2001-12-31T23:59:59')
self.assertExcludes('2000..2001', '1999-12-31T23:59:59')
self.assertExcludes('2000..2001', '2002-01-01T00:00:00')
self.assertContains('2000..', '2000-01-01T00:00:00')
self.assertContains('2000..', '2099-10-11T00:00:00')
self.assertExcludes('2000..', '1999-12-31T23:59:59')
self.assertContains('..2001', '2001-12-31T23:59:59')
self.assertExcludes('..2001', '2002-01-01T00:00:00')
def test_day_precision_intervals(self):
self.assertContains('2000-06-20..2000-06-20', '2000-06-20T00:00:00')
self.assertContains('2000-06-20..2000-06-20', '2000-06-20T10:20:30')
self.assertContains('2000-06-20..2000-06-20', '2000-06-20T23:59:59')
self.assertExcludes('2000-06-20..2000-06-20', '2000-06-19T23:59:59')
self.assertExcludes('2000-06-20..2000-06-20', '2000-06-21T00:00:00')
def test_month_precision_intervals(self):
self.assertContains('1999-12..2000-02', '1999-12-01T00:00:00')
self.assertContains('1999-12..2000-02', '2000-02-15T05:06:07')
self.assertContains('1999-12..2000-02', '2000-02-29T23:59:59')
self.assertExcludes('1999-12..2000-02', '1999-11-30T23:59:59')
self.assertExcludes('1999-12..2000-02', '2000-03-01T00:00:00')
def test_unbounded_endpoints(self):
self.assertContains('..', date=datetime.max)
self.assertContains('..', date=datetime.min)
self.assertContains('..', '1000-01-01T00:00:00')
def assertContains(self, interval_pattern, date_pattern=None, date=None):
if date is None:
date = _date(date_pattern)
(start, end) = _parse_periods(interval_pattern)
interval = DateInterval.from_periods(start, end)
self.assertTrue(interval.contains(date))
def assertExcludes(self, interval_pattern, date_pattern):
date = _date(date_pattern)
(start, end) = _parse_periods(interval_pattern)
interval = DateInterval.from_periods(start, end)
self.assertFalse(interval.contains(date))
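# Illustrative helper (added; not part of the original suite): it
# condenses the assertContains/assertExcludes pattern above into a plain
# predicate, using only names already imported in this file.
def _interval_contains(interval_pattern, date_pattern):
    start, end = _parse_periods(interval_pattern)
    interval = DateInterval.from_periods(start, end)
    return interval.contains(_date(date_pattern))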
def _parsetime(s):
return time.mktime(datetime.strptime(s, '%Y-%m-%d %H:%M').timetuple())
class DateQueryTest(_common.LibTestCase):
def setUp(self):
super(DateQueryTest, self).setUp()
self.i.added = _parsetime('2013-03-30 22:21')
self.i.store()
def test_single_month_match_fast(self):
query = DateQuery('added', '2013-03')
matched = self.lib.items(query)
self.assertEqual(len(matched), 1)
def test_single_month_nonmatch_fast(self):
query = DateQuery('added', '2013-04')
matched = self.lib.items(query)
self.assertEqual(len(matched), 0)
def test_single_month_match_slow(self):
query = DateQuery('added', '2013-03')
self.assertTrue(query.match(self.i))
def test_single_month_nonmatch_slow(self):
query = DateQuery('added', '2013-04')
self.assertFalse(query.match(self.i))
def test_single_day_match_fast(self):
query = DateQuery('added', '2013-03-30')
matched = self.lib.items(query)
self.assertEqual(len(matched), 1)
def test_single_day_nonmatch_fast(self):
query = DateQuery('added', '2013-03-31')
matched = self.lib.items(query)
self.assertEqual(len(matched), 0)
class DateQueryConstructTest(unittest.TestCase):
def test_long_numbers(self):
DateQuery('added', '1409830085..1412422089')
def test_too_many_components(self):
DateQuery('added', '12-34-56-78')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
| mit |
xyuanmu/XX-Net | python3.8.2/Lib/site-packages/pip/_internal/utils/setuptools_build.py | 9 | 1239 | import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List
# Shim to wrap setup.py invocation with setuptools
#
# We set sys.argv[0] to the path to the underlying setup.py file so
# setuptools / distutils don't take the path to the setup.py to be "-c" when
# invoking via the shim. This avoids e.g. the following manifest_maker
# warning: "warning: manifest_maker: standard file '-c' not found".
_SETUPTOOLS_SHIM = (
"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};"
"f=getattr(tokenize, 'open', open)(__file__);"
"code=f.read().replace('\\r\\n', '\\n');"
"f.close();"
"exec(compile(code, __file__, 'exec'))"
)
def make_setuptools_shim_args(setup_py_path, unbuffered_output=False):
# type: (str, bool) -> List[str]
"""
Get setuptools command arguments with shim wrapped setup file invocation.
:param setup_py_path: The path to setup.py to be wrapped.
:param unbuffered_output: If True, adds the unbuffered switch to the
argument list.
"""
args = [sys.executable]
if unbuffered_output:
args.append('-u')
args.extend(['-c', _SETUPTOOLS_SHIM.format(setup_py_path)])
return args
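# Illustrative usage sketch (added; not part of pip's module). The
# trailing 'egg_info' subcommand is an assumption about what a caller
# might append; only make_setuptools_shim_args() comes from this file.
def _example_invocation(setup_py_path):
    # type: (str) -> int
    import subprocess
    args = make_setuptools_shim_args(setup_py_path, unbuffered_output=True)
    args.append('egg_info')  # any setup.py subcommand could follow the shim
    return subprocess.call(args)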
| bsd-2-clause |
Akrog/cinder | cinder/tests/api/v1/test_volumes.py | 1 | 48969 | # Copyright 2013 Josh Durgin
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
import mock
from oslo_config import cfg
import webob
from cinder.api import extensions
from cinder.api.v1 import volumes
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
from cinder.tests import fake_notifier
from cinder.tests.image import fake as fake_image
from cinder.volume import api as volume_api
NS = '{http://docs.openstack.org/api/openstack-block-storage/1.0/content}'
TEST_SNAPSHOT_UUID = '00000000-0000-0000-0000-000000000001'
CONF = cfg.CONF
def stub_snapshot_get(self, context, snapshot_id):
if snapshot_id != TEST_SNAPSHOT_UUID:
raise exception.NotFound
return {'id': snapshot_id,
'volume_id': 12,
'status': 'available',
'volume_size': 100,
'created_at': None,
'display_name': 'Default name',
'display_description': 'Default description', }
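# Orientation sketch (added; not part of the original suite): the minimal
# v1 create-request shape the tests below keep rebuilding. The field
# values are placeholders taken from test_volume_create.
#
#   body = {"volume": {"size": 100,
#                      "display_name": "Volume Test Name",
#                      "display_description": "Volume Test Desc",
#                      "availability_zone": "zone1:host1"}}
#   req = fakes.HTTPRequest.blank('/v1/volumes')
#   res_dict = controller.create(req, body)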
class VolumeApiTest(test.TestCase):
def setUp(self):
super(VolumeApiTest, self).setUp()
self.addCleanup(fake_notifier.reset)
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
fake_image.stub_out_image_service(self.stubs)
self.controller = volumes.VolumeController(self.ext_mgr)
self.flags(host='fake',
notification_driver=[fake_notifier.__name__])
self.stubs.Set(db, 'volume_get_all', stubs.stub_volume_get_all)
self.stubs.Set(db, 'service_get_all_by_topic',
stubs.stub_service_get_all_by_topic)
self.stubs.Set(volume_api.API, 'delete', stubs.stub_volume_delete)
def test_volume_create(self):
self.stubs.Set(volume_api.API, 'get', stubs.stub_volume_get)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'zone1:host1',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
                               'size': 100}}
self.assertEqual(res_dict, expected)
def test_volume_create_with_type(self):
vol_type = CONF.default_volume_type
db.volume_type_create(context.get_admin_context(),
dict(name=vol_type, extra_specs={}))
db_vol_type = db.volume_type_get_by_name(context.get_admin_context(),
vol_type)
vol = {"size": 100,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1",
"volume_type": "FakeTypeName"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
# Raise 404 when type name isn't valid
self.assertRaises(webob.exc.HTTPNotFound, self.controller.create,
req, body)
# Use correct volume type name
vol.update(dict(volume_type=CONF.default_volume_type))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
self.assertIn('id', res_dict['volume'])
self.assertEqual(len(res_dict), 1)
self.assertEqual(res_dict['volume']['volume_type'],
db_vol_type['name'])
# Use correct volume type id
vol.update(dict(volume_type=db_vol_type['id']))
body.update(dict(volume=vol))
res_dict = self.controller.create(req, body)
self.assertIn('id', res_dict['volume'])
self.assertEqual(len(res_dict), 1)
self.assertEqual(res_dict['volume']['volume_type'],
db_vol_type['name'])
def test_volume_creation_fails_with_bad_size(self):
vol = {"size": '',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "zone1:host1"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req,
body)
def test_volume_creation_fails_with_bad_availability_zone(self):
vol = {"size": '1',
"name": "Volume Test Name",
"description": "Volume Test Desc",
"availability_zone": "zonen:hostn"}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v2/volumes')
self.assertRaises(exception.InvalidInput,
self.controller.create,
req, body)
def test_volume_create_with_image_id(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
test_id = "c905cedb-7281-47e4-8a62-f26bc5fc4c77"
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "nova",
"imageRef": test_id}
expected = {'volume': {'status': 'fakestatus',
'display_description': 'Volume Test Desc',
'availability_zone': 'nova',
'display_name': 'Volume Test Name',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'image_id': test_id,
'snapshot_id': None,
'source_volid': None,
'metadata': {},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': '1'}}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.create(req, body)
self.assertEqual(res_dict, expected)
def test_volume_create_with_image_id_is_integer(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": 1234}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_not_uuid_format(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": '1',
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": '12345'}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_create_with_image_id_with_empty_string(self):
self.stubs.Set(volume_api.API, "create", stubs.stub_volume_create)
self.ext_mgr.extensions = {'os-image-create': 'fake'}
vol = {"size": 1,
"display_name": "Volume Test Name",
"display_description": "Volume Test Desc",
"availability_zone": "cinder",
"imageRef": ''}
body = {"volume": vol}
req = fakes.HTTPRequest.blank('/v1/volumes')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body)
def test_volume_update(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/'
}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_metadata(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
updates = {
"metadata": {"qos_max_iops": 2000}
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/'
}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {"qos_max_iops": 2000,
"readonly": "False",
"attached_mode": "rw"},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
'size': 1
}}
self.assertEqual(res_dict, expected)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_volume_update_with_admin_metadata(self):
self.stubs.Set(volume_api.API, "update", stubs.stub_volume_update)
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.update(req, '1', body)
expected = {'volume': {
'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'Updated Test Name',
'encrypted': False,
'attachments': [{
'id': '1',
'volume_id': '1',
'server_id': 'fakeuuid',
'host_name': None,
'device': '/'
}],
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1, 1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 2)
def test_update_empty_body(self):
body = {}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_invalid_body(self):
body = {'display_name': 'missing top level volume key'}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.update,
req, '1', body)
def test_update_not_found(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
updates = {
"display_name": "Updated Test Name",
}
body = {"volume": updates}
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.update,
req, '1', body)
def test_volume_list(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/volumes')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
req = fakes.HTTPRequest.blank('/v1/volumes')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_list_detail(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
self.stubs.Set(volume_api.API, 'get_all',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
# Finally test that we cached the returned volumes
self.assertEqual(1, len(req.cached_resource()))
def test_volume_list_detail_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
req = fakes.HTTPRequest.blank('/v1/volumes/detail')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.index(req)
expected = {'volumes': [{'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}]}
self.assertEqual(res_dict, expected)
def test_volume_show(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
# Finally test that we cached the returned volume
self.assertIsNotNone(req.cached_resource_by_id('1'))
def test_volume_show_no_attachments(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, attach_status='detached')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [],
'bootable': 'false',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_bootable(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return (stubs.stub_volume(volume_id,
volume_glance_metadata=dict(foo='bar')))
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'true',
'volume_type': 'vol_type_name',
'snapshot_id': None,
'source_volid': None,
'metadata': {'attached_mode': 'rw',
'readonly': 'False'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req,
1)
# Finally test that we did not cache anything
self.assertIsNone(req.cached_resource_by_id('1'))
def test_volume_detail_limit_offset(self):
def volume_detail_limit_offset(is_admin):
def stub_volume_get_all_by_project(context, project_id, marker,
limit, sort_keys=None,
sort_dirs=None, filters=None,
viewable_admin_meta=False):
return [
stubs.stub_volume(1, display_name='vol1'),
stubs.stub_volume(2, display_name='vol2'),
]
self.stubs.Set(db, 'volume_get_all_by_project',
stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
            req = fakes.HTTPRequest.blank(
                '/v1/volumes/detail?limit=2&offset=1',
                use_admin_context=is_admin)
res_dict = self.controller.index(req)
volumes = res_dict['volumes']
self.assertEqual(len(volumes), 1)
self.assertEqual(volumes[0]['id'], 2)
# admin case
volume_detail_limit_offset(is_admin=True)
# non_admin case
volume_detail_limit_offset(is_admin=False)
def test_volume_show_with_admin_metadata(self):
volume = stubs.stub_volume("1")
del volume['name']
del volume['volume_type']
del volume['volume_type_id']
volume['metadata'] = {'key': 'value'}
db.volume_create(context.get_admin_context(), volume)
db.volume_admin_metadata_update(context.get_admin_context(), "1",
{"readonly": "True",
"invisible_key": "invisible_value"},
False)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
admin_ctx = context.RequestContext('admin', 'fakeproject', True)
req.environ['cinder.context'] = admin_ctx
res_dict = self.controller.show(req, '1')
expected = {'volume': {'status': 'fakestatus',
'display_description': 'displaydesc',
'availability_zone': 'fakeaz',
'display_name': 'displayname',
'encrypted': False,
'attachments': [{'device': '/',
'server_id': 'fakeuuid',
'host_name': None,
'id': '1',
'volume_id': '1'}],
'bootable': 'false',
'volume_type': None,
'snapshot_id': None,
'source_volid': None,
'metadata': {'key': 'value',
'readonly': 'True'},
'id': '1',
'created_at': datetime.datetime(1900, 1, 1,
1, 1, 1),
'size': 1}}
self.assertEqual(res_dict, expected)
def test_volume_show_with_encrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id='fake_id')
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['volume']['encrypted'], True)
def test_volume_show_with_unencrypted_volume(self):
def stub_volume_get(self, context, volume_id, **kwargs):
return stubs.stub_volume(volume_id, encryption_key_id=None)
self.stubs.Set(volume_api.API, 'get', stub_volume_get)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
res_dict = self.controller.show(req, 1)
self.assertEqual(res_dict['volume']['encrypted'], False)
def test_volume_delete(self):
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
resp = self.controller.delete(req, 1)
self.assertEqual(resp.status_int, 202)
def test_volume_delete_no_volume(self):
self.stubs.Set(volume_api.API, "get", stubs.stub_volume_get_notfound)
req = fakes.HTTPRequest.blank('/v1/volumes/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
def test_admin_list_volumes_limited_to_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
req = fakes.HTTPRequest.blank('/v1/fake/volumes',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_admin_list_volumes_all_tenants(self):
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1',
use_admin_context=True)
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(3, len(res['volumes']))
def test_all_tenants_non_admin_gets_all_tenants(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/fake/volumes?all_tenants=1')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
def test_non_admin_get_by_project(self):
self.stubs.Set(db, 'volume_get_all_by_project',
stubs.stub_volume_get_all_by_project)
self.stubs.Set(db, 'volume_get', stubs.stub_volume_get_db)
req = fakes.HTTPRequest.blank('/v1/fake/volumes')
res = self.controller.index(req)
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_string(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'display_name': 'Volume-573108026'}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None,
filters={'display_name': 'Volume-573108026'}, marker=None)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_list(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'id': "['1', '2', '3']"}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None,
filters={'id': ['1', '2', '3']}, marker=None)
@mock.patch('cinder.volume.api.API.get_all')
def test_get_volumes_filter_with_expression(self, get_all):
req = mock.MagicMock()
req.GET.copy.return_value = {'id': "d+"}
context = mock.Mock()
req.environ = {'cinder.context': context}
self.controller._items(req, mock.Mock)
get_all.assert_called_once_with(
context, sort_dirs=['desc'], viewable_admin_meta=True,
sort_keys=['created_at'], limit=None, filters={'id': 'd+'},
marker=None)
class VolumeSerializerTest(test.TestCase):
def _verify_volume_attachment(self, attach, tree):
for attr in ('id', 'volume_id', 'server_id', 'device'):
self.assertEqual(str(attach[attr]), tree.get(attr))
def _verify_volume(self, vol, tree):
self.assertEqual(tree.tag, NS + 'volume')
for attr in ('id', 'status', 'size', 'availability_zone', 'created_at',
'display_name', 'display_description', 'volume_type',
'bootable', 'snapshot_id'):
self.assertEqual(str(vol[attr]), tree.get(attr))
for child in tree:
self.assertIn(child.tag, (NS + 'attachments', NS + 'metadata'))
if child.tag == 'attachments':
self.assertEqual(1, len(child))
self.assertEqual('attachment', child[0].tag)
self._verify_volume_attachment(vol['attachments'][0], child[0])
elif child.tag == 'metadata':
not_seen = set(vol['metadata'].keys())
for gr_child in child:
self.assertIn(gr_child.get("key"), not_seen)
self.assertEqual(str(vol['metadata'][gr_child.get("key")]),
gr_child.text)
not_seen.remove(gr_child.get('key'))
self.assertEqual(0, len(not_seen))
def test_volume_show_create_serializer(self):
serializer = volumes.VolumeTemplate()
raw_volume = dict(
id='vol_id',
status='vol_status',
size=1024,
availability_zone='vol_availability',
bootable='false',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol_id',
volume_id='vol_id',
server_id='instance_uuid',
device='/foo')],
display_name='vol_name',
display_description='vol_desc',
volume_type='vol_type',
snapshot_id='snap_id',
source_volid='source_volid',
metadata=dict(foo='bar',
baz='quux', ), )
text = serializer.serialize(dict(volume=raw_volume))
tree = etree.fromstring(text)
self._verify_volume(raw_volume, tree)
def test_volume_index_detail_serializer(self):
serializer = volumes.VolumesTemplate()
raw_volumes = [dict(id='vol1_id',
status='vol1_status',
size=1024,
availability_zone='vol1_availability',
bootable='true',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol1_id',
volume_id='vol1_id',
server_id='instance_uuid',
device='/foo1')],
display_name='vol1_name',
display_description='vol1_desc',
volume_type='vol1_type',
snapshot_id='snap1_id',
source_volid=None,
metadata=dict(foo='vol1_foo',
bar='vol1_bar', ), ),
dict(id='vol2_id',
status='vol2_status',
size=1024,
availability_zone='vol2_availability',
bootable='true',
created_at=datetime.datetime.now(),
attachments=[dict(id='vol2_id',
volume_id='vol2_id',
server_id='instance_uuid',
device='/foo2')],
display_name='vol2_name',
display_description='vol2_desc',
volume_type='vol2_type',
snapshot_id='snap2_id',
source_volid=None,
metadata=dict(foo='vol2_foo',
bar='vol2_bar', ), )]
text = serializer.serialize(dict(volumes=raw_volumes))
tree = etree.fromstring(text)
self.assertEqual(NS + 'volumes', tree.tag)
self.assertEqual(len(raw_volumes), len(tree))
for idx, child in enumerate(tree):
self._verify_volume(raw_volumes[idx], child)
class TestVolumeCreateRequestXMLDeserializer(test.TestCase):
def setUp(self):
super(TestVolumeCreateRequestXMLDeserializer, self).setUp()
self.deserializer = volumes.CreateDeserializer()
def test_minimal_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {"volume": {"size": "1", }, }
self.assertEqual(request['body'], expected)
def test_display_name(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
},
}
self.assertEqual(request['body'], expected)
def test_display_description(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
},
}
self.assertEqual(request['body'], expected)
def test_volume_type(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"display_name": "Volume-xml",
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
},
}
self.assertEqual(request['body'], expected)
def test_availability_zone(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
},
}
self.assertEqual(request['body'], expected)
def test_metadata(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
display_name="Volume-xml"
size="1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"display_name": "Volume-xml",
"size": "1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(request['body'], expected)
def test_full_volume(self):
self_request = """
<volume xmlns="http://docs.openstack.org/compute/api/v1.1"
size="1"
display_name="Volume-xml"
display_description="description"
volume_type="289da7f8-6440-407c-9fb4-7db01ec49164"
availability_zone="us-east1">
<metadata><meta key="Type">work</meta></metadata></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164",
"availability_zone": "us-east1",
"metadata": {
"Type": "work",
},
},
}
self.assertEqual(request['body'], expected)
def test_imageref(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
imageRef="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"imageRef": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_snapshot_id(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
snapshot_id="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"snapshot_id": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
def test_source_volid(self):
self_request = """
<volume xmlns="http://docs.openstack.org/volume/api/v1"
size="1"
display_name="Volume-xml"
display_description="description"
source_volid="4a90189d-d702-4c7c-87fc-6608c554d737"></volume>"""
request = self.deserializer.deserialize(self_request)
expected = {
"volume": {
"size": "1",
"display_name": "Volume-xml",
"display_description": "description",
"source_volid": "4a90189d-d702-4c7c-87fc-6608c554d737",
},
}
self.assertEqual(expected, request['body'])
class VolumesUnprocessableEntityTestCase(test.TestCase):
"""Tests of places we throw 422 Unprocessable Entity from."""
def setUp(self):
super(VolumesUnprocessableEntityTestCase, self).setUp()
self.ext_mgr = extensions.ExtensionManager()
self.ext_mgr.extensions = {}
self.controller = volumes.VolumeController(self.ext_mgr)
def _unprocessable_volume_create(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, body)
def test_create_no_body(self):
self._unprocessable_volume_create(body=None)
def test_create_missing_volume(self):
body = {'foo': {'a': 'b'}}
self._unprocessable_volume_create(body=body)
def test_create_malformed_entity(self):
body = {'volume': 'string'}
self._unprocessable_volume_create(body=body)
| apache-2.0 |
MaralAfris/wasp-summer-school-team6 | drone/bebop_controller/src/drone_goal_publisher.py | 2 | 4226 | #! /usr/bin/env python
import roslib
import rospy
from controller import Controller, ActionStatus
from geometry_msgs.msg import PoseStamped
from bebop_controller.msg import *
from actionlib import SimpleActionClient
from BebopActionServer import BebopActionServer
from actionlib_msgs.msg import GoalStatus
from geometry_msgs.msg import Pose, PoseArray, Point, Quaternion
import time
prev_x = 0.0
prev_y = 0.0
def init_action_clients():
global ac_takeoff, ac_movebase, ac_land
ac_takeoff = SimpleActionClient('BebopTakeOffAction', BebopTakeOffAction)
ac_takeoff.wait_for_server()
ac_movebase = SimpleActionClient('BebopMoveBaseAction', BebopMoveBaseAction)
ac_movebase.wait_for_server()
ac_land = SimpleActionClient('BebopLandAction', BebopLandAction)
ac_land.wait_for_server()
def takeoff():
global ac_takeoff
takeoff = BebopTakeOffGoal()
ac_takeoff.send_goal(takeoff)
ac_takeoff.wait_for_result()
success = (ac_takeoff.get_state() == GoalStatus.SUCCEEDED)
return success
def land():
global ac_land
land = BebopLandGoal()
ac_land.send_goal(land)
ac_land.wait_for_result()
success = (ac_land.get_state() == GoalStatus.SUCCEEDED)
return success
def drone_action(data):
global ac_movebase, ac_land, ac_takeoff
global prev_x, prev_y
global pub_completed
# Action types from planner
move = 0
deliver = 1
pickup = 2
handover = 3
land = 4
takeoff = 5
drone_start_x = -2.15
drone_start_y = -2.00
# Extract the goal of the action
x_coord = data.poses[0].position.x - drone_start_x
y_coord = data.poses[0].position.y - drone_start_y
# Extract the goal of the action
###x_coord = data.poses[0].position.x
###y_coord = data.poses[0].position.y
action_type = data.poses[0].position.z
actionId = data.poses[0].orientation.z
success = False
goal = BebopMoveBaseGoal()
goal.target_pose = PoseStamped()
goal.target_pose.header.frame_id = "odom"
goal.target_pose.header.stamp = rospy.Time.now()
if action_type == move:
print "Trying to move!"
goal.target_pose.pose.position.x = x_coord
goal.target_pose.pose.position.y = y_coord
ac_movebase.send_goal(goal)
ac_movebase.wait_for_result()
success = (ac_movebase.get_state() == GoalStatus.SUCCEEDED)
prev_x = x_coord
prev_y = y_coord
elif action_type == takeoff:
print "Trying to take off!"
takeoff = BebopTakeOffGoal()
ac_takeoff.send_goal(takeoff)
ac_takeoff.wait_for_result()
success = (ac_takeoff.get_state() == GoalStatus.SUCCEEDED)
elif action_type == land:
print "Trying to land!"
land = BebopLandGoal()
ac_land.send_goal(land)
ac_land.wait_for_result()
success = (ac_land.get_state() == GoalStatus.SUCCEEDED)
else:
print "Trying to perform a fake action!"
goal.target_pose.pose.position.x = prev_x
goal.target_pose.pose.position.y = prev_y
ac_movebase.send_goal(goal)
ac_movebase.wait_for_result()
success = (ac_movebase.get_state() == GoalStatus.SUCCEEDED)
time.sleep(5)
# Construct result message
newPoseArray = PoseArray()
newPoseArray.poses.append(Pose())
    newPoseArray.poses[0].orientation.z = actionId
if success:
print "Successfully completed an action"
print actionId
print action_type
        newPoseArray.poses[0].position.z = 0
pub_completed.publish(newPoseArray)
else:
        newPoseArray.poses[0].position.z = -1
pub_completed.publish(newPoseArray)
def start():
global pub_completed
try:
rospy.init_node('drone_goal_publisher', anonymous=False)
init_action_clients()
        # Assign publisher that publishes the index of the goal just accomplished
pub_completed = rospy.Publisher('/drone_goal_completed', PoseArray, queue_size=1)
rospy.Subscriber("/list_of_drone_goals", PoseArray, drone_action)
rospy.spin()
except rospy.ROSInterruptException:
print "Execution interrupted, quitting"
if __name__ == '__main__':
start()
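# Illustrative sketch (added; not part of the original node): the goal
# message shape drone_action() above expects on /list_of_drone_goals.
# position.z carries the action type (0 == move) and orientation.z the
# action id; the coordinates and id below are placeholders.
#
#   goals = PoseArray()
#   goals.poses.append(Pose(position=Point(1.0, 2.0, 0.0),
#                           orientation=Quaternion(0.0, 0.0, 42.0, 0.0)))
#   rospy.Publisher('/list_of_drone_goals', PoseArray,
#                   queue_size=1).publish(goals)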
| gpl-3.0 |
gitcoinco/web | app/kudos/router.py | 1 | 4159 | # -*- coding: utf-8 -*-
"""Define dashboard specific DRF API routes.
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import django_filters.rest_framework
from rest_framework import routers, serializers, viewsets
from .models import Token, Wallet
class TokenSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Token
fields = ('id', 'created_on', 'modified_on', 'name', 'description', 'image', 'rarity',
'price_finney', 'num_clones_allowed', 'num_clones_in_wild', 'owner_address', 'tags')
class WalletSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Wallet
fields = ('address', 'profile_id')
class WalletViewSet(viewsets.ModelViewSet):
queryset = Wallet.objects.all().order_by('-id')
serializer_class = WalletSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
def get_queryset(self):
param_keys = self.request.query_params.keys()
queryset = Wallet.objects.all().order_by('-id')
# Filter by address
if 'address' in param_keys:
queryset = queryset.filter(address=self.request.query_params.get('address'))
# Filter by profile_id
if 'profile_id' in param_keys:
queryset = queryset.filter(profile__id=self.request.query_params.get('profile_id'))
        # Filter by profile_handle
if 'profile_handle' in param_keys:
queryset = queryset.filter(profile__handle=self.request.query_params.get('profile_handle'))
return queryset
class TokenViewSet(viewsets.ModelViewSet):
queryset = Token.objects.all().order_by('-id')
serializer_class = TokenSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend,)
# filter_fields = ('name', 'description', 'image', 'rarity', 'price_finney', 'num_clones_allowed',
# 'num_clones_in_wild', 'owner_address', 'tags')
def get_queryset(self):
param_keys = self.request.query_params.keys()
queryset = Token.objects.all().order_by('-id')
# Filter by owner_address
if 'owner_address' in param_keys:
queryset = queryset.filter(owner_address__iexact=self.request.query_params.get('owner_address'))
# Filter by name
if 'name' in param_keys:
queryset = queryset.filter(name__iexact=self.request.query_params.get('name'))
# Filter by rarity
if 'rarity' in param_keys:
queryset = queryset.filter(rarity__iexact=self.request.query_params.get('rarity'))
# Filter by price
if 'price_finney' in param_keys:
queryset = queryset.filter(price_finney__iexact=self.request.query_params.get('price_finney'))
# Filter by num_clones_allowed
if 'num_clones_allowed' in param_keys:
queryset = queryset.filter(num_clones_allowed__iexact=self.request.query_params.get('num_clones_allowed'))
# Filter by num_clones_in_wild
if 'num_clones_in_wild' in param_keys:
queryset = queryset.filter(num_clones_in_wild__iexact=self.request.query_params.get('num_clones_in_wild'))
# Filter by tags
if 'tags' in param_keys:
queryset = queryset.filter(tags__in=self.request.query_params.get('tags'))
return queryset
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'kudos', TokenViewSet)
router.register(r'wallet', WalletViewSet)
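# Illustrative wiring sketch (added; not part of this module). A project
# urls.py would typically expose these routes as follows (the 'api/'
# prefix is an assumption):
#
#   from django.urls import include, path
#   urlpatterns = [path('api/', include(router.urls))]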
| agpl-3.0 |