"""
Field classes.
"""
from __future__ import unicode_literals
import copy
import datetime
import itertools
import os
import re
import sys
import uuid
from decimal import Decimal, DecimalException
from io import BytesIO
from django.core import validators
from django.core.exceptions import ValidationError
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES # NOQA
from django.forms.boundfield import BoundField
from django.forms.utils import from_current_timezone, to_current_timezone
from django.forms.widgets import (
FILE_INPUT_CONTRADICTION, CheckboxInput, ClearableFileInput, DateInput,
DateTimeInput, EmailInput, HiddenInput, MultipleHiddenInput,
NullBooleanSelect, NumberInput, Select, SelectMultiple,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, TextInput, TimeInput,
URLInput,
)
from django.utils import formats, six
from django.utils.dateparse import parse_duration
from django.utils.duration import duration_string
from django.utils.encoding import force_str, force_text
from django.utils.ipv6 import clean_ipv6_address
from django.utils.six.moves.urllib.parse import urlsplit, urlunsplit
from django.utils.translation import ugettext_lazy as _, ungettext_lazy
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField', 'DurationField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'GenericIPAddressField', 'FilePathField',
'SlugField', 'TypedChoiceField', 'TypedMultipleChoiceField', 'UUIDField',
)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
# Add an 'invalid' entry to default_error_messages if you want a specific
# field error message not raised by the field validators.
default_error_messages = {
'required': _('This field is required.'),
}
empty_values = list(validators.EMPTY_VALUES)
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text='', error_messages=None, show_hidden_initial=False,
validators=(), localize=False, disabled=False, label_suffix=None):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies whether a hidden widget with
# the initial value should be rendered after the visible widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
# disabled -- Boolean that specifies whether the field is disabled, that
# is its widget is shown in the form but not editable.
# label_suffix -- Suffix to be added to the label. Overrides
# form's label_suffix.
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
self.help_text = help_text
self.disabled = disabled
self.label_suffix = label_suffix
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = list(itertools.chain(self.default_validators, validators))
super(Field, self).__init__()
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in self.empty_values and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
if self.disabled:
return initial
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def has_changed(self, initial, data):
"""
Return True if data differs from initial.
"""
try:
data = self.to_python(data)
if hasattr(self, '_coerce'):
return self._coerce(data) != self._coerce(initial)
except ValidationError:
return True
# For purposes of seeing whether something has changed, None is
# the same as an empty string, if the data or initial value we get
# is None, replace it with ''.
initial_value = initial if initial is not None else ''
data_value = data if data is not None else ''
return initial_value != data_value
def get_bound_field(self, form, field_name):
"""
Return a BoundField instance that will be used when accessing the form
field in a template.
"""
return BoundField(form, self, field_name)
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
result.validators = self.validators[:]
return result
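# Illustrative sketch (not part of the original module) of the cleaning
# pipeline defined above: clean() runs to_python(), then validate(), then
# run_validators(), raising ValidationError on the first failure.
#
#   >>> f = IntegerField(min_value=1)
#   >>> f.clean('5')
#   5
#   >>> f.clean('')  # empty value + required=True -> 'required' error
#   Traceback (most recent call last):
#   ...
#   ValidationError: ['This field is required.']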
class CharField(Field):
def __init__(self, max_length=None, min_length=None, strip=True, empty_value='', *args, **kwargs):
self.max_length = max_length
self.min_length = min_length
self.strip = strip
self.empty_value = empty_value
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(int(min_length)))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(int(max_length)))
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return self.empty_value
value = force_text(value)
if self.strip:
value = value.strip()
return value
def widget_attrs(self, widget):
attrs = super(CharField, self).widget_attrs(widget)
if self.max_length is not None and not widget.is_hidden:
# The HTML attribute is maxlength, not max_length.
attrs['maxlength'] = str(self.max_length)
if self.min_length is not None and not widget.is_hidden:
# The HTML attribute is minlength, not min_length.
attrs['minlength'] = str(self.min_length)
return attrs
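# Example (a sketch, not part of the original module): widget_attrs() feeds
# the HTML validation attributes rendered on the input element.
#
#   >>> f = CharField(max_length=10, min_length=2)
#   >>> sorted(f.widget.attrs.items())
#   [('maxlength', '10'), ('minlength', '2')]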
class IntegerField(Field):
widget = NumberInput
default_error_messages = {
'invalid': _('Enter a whole number.'),
}
re_decimal = re.compile(r'\.0*\s*$')
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
if kwargs.get('localize') and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault('widget', super(IntegerField, self).widget)
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub('', force_text(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(IntegerField, self).widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs['min'] = self.min_value
if self.max_value is not None:
attrs['max'] = self.max_value
return attrs
class FloatField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(FloatField, self).validate(value)
# Check for NaN (which is the only thing not equal to itself) and +/- infinity
if value != value or value in (Decimal('Inf'), Decimal('-Inf')):
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def widget_attrs(self, widget):
attrs = super(FloatField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
attrs.setdefault('step', 'any')
return attrs
class DecimalField(IntegerField):
default_error_messages = {
'invalid': _('Enter a number.'),
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
super(DecimalField, self).__init__(max_value, min_value, *args, **kwargs)
self.validators.append(validators.DecimalValidator(max_digits, decimal_places))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = force_text(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in self.empty_values:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'], code='invalid')
def widget_attrs(self, widget):
attrs = super(DecimalField, self).widget_attrs(widget)
if isinstance(widget, NumberInput) and 'step' not in widget.attrs:
if self.decimal_places is not None:
# Use exponential notation for small values since they might
# be parsed as 0 otherwise. ref #20765
step = str(Decimal('1') / 10 ** self.decimal_places).lower()
else:
step = 'any'
attrs.setdefault('step', step)
return attrs
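# Sketch of the step computation above: two decimal places give step='0.01',
# while many places fall back to exponential notation (ref #20765).
#
#   >>> str(Decimal('1') / 10 ** 2)
#   '0.01'
#   >>> str(Decimal('1') / 10 ** 7).lower()
#   '1e-7'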
class BaseTemporalField(Field):
def __init__(self, input_formats=None, *args, **kwargs):
super(BaseTemporalField, self).__init__(*args, **kwargs)
if input_formats is not None:
self.input_formats = input_formats
def to_python(self, value):
# Try to coerce the value to unicode.
unicode_value = force_text(value, strings_only=True)
if isinstance(unicode_value, six.text_type):
value = unicode_value.strip()
# If unicode, try to strptime against each input format.
if isinstance(value, six.text_type):
for format in self.input_formats:
try:
return self.strptime(value, format)
except (ValueError, TypeError):
continue
raise ValidationError(self.error_messages['invalid'], code='invalid')
def strptime(self, value, format):
raise NotImplementedError('Subclasses must define this method.')
class DateField(BaseTemporalField):
widget = DateInput
input_formats = formats.get_format_lazy('DATE_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date.'),
}
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
return super(DateField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).date()
class TimeField(BaseTemporalField):
widget = TimeInput
input_formats = formats.get_format_lazy('TIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid time.')
}
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.time):
return value
return super(TimeField, self).to_python(value)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format).time()
class DateTimeField(BaseTemporalField):
widget = DateTimeInput
input_formats = formats.get_format_lazy('DATETIME_INPUT_FORMATS')
default_error_messages = {
'invalid': _('Enter a valid date/time.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.datetime):
value = to_current_timezone(value)
return value
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in self.empty_values:
return None
if isinstance(value, datetime.datetime):
return from_current_timezone(value)
if isinstance(value, datetime.date):
result = datetime.datetime(value.year, value.month, value.day)
return from_current_timezone(result)
result = super(DateTimeField, self).to_python(value)
return from_current_timezone(result)
def strptime(self, value, format):
return datetime.datetime.strptime(force_str(value), format)
class DurationField(Field):
default_error_messages = {
'invalid': _('Enter a valid duration.'),
}
def prepare_value(self, value):
if isinstance(value, datetime.timedelta):
return duration_string(value)
return value
def to_python(self, value):
if value in self.empty_values:
return None
if isinstance(value, datetime.timedelta):
return value
value = parse_duration(force_text(value))
if value is None:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
kwargs.setdefault('strip', False)
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
self._set_regex(regex)
def _get_regex(self):
return self._regex
def _set_regex(self, regex):
if isinstance(regex, six.string_types):
regex = re.compile(regex, re.UNICODE)
self._regex = regex
if hasattr(self, '_regex_validator') and self._regex_validator in self.validators:
self.validators.remove(self._regex_validator)
self._regex_validator = validators.RegexValidator(regex=regex)
self.validators.append(self._regex_validator)
regex = property(_get_regex, _set_regex)
class EmailField(CharField):
widget = EmailInput
default_validators = [validators.validate_email]
def __init__(self, *args, **kwargs):
super(EmailField, self).__init__(*args, strip=True, **kwargs)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _("No file was submitted. Check the encoding type on the form."),
'missing': _("No file was submitted."),
'empty': _("The submitted file is empty."),
'max_length': ungettext_lazy(
'Ensure this filename has at most %(max)d character (it has %(length)d).',
'Ensure this filename has at most %(max)d characters (it has %(length)d).',
'max'),
'contradiction': _('Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
self.allow_empty_file = kwargs.pop('allow_empty_file', False)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in self.empty_values:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if self.max_length is not None and len(file_name) > self.max_length:
params = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'], code='max_length', params=params)
if not file_name:
raise ValidationError(self.error_messages['invalid'], code='invalid')
if not self.allow_empty_file and not file_size:
raise ValidationError(self.error_messages['empty'], code='empty')
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'], code='contradiction')
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in self.empty_values; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
def has_changed(self, initial, data):
if data is None:
return False
return True
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(
"Upload a valid image. The file you uploaded was either not an "
"image or a corrupted image."
),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
from PIL import Image
# We need to get a file object for Pillow. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = BytesIO(data.read())
else:
file = BytesIO(data['content'])
try:
# load() could spot a truncated JPEG, but it loads the entire
# image in memory, which is a DoS vector. See #3848 and #18520.
image = Image.open(file)
# verify() must be called immediately after the constructor.
image.verify()
# Annotating so subclasses can reuse it for their own validation
f.image = image
# Pillow doesn't detect the MIME type of all formats. In those
# cases, content_type will be None.
f.content_type = Image.MIME.get(image.format)
except Exception:
# Pillow doesn't recognize it as an image.
six.reraise(ValidationError, ValidationError(
self.error_messages['invalid_image'],
code='invalid_image',
), sys.exc_info()[2])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
widget = URLInput
default_error_messages = {
'invalid': _('Enter a valid URL.'),
}
default_validators = [validators.URLValidator()]
def __init__(self, *args, **kwargs):
super(URLField, self).__init__(*args, strip=True, **kwargs)
def to_python(self, value):
def split_url(url):
"""
Returns a list of url parts via ``urlparse.urlsplit`` (or raises a
``ValidationError`` for certain malformed URLs).
"""
try:
return list(urlsplit(url))
except ValueError:
# urlparse.urlsplit can raise a ValueError with some
# misformatted URLs.
raise ValidationError(self.error_messages['invalid'], code='invalid')
value = super(URLField, self).to_python(value)
if value:
url_fields = split_url(value)
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
url_fields = split_url(urlunsplit(url_fields))
value = urlunsplit(url_fields)
return value
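# Normalization sketch (not part of the original module): a scheme-less URL
# is promoted to http:// and the domain is recovered from the path segment.
#
#   >>> URLField().clean('example.com/path')
#   'http://example.com/path'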
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if isinstance(value, six.string_types) and value.lower() in ('false', '0'):
value = False
else:
value = bool(value)
return super(BooleanField, self).to_python(value)
def validate(self, value):
if not value and self.required:
raise ValidationError(self.error_messages['required'], code='required')
def has_changed(self, initial, data):
# Sometimes data or initial may be a string equivalent of a boolean
# so we should run it through to_python first to get a boolean value
return self.to_python(initial) != self.to_python(data)
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the strings 'True' and 'False', which is what a
hidden field will submit for True and False; for 'true' and 'false',
which are likely to be returned by JavaScript serializations of forms;
and for '1' and '0', which is what a RadioField will submit. Unlike
BooleanField, we need to explicitly check for True because we are not
using the bool() function.
"""
if value in (True, 'True', 'true', '1'):
return True
elif value in (False, 'False', 'false', '0'):
return False
else:
return None
def validate(self, value):
pass
class CallableChoiceIterator(object):
def __init__(self, choices_func):
self.choices_func = choices_func
def __iter__(self):
for e in self.choices_func():
yield e
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
super(ChoiceField, self).__init__(
required=required, widget=widget, label=label, initial=initial,
help_text=help_text, *args, **kwargs
)
self.choices = choices
def __deepcopy__(self, memo):
result = super(ChoiceField, self).__deepcopy__(memo)
result._choices = copy.deepcopy(self._choices, memo)
return result
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
if callable(value):
value = CallableChoiceIterator(value)
else:
value = list(value)
self._choices = self.widget.choices = value
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in self.empty_values:
return ''
return force_text(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
text_value = force_text(value)
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == k2 or text_value == force_text(k2):
return True
else:
if value == k or text_value == force_text(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validate that the value can be coerced to the right type (if not empty).
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': value},
)
return value
def clean(self, value):
value = super(TypedChoiceField, self).clean(value)
return self._coerce(value)
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _('Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _('Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'], code='invalid_list')
return [force_text(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'], code='required')
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': val},
)
def has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = set(force_text(value) for value in initial)
data_set = set(force_text(value) for value in data)
return data_set != initial_set
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def _coerce(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
if value == self.empty_value or value in self.empty_values:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(
self.error_messages['invalid_choice'],
code='invalid_choice',
params={'value': choice},
)
return new_value
def clean(self, value):
value = super(TypedMultipleChoiceField, self).clean(value)
return self._coerce(value)
def validate(self, value):
if value != self.empty_value:
super(TypedMultipleChoiceField, self).validate(value)
elif self.required:
raise ValidationError(self.error_messages['required'], code='required')
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _('Enter a list of values.'),
'incomplete': _('Enter a complete value.'),
}
def __init__(self, fields=(), *args, **kwargs):
self.require_all_fields = kwargs.pop('require_all_fields', True)
super(MultiValueField, self).__init__(*args, **kwargs)
for f in fields:
f.error_messages.setdefault('incomplete',
self.error_messages['incomplete'])
if self.require_all_fields:
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not
# by those individual fields.
f.required = False
self.fields = fields
def __deepcopy__(self, memo):
result = super(MultiValueField, self).__deepcopy__(memo)
result.fields = tuple(x.__deepcopy__(memo) for x in self.fields)
return result
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in self.empty_values]:
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'], code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if field_value in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'], code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
def has_changed(self, initial, data):
if initial is None:
initial = ['' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
for field, initial, data in zip(self.fields, initial, data):
try:
initial = field.to_python(initial)
except ValidationError:
return True
if field.has_changed(initial, data):
return True
return False
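# Sketch of the compress() contract (illustrative; assumes default input
# formats and USE_TZ=False): SplitDateTimeField below combines a cleaned
# date and time into a single datetime.
#
#   >>> SplitDateTimeField().clean(['2016-01-01', '10:30'])
#   datetime.datetime(2016, 1, 1, 10, 30)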
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, allow_files=True,
allow_folders=False, required=True, widget=None, label=None,
initial=None, help_text='', *args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
self.allow_files, self.allow_folders = allow_files, allow_folders
super(FilePathField, self).__init__(
choices=(), required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs
)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
if self.allow_files:
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
if self.allow_folders:
for f in dirs:
if f == '__pycache__':
continue
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
if f == '__pycache__':
continue
full_file = os.path.join(self.path, f)
if (((self.allow_files and os.path.isfile(full_file)) or
(self.allow_folders and os.path.isdir(full_file))) and
(self.match is None or self.match_re.search(f))):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _('Enter a valid date.'),
'invalid_time': _('Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in self.empty_values:
raise ValidationError(self.error_messages['invalid_date'], code='invalid_date')
if data_list[1] in self.empty_values:
raise ValidationError(self.error_messages['invalid_time'], code='invalid_time')
result = datetime.datetime.combine(*data_list)
return from_current_timezone(result)
return None
class GenericIPAddressField(CharField):
def __init__(self, protocol='both', unpack_ipv4=False, *args, **kwargs):
self.unpack_ipv4 = unpack_ipv4
self.default_validators = validators.ip_address_validators(protocol, unpack_ipv4)[0]
super(GenericIPAddressField, self).__init__(*args, **kwargs)
def to_python(self, value):
if value in self.empty_values:
return ''
value = value.strip()
if value and ':' in value:
return clean_ipv6_address(value, self.unpack_ipv4)
return value
class SlugField(CharField):
default_validators = [validators.validate_slug]
def __init__(self, *args, **kwargs):
self.allow_unicode = kwargs.pop('allow_unicode', False)
if self.allow_unicode:
self.default_validators = [validators.validate_unicode_slug]
super(SlugField, self).__init__(*args, **kwargs)
class UUIDField(CharField):
default_error_messages = {
'invalid': _('Enter a valid UUID.'),
}
def prepare_value(self, value):
if isinstance(value, uuid.UUID):
return value.hex
return value
def to_python(self, value):
value = super(UUIDField, self).to_python(value)
if value in self.empty_values:
return None
if not isinstance(value, uuid.UUID):
try:
value = uuid.UUID(value)
except ValueError:
raise ValidationError(self.error_messages['invalid'], code='invalid')
return value
#! /usr/bin/env python3
# This was forked from https://github.com/rustyrussell/lightning-payencode/tree/acc16ec13a3fa1dc16c07af6ec67c261bd8aff23
import re
import time
from hashlib import sha256
from binascii import hexlify
from decimal import Decimal
from typing import Optional, TYPE_CHECKING, Type
import random
import bitstring
from .bitcoin import hash160_to_b58_address, b58_address_to_hash160, TOTAL_COIN_SUPPLY_LIMIT_IN_BTC
from .segwit_addr import bech32_encode, bech32_decode, CHARSET
from . import segwit_addr
from . import constants
from .constants import AbstractNet
from . import ecc
from .bitcoin import COIN
if TYPE_CHECKING:
from .lnutil import LnFeatures
class LnInvoiceException(Exception): pass
class LnDecodeException(LnInvoiceException): pass
class LnEncodeException(LnInvoiceException): pass
# BOLT #11:
#
# A writer MUST encode `amount` as a positive decimal integer with no
# leading zeroes, SHOULD use the shortest representation possible.
def shorten_amount(amount):
""" Given an amount in bitcoin, shorten it
"""
# Convert to pico initially
amount = int(amount * 10**12)
units = ['p', 'n', 'u', 'm']
for unit in units:
if amount % 1000 == 0:
amount //= 1000
else:
break
else:
unit = ''
return str(amount) + unit
def unshorten_amount(amount) -> Decimal:
""" Given a shortened amount, convert it into a decimal
"""
# BOLT #11:
# The following `multiplier` letters are defined:
#
#* `m` (milli): multiply by 0.001
#* `u` (micro): multiply by 0.000001
#* `n` (nano): multiply by 0.000000001
#* `p` (pico): multiply by 0.000000000001
units = {
'p': 10**12,
'n': 10**9,
'u': 10**6,
'm': 10**3,
}
unit = str(amount)[-1]
# BOLT #11:
# A reader SHOULD fail if `amount` contains a non-digit, or is followed by
# anything except a `multiplier` in the table above.
if not re.fullmatch("\\d+[pnum]?", str(amount)):
raise LnDecodeException("Invalid amount '{}'".format(amount))
if unit in units.keys():
return Decimal(amount[:-1]) / units[unit]
else:
return Decimal(amount)
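# Round-trip sketch for the two helpers above (illustrative):
#
#   >>> shorten_amount(Decimal('0.00001'))  # 10 micro-BTC
#   '10u'
#   >>> unshorten_amount('10u')
#   Decimal('0.00001')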
_INT_TO_BINSTR = {a: '0' * (5-len(bin(a)[2:])) + bin(a)[2:] for a in range(32)}
# Bech32 spits out array of 5-bit values. Shim here.
def u5_to_bitarray(arr):
b = ''.join(_INT_TO_BINSTR[a] for a in arr)
return bitstring.BitArray(bin=b)
def bitarray_to_u5(barr):
assert barr.len % 5 == 0
ret = []
s = bitstring.ConstBitStream(barr)
while s.pos != s.len:
ret.append(s.read(5).uint)
return ret
def encode_fallback(fallback: str, net: Type[AbstractNet]):
""" Encode all supported fallback addresses.
"""
wver, wprog_ints = segwit_addr.decode_segwit_address(net.SEGWIT_HRP, fallback)
if wver is not None:
wprog = bytes(wprog_ints)
else:
addrtype, addr = b58_address_to_hash160(fallback)
if addrtype == net.ADDRTYPE_P2PKH:
wver = 17
elif addrtype == net.ADDRTYPE_P2SH:
wver = 18
else:
raise LnEncodeException(f"Unknown address type {addrtype} for {net}")
wprog = addr
return tagged('f', bitstring.pack("uint:5", wver) + wprog)
def parse_fallback(fallback, net: Type[AbstractNet]):
wver = fallback[0:5].uint
if wver == 17:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2PKH)
elif wver == 18:
addr = hash160_to_b58_address(fallback[5:].tobytes(), net.ADDRTYPE_P2SH)
elif wver <= 16:
witprog = fallback[5:] # cut witver
witprog = witprog[:len(witprog) // 8 * 8] # can only be full bytes
witprog = witprog.tobytes()
addr = segwit_addr.encode_segwit_address(net.SEGWIT_HRP, wver, witprog)
else:
return None
return addr
BOLT11_HRP_INV_DICT = {net.BOLT11_HRP: net for net in constants.NETS_LIST}
# Tagged field containing BitArray
def tagged(char, l):
# Tagged fields need to be zero-padded to 5 bits.
while l.len % 5 != 0:
l.append('0b0')
return bitstring.pack("uint:5, uint:5, uint:5",
CHARSET.find(char),
(l.len // 5) // 32, (l.len // 5) % 32) + l
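# Layout note (illustrative): a tagged field is
#   [tag: 1 x u5][data_length: 2 x u5, big-endian base 32][payload, 0-padded to 5 bits]
# e.g. a 52-group payment hash ('p') encodes data_length 52 as 1 * 32 + 20.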
# Tagged field containing bytes
def tagged_bytes(char, l):
return tagged(char, bitstring.BitArray(l))
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
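# Example (illustrative): an 'intbe:64' expiry of 3600 needs 12 significant
# bits, so it trims to 15 bits, the smallest multiple of 5 whose leading
# 5-bit block is non-zero.
#
#   >>> trim_to_min_length(bitstring.pack('intbe:64', 3600)).len
#   15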
# Discard trailing bits, convert to bytes.
def trim_to_bytes(barr):
# tobytes() zero-pads the final partial byte; drop it, as it is only padding.
b = barr.tobytes()
if barr.len % 8 != 0:
return b[:-1]
return b
# Try to pull out tagged data: returns tag, tagged data and remainder.
def pull_tagged(stream):
tag = stream.read(5).uint
length = stream.read(5).uint * 32 + stream.read(5).uint
return (CHARSET[tag], stream.read(length * 5), stream)
def lnencode(addr: 'LnAddr', privkey) -> str:
if addr.amount:
amount = addr.net.BOLT11_HRP + shorten_amount(addr.amount)
else:
amount = addr.net.BOLT11_HRP if addr.net else ''
hrp = 'ln' + amount
# Start with the timestamp
data = bitstring.pack('uint:35', addr.date)
tags_set = set()
# Payment hash
data += tagged_bytes('p', addr.paymenthash)
tags_set.add('p')
if addr.payment_secret is not None:
data += tagged_bytes('s', addr.payment_secret)
tags_set.add('s')
for k, v in addr.tags:
# BOLT #11:
#
# A writer MUST NOT include more than one `d`, `h`, `n` or `x` fields,
if k in ('d', 'h', 'n', 'x', 'p', 's'):
if k in tags_set:
raise LnEncodeException("Duplicate '{}' tag".format(k))
if k == 'r':
route = bitstring.BitArray()
for step in v:
pubkey, channel, feebase, feerate, cltv = step
route.append(bitstring.BitArray(pubkey) + bitstring.BitArray(channel) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv))
data += tagged('r', route)
elif k == 't':
pubkey, feebase, feerate, cltv = v
route = bitstring.BitArray(pubkey) + bitstring.pack('intbe:32', feebase) + bitstring.pack('intbe:32', feerate) + bitstring.pack('intbe:16', cltv)
data += tagged('t', route)
elif k == 'f':
data += encode_fallback(v, addr.net)
elif k == 'd':
# truncate to max length: 1024*5 bits = 639 bytes
data += tagged_bytes('d', v.encode()[0:639])
elif k == 'x':
expirybits = bitstring.pack('intbe:64', v)
expirybits = trim_to_min_length(expirybits)
data += tagged('x', expirybits)
elif k == 'h':
data += tagged_bytes('h', sha256(v.encode('utf-8')).digest())
elif k == 'n':
data += tagged_bytes('n', v)
elif k == 'c':
finalcltvbits = bitstring.pack('intbe:64', v)
finalcltvbits = trim_to_min_length(finalcltvbits)
data += tagged('c', finalcltvbits)
elif k == '9':
if v == 0:
continue
feature_bits = bitstring.BitArray(uint=v, length=v.bit_length())
feature_bits = trim_to_min_length(feature_bits)
data += tagged('9', feature_bits)
else:
# FIXME: Support unknown tags?
raise LnEncodeException("Unknown tag {}".format(k))
tags_set.add(k)
# BOLT #11:
#
# A writer MUST include either a `d` or `h` field, and MUST NOT include
# both.
if 'd' in tags_set and 'h' in tags_set:
raise ValueError("Cannot include both 'd' and 'h'")
if 'd' not in tags_set and 'h' not in tags_set:
raise ValueError("Must include either 'd' or 'h'")
# We actually sign the hrp, then data (padded to 8 bits with zeroes).
msg = hrp.encode("ascii") + data.tobytes()
privkey = ecc.ECPrivkey(privkey)
sig = privkey.sign_message(msg, is_compressed=False, algo=lambda x:sha256(x).digest())
recovery_flag = bytes([sig[0] - 27])
sig = bytes(sig[1:]) + recovery_flag
data += sig
return bech32_encode(segwit_addr.Encoding.BECH32, hrp, bitarray_to_u5(data))
class LnAddr(object):
def __init__(self, *, paymenthash: bytes = None, amount=None, net: Type[AbstractNet] = None, tags=None, date=None,
payment_secret: bytes = None):
self.date = int(time.time()) if not date else int(date)
self.tags = [] if not tags else tags
self.unknown_tags = []
self.paymenthash = paymenthash
self.payment_secret = payment_secret
self.signature = None
self.pubkey = None
self.net = constants.net if net is None else net # type: Type[AbstractNet]
self._amount = amount # type: Optional[Decimal] # in bitcoins
self._min_final_cltv_expiry = 18
@property
def amount(self) -> Optional[Decimal]:
return self._amount
@amount.setter
def amount(self, value):
if not (isinstance(value, Decimal) or value is None):
raise LnInvoiceException(f"amount must be Decimal or None, not {value!r}")
if value is None:
self._amount = None
return
assert isinstance(value, Decimal)
if value.is_nan() or not (0 <= value <= TOTAL_COIN_SUPPLY_LIMIT_IN_BTC):
raise LnInvoiceException(f"amount is out-of-bounds: {value!r} BTC")
if value * 10**12 % 10:
# max resolution is millisatoshi
raise LnInvoiceException(f"Cannot encode {value!r}: too many decimal places")
self._amount = value
def get_amount_sat(self) -> Optional[Decimal]:
# Note: the result may have sub-satoshi (msat) resolution.
if self.amount is None:
return None
return self.amount * COIN
def get_routing_info(self, tag):
# note: tag will be 't' for trampoline
r_tags = list(filter(lambda x: x[0] == tag, self.tags))
# strip the tag type, it's implicitly 'r' now
r_tags = list(map(lambda x: x[1], r_tags))
# if there are multiple hints, we will use the first one that works,
# from a random permutation
random.shuffle(r_tags)
return r_tags
def get_amount_msat(self) -> Optional[int]:
if self.amount is None:
return None
return int(self.amount * COIN * 1000)
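# Unit sketch (illustrative): _amount is in BTC and COIN is 10**8, so
# Decimal('0.00001') BTC -> 1000 sat -> 1000000 msat.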
def get_features(self) -> 'LnFeatures':
from .lnutil import LnFeatures
return LnFeatures(self.get_tag('9') or 0)
def __str__(self):
return "LnAddr[{}, amount={}{} tags=[{}]]".format(
hexlify(self.pubkey.serialize()).decode('utf-8') if self.pubkey else None,
self.amount, self.net.BOLT11_HRP,
", ".join([k + '=' + str(v) for k, v in self.tags])
)
def get_min_final_cltv_expiry(self) -> int:
return self._min_final_cltv_expiry
def get_tag(self, tag):
for k, v in self.tags:
if k == tag:
return v
return None
def get_description(self) -> str:
return self.get_tag('d') or ''
def get_expiry(self) -> int:
exp = self.get_tag('x')
if exp is None:
exp = 3600
return int(exp)
def is_expired(self) -> bool:
now = time.time()
# BOLT-11 does not specify what an expiration of '0' means.
# We treat it as 0 seconds here (instead of never).
return now > self.get_expiry() + self.date
class SerializableKey:
def __init__(self, pubkey):
self.pubkey = pubkey
def serialize(self):
return self.pubkey.get_public_key_bytes(True)
def lndecode(invoice: str, *, verbose=False, net=None) -> LnAddr:
if net is None:
net = constants.net
decoded_bech32 = bech32_decode(invoice, ignore_long_length=True)
hrp = decoded_bech32.hrp
data = decoded_bech32.data
if decoded_bech32.encoding is None:
raise LnDecodeException("Bad bech32 checksum")
if decoded_bech32.encoding != segwit_addr.Encoding.BECH32:
raise LnDecodeException("Bad bech32 encoding: must be using vanilla BECH32")
# BOLT #11:
#
# A reader MUST fail if it does not understand the `prefix`.
if not hrp.startswith('ln'):
raise LnDecodeException("Does not start with ln")
if not hrp[2:].startswith(net.BOLT11_HRP):
raise LnDecodeException(f"Wrong Lightning invoice HRP {hrp[2:]}, should be {net.BOLT11_HRP}")
data = u5_to_bitarray(data)
# Final signature 65 bytes, split it off.
if len(data) < 65*8:
raise LnDecodeException("Too short to contain signature")
sigdecoded = data[-65*8:].tobytes()
data = bitstring.ConstBitStream(data[:-65*8])
addr = LnAddr()
addr.pubkey = None
m = re.search("[^\\d]+", hrp[2:])
if m:
addr.net = BOLT11_HRP_INV_DICT[m.group(0)]
amountstr = hrp[2+m.end():]
# BOLT #11:
#
# A reader SHOULD indicate if amount is unspecified, otherwise it MUST
# multiply `amount` by the `multiplier` value (if any) to derive the
# amount required for payment.
if amountstr != '':
addr.amount = unshorten_amount(amountstr)
addr.date = data.read(35).uint
while data.pos != data.len:
tag, tagdata, data = pull_tagged(data)
# BOLT #11:
#
# A reader MUST skip over unknown fields, an `f` field with unknown
# `version`, or a `p`, `h`, or `n` field which does not have
# `data_length` 52, 52, or 53 respectively.
data_length = len(tagdata) // 5
if tag == 'r':
# BOLT #11:
#
# * `r` (3): `data_length` variable. One or more entries
# containing extra routing information for a private route;
# there may be more than one `r` field, too.
# * `pubkey` (264 bits)
# * `short_channel_id` (64 bits)
# * `feebase` (32 bits, big-endian)
# * `feerate` (32 bits, big-endian)
# * `cltv_expiry_delta` (16 bits, big-endian)
route=[]
s = bitstring.ConstBitStream(tagdata)
while s.pos + 264 + 64 + 32 + 32 + 16 < s.len:
route.append((s.read(264).tobytes(),
s.read(64).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe))
addr.tags.append(('r',route))
elif tag == 't':
s = bitstring.ConstBitStream(tagdata)
e = (s.read(264).tobytes(),
s.read(32).uintbe,
s.read(32).uintbe,
s.read(16).uintbe)
addr.tags.append(('t', e))
elif tag == 'f':
fallback = parse_fallback(tagdata, addr.net)
if fallback:
addr.tags.append(('f', fallback))
else:
# Incorrect version.
addr.unknown_tags.append((tag, tagdata))
continue
elif tag == 'd':
addr.tags.append(('d', trim_to_bytes(tagdata).decode('utf-8')))
elif tag == 'h':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.tags.append(('h', trim_to_bytes(tagdata)))
elif tag == 'x':
addr.tags.append(('x', tagdata.uint))
elif tag == 'p':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.paymenthash = trim_to_bytes(tagdata)
elif tag == 's':
if data_length != 52:
addr.unknown_tags.append((tag, tagdata))
continue
addr.payment_secret = trim_to_bytes(tagdata)
elif tag == 'n':
if data_length != 53:
addr.unknown_tags.append((tag, tagdata))
continue
pubkeybytes = trim_to_bytes(tagdata)
addr.pubkey = pubkeybytes
elif tag == 'c':
addr._min_final_cltv_expiry = tagdata.uint
elif tag == '9':
features = tagdata.uint
addr.tags.append(('9', features))
from .lnutil import validate_features
validate_features(features)
else:
addr.unknown_tags.append((tag, tagdata))
if verbose:
print('hex of signature data (32 byte r, 32 byte s): {}'
.format(hexlify(sigdecoded[0:64])))
print('recovery flag: {}'.format(sigdecoded[64]))
print('hex of data for signing: {}'
.format(hexlify(hrp.encode("ascii") + data.tobytes())))
print('SHA256 of above: {}'.format(sha256(hrp.encode("ascii") + data.tobytes()).hexdigest()))
# BOLT #11:
#
# A reader MUST check that the `signature` is valid (see the `n` tagged
# field specified below).
addr.signature = sigdecoded[:65]
hrp_hash = sha256(hrp.encode("ascii") + data.tobytes()).digest()
if addr.pubkey: # Specified by `n`
# BOLT #11:
#
# A reader MUST use the `n` field to validate the signature instead of
# performing signature recovery if a valid `n` field is provided.
if not ecc.ECPubkey(addr.pubkey).verify_message_hash(sigdecoded[:64], hrp_hash):
raise LnDecodeException("bad signature")
pubkey_copy = addr.pubkey
class WrappedBytesKey:
serialize = lambda: pubkey_copy
addr.pubkey = WrappedBytesKey
else: # Recover pubkey from signature.
addr.pubkey = SerializableKey(ecc.ECPubkey.from_sig_string(sigdecoded[:64], sigdecoded[64], hrp_hash))
return addr
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Field handler classes.
The field handlers are meant to parse information from, or perform some other
generic action for, a specific field type for the build_api script.
"""
from __future__ import print_function
import contextlib
import functools
import os
import shutil
import sys
from google.protobuf import message as protobuf_message
from chromite.api.controller import controller_util
from chromite.api.gen.chromiumos import common_pb2
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
class Error(Exception):
"""Base error class for the module."""
class InvalidResultPathError(Error):
"""Result path is invalid."""
class ChrootHandler(object):
"""Translate a Chroot message to chroot enter arguments and env."""
def __init__(self, clear_field):
self.clear_field = clear_field
def handle(self, message):
"""Parse a message for a chroot field."""
# Find the Chroot field. Search for the field by type to prevent it being
# tied to a naming convention.
for descriptor in message.DESCRIPTOR.fields:
field = getattr(message, descriptor.name)
if isinstance(field, common_pb2.Chroot):
chroot = field
if self.clear_field:
message.ClearField(descriptor.name)
return self.parse_chroot(chroot)
return None
def parse_chroot(self, chroot_message):
"""Parse a Chroot message instance."""
return controller_util.ParseChroot(chroot_message)
def handle_chroot(message, clear_field=True):
"""Find and parse the chroot field, returning the Chroot instance.
Returns:
chroot_lib.Chroot
"""
handler = ChrootHandler(clear_field)
chroot = handler.handle(message)
if chroot:
return chroot
logging.warning('No chroot message found, falling back to defaults.')
return handler.parse_chroot(common_pb2.Chroot())
def handle_goma(message, chroot_path):
"""Find and parse the GomaConfig field, returning the Goma instance."""
for descriptor in message.DESCRIPTOR.fields:
field = getattr(message, descriptor.name)
if isinstance(field, common_pb2.GomaConfig):
goma_config = field
return controller_util.ParseGomaConfig(goma_config, chroot_path)
return None
class PathHandler(object):
"""Handles copying a file or directory into or out of the chroot."""
INSIDE = common_pb2.Path.INSIDE
OUTSIDE = common_pb2.Path.OUTSIDE
def __init__(self, field, destination, delete, prefix=None, reset=True):
"""Path handler initialization.
Args:
field (common_pb2.Path): The Path message.
destination (str): The destination base path.
delete (bool): Whether the copied file(s) should be deleted on cleanup.
prefix (str|None): A path prefix to remove from the destination path
when moving files inside the chroot, or to add to the source paths when
moving files out of the chroot.
reset (bool): Whether to reset the state on cleanup.
"""
assert isinstance(field, common_pb2.Path)
assert field.path
assert field.location
self.field = field
self.destination = destination
self.prefix = prefix or ''
self.delete = delete
self.tempdir = None
self.reset = reset
# For resetting the state.
self._transferred = False
self._original_message = common_pb2.Path()
self._original_message.CopyFrom(self.field)
def transfer(self, direction):
"""Copy the file or directory to its destination.
Args:
direction (int): The direction files are being copied (into or out of
the chroot). Specifying the direction allows avoiding performing
unnecessary copies.
"""
if self._transferred:
return
assert direction in [self.INSIDE, self.OUTSIDE]
if self.field.location == direction:
# Already in the correct location, nothing to do.
return
# Create a tempdir for the copied file if we're cleaning it up afterwards.
if self.delete:
self.tempdir = osutils.TempDir(base_dir=self.destination)
destination = self.tempdir.tempdir
else:
destination = self.destination
source = self.field.path
if direction == self.OUTSIDE and self.prefix:
# When we're extracting files, we need /tmp/result to be
# /path/to/chroot/tmp/result.
source = os.path.join(self.prefix, source.lstrip(os.sep))
if os.path.isfile(source):
# File - use the old file name, just copy it into the destination.
dest_path = os.path.join(destination, os.path.basename(source))
copy_fn = shutil.copy
else:
# Directory - just copy everything into the new location.
dest_path = destination
copy_fn = functools.partial(osutils.CopyDirContents, allow_nonempty=True)
logging.debug('Copying %s to %s', source, dest_path)
copy_fn(source, dest_path)
# Clean up the destination path for returning, if applicable.
return_path = dest_path
if direction == self.INSIDE and return_path.startswith(self.prefix):
return_path = return_path[len(self.prefix):]
self.field.path = return_path
self.field.location = direction
self._transferred = True
def cleanup(self):
if self.tempdir:
self.tempdir.Cleanup()
self.tempdir = None
if self.reset:
self.field.CopyFrom(self._original_message)
class SyncedDirHandler(object):
"""Handler for syncing directories across the chroot boundary."""
def __init__(self, field, destination, prefix):
self.field = field
self.prefix = prefix
self.source = self.field.dir
if not self.source.endswith(os.sep):
self.source += os.sep
self.destination = destination
if not self.destination.endswith(os.sep):
self.destination += os.sep
# For resetting the message later.
self._original_message = common_pb2.SyncedDir()
self._original_message.CopyFrom(self.field)
def _sync(self, src, dest):
logging.info('Syncing %s to %s', src, dest)
# TODO: This would probably be more efficient with rsync.
osutils.EmptyDir(dest)
osutils.CopyDirContents(src, dest)
def sync_in(self):
"""Sync files from the source directory to the destination directory."""
self._sync(self.source, self.destination)
self.field.dir = '/%s' % os.path.relpath(self.destination, self.prefix)
def sync_out(self):
"""Sync files from the destination directory to the source directory."""
self._sync(self.destination, self.source)
self.field.CopyFrom(self._original_message)
@contextlib.contextmanager
def copy_paths_in(message, destination, delete=True, prefix=None):
"""Context manager function to transfer and cleanup all Path messages.
Args:
message (Message): A message whose Path messages should be transferred.
destination (str): The base destination path.
delete (bool): Whether the file(s) should be deleted.
prefix (str|None): A prefix path to remove from the final destination path
in the Path message (i.e. remove the chroot path).
Returns:
list[PathHandler]: The path handlers.
"""
assert destination
handlers = _extract_handlers(message, destination, prefix, delete=delete,
reset=True)
for handler in handlers:
handler.transfer(PathHandler.INSIDE)
try:
yield handlers
finally:
for handler in handlers:
handler.cleanup()
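# Illustrative usage sketch (hypothetical `request` message and `chroot`
# values; only copy_paths_in itself is part of this module):
#
#   with copy_paths_in(request, chroot.tmp, prefix=chroot.path) as handlers:
#     # The endpoint body runs here; every Path in `request` now points at a
#     # copy inside the chroot, and the copies are removed on exit.
#     ...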
@contextlib.contextmanager
def sync_dirs(message, destination, prefix):
"""Context manager function to handle SyncedDir messages.
The sync semantics are effectively:
rsync -r --del source/ destination/
* The endpoint runs. *
rsync -r --del destination/ source/
Args:
    message (Message): A message whose SyncedDir messages should be synced.
    destination (str): The destination path.
    prefix (str): A prefix path to remove from the final destination path
      in the SyncedDir message (i.e. remove the chroot path).
Returns:
list[SyncedDirHandler]: The handlers.
"""
assert destination
handlers = _extract_handlers(message, destination, prefix=prefix,
delete=False, reset=True,
message_type=common_pb2.SyncedDir)
for handler in handlers:
handler.sync_in()
try:
yield handlers
finally:
for handler in handlers:
handler.sync_out()
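# Illustrative usage sketch (hypothetical `request` message and `chroot`
# values), mirroring the rsync semantics described in the docstring above:
#
#   with sync_dirs(request, destination, prefix=chroot.path) as handlers:
#     # The endpoint body runs here; on exit each SyncedDir is synced back
#     # out to its original source directory.
#     ...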
def extract_results(request_message, response_message, chroot):
"""Transfer all response Path messages to the request's ResultPath.
Args:
request_message (Message): The request message containing a ResultPath
message.
response_message (Message): The response message whose Path message(s)
are to be transferred.
chroot (chroot_lib.Chroot): The chroot the files are being copied out of.
"""
# Find the ResultPath.
for descriptor in request_message.DESCRIPTOR.fields:
field = getattr(request_message, descriptor.name)
if isinstance(field, common_pb2.ResultPath):
result_path_message = field
break
else:
# No ResultPath to handle.
return
destination = result_path_message.path.path
handlers = _extract_handlers(response_message, destination, chroot.path,
delete=False, reset=False)
for handler in handlers:
handler.transfer(PathHandler.OUTSIDE)
handler.cleanup()
def _extract_handlers(message, destination, prefix, delete=False, reset=False,
field_name=None, message_type=None):
"""Recursive helper for handle_paths to extract Path messages."""
message_type = message_type or common_pb2.Path
is_path_target = message_type is common_pb2.Path
is_synced_target = message_type is common_pb2.SyncedDir
is_message = isinstance(message, protobuf_message.Message)
is_result_path = isinstance(message, common_pb2.ResultPath)
if not is_message or is_result_path:
# Base case: Nothing to handle.
# There's nothing we can do with scalar values.
# Skip ResultPath instances to avoid unnecessary file copying.
return []
elif is_path_target and isinstance(message, common_pb2.Path):
# Base case: Create handler for this message.
if not message.path or not message.location:
logging.debug('Skipping %s; incomplete.', field_name or 'message')
return []
handler = PathHandler(message, destination, delete=delete, prefix=prefix,
reset=reset)
return [handler]
elif is_synced_target and isinstance(message, common_pb2.SyncedDir):
if not message.dir:
logging.debug('Skipping %s; no directory given.', field_name or 'message')
return []
handler = SyncedDirHandler(message, destination, prefix)
return [handler]
# Iterate through each field and recurse.
handlers = []
for descriptor in message.DESCRIPTOR.fields:
field = getattr(message, descriptor.name)
if field_name:
new_field_name = '%s.%s' % (field_name, descriptor.name)
else:
new_field_name = descriptor.name
if isinstance(field, protobuf_message.Message):
# Recurse for nested Paths.
handlers.extend(
_extract_handlers(field, destination, prefix, delete, reset,
field_name=new_field_name,
message_type=message_type))
else:
# If it's iterable it may be a repeated field, try each element.
try:
iterator = iter(field)
except TypeError:
# Definitely not a repeated field, just move on.
continue
for element in iterator:
handlers.extend(
_extract_handlers(element, destination, prefix, delete, reset,
field_name=new_field_name,
message_type=message_type))
return handlers
|
|
# Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class AllocateIP(tables.LinkAction):
name = "allocate"
verbose_name = _("Allocate IP To Project")
classes = ("ajax-modal",)
icon = "link"
url = "horizon:project:access_and_security:floating_ips:allocate"
def single(self, data_table, request, *args):
return shortcuts.redirect('horizon:project:access_and_security:index')
def allowed(self, request, fip=None):
usages = quotas.tenant_quota_usages(request)
if usages['floating_ips']['available'] <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Allocate IP To Project")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:allocate_floating_ip"),)
return POLICY_CHECK(policy, request)
class ReleaseIPs(tables.BatchAction):
name = "release"
classes = ('btn-danger',)
icon = "unlink"
help_text = _("Once a floating IP is released, there is"
" no guarantee the same IP can be allocated again.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Release Floating IP",
u"Release Floating IPs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Released Floating IP",
u"Released Floating IPs",
count
)
def allowed(self, request, fip=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:release_floating_ip"),)
return POLICY_CHECK(policy, request)
def action(self, request, obj_id):
api.network.tenant_floating_ip_release(request, obj_id)
class AssociateIP(tables.LinkAction):
name = "associate"
verbose_name = _("Associate")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
def allowed(self, request, fip):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:associate_floating_ip"),)
return not fip.port_id and POLICY_CHECK(policy, request)
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"ip_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
class DisassociateIP(tables.Action):
name = "disassociate"
verbose_name = _("Disassociate")
classes = ("btn-disassociate", "btn-danger")
icon = "unlink"
def allowed(self, request, fip):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:disassociate_floating_ip"),)
return fip.port_id and POLICY_CHECK(policy, request)
def single(self, table, request, obj_id):
try:
fip = table.get_object_by_id(filters.get_int_or_uuid(obj_id))
api.network.floating_ip_disassociate(request, fip.id)
            LOG.info('Disassociating Floating IP "%s".', obj_id)
messages.success(request,
_('Successfully disassociated Floating IP: %s')
% fip.ip)
except Exception:
exceptions.handle(request,
_('Unable to disassociate floating IP.'))
return shortcuts.redirect('horizon:project:access_and_security:index')
def get_instance_info(fip):
if fip.instance_type == 'compute':
return (_("%(instance_name)s %(fixed_ip)s")
% {'instance_name': getattr(fip, "instance_name", ''),
'fixed_ip': fip.fixed_ip})
elif fip.instance_type == 'loadbalancer':
return _("Load Balancer VIP %s") % fip.fixed_ip
elif fip.instance_type:
return fip.fixed_ip
else:
return None
def get_instance_link(datum):
if datum.instance_type == 'compute':
return reverse("horizon:project:instances:detail",
args=(datum.instance_id,))
else:
return None
STATUS_DISPLAY_CHOICES = (
("active", pgettext_lazy("Current status of a Floating IP", u"Active")),
("down", pgettext_lazy("Current status of a Floating IP", u"Down")),
("error", pgettext_lazy("Current status of a Floating IP", u"Error")),
)
class FloatingIPsTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("down", True),
("error", False)
)
ip = tables.Column("ip",
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
fixed_ip = tables.Column(get_instance_info,
link=get_instance_link,
verbose_name=_("Mapped Fixed IP Address"))
pool = tables.Column("pool_name",
verbose_name=_("Pool"))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(FloatingIPsTable, self).__init__(
request, data=data, needs_form_wrapper=needs_form_wrapper,
**kwargs)
if not api.base.is_service_enabled(request, 'network'):
del self.columns['status']
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
def get_object_display(self, datum):
return datum.ip
class Meta(object):
name = "floating_ips"
verbose_name = _("Floating IPs")
table_actions = (AllocateIP, ReleaseIPs)
row_actions = (AssociateIP, DisassociateIP, ReleaseIPs)
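# Illustrative sketch (hypothetical view class and template path): a table
# like this is typically rendered through a Horizon DataTableView, e.g.:
#
#   class FloatingIPsView(tables.DataTableView):
#       table_class = FloatingIPsTable
#       template_name = 'project/access_and_security/floating_ips/index.html'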
|
|
from bisect import bisect_left
import sys
import numpy as np
from PyQt4.QtCore import QSize, Qt, QTimer
from PyQt4 import QtGui
from PyQt4.QtGui import QApplication, QTableView, QStandardItemModel, \
QStandardItem
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import r2_score
import Orange
from Orange.data import Table, Domain, StringVariable, ContinuousVariable, \
DiscreteVariable
from Orange.canvas import report
from Orange.data.sql.table import SqlTable, AUTO_DL_LIMIT
from Orange.preprocess.score import ReliefF, RReliefF
from Orange.widgets import gui
from Orange.widgets.settings import \
DomainContextHandler, Setting, ContextSetting, SettingProvider
from Orange.widgets.utils.toolbar import ZoomSelectToolbar
from Orange.widgets.visualize.owscatterplotgraph import OWScatterPlotGraph
from Orange.widgets.widget import OWWidget, Default, AttributeList
def font_resize(font, factor, minsize=None, maxsize=None):
font = QtGui.QFont(font)
fontinfo = QtGui.QFontInfo(font)
size = fontinfo.pointSizeF() * factor
if minsize is not None:
size = max(size, minsize)
if maxsize is not None:
size = min(size, maxsize)
font.setPointSizeF(size)
return font
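# Example (sketch): enlarge a font by 25%, capped at 14 points. A running
# QApplication may be required for accurate font metrics:
#
#   bigger = font_resize(QtGui.QFont("Helvetica", 10), 1.25, maxsize=14)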
class OWScatterPlot(OWWidget):
name = 'Scatter Plot'
description = 'Scatter plot visualization.'
icon = "icons/ScatterPlot.svg"
inputs = [("Data", Table, "set_data", Default),
("Data Subset", Table, "set_subset_data"),
("Features", AttributeList, "set_shown_attributes")]
outputs = [("Selected Data", Table, Default),
("Other Data", Table),
("Features", Table)]
settingsHandler = DomainContextHandler()
auto_send_selection = Setting(True)
auto_sample = Setting(True)
toolbar_selection = Setting(0)
attr_x = ContextSetting("")
attr_y = ContextSetting("")
graph = SettingProvider(OWScatterPlotGraph)
zoom_select_toolbar = SettingProvider(ZoomSelectToolbar)
jitter_sizes = [0, 0.1, 0.5, 1, 2, 3, 4, 5, 7, 10]
graph_name = "graph.plot_widget.plotItem"
def __init__(self):
super().__init__()
box = gui.widgetBox(self.mainArea, True, margin=0)
self.graph = OWScatterPlotGraph(self, box, "ScatterPlot")
box.layout().addWidget(self.graph.plot_widget)
plot = self.graph.plot_widget
axispen = QtGui.QPen(self.palette().color(QtGui.QPalette.Text))
axis = plot.getAxis("bottom")
axis.setPen(axispen)
axis = plot.getAxis("left")
axis.setPen(axispen)
self.data = None # Orange.data.Table
self.subset_data = None # Orange.data.Table
self.data_metas_X = None # self.data, where primitive metas are moved to X
self.sql_data = None # Orange.data.sql.table.SqlTable
self.attribute_selection_list = None # list of Orange.data.Variable
self.__timer = QTimer(self, interval=1200)
self.__timer.timeout.connect(self.add_data)
common_options = {"labelWidth": 50, "orientation": "horizontal",
"sendSelectedValue": True, "valueType": str}
box = gui.widgetBox(self.controlArea, "Axis Data")
self.cb_attr_x = gui.comboBox(box, self, "attr_x", label="Axis x:",
callback=self.update_attr,
**common_options)
self.cb_attr_y = gui.comboBox(box, self, "attr_y", label="Axis y:",
callback=self.update_attr,
**common_options)
self.vizrank = self.VizRank(self)
vizrank_box = gui.widgetBox(box, None, orientation='horizontal')
gui.separator(vizrank_box, width=common_options["labelWidth"])
self.vizrank_button = gui.button(
vizrank_box, self, "Rank projections", callback=self.vizrank.reshow,
tooltip="Find projections with good class separation")
self.vizrank_button.setEnabled(False)
gui.separator(box)
gui.valueSlider(
box, self, value='graph.jitter_size', label='Jittering: ',
values=self.jitter_sizes, callback=self.reset_graph_data,
labelFormat=lambda x:
"None" if x == 0 else ("%.1f %%" if x < 1 else "%d %%") % x)
gui.checkBox(
gui.indentedBox(box), self, 'graph.jitter_continuous',
'Jitter continuous values', callback=self.reset_graph_data)
self.sampling = gui.auto_commit(
self.controlArea, self, "auto_sample", "Sample", box="Sampling",
callback=self.switch_sampling, commit=lambda: self.add_data(1))
self.sampling.setVisible(False)
box = gui.widgetBox(self.controlArea, "Points")
self.cb_attr_color = gui.comboBox(
box, self, "graph.attr_color", label="Color:",
emptyString="(Same color)", callback=self.update_colors,
**common_options)
self.cb_attr_label = gui.comboBox(
box, self, "graph.attr_label", label="Label:",
emptyString="(No labels)", callback=self.graph.update_labels,
**common_options)
self.cb_attr_shape = gui.comboBox(
box, self, "graph.attr_shape", label="Shape:",
emptyString="(Same shape)", callback=self.graph.update_shapes,
**common_options)
self.cb_attr_size = gui.comboBox(
box, self, "graph.attr_size", label="Size:",
emptyString="(Same size)", callback=self.graph.update_sizes,
**common_options)
g = self.graph.gui
box2 = g.point_properties_box(self.controlArea, box)
box = gui.widgetBox(self.controlArea, "Plot Properties")
g.add_widgets([g.ShowLegend, g.ShowGridLines], box)
gui.checkBox(box, self, value='graph.tooltip_shows_all',
label='Show all data on mouse hover')
self.cb_class_density = gui.checkBox(
box, self, value='graph.class_density', label='Show class density',
callback=self.update_density)
self.zoom_select_toolbar = g.zoom_select_toolbar(
gui.widgetBox(self.controlArea, "Zoom/Select"), nomargin=True,
buttons=[g.StateButtonsBegin, g.SimpleSelect, g.Pan, g.Zoom,
g.StateButtonsEnd, g.ZoomReset]
)
buttons = self.zoom_select_toolbar.buttons
buttons[g.Zoom].clicked.connect(self.graph.zoom_button_clicked)
buttons[g.Pan].clicked.connect(self.graph.pan_button_clicked)
buttons[g.SimpleSelect].clicked.connect(self.graph.select_button_clicked)
buttons[g.ZoomReset].clicked.connect(self.graph.reset_button_clicked)
self.controlArea.layout().addStretch(100)
self.icons = gui.attributeIconDict
p = self.graph.plot_widget.palette()
self.graph.set_palette(p)
gui.auto_commit(self.controlArea, self, "auto_send_selection",
"Send Selection")
self.inline_graph_report()
def zoom(s):
"""Zoom in/out by factor `s`."""
viewbox = plot.getViewBox()
# scaleBy scales the view's bounds (the axis range)
viewbox.scaleBy((1 / s, 1 / s))
def fit_to_view():
viewbox = plot.getViewBox()
viewbox.autoRange()
zoom_in = QtGui.QAction(
"Zoom in", self, triggered=lambda: zoom(1.25)
)
zoom_in.setShortcuts([QtGui.QKeySequence(QtGui.QKeySequence.ZoomIn),
QtGui.QKeySequence(self.tr("Ctrl+="))])
zoom_out = QtGui.QAction(
"Zoom out", self, shortcut=QtGui.QKeySequence.ZoomOut,
triggered=lambda: zoom(1 / 1.25)
)
zoom_fit = QtGui.QAction(
"Fit in view", self,
shortcut=QtGui.QKeySequence(Qt.ControlModifier | Qt.Key_0),
triggered=fit_to_view
)
self.addActions([zoom_in, zoom_out, zoom_fit])
# def settingsFromWidgetCallback(self, handler, context):
# context.selectionPolygons = []
# for curve in self.graph.selectionCurveList:
# xs = [curve.x(i) for i in range(curve.dataSize())]
# ys = [curve.y(i) for i in range(curve.dataSize())]
# context.selectionPolygons.append((xs, ys))
# def settingsToWidgetCallback(self, handler, context):
# selections = getattr(context, "selectionPolygons", [])
# for (xs, ys) in selections:
# c = SelectionCurve("")
# c.setData(xs,ys)
# c.attach(self.graph)
# self.graph.selectionCurveList.append(c)
def reset_graph_data(self, *_):
self.graph.rescale_data()
self.update_graph()
def set_data(self, data):
self.information(1)
self.__timer.stop()
self.sampling.setVisible(False)
self.sql_data = None
if isinstance(data, SqlTable):
if data.approx_len() < 4000:
data = Table(data)
else:
self.information(1, "Large SQL table (showing a sample)")
self.sql_data = data
data_sample = data.sample_time(0.8, no_cache=True)
data_sample.download_data(2000, partial=True)
data = Table(data_sample)
self.sampling.setVisible(True)
if self.auto_sample:
self.__timer.start()
if data is not None and (len(data) == 0 or len(data.domain) == 0):
data = None
if self.data and data and self.data.checksum() == data.checksum():
return
self.closeContext()
same_domain = (self.data and data and
data.domain.checksum() == self.data.domain.checksum())
self.data = data
self.data_metas_X = self.move_primitive_metas_to_X(data)
if not same_domain:
self.init_attr_values()
self.vizrank._initialize()
self.vizrank_button.setEnabled(
self.data is not None and self.data.domain.class_var is not None
and len(self.data.domain.attributes) > 1 and len(self.data) > 1)
self.openContext(self.data)
def add_data(self, time=0.4):
if self.data and len(self.data) > 2000:
return self.__timer.stop()
data_sample = self.sql_data.sample_time(time, no_cache=True)
if data_sample:
data_sample.download_data(2000, partial=True)
data = Table(data_sample)
self.data = Table.concatenate((self.data, data), axis=0)
self.data_metas_X = self.move_primitive_metas_to_X(self.data)
self.handleNewSignals()
def switch_sampling(self):
self.__timer.stop()
if self.auto_sample and self.sql_data:
self.add_data()
self.__timer.start()
def move_primitive_metas_to_X(self, data):
if data is not None:
new_attrs = [a for a in data.domain.attributes + data.domain.metas
if a.is_primitive()]
new_metas = [m for m in data.domain.metas if not m.is_primitive()]
data = Table.from_table(Domain(new_attrs, data.domain.class_vars,
new_metas), data)
return data
def set_subset_data(self, subset_data):
self.warning(0)
if isinstance(subset_data, SqlTable):
if subset_data.approx_len() < AUTO_DL_LIMIT:
subset_data = Table(subset_data)
else:
self.warning(0, "Data subset does not support large Sql tables")
subset_data = None
self.subset_data = self.move_primitive_metas_to_X(subset_data)
# called when all signals are received, so the graph is updated only once
def handleNewSignals(self):
self.graph.new_data(self.data_metas_X, self.subset_data)
if self.attribute_selection_list and \
all(attr.name in self.graph.attribute_name_index
for attr in self.attribute_selection_list):
self.attr_x = self.attribute_selection_list[0].name
self.attr_y = self.attribute_selection_list[1].name
self.attribute_selection_list = None
self.update_graph()
self.cb_class_density.setEnabled(self.graph.can_draw_density())
self.unconditional_commit()
def set_shown_attributes(self, attributes):
if attributes and len(attributes) >= 2:
self.attribute_selection_list = attributes[:2]
else:
self.attribute_selection_list = None
def get_shown_attributes(self):
return self.attr_x, self.attr_y
def init_attr_values(self):
self.cb_attr_x.clear()
self.cb_attr_y.clear()
self.attr_x = None
self.attr_y = None
self.cb_attr_color.clear()
self.cb_attr_color.addItem("(Same color)")
self.cb_attr_label.clear()
self.cb_attr_label.addItem("(No labels)")
self.cb_attr_shape.clear()
self.cb_attr_shape.addItem("(Same shape)")
self.cb_attr_size.clear()
self.cb_attr_size.addItem("(Same size)")
if not self.data:
return
for var in self.data.domain.metas:
if not var.is_primitive():
self.cb_attr_label.addItem(self.icons[var], var.name)
for attr in self.data.domain.variables:
self.cb_attr_x.addItem(self.icons[attr], attr.name)
self.cb_attr_y.addItem(self.icons[attr], attr.name)
self.cb_attr_color.addItem(self.icons[attr], attr.name)
if attr.is_discrete:
self.cb_attr_shape.addItem(self.icons[attr], attr.name)
else:
self.cb_attr_size.addItem(self.icons[attr], attr.name)
self.cb_attr_label.addItem(self.icons[attr], attr.name)
for var in self.data.domain.metas:
if var.is_primitive():
self.cb_attr_x.addItem(self.icons[var], var.name)
self.cb_attr_y.addItem(self.icons[var], var.name)
self.cb_attr_color.addItem(self.icons[var], var.name)
if var.is_discrete:
self.cb_attr_shape.addItem(self.icons[var], var.name)
else:
self.cb_attr_size.addItem(self.icons[var], var.name)
self.cb_attr_label.addItem(self.icons[var], var.name)
self.attr_x = self.cb_attr_x.itemText(0)
if self.cb_attr_y.count() > 1:
self.attr_y = self.cb_attr_y.itemText(1)
else:
self.attr_y = self.cb_attr_y.itemText(0)
if self.data.domain.class_var:
self.graph.attr_color = self.data.domain.class_var.name
else:
self.graph.attr_color = ""
self.graph.attr_shape = ""
self.graph.attr_size = ""
self.graph.attr_label = ""
def update_attr(self, attributes=None):
self.update_graph(attributes=attributes)
self.cb_class_density.setEnabled(self.graph.can_draw_density())
self.send_features()
def update_colors(self):
self.graph.update_colors()
self.cb_class_density.setEnabled(self.graph.can_draw_density())
def update_density(self):
self.update_graph(reset_view=False)
def update_graph(self, attributes=None, reset_view=True, **_):
self.graph.zoomStack = []
if attributes and len(attributes) == 2:
self.attr_x, self.attr_y = attributes
if not self.graph.have_data:
return
self.graph.update_data(self.attr_x, self.attr_y, reset_view)
def selection_changed(self):
self.send_data()
def send_data(self):
selected = unselected = None
# TODO: Implement selection for sql data
if isinstance(self.data, SqlTable):
selected = unselected = self.data
elif self.data is not None:
selection = self.graph.get_selection()
selected = self.data[selection]
unselection = np.full(len(self.data), True, dtype=bool)
unselection[selection] = False
unselected = self.data[unselection]
self.send("Selected Data", selected)
self.send("Other Data", unselected)
def send_features(self):
features = None
if self.attr_x or self.attr_y:
dom = Domain([], metas=(StringVariable(name="feature"),))
features = Table(dom, [[self.attr_x], [self.attr_y]])
features.name = "Features"
self.send("Features", features)
def commit(self):
self.send_data()
self.send_features()
def closeEvent(self, ce):
self.vizrank.close()
super().closeEvent(ce)
def hideEvent(self, he):
self.vizrank.hide()
super().hideEvent(he)
def get_widget_name_extension(self):
if self.data is not None:
return "{} vs {}".format(self.combo_value(self.cb_attr_x),
self.combo_value(self.cb_attr_y))
def send_report(self):
disc_attr = False
if self.data:
domain = self.data.domain
disc_attr = domain[self.attr_x].is_discrete or \
domain[self.attr_y].is_discrete
caption = report.render_items_vert((
("Color", self.combo_value(self.cb_attr_color)),
("Label", self.combo_value(self.cb_attr_label)),
("Shape", self.combo_value(self.cb_attr_shape)),
("Size", self.combo_value(self.cb_attr_size)),
("Jittering", (self.graph.jitter_continuous or disc_attr) and
self.graph.jitter_size)))
self.report_plot()
if caption:
self.report_caption(caption)
def onDeleteWidget(self):
super().onDeleteWidget()
self.graph.plot_widget.getViewBox().deleteLater()
self.graph.plot_widget.clear()
class VizRank(OWWidget):
name = "Rank projections (Scatter Plot)"
want_control_area = False
def __init__(self, parent_widget):
super().__init__()
self.parent_widget = parent_widget
self.running = False
self.progress = None
self.k = 10
self.projectionTable = QTableView()
self.mainArea.layout().addWidget(self.projectionTable)
self.projectionTable.setSelectionBehavior(QTableView.SelectRows)
self.projectionTable.setSelectionMode(QTableView.SingleSelection)
self.projectionTable.setSortingEnabled(True)
self.projectionTableModel = QStandardItemModel(self)
self.projectionTable.setModel(self.projectionTableModel)
self.projectionTable.selectionModel().selectionChanged.connect(
self.on_selection_changed)
self.button = gui.button(self.mainArea, self, "Start evaluation",
callback=self.toggle, default=True)
self.resize(380, 512)
self._initialize()
def _initialize(self):
self.running = False
self.projectionTableModel.clear()
self.projectionTableModel.setHorizontalHeaderLabels(
["Score", "Feature 1", "Feature 2"])
self.projectionTable.setColumnWidth(0, 60)
self.projectionTable.setColumnWidth(1, 120)
self.projectionTable.setColumnWidth(2, 120)
self.button.setText("Start evaluation")
self.button.setEnabled(False)
self.pause = False
self.data = None
self.attrs = []
self.scores = []
self.i, self.j = 0, 0
if self.progress:
self.progress.finish()
self.progress = None
self.information(0)
if self.parent_widget.data:
if not self.parent_widget.data.domain.class_var:
self.information(
0, "Data with a class variable is required.")
return
if len(self.parent_widget.data.domain.attributes) < 2:
self.information(
0, 'At least 2 unique features are needed.')
return
if len(self.parent_widget.data) < 2:
self.information(
0, 'At least 2 instances are needed.')
return
self.button.setEnabled(True)
def on_selection_changed(self, selected, deselected):
"""Called when the ranks view selection changes."""
a1 = selected.indexes()[1].data()
a2 = selected.indexes()[2].data()
self.parent_widget.update_attr(attributes=(a1, a2))
def toggle(self):
self.running ^= 1
if self.running:
self.button.setText("Pause")
self.run()
else:
self.button.setText("Continue")
self.button.setEnabled(False)
def run(self):
graph = self.parent_widget.graph
y_full = self.parent_widget.data.Y
if not self.attrs:
self.attrs = self.score_heuristic()
if not self.progress:
self.progress = gui.ProgressBar(
self, len(self.attrs) * (len(self.attrs) - 1) / 2)
for i in range(self.i, len(self.attrs)):
ind1 = graph.attribute_name_index[self.attrs[i]]
for j in range(self.j, i):
if not self.running:
self.i, self.j = i, j
if not self.projectionTable.selectedIndexes():
self.projectionTable.selectRow(0)
self.button.setEnabled(True)
return
ind2 = graph.attribute_name_index[self.attrs[j]]
X = graph.scaled_data[[ind1, ind2], :]
valid = graph.get_valid_list([ind1, ind2])
X = X[:, valid].T
y = y_full[valid]
n_neighbors = min(self.k, len(X) - 1)
knn = NearestNeighbors(n_neighbors=n_neighbors).fit(X)
ind = knn.kneighbors(return_distance=False)
if self.parent_widget.data.domain.has_discrete_class:
score = np.sum(y[ind] == y.reshape(-1, 1)) / (
len(y_full) * n_neighbors)
else:
score = r2_score(y, np.mean(y[ind], axis=1)) * (
len(y) / len(y_full))
pos = bisect_left(self.scores, score)
self.projectionTableModel.insertRow(
len(self.scores) - pos,
[QStandardItem("{:.4f}".format(score)),
QStandardItem(self.attrs[j]),
QStandardItem(self.attrs[i])])
self.scores.insert(pos, score)
self.progress.advance()
self.j = 0
self.progress.finish()
if not self.projectionTable.selectedIndexes():
self.projectionTable.selectRow(0)
self.button.setText("Finished")
self.button.setEnabled(False)
def score_heuristic(self):
X = self.parent_widget.graph.scaled_data.T
Y = self.parent_widget.data.Y
dom = Domain([ContinuousVariable(str(i))
for i in range(X.shape[1])],
self.parent_widget.data.domain.class_vars)
data = Table(dom, X, Y)
relief = ReliefF if isinstance(dom.class_var,
DiscreteVariable) else RReliefF
weights = relief(n_iterations=100, k_nearest=self.k)(data)
attrs = sorted(zip(weights,
(x.name for x in
self.parent_widget.data.domain.attributes)),
reverse=True)
return [a for _, a in attrs]
def test_main(argv=None):
if argv is None:
argv = sys.argv
argv = list(argv)
a = QApplication(argv)
if len(argv) > 1:
filename = argv[1]
else:
filename = "iris"
ow = OWScatterPlot()
ow.show()
ow.raise_()
data = Orange.data.Table(filename)
ow.set_data(data)
ow.set_subset_data(data[:30])
ow.handleNewSignals()
    rval = a.exec_()
ow.set_data(None)
ow.set_subset_data(None)
ow.handleNewSignals()
ow.saveSettings()
ow.onDeleteWidget()
return rval
if __name__ == "__main__":
test_main()
|
|
""" Pure Python implementation of the Argon2 password hash.
If you can, use the `argon2_cffi' or `argon2' bindings.
Bas Westerbaan <bas@westerbaan.name> """
import six
from six.moves import range
from six import BytesIO
import struct
import binascii
import multiprocessing
import multiprocessing.dummy
__all__ = [
'argon2',
'ARGON2D',
'ARGON2I',
'ARGON2_DEFAULT_VERSION',
'ARGON2_VERSIONS',
'Argon2Error',
'Argon2ParameterError']
ARGON2D = 0
ARGON2I = 1
ARGON2ID = 2
ARGON2_VERSIONS = (0x10, 0x13)
ARGON2_DEFAULT_VERSION = ARGON2_VERSIONS[-1]
ARGON2_TYPES = (ARGON2D, ARGON2I, ARGON2ID)
class Argon2Error(Exception):
pass
class Argon2ParameterError(Argon2Error):
pass
def argon2(password, salt, time_cost, memory_cost, parallelism,
tag_length=32, secret=b'', associated_data=b'',
type_code=ARGON2I, threads=None, version=ARGON2_DEFAULT_VERSION,
use_threads=False):
""" Compute the Argon2 hash for *password*.
:param bytes password: Password to hash
:param bytes salt: A salt. Should be random and different for each
password.
:param int time_cost: Number of iterations to use.
:param int memory_cost: Amount of kibibytes of memory to use.
:param int parallelism: Amount of threads that can contribute to
the computation of the hash at the same time.
Optional arguments:
:param int tag_length: Length of the hash returned
:param bytes secret: Optional secret to differentiate hash
:param bytes associated_data: Optional associated data
    :param int type_code: variant of Argon2 to use. One of ARGON2D,
        ARGON2I or ARGON2ID
:param int threads: number of threads to use to compute the hash.
:param bool use_threads: if true, signal multiprocessing to use threads
rather than processes.
:param int version: version of argon2 to use. At the moment either
0x10 for v1.0 or 0x13 for v1.3
:rtype: bytes """
if threads is None:
threads = parallelism
if parallelism <= 0:
raise Argon2ParameterError("parallelism must be strictly positive")
if threads <= 0:
raise Argon2ParameterError("threads must be strictly positive")
if time_cost <= 0:
raise Argon2ParameterError("time_cost must be strictly positive")
if memory_cost < 8 * parallelism:
raise Argon2ParameterError("memory_cost can't be less than 8"
" times the number of lanes")
if type_code not in ARGON2_TYPES:
raise Argon2ParameterError("type_code %s not supported" % type_code)
if version not in ARGON2_VERSIONS:
raise Argon2ParameterError("version %s not supported" % version)
threads = min(parallelism, threads)
if threads == 1:
worker_pool = None
else:
if use_threads:
Pool = multiprocessing.dummy.Pool
else:
Pool = multiprocessing.Pool
worker_pool = Pool(processes=threads)
    # Compute the pre-hashing digest
h = Blake2b()
h.update(struct.pack("<iiiiii", parallelism,
tag_length,
memory_cost,
time_cost,
version,
type_code))
h.update(struct.pack("<i", len(password)))
h.update(password)
h.update(struct.pack("<i", len(salt)))
h.update(salt)
h.update(struct.pack("<i", len(secret)))
h.update(secret)
h.update(struct.pack("<i", len(associated_data)))
h.update(associated_data)
H0 = h.digest()
m_prime = (memory_cost // (4 * parallelism)) * (4 * parallelism)
q = m_prime // parallelism # lane_length
segment_length = q // 4
# Allocate the matrix.
B = [[None for j in range(q)] for i in range(parallelism)]
# The blocks in Argon2 are arranged in a matrix. For each thread,
# there is a row, which is also called a lane. The number of
# columns depends on the memory_cost.
# There will be time_cost passes over the whole matrix.
    # The columns are put into groups of four, called slices.
# The intersection of a lane with a slice is called a segment.
# The matrix is filled one slice at the time. The segments within
# a slice can be computed in parallel.
for t in range(time_cost):
for segment in range(4):
if not worker_pool:
for i in range(parallelism):
_fill_segment(B, t, segment, i, type_code, segment_length,
H0, q, parallelism, m_prime, time_cost, version)
continue
handles = [None]*parallelism
for i in range(parallelism):
handles[i] = worker_pool.apply_async(_fill_segment,
(B, t, segment, i, type_code, segment_length, H0,
q, parallelism, m_prime, time_cost, version))
for i in range(parallelism):
new_blocks = handles[i].get()
for index in range(segment_length):
B[i][segment * segment_length + index] = new_blocks[index]
if worker_pool:
# don't let workers sit around until pool is GC'd
worker_pool.close()
B_final = b'\0' * 1024
for i in range(parallelism):
B_final = xor1024(B_final, B[i][q-1])
return _H_prime(B_final, tag_length)
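# Illustrative sketch (not part of the original module): the smallest legal
# parameter set — memory_cost must be at least 8 * parallelism. These values
# are far too weak for real passwords and are for demonstration only.
def _demo_argon2():
    tag = argon2(b'password', b'somesalt', time_cost=1, memory_cost=8,
                 parallelism=1)
    assert len(tag) == 32  # default tag_length
    return tag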
def _fill_segment(B, t, segment, i, type_code, segment_length, H0,
q, parallelism, m_prime, time_cost, version):
# Argon2i computes a bunch of pseudo-random numbers
# for every segment.
    data_independent = ((type_code == ARGON2I)
                        or (type_code == ARGON2ID and t == 0 and segment <= 1))
    if data_independent:
# See `generate_addresses' in reference implementation
# and section 3.3 of the specification.
pseudo_rands = []
ctr = 0 # `i' in the specification
while len(pseudo_rands) < segment_length:
ctr += 1
address_block = _compress(b'\0'*1024, _compress(b'\0'*1024,
struct.pack('<QQQQQQQ', t, i, segment, m_prime,
time_cost, type_code, ctr)
+ b'\0'*968))
for addr_i in range(0, 1024, 8):
pseudo_rands.append(struct.unpack('<II',
address_block[addr_i:addr_i+8]))
for index in range(segment_length):
j = segment * segment_length + index
if t == 0 and j < 2:
# First two columns are special.
B[i][j] = _H_prime(H0 + struct.pack('<II', j, i), 1024)
continue
# See `section 3.3. Indexing' of argon2 spec.
# First, we derive two pseudo-random values from the current
# state. This is where Argon2i and Argon2d differ.
        if data_independent:
J1, J2 = pseudo_rands[index]
else:
J1, J2 = struct.unpack_from('<II', B[i][(j-1)%q][:8])
# Using the pseudo-random J1 and J2, we pick a reference
# block to mix with the previous one to create the next.
i_prime = i if t == 0 and segment == 0 else J2 % parallelism
if t == 0:
if segment == 0 or i == i_prime:
ref_area_size = j - 1
elif index == 0:
ref_area_size = segment * segment_length - 1
else:
ref_area_size = segment * segment_length
elif i == i_prime: # same_lane
ref_area_size = q - segment_length + index - 1
elif index == 0:
ref_area_size = q - segment_length - 1
else:
ref_area_size = q - segment_length
rel_pos = (J1 ** 2) >> 32
rel_pos = ref_area_size - 1 - ((ref_area_size * rel_pos) >> 32)
start_pos = 0
if t != 0 and segment != 3:
start_pos = (segment + 1) * segment_length
j_prime = (start_pos + rel_pos) % q
# Mix the previous and reference block to create
# the next block.
new_block = _compress(B[i][(j-1)%q], B[i_prime][j_prime])
if t != 0 and version == 0x13:
new_block = xor1024(B[i][j], new_block)
B[i][j] = new_block
# If we are run in a separate thread, then B is a copy. Return changes.
return B[i][segment*segment_length:(segment+1)*segment_length]
# xor1024: XOR two 1024 byte blocks with each other.
if six.PY3:
def xor1024(a, b):
return (int.from_bytes(a, byteorder='little') ^
int.from_bytes(b, byteorder='little')).to_bytes(
1024, byteorder='little')
else:
_1024B_STRUCT = struct.Struct('Q'*128)
def xor1024(a, b):
a2 = _1024B_STRUCT.unpack(a)
b2 = list(_1024B_STRUCT.unpack(b))
for i in xrange(128):
b2[i] ^= a2[i]
return _1024B_STRUCT.pack(*b2)
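# Illustrative sketch (not part of the original module): XOR with the
# all-zero block is the identity, on both the PY2 and PY3 code paths.
def _demo_xor1024():
    assert xor1024(b'\0' * 1024, b'\x01' * 1024) == b'\x01' * 1024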
def _compress(X, Y):
""" Argon2's compression function G.
This function is based on Blake2's compression function.
For the definition, see section 3.4 of Argon2's specification. """
R = xor1024(X, Y)
Q = []
Z = [None]*64
for i in range(0, 64, 8):
Q.extend(_P(R[i *16:(i+1)*16],
R[(i+1)*16:(i+2)*16],
R[(i+2)*16:(i+3)*16],
R[(i+3)*16:(i+4)*16],
R[(i+4)*16:(i+5)*16],
R[(i+5)*16:(i+6)*16],
R[(i+6)*16:(i+7)*16],
R[(i+7)*16:(i+8)*16]))
for i in range(8):
out = _P(Q[i], Q[i+8], Q[i+16], Q[i+24],
Q[i+32], Q[i+40], Q[i+48], Q[i+56])
for j in range(8):
Z[i + j*8] = out[j]
return xor1024(b''.join(Z), R)
def _P(S0, S1, S2, S3, S4, S5, S6, S7):
""" Permutation used in Argon2's compression function G.
It is a modification of the permutation used in Blake2.
See Appendix A of the specification of Argon2. """
S = (S0, S1, S2, S3, S4, S5, S6, S7)
v = [None] * 16
for i in range(8):
tmp1, tmp2 = struct.unpack_from('<QQ', S[i])
v[2*i] = tmp1
v[2*i+1] = tmp2
_G(v, 0, 4, 8, 12)
_G(v, 1, 5, 9, 13)
_G(v, 2, 6, 10, 14)
_G(v, 3, 7, 11, 15)
_G(v, 0, 5, 10, 15)
_G(v, 1, 6, 11, 12)
_G(v, 2, 7, 8, 13)
_G(v, 3, 4, 9, 14)
ret = [struct.pack("<QQ", v[2*i], v[2*i+1]) for i in range(8)]
return ret
def _G(v, a, b, c, d):
""" Quarter-round of the permutation used in the compression of Argon2.
It is a modification of the quarter-round used in Blake2, which in turn
is a modification of ChaCha. See Appendix A of the specification of
Argon2. """
va, vb, vc, vd = v[a], v[b], v[c], v[d]
va = (va + vb + 2 * (va & 0xffffffff) * (vb & 0xffffffff)
) & 0xffffffffffffffff
tmp = vd ^ va
vd = (tmp >> 32) | ((tmp & 0xffffffff) << 32)
vc = (vc + vd + 2 * (vc & 0xffffffff) * (vd & 0xffffffff)
) & 0xffffffffffffffff
tmp = vb ^ vc
vb = (tmp >> 24) | ((tmp & 0xffffff) << 40)
va = (va + vb + 2 * (va & 0xffffffff) * (vb & 0xffffffff)
) & 0xffffffffffffffff
tmp = vd ^ va
vd = (tmp >> 16) | ((tmp & 0xffff) << 48)
vc = (vc + vd + 2 * (vc & 0xffffffff) * (vd & 0xffffffff)
) & 0xffffffffffffffff
tmp = vb ^ vc
vb = (tmp >> 63) | ((tmp << 1) & 0xffffffffffffffff)
v[a], v[b], v[c], v[d] = va, vb, vc, vd
def _H_prime(X, tag_length):
""" Blake2b turned into a "variable-length hash function".
See definition of H' in section 3.2 of the argon2 spec. """
if tag_length <= 64:
return Blake2b(struct.pack('<I', tag_length) + X,
digest_length=tag_length).digest()
buf = BytesIO()
V = Blake2b(struct.pack('<I', tag_length) + X).digest() # V_1
buf.write(V[:32])
todo = tag_length - 32
while todo > 64:
V = Blake2b(V).digest() # V_2, ..., V_r
buf.write(V[:32])
todo -= 32
buf.write(Blake2b(V, digest_length=todo).digest()) # V_{r+1}
return buf.getvalue()
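# Illustrative sketch (not part of the original module): H' always returns
# exactly tag_length bytes, including lengths beyond Blake2b's 64 byte cap.
def _demo_h_prime():
    assert len(_H_prime(b'x', 32)) == 32
    assert len(_H_prime(b'x', 100)) == 100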
class Blake2b(object):
""" Minimal implementation of Blake2b, as required by Argon2. """
IV = [0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179]
SIGMA = ((0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15),
(14,10,4,8,9,15,13,6,1,12,0,2,11,7,5,3),
(11,8,12,0,5,2,15,13,10,14,3,6,7,1,9,4),
(7,9,3,1,13,12,11,14,2,6,5,10,4,0,15,8),
(9,0,5,7,2,4,10,15,14,1,11,12,6,8,3,13),
(2,12,6,10,0,11,8,3,4,13,7,5,15,14,1,9),
(12,5,1,15,14,13,4,10,0,7,6,3,9,2,8,11),
             (13,11,7,14,12,1,3,9,5,0,15,4,8,6,2,10),
(6,15,14,9,11,3,0,8,12,2,13,7,1,4,10,5),
(10,2,8,4,7,6,1,5,15,11,9,14,3,12,13,0),
(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15),
(14,10,4,8,9,15,13,6,1,12,0,2,11,7,5,3))
def __init__(self, data=b'', key=b'', digest_length=64):
# default parameter block for sequential Blake2b with 128 byte
# digest and key.
assert 0 <= len(key) <= 128
assert 0 < digest_length <= 64
P = [0x0000000001010000, 0, 0, 0, 0, 0, 0, 0]
P[0] |= len(key) << 8
P[0] |= digest_length
self._digest_length = digest_length
self._buf = b'' # data that didn't fit in a block yet
self._h = [self.IV[i] ^ P[i] for i in range(8)] # current hash
self._t = [0, 0] # counter
self._f = [0, 0] # finalization flags
self._N = 0
self.finalized = False
if key:
self.update(key + b'\0' * (128 - len(key)))
if data:
self.update(data)
def update(self, data):
assert not self.finalized
i = 0
l = len(data)
if len(self._buf) + l <= 128:
# We do not have enough data for one compression. Store it in
# the buffer and return.
self._buf += data
return
# First, use the buffer
self._compress(self._buf + data[:128 - len(self._buf)], 128)
i = 128 - len(self._buf)
# Now take as many blocks from data as we can.
while l - i > 128:
self._compress(data[i:i+128], 128)
i += 128
# Put the rest in the buffer
self._buf = data[i:]
def final(self):
if not self.finalized:
n_remaining = len(self._buf)
buf = self._buf + b'\0' * (128 - len(self._buf))
self._f[0] = 0xffffffffffffffff
self._compress(buf, n_remaining)
self._digest = struct.pack('<8Q', *self._h)[:self._digest_length]
self.finalized = True
return self._digest
digest = final
def hexdigest(self):
return binascii.hexlify(self.final())
def _compress(self, block, n_data):
self._N += n_data
self._t[0] = self._N & 0xffffffffffffffff
self._t[1] = self._N >> 64
m = struct.unpack_from('<16Q', block)
v = self._h + self.IV
v[12] ^= self._t[0]
v[13] ^= self._t[1]
v[14] ^= self._f[0]
v[15] ^= self._f[1]
for r in range(12):
Blake2b._G(v, m, r, 0, 0, 4, 8, 12)
Blake2b._G(v, m, r, 1, 1, 5, 9, 13)
Blake2b._G(v, m, r, 2, 2, 6, 10, 14)
Blake2b._G(v, m, r, 3, 3, 7, 11, 15)
Blake2b._G(v, m, r, 4, 0, 5, 10, 15)
Blake2b._G(v, m, r, 5, 1, 6, 11, 12)
Blake2b._G(v, m, r, 6, 2, 7, 8, 13)
Blake2b._G(v, m, r, 7, 3, 4, 9, 14)
self._h = [self._h[i] ^ v[i] ^ v[i+8] for i in range(8)]
@staticmethod
def _G(v, m, r, i, a, b, c, d):
va, vb, vc, vd = v[a], v[b], v[c], v[d]
va = (va + vb + m[Blake2b.SIGMA[r][2*i]]) & 0xffffffffffffffff
tmp = vd ^ va
vd = (tmp >> 32) | ((tmp & 0xffffffff) << 32)
vc = (vc + vd) & 0xffffffffffffffff
tmp = vb ^ vc
vb = (tmp >> 24) | ((tmp & 0xffffff) << 40)
va = (va + vb + m[Blake2b.SIGMA[r][2*i+1]]) & 0xffffffffffffffff
tmp = vd ^ va
vd = (tmp >> 16) | ((tmp & 0xffff) << 48)
vc = (vc + vd) & 0xffffffffffffffff
tmp = vb ^ vc
vb = (tmp >> 63) | ((tmp << 1) & 0xffffffffffffffff)
v[a], v[b], v[c], v[d] = va, vb, vc, vd
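# Illustrative cross-check sketch (assumption: with default parameters this
# class should agree with the standard library's blake2b on Python 3.6+;
# not part of the original module):
def _demo_blake2b():
    import hashlib
    assert Blake2b(b'abc').digest() == hashlib.blake2b(b'abc').digest()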
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
import os
import csv
import sys
import datetime
import tempfile
import fileinput
import shutil
import gzip
import ConfigParser
# Config file
Config = ConfigParser.ConfigParser()
Config.read('cron.cfg')
def ConfigSectionMap(section):
dict1 = {}
options = Config.options(section)
for option in options:
        try:
            dict1[option] = Config.get(section, option)
            if dict1[option] == -1:
                print("skip: %s" % option)
        except Exception:
            print("exception on %s!" % option)
            dict1[option] = None
return dict1
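# Expected cron.cfg layout (illustrative sketch; sections and options are
# inferred from the ConfigSectionMap() calls below, values are placeholders):
#
#   [debug]
#   debug = 1
#   [client]
#   hostname = myhost
#   [log]
#   sar = /var/log/sa/sa
#   [export]
#   directory = /var/spool/sar-export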
def set_tmp_path(debug):
global tmppath
tmppath = tempfile.mkdtemp()
if debug:
print "[+] creata directory: " + tmppath
return tmppath
def check_data_path(debug):
directory = ConfigSectionMap("export")['directory']
if not os.path.exists(directory):
os.makedirs(directory)
if debug:
print "[+] Creata directory"
def yesterday():
today = datetime.date.today()
yesterday = today - datetime.timedelta(1)
return yesterday
def set_weekday(yesterday, debug):
    numdaycheck = yesterday.strftime("%u")
    if len(numdaycheck) == 1:
        numdaycheck = "0" + str(numdaycheck)
    if debug:
        print "[+] processing data for day " + numdaycheck
    return numdaycheck
def set_monthday(yesterday, debug):
global monthday
monthday = str(yesterday)[-2:]
if debug:
print "[+] analizzo i dati del giorno " + monthday
return monthday
def remove_utc(csv,searchExp,replaceExp, debug):
for line in fileinput.input(csv, inplace=1):
if searchExp in line:
line = line.replace(searchExp,replaceExp)
sys.stdout.write(line)
if debug:
print "[+] rimosso UTC dal timestamp dal csv"
def remove_header(csv, debug):
lines = open(csv).readlines()
open(csv, 'w').writelines(lines[1:])
if debug:
print "[+] rimosso l'header dal csv"
def sar_to_csv(monthday, export, check, debug):
os.system('sadf -d %s%s -t -- -%s > %s/%s.csv.tmp' % (ConfigSectionMap("log")['sar'], monthday, export, tmppath, check))
csvpath = '%s/%s.csv.tmp' % (tmppath, check)
remove_utc(csvpath," UTC","", debug)
remove_header(csvpath, debug)
if debug:
print "[+] creato il csv: " + csvpath
return csvpath
def custom_csv(monthday, export, debug):
"""Il modulo estrae dal csv solo le informazioni necessarie"""
global check
if export == "r":
check = "ram"
sarcsv = sar_to_csv(monthday, export, check, debug)
try:
with open(sarcsv, 'rb') as f:
reader = csv.reader(f, delimiter=';')
mycsv = '%s/%s-%s_%s.csv.gz' % (tmppath, check, yesterday(), ConfigSectionMap("client")['hostname'])
csv_custom_path['ram'] = mycsv
f = gzip.open(mycsv,'w')
for row in reader:
f.write(row[2]+";"+row[4]+";"+row[6]+";"+row[9]+"\n")
f.close()
if debug:
print "[+] Custom check RAM eseguito"
except IndexError:
print "[-] Il sistema ha subito un riavvio, dati RAM parzialmente corrotti"
if export == "u":
check = "cpu"
sarcsv = sar_to_csv(monthday, export, check, debug)
try:
with open(sarcsv, 'rb') as f:
reader = csv.reader(f, delimiter=';')
mycsv = '%s/%s-%s_%s.csv.gz' % (tmppath, check, yesterday(), ConfigSectionMap("client")['hostname'])
csv_custom_path['cpu'] = mycsv
f = gzip.open(mycsv,'w')
for row in reader:
f.write(row[2]+";"+row[4]+";"+row[6]+";"+row[9]+"\n")
f.close()
if debug:
print "[+] Custom check CPU eseguito"
except IndexError:
print "[-] Il sistema ha subito un riavvio, dati CPU parzialmente corrotti"
return 1
if export == "q":
check = "load-average"
sarcsv = sar_to_csv(monthday, export, check, debug)
try:
with open(sarcsv, 'rb') as f:
reader = csv.reader(f, delimiter=';')
mycsv = '%s/%s-%s_%s.csv.gz' % (tmppath, check, yesterday(), ConfigSectionMap("client")['hostname'])
csv_custom_path['ldavg'] = mycsv
f = gzip.open(mycsv,'w')
for row in reader:
f.write(row[2]+";"+row[5]+";"+row[6]+";"+row[7]+"\n")
f.close()
if debug:
print "[+] Custom check Load Average eseguito"
except IndexError:
print "[-] Il sistema ha subito un riavvio, dati LA parzialmente corrotti"
return 1
if export == "S":
check = "swap"
sarcsv = sar_to_csv(monthday, export, check, debug)
try:
with open(sarcsv, 'rb') as f:
reader = csv.reader(f, delimiter=';')
mycsv = '%s/%s-%s_%s.csv.gz' % (tmppath, check, yesterday(), ConfigSectionMap("client")['hostname'])
csv_custom_path['swap'] = mycsv
f = gzip.open(mycsv,'w')
for row in reader:
f.write(row[2]+";"+row[3]+";"+row[5]+"\n")
f.close()
if debug:
print "[+] Custom check SWAP eseguito"
except IndexError:
print "[-] Il sistema ha subito un riavvio, dati SWAP parzialmente corrotti"
return 1
if export != "S" and export != "u" and export != "r" and export != "q":
print "[-] Argomento export errato!"
def store_csv(debug):
ram = csv_custom_path['ram']
ldavg = csv_custom_path['ldavg']
cpu = csv_custom_path['cpu']
swap = csv_custom_path['swap']
for csvfile in [ram, ldavg, cpu, swap]:
shutil.move(csvfile, ConfigSectionMap("export")['directory'])
if debug:
print "[+] Copiato csvfile in %s" % ConfigSectionMap("export")['directory']
def clean_tmp(debug):
try:
shutil.rmtree(tmppath)
if debug:
print "[+] rimossa la directory temporanea:" + tmppath
except OSError, e:
if e.errno != 2:
raise
return 1
def main():
debug = int(ConfigSectionMap("debug")['debug'])
set_tmp_path(debug)
check_data_path(debug)
global csv_custom_path
csv_custom_path = {}
for export in ["r", "u", "q", "S"]:
custom_csv(set_monthday(yesterday(), debug), export, debug)
store_csv(debug)
clean_tmp(debug)
if __name__ == "__main__":
main()
|
|
"""
Definition of App class and the app manager.
"""
import os
import time
import inspect
import logging
import tornado.ioloop
import tornado.web
from ..util.icon import Icon
from .. import webruntime
from .. import react
from .clientcode import clientCode, Exporter # global client code
from .pair import Pair
# Create/get the tornado event loop
_tornado_loop = tornado.ioloop.IOLoop.instance()
# The tornado server, started on start()
_tornado_app = None
class AppManager(object):
""" Manage apps, or more specifically, the proxy objects.
    There is one AppManager class (in ``flexx.pair.manager``). Its
    purpose is to manage the application classes and instances. Intended
for internal use.
"""
def __init__(self):
# name -> (PairClass, pending, connected) - lists contain proxies
self._proxies = {'__default__': (None, [], [])}
def register_app_class(self, cls):
""" Register a Pair class as being an application.
Applications are identified by the ``__name__`` attribute of
the class. The given class must inherit from ``Pair``.
After registering a class, it becomes possible to connect to
"http://address:port/ClassName".
"""
assert isinstance(cls, type) and issubclass(cls, Pair)
name = cls.__name__
pending, connected = [], []
if name in self._proxies and cls is not self._proxies[name][0]:
oldCls, pending, connected = self._proxies[name]
            logging.warning('Re-registering app class %r' % name)
#raise ValueError('App with name %r already registered' % name)
self._proxies[name] = cls, pending, connected
def get_default_proxy(self):
""" Get the default proxy that is used for interactive use.
When a Pair class is created without a proxy, this method
is called to get one.
The default "app" is served at "http://address:port/__default__".
"""
_, pending, connected = self._proxies['__default__']
proxies = pending + connected
if proxies:
return proxies[-1]
else:
runtime = 'notebook' if is_notebook else 'browser' # todo: what runtime?
proxy = Proxy('__default__', runtime, title='Flexx app')
pending.append(proxy)
return proxy
def add_pending_proxy_instance(self, proxy):
""" Add an app instance as a pending app.
This means that the proxy is created from Python and not yet
connected. A runtime has been launched and we're waiting for
it to connect.
"""
assert isinstance(proxy, Proxy)
assert proxy.app_name in self._proxies
cls, pending, connected = self._proxies[proxy.app_name]
if proxy.status == Proxy.STATUS.PENDING:
assert proxy not in pending
pending.append(proxy)
else:
raise RuntimeError('Cannot add proxy instances that are/were '
'already connected')
def connect_client(self, ws, name, app_id=None):
""" Connect an incoming client connection to a proxy object
Called by the websocket object upon connecting, thus initiating
the application. The connection can be for the default app, for
a pending app, or for a fresh app (external connection).
"""
logging.debug('connecting %s %s' %(name, app_id))
cls, pending, connected = self._proxies[name]
if name == '__default__':
if pending:
proxy = pending.pop(-1)
else:
proxy = Proxy(name, runtime=None)
elif not app_id:
# Create a fresh proxy - there already is a runtime
proxy = Proxy(cls.__name__, runtime=None)
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
else:
# Search for the app with the specific id
for proxy in pending:
if proxy.id == app_id:
pending.remove(proxy)
break
else:
raise RuntimeError('Asked for app id %r, '
'but could not find it' % app_id)
# Add app to connected, set ws
assert proxy.status == Proxy.STATUS.PENDING
proxy._connect_client(ws)
connected.append(proxy)
self.connections_changed._set(proxy.app_name)
return proxy # For the ws
def disconnect_client(self, proxy):
""" Close a connection to a client.
This is called by the websocket when the connection is closed.
The manager will remove the proxy from the list of connected
instances.
"""
cls, pending, connected = self._proxies[proxy.app_name]
try:
connected.remove(proxy)
except ValueError:
pass
proxy.close()
self.connections_changed._set(proxy.app_name)
def has_app_name(self, name):
""" Returns True if name is a registered appliciation name
"""
return name in self._proxies.keys()
def get_app_names(self):
""" Get a list of registered application names
"""
return [name for name in self._proxies.keys()]
def get_proxy_by_id(self, name, id):
""" Get proxy object by name and id
"""
cls, pending, connected = self._proxies[name]
for proxy in pending:
if proxy.id == id:
return proxy
for proxy in connected:
if proxy.id == id:
return proxy
def get_connections(self, name):
""" Given an app name, return the proxy connected objects.
"""
cls, pending, connected = self._proxies[name]
return list(connected)
@react.source
def connections_changed(self, name):
""" Emits the name of the app for which a connection is added
or removed.
"""
return str(name)
# Create global app manager object
manager = AppManager()
# todo: move to ..utils
def port_hash(name):
""" port_hash(name)
Given a string, returns a port number between 49152 and 65535.
    (2**14 (16384) different possibilities)
This range is the range for dynamic and/or private ports
(ephemeral ports) specified by iana.org.
The algorithm is deterministic, thus providing a way to map names
to port numbers.
"""
fac = 0xd2d84a61
val = 0
for c in name:
val += ( val>>3 ) + ( ord(c)*fac )
val += (val>>3) + (len(name)*fac)
return 49152 + (val % 2**14)
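# Illustrative sketch (not part of the original module): the mapping is
# deterministic, and every result falls in the ephemeral port range.
def _demo_port_hash():
    assert port_hash('flexx0') == port_hash('flexx0')
    assert 49152 <= port_hash('some app') <= 65535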
def init_server(host=None, port=None):
""" Initialize the server if it is not already running.
"""
global _tornado_app
    # Check that it's not already running
if _tornado_app is not None:
return
#raise RuntimeError('flexx.ui server already created')
# Create server
from .server import FlexxTornadoApplication
_tornado_app = FlexxTornadoApplication()
# Get default host and port
if host is None:
host = os.getenv('FLEXX_HOSTNAME', 'localhost')
if port is None:
port = os.getenv('FLEXX_PORT', None)
# Start server (find free port number if port not given)
if port is not None:
port = int(port)
_tornado_app.listen(port, host)
else:
for i in range(100):
port = port_hash('flexx%i' % i)
try:
_tornado_app.listen(port, host)
break
except OSError:
pass # address already in use
else:
raise RuntimeError('Could not bind to free address')
    # Notify address, so it's easy to e.g. copy and paste in the browser
_tornado_app.serving_at = host, port
print('Serving apps at http://%s:%i/' % (host, port))
def start(host=None, port=None):
""" Start the server and event loop if not already running.
This function generally does not return until the application is
stopped, although it will try to behave nicely in interactive
environments (e.g. Spyder, IEP, Jupyter notebook), so the caller
should take into account that the function may return immediately.
Arguments:
host (str): The hostname to serve on. Default 'localhost'. This
parameter is ignored if the server was already running.
port (int, str): The port number. If a string is given, it is
hashed to an ephemeral port number. If not given or None,
will try a series of ports until one is found that is free.
"""
# Get server up
init_server(host, port)
# Start event loop
if not (hasattr(_tornado_loop, '_running') and _tornado_loop._running):
_tornado_loop.start()
def run():
""" Start the event loop if not already running, for desktop apps.
In contrast to ``start()``, when the server is started this way,
it will close down when there are no more connections.
"""
manager._auto_stop = True
return start()
manager._auto_stop = False  # module-level default; run() flips this to True
@react.connect('manager.connections_changed')
def _auto_closer(name):
if not manager._auto_stop:
return
for name in manager.get_app_names():
proxies = manager.get_connections(name)
if proxies:
return
else:
logging.info('Stopping Flexx event loop.')
stop()
is_notebook = False
def init_notebook():
""" Initialize the Jupyter notebook by injecting the necessary CSS
and JS into the browser.
"""
global is_notebook
from IPython.display import display, Javascript, HTML
if is_notebook:
display(HTML("<i>Flexx already loaded</i>"))
return # Don't inject twice
is_notebook = True
init_server()
host, port = _tornado_app.serving_at
#name = app.app_name + '-' + app.id
name = '__default__'
url = 'ws://%s:%i/%s/ws' % (host, port, name)
t = "<i>Injecting Flexx JS and CSS</i>"
t += "<style>\n%s\n</style>\n" % clientCode.get_css()
t += "<script>\n%s\n</script>" % clientCode.get_js()
t += "<script>flexx.ws_url=%r; flexx.is_notebook=true; flexx.init();</script>" % url
display(HTML(t))
def stop():
""" Stop the event loop
"""
_tornado_loop.stop()
# # todo: this does not work if the event loop is running!
# def process_events():
# """ Process events
#
# Call this to keep the application working while running in a loop.
# """
# _tornado_loop.run_sync(lambda x=None: None)
def call_later(delay, callback, *args, **kwargs):
""" Call the given callback after delay seconds. If delay is zero,
call in the next event loop iteration.
"""
if delay <= 0:
_tornado_loop.add_callback(callback, *args, **kwargs)
else:
_tornado_loop.add_timeout(_tornado_loop.time() + delay, callback, *args, **kwargs)
#_tornado_loop.call_later(delay, callback, *args, **kwargs) # v4.0+
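# Illustrative usage (sketch): a zero delay means "next event loop
# iteration", anything larger is a timeout in seconds.
#
#     call_later(0, lambda: print('next iteration'))
#     call_later(2.5, lambda: print('after 2.5 seconds'))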
# todo: move to ..util?
def create_enum(*members):
""" Create an enum type from given string arguments.
"""
assert all(isinstance(m, str) for m in members)
enums = dict((s, s) for s in members)
return type('Enum', (), enums)
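# Illustrative sketch (not part of the original module): each member becomes
# a string-valued class attribute.
def _demo_create_enum():
    Color = create_enum('RED', 'GREEN', 'BLUE')
    assert Color.RED == 'RED'
    assert Color.BLUE == 'BLUE'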
def serve(cls):
""" Serve the given Pair class as a web app. Can be used as a decorator.
This registers the given class with the internal app manager. The
app can be loaded via 'http://hostname:port/classname'.
Arguments:
cls (Pair): a subclass of ``app.Pair`` (or ``ui.Widget``).
Returns:
cls: The given class.
"""
assert isinstance(cls, type) and issubclass(cls, Pair)
manager.register_app_class(cls)
cls._IS_APP = True # Mark the class as an app
return cls
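# Illustrative usage (sketch; ``MyApp`` is hypothetical):
#
#     @serve
#     class MyApp(Pair):
#         pass
#
# The app would then be reachable at 'http://hostname:port/MyApp'.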
def launch(cls, runtime='xul', **runtime_kwargs):
""" Launch the given Pair class as a desktop app in the given runtime.
Arguments:
cls (type, str): a subclass of ``app.Pair`` (or ``ui.Widget``). If this
is a string, it simply calls ``webruntime.launch()``.
runtime (str): the runtime to launch the application in. Default 'xul'.
runtime_kwargs: kwargs to pass to the ``webruntime.launch`` function.
Returns:
app (Pair): an instance of the given class.
"""
if isinstance(cls, str):
return webruntime.launch(cls, runtime, **runtime_kwargs)
assert isinstance(cls, type) and issubclass(cls, Pair)
serve(cls)
proxy = Proxy(cls.__name__, runtime, **runtime_kwargs)
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
return app
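# Illustrative usage (sketch; ``MyApp`` is a hypothetical Pair subclass):
#
#     app = launch(MyApp, runtime='xul')  # returns the Pair instance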
def export(cls, filename=None):
""" Export the given Pair class to an HTML document.
Arguments:
cls (Pair): a subclass of ``app.Pair`` (or ``ui.Widget``).
filename (str, optional): Path to write the HTML document to.
If not given or None, will return the html as a string.
Returns:
html (str): The resulting html. If a filename was specified
this returns None.
"""
assert isinstance(cls, type) and issubclass(cls, Pair)
serve(cls)
proxy = Proxy(cls.__name__, '<export>')
app = cls(proxy=proxy)
proxy._set_pair_instance(app)
if filename is None:
return proxy._ws.to_html()
else:
proxy._ws.write_html(filename)
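# Illustrative usage (sketch; ``MyApp`` is a hypothetical Pair subclass):
#
#     html = export(MyApp)          # returns the html as a string
#     export(MyApp, 'myapp.html')   # writes the file, returns None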
# todo: this does not work well with creating apps from scratch yet; see run_python_in_node.py example
class Proxy(object):
""" A proxy between Python and the client runtime
This class is basically a wrapper for the app widget, the web runtime,
and the websocket instance that connects to it.
"""
STATUS = create_enum('PENDING', 'CONNECTED', 'CLOSED')
def __init__(self, app_name, runtime=None, **runtime_kwargs):
# Note: to avoid circular references, do not store the app instance!
self._app_name = app_name
self._runtime_kwargs = runtime_kwargs
# Init runtime object (the runtime argument is a string)
self._runtime = None
# Init websocket, will be set when a connection is made
self._ws = None
# Unless app_name is __default__, the proxy will have a Pair instance
self._pair = None
# Keep track of the Pair classes that the client already knows about
self._known_pair_classes = set()
for cls in clientCode.get_defined_pair_classes():
self._known_pair_classes.add(cls)
# While the client is not connected, we keep a queue of
# commands, which are sent to the client as soon as it connects
self._pending_commands = []
if runtime:
self._launch_runtime(runtime, **runtime_kwargs)
@property
def id(self):
""" The unique identifier of this app as a string. Used to
connect a runtime to a specific client.
"""
return '%x' % id(self)
@property
def app_name(self):
""" The name of the application that this proxy represents.
"""
return self._app_name
@property
def app(self):
""" The Pair instance that represents the app. Can be None if this
is the ``__default__`` app.
"""
return self._pair
def __repr__(self):
s = self.status.lower()
return '<Proxy for %r (%s) at 0x%x>' % (self.app_name, s, id(self))
def _launch_runtime(self, runtime, **runtime_kwargs):
# Register the instance at the manager
manager.add_pending_proxy_instance(self)
if runtime == '<export>':
self._ws = Exporter(self)
elif runtime == 'notebook':
pass  # nothing to launch; the notebook itself provides the client
elif runtime:
init_server()
host, port = _tornado_app.serving_at
# We associate the runtime with this specific app instance by
# including the app id in the url. In this way, it is pretty
# much guaranteed that the runtime will connect to *this* app.
name = self.app_name
if name != '__default__':
name += '-' + self.id
if runtime == 'nodejs':
self._runtime = launch('http://%s:%i/%s/' % (host, port, name),
runtime=runtime, code=clientCode.get_js())
else:
self._runtime = launch('http://%s:%i/%s/' % (host, port, name),
runtime=runtime, **runtime_kwargs)
logging.debug('Instantiate app client %s' % self.app_name)
def _connect_client(self, ws):
assert self._ws is None
# Set websocket object - this is what changes the status to CONNECTED
self._ws = ws
# todo: re-enable this
# Set some app specifics
# self._ws.command('ICON %s.ico' % self.id)
# self._ws.command('TITLE %s' % self._config.title)
# Send pending commands
for command in self._pending_commands:
self._ws.command(command)
def _set_pair_instance(self, pair):
assert self._pair is None
self._pair = pair
# todo: connect to title change and icon change events
def close(self):
""" Close the runtime, if possible
"""
# todo: close via JS
if self._runtime:
self._runtime.close()
if self._pair:
self._pair.disconnect_signals()
self._pair = None # break circular reference
@property
def status(self):
""" The status of this proxy. Can be PENDING, CONNECTED or
CLOSED. See Proxy.STATUS enum.
"""
# todo: is this how we want to do enums throughout?
if self._ws is None:
return self.STATUS.PENDING # not connected yet
elif self._ws.close_code is None:
return self.STATUS.CONNECTED # alive and kicking
else:
return self.STATUS.CLOSED # connection closed
## Widget-facing code
def register_pair_class(self, cls):
""" Register the given class. If already registered, this function
does nothing.
"""
if not (isinstance(cls, type) and issubclass(cls, Pair)):
raise ValueError('Not a Pair class')
if cls in self._known_pair_classes:
return
# Make sure the base classes are defined first
for cls2 in cls.mro()[1:]:
if not issubclass(cls2, Pair): # stop at the first non-Pair class (e.g. object)
break
if cls2 not in self._known_pair_classes:
self.register_pair_class(cls2)
# Register
self._known_pair_classes.add(cls)
# Define class
logging.debug('Dynamically defining class %r' % cls)
js = cls.JS.CODE
css = cls.CSS
self._send_command('DEFINE-JS ' + js)
if css.strip():
self._send_command('DEFINE-CSS ' + css)
def _send_command(self, command):
""" Send the command, add to pending queue.
"""
if self.status == self.STATUS.CONNECTED:
if is_notebook:
# In the notebook, we send commands via a JS display, so that
# they are also executed when the notebook is exported
from IPython.display import display, Javascript
display(Javascript('flexx.command(%r);' % command))
else:
self._ws.command(command)
elif self.status == self.STATUS.PENDING:
self._pending_commands.append(command)
else:
#raise RuntimeError('Cannot send commands; app is closed')
logging.warning('Cannot send commands; app is closed')
def _receive_command(self, command):
""" Received a command from JS.
"""
if command.startswith('RET '):
print(command[4:]) # Return value
elif command.startswith('ERROR '):
logging.error('JS - ' + command[6:].strip())
elif command.startswith('WARN '):
logging.warning('JS - ' + command[5:].strip())
elif command.startswith('PRINT '):
print(command[6:].strip())
elif command.startswith('INFO '):
logging.info('JS - ' + command[5:].strip())
elif command.startswith('SIGNAL '):
# todo: seems weird to deal with here. implement this by registering some handler?
_, id, esid, signal_name, txt = command.split(' ', 4)
ob = Pair._instances.get(id, None)
if ob is not None:
ob._set_signal_from_js(signal_name, txt, esid)
else:
logging.warning('Unknown command received from JS:\n%s' % command)
def _exec(self, code):
""" Like eval, but without returning the result value.
"""
self._send_command('EXEC ' + code)
def eval(self, code):
""" Evaluate the given JavaScript code in the client
Intended for use during development and debugging. Deployable
code should avoid making use of this function.
"""
if self._ws is None:
raise RuntimeError('App not connected')
self._send_command('EVAL ' + code)
|
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import agents_db
from neutron.db import l3_agentschedulers_db
from neutron.extensions import l3 as ext_l3
from neutron.extensions import metering as ext_metering
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants
from neutron.tests.unit.db.metering import test_db_metering
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_l3_plugin
_uuid = uuidutils.generate_uuid
DB_METERING_PLUGIN_KLASS = (
"neutron.services.metering."
"metering_plugin.MeteringPlugin"
)
class MeteringTestExtensionManager(object):
def get_resources(self):
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_metering.RESOURCE_ATTRIBUTE_MAP)
attr.RESOURCE_ATTRIBUTE_MAP.update(ext_l3.RESOURCE_ATTRIBUTE_MAP)
l3_res = ext_l3.L3.get_resources()
metering_res = ext_metering.Metering.get_resources()
return l3_res + metering_res
def get_actions(self):
return []
def get_request_extensions(self):
return []
class TestMeteringPlugin(test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self):
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
plugin = 'neutron.tests.unit.test_l3_plugin.TestL3NatIntPlugin'
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPlugin, self).setUp(plugin=plugin, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
fanout = ('neutron.openstack.common.rpc.proxy.RpcProxy.'
'fanout_cast')
self.fanout_patch = mock.patch(fanout)
self.mock_fanout = self.fanout_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
tenant_id_2 = '8a268a58-1610-4890-87e0-07abb8231206'
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=tenant_id_2,
set_context=True):
self.mock_uuid.return_value = self.uuid
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
def test_remove_metering_label_rpc_call(self):
expected = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
expected['method'] = 'remove_metering_label'
self.mock_fanout.assert_called_with(self.ctx, expected,
topic=self.topic)
def test_remove_one_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': self.uuid},
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'add_metering_label'}
expected_remove = {'args':
{'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'remove_metering_label'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
self.mock_fanout.assert_called_with(self.ctx, expected_add,
topic=self.topic)
self.mock_fanout.assert_called_with(self.ctx, expected_remove,
topic=self.topic)
def test_update_metering_label_rules_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected_add = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid},
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'egress',
'metering_label_id': self.uuid,
'excluded': False,
'id': second_uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
expected_del = {'args':
{'routers': [
{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [
{'remote_ip_prefix': '10.0.0.0/24',
'direction': 'ingress',
'metering_label_id': self.uuid,
'excluded': False,
'id': self.uuid}],
'id': self.uuid}],
'id': self.uuid}]},
'namespace': None,
'method': 'update_metering_label_rules'}
with self.router(tenant_id=self.tenant_id, set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True) as label:
label_data = label['metering_label']
with self.metering_label_rule(label_data['id']):
self.mock_uuid.return_value = second_uuid
with self.metering_label_rule(label_data['id'], direction='egress'):
self.mock_fanout.assert_called_with(self.ctx,
expected_add,
topic=self.topic)
self.mock_fanout.assert_called_with(self.ctx,
expected_del,
topic=self.topic)
def test_delete_metering_label_does_not_clear_router_tenant_id(self):
tenant_id = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
with self.metering_label(tenant_id=tenant_id,
no_delete=True) as metering_label:
with self.router(tenant_id=tenant_id, set_context=True) as r:
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
metering_label_id = metering_label['metering_label']['id']
self._delete('metering-labels', metering_label_id, 204)
router = self._show('routers', r['router']['id'])
self.assertEqual(tenant_id, router['router']['tenant_id'])
class TestRouteIntPlugin(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
test_l3_plugin.TestL3NatIntPlugin):
supported_extension_aliases = ["router", "l3_agent_scheduler"]
class TestMeteringPluginL3AgentScheduler(
test_db_plugin.NeutronDbPluginV2TestCase,
test_l3_plugin.L3NatTestCaseMixin,
test_db_metering.MeteringPluginDbTestCaseMixin):
resource_prefix_map = dict(
(k.replace('_', '-'), constants.COMMON_PREFIXES[constants.METERING])
for k in ext_metering.RESOURCE_ATTRIBUTE_MAP.keys()
)
def setUp(self):
service_plugins = {'metering_plugin_name': DB_METERING_PLUGIN_KLASS}
plugin_str = ('neutron.tests.unit.services.metering.'
'test_metering_plugin.TestRouteIntPlugin')
ext_mgr = MeteringTestExtensionManager()
super(TestMeteringPluginL3AgentScheduler,
self).setUp(plugin=plugin_str, ext_mgr=ext_mgr,
service_plugins=service_plugins)
self.uuid = '654f6b9d-0f36-4ae5-bd1b-01616794ca60'
uuid = 'neutron.openstack.common.uuidutils.generate_uuid'
self.uuid_patch = mock.patch(uuid, return_value=self.uuid)
self.mock_uuid = self.uuid_patch.start()
cast = 'neutron.openstack.common.rpc.proxy.RpcProxy.cast'
self.cast_patch = mock.patch(cast)
self.mock_cast = self.cast_patch.start()
self.tenant_id = 'a7e61382-47b8-4d40-bae3-f95981b5637b'
self.ctx = context.Context('', self.tenant_id, is_admin=True)
self.context_patch = mock.patch('neutron.context.Context',
return_value=self.ctx)
self.mock_context = self.context_patch.start()
self.l3routers_patch = mock.patch(plugin_str +
'.get_l3_agents_hosting_routers')
self.l3routers_mock = self.l3routers_patch.start()
self.topic = 'metering_agent'
def test_add_metering_label_rpc_call(self):
second_uuid = 'e27fe2df-376e-4ac7-ae13-92f050a21f84'
expected = {'args': {'routers': [{'status': 'ACTIVE',
'name': 'router1',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': self.uuid},
{'status': 'ACTIVE',
'name': 'router2',
'gw_port_id': None,
'admin_state_up': True,
'tenant_id': self.tenant_id,
'_metering_labels': [
{'rules': [],
'id': second_uuid}],
'id': second_uuid}]},
'namespace': None,
'method': 'add_metering_label'}
agent_host = 'l3_agent_host'
agent = agents_db.Agent(host=agent_host)
self.l3routers_mock.return_value = [agent]
with self.router(name='router1', tenant_id=self.tenant_id,
set_context=True):
self.mock_uuid.return_value = second_uuid
with self.router(name='router2', tenant_id=self.tenant_id,
set_context=True):
with self.metering_label(tenant_id=self.tenant_id,
set_context=True):
topic = "%s.%s" % (self.topic, agent_host)
self.mock_cast.assert_called_with(self.ctx,
expected,
topic=topic)
|
|
import re
from functools import update_wrapper
import random
from django.conf.urls import url, include
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
from django import http
from django.utils.decorators import classonlymethod
from . import views
from . import options
from . import helpers
from . import actions
from .item import VersionsList
# Constant indicating that an attribute points to its parent
PARENT = 'parent'
ACTION_ALIAS = '_action'
def create_new_viewclass(base, **kwargs):
# Create a new view class based on a view instance or class
data = {}
kwargs.update(getattr(base, 'changed_kwargs', {}))
for k, v in kwargs.items():
if hasattr(base, k):
data[k] = v
if isinstance(base, views.CMSView):
name = "%s%s%s" % (base.__class__.__name__,
hex(id(base)), random.random())
parent = base.__class__
else:
name = base.__name__ + "Sub"
parent = base
return type(name, (parent,), data)
class PromiseBundle(object):
def __init__(self, cls, name=None, title=None, title_plural=None):
assert name
self.name = name
self.title = title
self.title_plural = title_plural
self.cls = cls
self.initialized = None
def __call__(self, child_name, parent, site):
return self.cls(name=self.name,
title=self.title,
title_plural=self.title_plural,
parent=parent,
attr_on_parent=child_name,
site=site)
@staticmethod
def hidden_name(name):
return "_%s_promise" % name
class URLAlias(object):
"""
Alias urls to some other view or bundle. Aliases
created in this way will not be added to the actual
urls in the cms site. But when a url is requested
for an attribute on a bundle that points to a URLAlias
instance, whether that happens through a template tag
or one of bundles view getter methods, the url or view
returned will be the one for the aliased name/bundle.
:param bundle_attr: The name of the bundle that this alias \
points to. None means the current bundle, using the `PARENT` \
constant means the view name will be looked up on the \
parent bundle. Defaults to None.
:param alias_to: The name of the view that you want this \
to point to instead. Defaults to None.
"""
def __init__(self, bundle_attr=None, alias_to=None):
self.bundle_attr = bundle_attr
self.alias_to = alias_to
def get_bundle(self, current_bundle, url_kwargs, context_kwargs):
"""
Returns the bundle to get the alias view from.
If 'self.bundle_attr' is set, that bundle that it points to
will be returned, otherwise the current_bundle will be
returned.
"""
if self.bundle_attr:
if self.bundle_attr == PARENT:
return current_bundle.parent
view, name = current_bundle.get_view_and_name(self.bundle_attr)
return view
return current_bundle
def get_view_name(self, requested):
"""
Returns the name of the view to lookup.
If `requested` is equal to 'self.bundle_attr' then
'main' will be returned. Otherwise if `self.alias_to`
is set then its value will be returned. Otherwise
the `requested` itself will be returned.
"""
value = self.alias_to or requested
if value == self.bundle_attr:
return 'main'
return value
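# Illustrative sketch (not part of the original class): the resolution
# rules in doctest form.
#
#     >>> URLAlias(alias_to='publish').get_view_name('delete')
#     'publish'
#     >>> URLAlias(bundle_attr='edit').get_view_name('edit')
#     'main'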
class ViewAlias(URLAlias):
"""
Works the same as URLAlias except that it allows
you to reuse a view registered somewhere else
at a different url on this bundle.
"""
pass
class BundleMeta(type):
"""
Metaclass for bundle that gathers the known views,
subbundles and meta options from all the parent classes.
"""
def __new__(cls, name, bases, attrs):
meta = options.Meta()
_children = set()
_views = set()
# Copy views from bases along with meta
for base in bases[::-1]:
val = getattr(base, '_views', None)
if val and isinstance(val, tuple):
_views = _views.union(set(base._views))
val = getattr(base, '_children', None)
if val and isinstance(val, tuple):
_children = _children.union(set(base._children))
if hasattr(base, '_meta'):
meta.add_meta(base._meta)
m = attrs.pop('Meta', None)
meta.add_meta(m)
for k, v in attrs.items():
if isinstance(v, PromiseBundle):
_children.add(k)
_views.add(k)
attrs[v.hidden_name(k)] = v
elif isinstance(v, views.CMSView):
_views.add(k)
elif isinstance(v, ViewAlias):
_views.add(k)
for v in _children:
attrs.pop(v, None)
attrs['_children'] = tuple(_children)
attrs['_views'] = tuple(_views)
attrs['_meta'] = meta
cls = super(BundleMeta, cls).__new__(cls, name, bases, attrs)
return cls
class Bundle(object):
"""
Base bundle class. A bundle is a class that is meant to group together
CMSViews and other bundle classes. It contains some methods to
help the views know where to find each other, keep track of their
url parameters and provide page navigation and headers.
Views and sub bundles are specified as class attributes when
creating a new Bundle class.
Each bundle class has an options class stored at _meta. When one bundle
inherits from another, the meta class attributes are copied from all
base classes, with the normal resolution rules applying. The exception
is attributes containing a dictionary. In that case a copy of the
dictionary from the more distant ancestor is made and then updated
with the dictionary from the closer one. The resulting new dictionary
is stored as the value for that attribute.
Any time you set the value of a class attribute to the constant
`PARENT` (also available on bundle instances as `self.parent_attr`)
you are saying that attribute should be looked up on the parent object.
This works for view attributes and some non view attributes like
`navigation` and `object_header`.
:param navigation: A list of tuples that represent the side navigation \
items for this bundle. The format is (attribute_name, title, url_kwargs). \
Title and url_kwargs are optional. If no title is given, the title of the bundle
that the view is on will be used. Default is an empty tuple.
:param dashboard: A list of the tuples that represent the main navigation.\
format is the same as `navigation`. Default is an empty tuple.
:param required_groups: A list of groups names that a visitor must \
be a member of to access views in this bundle. Default is an empty tuple.
:param live_groups: A list of groups names that a visitor must \
be a member of to access the `live_views` in this bundle. Default is None \
which means same as `required_groups`.
:param object_view: The name of the view that should be rendered as \
the object header. Defaults to 'delete'.
:param main_list: A URLAlias for 'main' used by main views as their \
default redirect target.
By default the following views are created:
* **main** - ListView
* **add** - FormView
* **edit** - FormView
* **delete** - DeleteActionView
"""
__metaclass__ = BundleMeta
parent_attr = PARENT
action_alias = ACTION_ALIAS
navigation = ()
dashboard = ()
required_groups = ()
live_groups = None
_children = ()
_views = ()
main = views.ListView()
add = views.FormView(force_add=True)
edit = views.FormView()
delete = actions.DeleteActionView()
main_list = URLAlias(alias_to="main")
object_view = "delete"
def __init__(self, title=None, title_plural=None, name=None,
parent=None, attr_on_parent=None, site=None):
assert name
self.name = name
self.title = title
self.title_plural = title_plural
self.admin_site = site
self._url_params = ()
self.attr_on_parent = attr_on_parent
self.parent = parent
if self.parent:
self.name = "%s_%s" % (self.parent.name, self.name)
reg = r'^%s' % parent.get_regex_for_name(self.name, attr_on_parent)
url_params = re.compile(reg).groupindex.keys()
params = list(parent.url_params)
params.extend(url_params)
self._url_params = tuple(params)
if self.required_groups == self.parent_attr:
self.required_groups = self.parent.required_groups
self.item_regex = self._meta.item_regex_base % {'name': self.name}
# Only process defaults if we have a model
if self._meta.model:
if site and self._meta.primary_model_bundle:
site.register_model(self._meta.model, self)
added_views = []
action_views = set(self._meta.action_views)
for view in self._views:
v = getattr(self, view, None)
if v and isinstance(v, views.CMSView):
view_kwargs = self._meta.get_kwargs_for_view(view)
if self.live_groups and view in self._meta.live_views:
view_kwargs['required_groups'] = list(self.live_groups)
setattr(self, view, create_new_viewclass(v,
**view_kwargs))
# Create aliases for action views
if view in action_views:
view_name = '{0}{1}'.format(view, ACTION_ALIAS)
if not hasattr(self, view_name):
setattr(self, view_name, ViewAlias(alias_to=view))
added_views.append(view_name)
if added_views:
self._views = tuple(list(self._views)+added_views)
def set_admin_site(self, site):
self.admin_site = site
if site and self._meta.primary_model_bundle:
site.register_model(self._meta.model, self)
def _get_url_params(self):
return self._url_params
url_params = property(_get_url_params)
def get_object_header_view(self, request, url_kwargs, parent_only=False,
render_type='object_header'):
"""
An object header is the title block of a CMS page. Actions
linked to in the header are based on this view's bundle.
This returns a view instance and view name for the view that
should be rendered as an object header; the view used is specified
by `self.object_view`. If no match is found, (None, None) is returned.
:param request: The request object
:param url_kwargs: Any url keyword arguments as a dictionary
:param parent_only: If `True` then the view will only \
be rendered if object_view points to the parent. This is usually \
what you want, as it avoids extra lookups for an object \
you already have.
:param render_type: The render type to use for the header. \
Defaults to 'object_header'.
"""
if parent_only and self.object_view != self.parent_attr:
return None, None
if self.object_view == self.parent_attr and self.parent:
return self.parent.get_object_header_view(request, url_kwargs,
render_type=render_type)
elif self.object_view:
view, name = self.get_initialized_view_and_name(self.object_view,
can_submit=False,
base_template='cms/partial.html',
request=request, kwargs=url_kwargs,
render_type=render_type)
if view and view.can_view(request.user):
return view, name
return None, None
def get_string_from_view(self, request, view_name, url_kwargs,
render_type='string'):
"""
Returns a string that is a rendering of the view given a
request, view_name, and the original url_kwargs. Makes the
following changes to the view before rendering:
* Sets can_submit to False.
* Adds action_url to the context. This is the url where \
this view actually lives.
* Sets the default base_template to be 'cms/partial.html'
This will always call GET and never POST, as any actions
that modify data should take place on the original
url and not through this method.
:param request: The request object.
:param view_name: The name of the view that you want.
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
for removing arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param render_type: The render type to use. Defaults to \
'string'.
"""
response = ""
try:
view, name = self.get_initialized_view_and_name(view_name,
render_type=render_type,
can_submit=False,
base_template='cms/partial.html',
request=request, kwargs=url_kwargs)
if isinstance(view, URLAlias):
view_name = view.get_view_name(view_name)
bundle = view.get_bundle(self, url_kwargs, {})
if bundle and isinstance(bundle, Bundle):
return bundle.get_string_from_view(request, view_name,
url_kwargs,
render_type=render_type)
elif view:
if view and name and view.can_view(request.user):
response = self._render_view_as_string(view, name, request,
url_kwargs)
except http.Http404:
pass
return response
def _render_view_as_string(self, view, name, request, url_kwargs):
url_kwargs = view.get_url_kwargs()
url = reverse("admin:%s" % name, kwargs=url_kwargs)
view.add_to_render_data(action_url=url)
return mark_safe(view.as_string(request, **url_kwargs))
def get_view_url(self, view_name, user,
url_kwargs=None, context_kwargs=None,
follow_parent=True, check_permissions=True):
"""
Returns the url for a given view_name. If the view isn't
found or the user does not have permission, None is returned.
A NoReverseMatch error may be raised if the view was unable
to find the correct keyword arguments for the reverse function
from the given url_kwargs and context_kwargs.
:param view_name: The name of the view that you want.
:param user: The user who is requesting the view
:param url_kwargs: The url keyword arguments that came \
with the request object. The view itself is responsible \
for removing arguments that would not be part of a normal match \
for that view. This is done by calling the `get_url_kwargs` \
method on the view.
:param context_kwargs: Extra arguments that will be passed \
to the view for consideration in the final keyword arguments \
for reverse.
:param follow_parent: If we encounter a parent reference should \
we follow it. Defaults to True.
:param check_permissions: Run permissions checks. Defaults to True.
"""
view, url_name = self.get_initialized_view_and_name(view_name,
follow_parent=follow_parent)
if isinstance(view, URLAlias):
view_name = view.get_view_name(view_name)
bundle = view.get_bundle(self, url_kwargs, context_kwargs)
if bundle and isinstance(bundle, Bundle):
return bundle.get_view_url(view_name, user,
url_kwargs=url_kwargs,
context_kwargs=context_kwargs,
follow_parent=follow_parent,
check_permissions=check_permissions)
elif view:
# Get kwargs from view
if not url_kwargs:
url_kwargs = {}
url_kwargs = view.get_url_kwargs(context_kwargs, **url_kwargs)
view.kwargs = url_kwargs
if check_permissions and not view.can_view(user):
return None
url = reverse("admin:%s" % url_name, kwargs=url_kwargs)
return url
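# Illustrative usage (sketch; the 'article_pk' kwarg name is hypothetical):
#
#     url = bundle.get_view_url('edit', request.user,
#                               url_kwargs={'article_pk': 3})
#
# Returns None when the view is missing or the user lacks permission.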
def _view_uses_name_as_url_kwarg(self, view_name):
# Returns True if the given view_name uses
# self.name in url kwargs
view_name = view_name.replace(ACTION_ALIAS, '')
return (view_name in self._meta.item_views) or \
(view_name in self._meta.action_views)
def _get_slug_url_kwarg_for_name(self, view_name):
arg = None
if self._view_uses_name_as_url_kwarg(view_name):
arg = '%s_pk' % self.name
elif self.parent:
# Get the attribute from the parent so this can be chained
arg = self.parent._get_slug_url_kwarg_for_name(self.attr_on_parent)
return arg
def _get_view_kwargs(self, view, view_name):
kwargs = {}
if hasattr(view, 'bundle'):
kwargs['bundle'] = self
if hasattr(view, 'slug_url_kwarg'):
kwargs['slug_url_kwarg'] = self._get_slug_url_kwarg_for_name(view_name)
return kwargs
def get_initialized_view_and_name(self, view_name,
follow_parent=True, **extra_kwargs):
"""
Creates and returns a new instance of a CMSView \
and its url_name.
:param view_name: The name of the view to return.
:param follow_parent: If we encounter a parent reference should \
we follow it. Defaults to True.
:param extra_kwargs: Keyword arguments to pass to the view.
"""
view, name = self.get_view_and_name(view_name)
# Initialize the view with the right kwargs
if hasattr(view, 'as_view'):
e = dict(extra_kwargs)
e.update(**self._get_view_kwargs(view, view_name))
e['name'] = view_name
view = view(**e)
# If it is a Bundle, return its main view
elif isinstance(view, Bundle):
view, name = view.get_initialized_view_and_name('main',
**extra_kwargs)
elif view == self.parent_attr and self.parent:
if follow_parent:
return self.parent.get_initialized_view_and_name(view_name,
**extra_kwargs)
else:
view = None
name = None
return view, name
def get_single_title(self):
return self.get_title(plural=False)
def get_title(self, plural=True):
"""
Gets the title of the bundle. Titles can be singular
or plural.
"""
value = self.title
if value == self.parent_attr:
return self.parent.get_title(plural=plural)
if not value and self._meta.model:
value = helpers.model_name(self._meta.model,
self._meta.custom_model_name,
self._meta.custom_model_name_plural,
plural)
elif self.title and plural:
value = helpers.pluralize(self.title, self.title_plural)
return helpers.capfirst_if_needed(value)
def _get_bundle_from_promise(self, attname):
assert self.admin_site, "You must specify an admin_site before initializing sub bundles"
attr = "_%s_bundle" % attname
view = getattr(self, attr, None)
if not view:
promise = getattr(self, PromiseBundle.hidden_name(attname),
None)
if promise:
view = promise(attname, self, self.admin_site)
setattr(self, attr, view)
return view
def get_view_and_name(self, attname):
"""
Gets a view or bundle and returns it
and its url_name.
"""
view = getattr(self, attname, None)
if attname in self._children:
view = self._get_bundle_from_promise(attname)
if view:
if attname in self._children:
return view, view.name
elif isinstance(view, ViewAlias):
view_name = view.get_view_name(attname)
bundle = view.get_bundle(self, {}, {})
if bundle and isinstance(bundle, Bundle):
view, name = bundle.get_view_and_name(view_name)
if hasattr(view, 'as_view'):
if attname != 'main':
name = "%s_%s" % (self.name, attname)
else:
name = self.name
return view, name
elif view == self.parent_attr and self.parent:
return self.parent_attr, None
elif isinstance(view, URLAlias):
return view, None
return None, None
def get_regex_for_name(self, name, attname):
# Get the regex for this view
regex = ''
if name != self.name and attname != 'main':
regex = "%s/" % attname
if hasattr(self._meta, "%s_regex_base" % attname):
regex = getattr(self._meta, "%s_regex_base" % attname)
regex = regex % {'group_name': self.name,
'attname': attname}
elif attname in self._meta.item_views or \
attname in self._meta.action_views:
regex = "%s%s" % (self.item_regex, regex)
return regex
def get_url(self, name, view_obj, attname):
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
regex = self.get_regex_for_name(name, attname)
if isinstance(view_obj, Bundle):
reg = r'^%s' % regex
u = url(reg, include(view_obj.get_urls()))
else:
view_kwargs = self._get_view_kwargs(view_obj, attname)
u = url(r'^%s$' % regex, wrap(view_obj.as_view(**view_kwargs)),
name=name)
return u
def get_urls(self):
"""
Returns urls handling bundles and views.
This processes the 'item view' first in order
and then adds any non item views at the end.
"""
parts = []
seen = set()
# Process item views in order
for v in list(self._meta.item_views)+list(self._meta.action_views):
if v not in seen:
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
seen.add(v)
# Process everything else that we have not seen
for v in set(self._views).difference(seen):
# Get the url name
view, name = self.get_view_and_name(v)
if view and name:
parts.append(self.get_url(name, view, v))
return parts
def _optional_tuples(self, tup):
for item in tup:
if len(item) == 1:
yield (item[0], None, None)
elif len(item) == 2:
yield (item[0], item[1], None)
else:
yield item
def _nav_from_tuple(self, request, tup, **kwargs):
navigation = []
for view_name, title, url_kwargs in self._optional_tuples(tup):
url = self.get_view_url(view_name, request.user,
url_kwargs=url_kwargs,
context_kwargs=kwargs)
if url:
if not title and view_name in self._children:
b = self._get_bundle_from_promise(view_name)
title = b.get_title()
elif not title:
title = self.get_title()
navigation.append((url, title))
return navigation
def get_dashboard_urls(self, request):
"""
Generates a list of tuples based on the values
in `self.dashboard` that are the main navigation links
for this bundle. The tuple format is (url, title).
"""
return self._nav_from_tuple(request, self.dashboard)
def get_dashboard_block(self, request):
"""
Returns a block of html for display on the dashboard.
"""
return None
def get_navigation(self, request, **kwargs):
"""
Generates a list of tuples based on the values
in `self.navigation` that are the side navigation links
for this bundle. The tuple format is (url, title).
"""
if self.navigation == self.parent_attr:
if self.parent:
return self.parent.get_navigation(request, **kwargs)
return ()
else:
return self._nav_from_tuple(request, self.navigation,
**kwargs)
@classonlymethod
def as_subbundle(cls, name=None, title=None, title_plural=None):
"""
Wraps the given bundle so that it can be lazily
instantiated.
:param name: The slug for this bundle.
:param title: The verbose name for this bundle.
:param title_plural: The plural verbose name for this bundle.
"""
return PromiseBundle(cls, name=name, title=title,
title_plural=title_plural)
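# Illustrative usage (sketch; ``CommentBundle`` is hypothetical): attach a
# lazily instantiated child bundle as a class attribute.
#
#     class ArticleBundle(Bundle):
#         comments = CommentBundle.as_subbundle(name='comments',
#                                               title='Comment')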
class BlankBundle(Bundle):
"""
Base bundle that has no preset views. Should be used as a base
for bundles that are not meant for typical CRUD operations.
"""
main = None
add = None
edit = None
delete = None
publish = None
versions = None
unpublish = None
main_list = None
class VersionMixin(object):
_views = ('publish', 'unpublish', 'versions')
publish = actions.PublishActionView()
unpublish = actions.UnPublishActionView()
versions = VersionsList()
class VersionedBundle(Bundle, VersionMixin):
"""
Base bundle for versioned models. Adds views for publishing,
un-publishing and managing versions.
"""
class Meta(options.VersionMeta):
pass
class DelegatedObjectBundle(Bundle):
"""
Base bundle that delegates the following views to use the
bundle specified by edit:
* delete
* publish
* unpublish
* versions
This is useful for bundles that contain a list but where all the actions
for items in that list are defined on the `edit` sub bundle.
"""
delete = URLAlias(bundle_attr='edit')
publish = URLAlias(bundle_attr='edit')
unpublish = URLAlias(bundle_attr='edit')
versions = URLAlias(bundle_attr='edit')
delete_action = ViewAlias(bundle_attr='edit', alias_to='delete')
publish_action = ViewAlias(bundle_attr='edit', alias_to='publish')
unpublish_action = ViewAlias(bundle_attr='edit', alias_to='unpublish')
class Meta(options.VersionMeta):
pass
class ObjectOnlyBundle(Bundle):
"""
Base Bundle for sub bundles that do not contain a list
page. Makes the following changes
* Removes add.
* main is a FormView.
* edit points to PARENT, since main now serves as the edit form.
* main_list points to PARENT.
* The item views attribute of meta is set to be empty.
"""
add = None
main = views.FormView()
edit = PARENT
main_list = URLAlias(bundle_attr=PARENT)
delegated = True
class Meta:
item_views = ()
action_views = ()
live_views = ('delete', 'publish', 'unpublish', 'versions')
class VersionedObjectOnlyBundle(ObjectOnlyBundle, VersionMixin):
"""
Same as ObjectOnlyBundle but adds version management views.
"""
pass
class ChildBundle(Bundle):
"""
Base Bundle for sub bundles. Makes the following changes:
* required_groups is inherited from PARENT.
"""
required_groups = PARENT
class Meta:
pass
class ParentVersionedBundle(ChildBundle):
"""
Same as ChildBundle except that it also changes:
* object_view is inherited from PARENT.
"""
object_view = PARENT
|
|
"""
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
det : float
Log-determinant of the robust covariance estimate.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
dist : array-like, shape (n_samples,)
Mahalanobis distances of all observations to `location`, computed
with the robust precision matrix.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
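# Illustrative sketch (not part of the original module): one c_step run on
# toy data; the returned support mask selects exactly n_support rows.
def _demo_c_step():
    rng = np.random.RandomState(42)
    X_demo = rng.randn(100, 2)
    loc, cov, det, support, dist = c_step(X_demo, n_support=60,
                                          random_state=rng)
    assert support.sum() == 60
    assert cov.shape == (2, 2)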
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
dist = np.inf
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
# If the data already has singular covariance, calculate the precision,
# as the loop below will not be entered.
if np.isinf(det):
precision = linalg.pinvh(covariance)
previous_det = np.inf
while (det < previous_det and remaining_iterations > 0
and not np.isinf(det)):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = linalg.pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Check if best fit already found (det -> 0, logdet -> -inf)
if np.isinf(det):
results = location, covariance, det, support, dist
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[RV]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
verbose : boolean, default False
Control the output verbosity.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [RV] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
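# Illustrative sketch (not part of the original module): ask for the two
# best (location, covariance) candidates out of five random starts.
def _demo_select_candidates():
    rng = np.random.RandomState(0)
    X_demo = rng.randn(200, 3)
    locs, covs, supports, dists = select_candidates(
        X_demo, n_support=120, n_trials=5, select=2, random_state=rng)
    assert locs.shape == (2, 3)
    assert covs.shape == (2, 3, 3)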
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`(n_samples + n_features + 1) / 2`.
cov_computation_method : callable, default empirical_covariance
The function which will be used to compute the covariance.
Must return shape (n_features, n_features)
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Notes
-----
The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
The principle is to compute robust estimates on random subsets before
pooling them into a larger subset, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [RouseeuwVan]_,
see the MinCovDet object.
References
----------
.. [RouseeuwVan] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start] +
X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = linalg.pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big. Let's try with something much smaller
            # (and less optimal): shrink the candidate pool first, then
            # allocate the smaller array.
            n_best_tot = 10
            n_best_sub = 2
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
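# Illustrative sketch (not part of the original module): a minimal example of
# calling fast_mcd on synthetic data with a few planted outliers; it assumes
# numpy is available as `np`, as elsewhere in this file.
def _example_fast_mcd():
    rng = np.random.RandomState(42)
    X = rng.randn(200, 2)
    X[:20] += 6.0  # plant a cluster of outliers
    location, covariance, support, dist = fast_mcd(X, random_state=0)
    # `support` marks the ~50% of samples used for the raw estimates; the
    # planted outliers should mostly fall outside of it.
    return support[:20].sum()  # expected to be close to zero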
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Read more in the :ref:`User Guide <robust_covariance>`.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
    assume_centered : bool
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is approximately, but not
        exactly, zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
        (n_samples + n_features + 1) / 2
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the observations in the training set
        (the data on which `fit` was called).
References
----------
    .. [Rousseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
.. [Rousseeuw] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [ButlerDavies] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated with your "
                          "dataset is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = linalg.pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [RVD]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVD] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates) described
in [RVDriessen]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
References
----------
.. [RVDriessen] `A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS`
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
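# Illustrative sketch (not part of the original module): fitting MinCovDet on
# contaminated data and comparing the reweighted robust covariance with the
# plain empirical covariance; assumes numpy is available as `np`.
def _example_min_cov_det():
    rng = np.random.RandomState(0)
    X = rng.randn(300, 3)
    X[:30] *= 10.0  # inflate a tenth of the samples into outliers
    mcd = MinCovDet(random_state=0).fit(X)
    # The robust covariance should stay close to the identity, while the
    # empirical covariance is inflated by the outliers.
    return mcd.covariance_, empirical_covariance(X)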
|
|
# -*- coding: utf-8 -*-
# This file is part of visvalingamwyatt.
# https://github.com/fitnr/visvalingamwyatt
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT-license
# Copyright (c) 2015, fitnr <contact@fakeisthenewreal.org>
"""visvalingamwyatt module tests"""
import json
import os
import unittest
from collections import namedtuple
import numpy as np
import visvalingamwyatt as vw
from visvalingamwyatt import __main__ as cli
class TestVW(unittest.TestCase):
def setUp(self):
self.samplefile = os.path.join(os.path.dirname(__file__), 'data', 'sample.json')
with open(self.samplefile) as f:
self.fixture = json.load(f).get('features')[0]
def standard(self, **kwargs):
result = vw.simplify_feature(self.fixture, **kwargs)
self.assertIn('geometry', result)
self.assertIn('properties', result)
self.assertEqual(result['properties'], self.fixture['properties'])
self.assertEqual(self.fixture['geometry']['type'], result['geometry']['type'])
self.assertEqual(
self.fixture['geometry']['coordinates'][0],
result['geometry']['coordinates'][0],
)
self.assertGreater(
len(self.fixture['geometry']['coordinates']),
len(result['geometry']['coordinates']),
)
return result
def testSimplifyFeature(self):
self.standard()
def testSimplifyFeatureThreshold(self):
self.standard(threshold=0.1)
def testSimplifyFeatureRatio(self):
result = self.standard(ratio=0.1)
b = vw.simplify_feature(self.fixture, ratio=0.90)
assert len(b['geometry']['coordinates']) > len(
result['geometry']['coordinates']
)
for i, j in zip(range(1, 9), range(2, 10)):
r = vw.simplify_feature(self.fixture, ratio=float(i) / 10)
s = vw.simplify_feature(self.fixture, ratio=float(j) / 10)
assert len(r['geometry']['coordinates']) <= len(
s['geometry']['coordinates']
)
def testSimplifyFeatureNumber(self):
result = self.standard(number=10)
self.assertEqual(len(result['geometry']['coordinates']), 10)
def test3dCoords(self):
coordinates = [
[0.0, 0.0, 0.0],
[1.1, 0, 1],
[2.1, 3, 0],
[4.1, 5, 10],
[1.1, 2, 0],
[5.1, 2, 0],
]
a = vw.simplify(coordinates)
self.assertEqual(a[0], [0, 0, 0])
self.assertLessEqual(len(a), len(coordinates))
def testSimplifyTupleLike(self):
Point = namedtuple("Point", ("x", "y"))
# coordinates are in the shape
#
# c
# b d
# a e
#
# so b and d are eliminated
a, b, c, d, e = Point(0, 0), Point(1, 1), Point(2, 2), Point(3, 1), Point(4, 0)
inp = [a, b, c, d, e]
expected_output = np.array([a, c, e])
actual_output = vw.simplify(inp, threshold=0.001)
self.assertTrue(np.array_equal(actual_output, expected_output))
def testSimplifyIntegerCoords(self):
# coordinates are in the shape
#
# c
# b d
# a e
#
# so b and d are eliminated
a, b, c, d, e = (0, 0), (1, 1), (2, 2), (3, 1), (4, 0)
inp = [a, b, c, d, e]
expected_output = np.array([a, c, e])
actual_output = vw.simplify(inp, threshold=0.001)
self.assertTrue(np.array_equal(actual_output, expected_output))
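    # Why b and d are eliminated in the two tests above: a-b-c and c-d-e are
    # collinear, so each middle point has an effective triangle area of zero
    # under the shoelace formula, e.g. for a(0, 0), b(1, 1), c(2, 2):
    #   area = 0.5 * abs(0*(1 - 2) + 1*(2 - 0) + 2*(0 - 1)) = 0
    # so any positive threshold removes b and d.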
def testSimplifyClosedFeature(self):
'''When simplifying geometries with closed rings (Polygons and MultiPolygons),
the first and last points in each ring should remain the same'''
test_ring = [
[121.20803833007811, 24.75431413309125],
[121.1846923828125, 24.746831298412058],
[121.1517333984375, 24.74059525872194],
[121.14486694335936, 24.729369599118222],
[121.12152099609375, 24.693191139677126],
[121.13525390625, 24.66449040712424],
[121.10504150390625, 24.66449040712424],
[121.10092163085936, 24.645768980151793],
[121.0748291015625, 24.615808859044243],
[121.09405517578125, 24.577099744289427],
[121.12564086914062, 24.533381526147682],
[121.14624023437499, 24.515889973088104],
[121.19018554687499, 24.528384188171866],
[121.19430541992186, 24.57959746772822],
[121.23687744140624, 24.587090339209634],
[121.24099731445311, 24.552119771544227],
[121.2451171875, 24.525885444592642],
[121.30279541015624, 24.55087064225044],
[121.27258300781251, 24.58958786341259],
[121.26708984374999, 24.623299562653035],
[121.32614135742188, 24.62579636412304],
[121.34674072265624, 24.602074737077242],
[121.36871337890625, 24.580846310771612],
[121.40853881835936, 24.653257887871963],
[121.40853881835936, 24.724380091871726],
[121.37283325195312, 24.716895455859337],
[121.3604736328125, 24.693191139677126],
[121.343994140625, 24.69942955501979],
[121.32888793945312, 24.728122241065808],
[121.3714599609375, 24.743089712134605],
[121.37695312499999, 24.77177232822881],
[121.35635375976562, 24.792968265314457],
[121.32476806640625, 24.807927923059236],
[121.29730224609375, 24.844072974931866],
[121.24923706054688, 24.849057671305268],
[121.24786376953125, 24.816653556469955],
[121.27944946289062, 24.79047481357294],
[121.30142211914061, 24.761796517185815],
[121.27258300781251, 24.73311159823193],
[121.25335693359374, 24.708162811665265],
[121.20391845703125, 24.703172454280217],
[121.19979858398438, 24.731864277701714],
[121.20803833007811, 24.75431413309125],
]
multipolygon = {"type": "MultiPolygon", "coordinates": [[test_ring]]}
number = vw.simplify_geometry(multipolygon, number=10)
self.assertEqual(
number['coordinates'][0][0][0], number['coordinates'][0][0][-1]
)
ratio = vw.simplify_geometry(multipolygon, ratio=0.3)
self.assertEqual(ratio['coordinates'][0][0][0], ratio['coordinates'][0][0][-1])
thres = vw.simplify_geometry(multipolygon, threshold=0.01)
self.assertEqual(thres['coordinates'][0][0][0], thres['coordinates'][0][0][-1])
def testCli(self):
pass
def testSimplify(self):
'''Use the command-line function to simplify the sample data.'''
try:
output = 'tmp.json'
cli.simplify(self.samplefile, output, number=9)
self.assertTrue(os.path.exists(output))
            with open(output, 'r') as f:
result = json.load(f)
coords = result['features'][0]['geometry']['coordinates']
self.assertEqual(len(coords), 9)
finally:
os.remove(output)
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""pytree-related utilities.
This module collects various utilities related to the parse trees produced by
the lib2to3 library.
NodeName(): produces a string name for pytree nodes.
ParseCodeToTree(): convenience wrapper around lib2to3 interfaces to parse
a given string with code to a pytree.
InsertNodeBefore(): insert a node before another in a pytree.
InsertNodeAfter(): insert a node after another in a pytree.
{Get,Set}NodeAnnotation(): manage custom annotations on pytree nodes.
"""
import ast
from lib2to3 import pygram
from lib2to3 import pytree
from lib2to3.pgen2 import driver
from lib2to3.pgen2 import parse
from lib2to3.pgen2 import token
# TODO(eliben): We may want to get rid of this filtering at some point once we
# have a better understanding of what information we need from the tree. Then,
# these tokens may be filtered out from the tree before the tree gets to the
# unwrapper.
NONSEMANTIC_TOKENS = frozenset(['DEDENT', 'INDENT', 'NEWLINE', 'ENDMARKER'])
OPENING_BRACKETS = frozenset({'(', '[', '{'})
CLOSING_BRACKETS = frozenset({')', ']', '}'})
class Annotation(object):
"""Annotation names associated with pytrees."""
CHILD_INDENT = 'child_indent'
NEWLINES = 'newlines'
MUST_SPLIT = 'must_split'
SPLIT_PENALTY = 'split_penalty'
SUBTYPE = 'subtype'
def NodeName(node):
"""Produce a string name for a given node.
For a Leaf this is the token name, and for a Node this is the type.
Arguments:
node: a tree node
Returns:
Name as a string.
"""
# Nodes with values < 256 are tokens. Values >= 256 are grammar symbols.
if node.type < 256:
return token.tok_name[node.type]
else:
return pygram.python_grammar.number2symbol[node.type]
# lib2to3 thoughtfully provides pygram.python_grammar_no_print_statement for
# parsing Python 3 code that wouldn't parse otherwise (when 'print' is used in a
# context where a keyword is disallowed).
# It forgets to do the same for 'exec' though. Luckily, Python is amenable to
# monkey-patching.
_GRAMMAR_FOR_PY3 = pygram.python_grammar_no_print_statement.copy()
del _GRAMMAR_FOR_PY3.keywords['exec']
_GRAMMAR_FOR_PY2 = pygram.python_grammar.copy()
def ParseCodeToTree(code):
"""Parse the given code to a lib2to3 pytree.
Arguments:
code: a string with the code to parse.
Raises:
SyntaxError if the code is invalid syntax.
parse.ParseError if some other parsing failure.
Returns:
The root node of the parsed tree.
"""
# This function is tiny, but the incantation for invoking the parser correctly
# is sufficiently magical to be worth abstracting away.
try:
# Try to parse using a Python 3 grammar, which is more permissive (print and
# exec are not keywords).
parser_driver = driver.Driver(_GRAMMAR_FOR_PY3, convert=pytree.convert)
tree = parser_driver.parse_string(code, debug=False)
except parse.ParseError:
    # Now try to parse using a Python 2 grammar; if this fails, then
# there's something else wrong with the code.
try:
parser_driver = driver.Driver(_GRAMMAR_FOR_PY2, convert=pytree.convert)
tree = parser_driver.parse_string(code, debug=False)
except parse.ParseError:
# Raise a syntax error if the code is invalid python syntax.
try:
ast.parse(code)
except SyntaxError as e:
raise e
else:
raise
return _WrapEndMarker(tree)
def _WrapEndMarker(tree):
"""Wrap a single ENDMARKER token in a "file_input" node.
Arguments:
tree: (pytree.Node) The root node of the parsed tree.
Returns:
The root node of the parsed tree. If the tree is a single ENDMARKER node,
then that node is wrapped in a "file_input" node. That will ensure we don't
skip comments attached to that node.
"""
if isinstance(tree, pytree.Leaf) and tree.type == token.ENDMARKER:
return pytree.Node(pygram.python_symbols.file_input, [tree])
return tree
def InsertNodesBefore(new_nodes, target):
"""Insert new_nodes before the given target location in the tree.
Arguments:
new_nodes: a sequence of new nodes to insert (the nodes should not be in the
tree).
    target: the target node before which the new nodes will be inserted.
Raises:
RuntimeError: if the tree is corrupted, or the insertion would corrupt it.
"""
for node in new_nodes:
_InsertNodeAt(node, target, after=False)
def InsertNodesAfter(new_nodes, target):
"""Insert new_nodes after the given target location in the tree.
Arguments:
new_nodes: a sequence of new nodes to insert (the nodes should not be in the
tree).
    target: the target node after which the new nodes will be inserted.
Raises:
RuntimeError: if the tree is corrupted, or the insertion would corrupt it.
"""
for node in reversed(new_nodes):
_InsertNodeAt(node, target, after=True)
def _InsertNodeAt(new_node, target, after=False):
"""Underlying implementation for node insertion.
Arguments:
new_node: a new node to insert (this node should not be in the tree).
target: the target node.
after: if True, new_node is inserted after target. Otherwise, it's inserted
before target.
Returns:
nothing
Raises:
RuntimeError: if the tree is corrupted, or the insertion would corrupt it.
"""
# Protect against attempts to insert nodes which already belong to some tree.
if new_node.parent is not None:
raise RuntimeError('inserting node which already has a parent',
(new_node, new_node.parent))
# The code here is based on pytree.Base.next_sibling
parent_of_target = target.parent
if parent_of_target is None:
raise RuntimeError('expected target node to have a parent', (target,))
for i, child in enumerate(parent_of_target.children):
if child is target:
insertion_index = i + 1 if after else i
parent_of_target.insert_child(insertion_index, new_node)
return
raise RuntimeError('unable to find insertion point for target node',
(target,))
# The following constant and functions implement a simple custom annotation
# mechanism for pytree nodes. We attach new attributes to nodes. Each attribute
# is prefixed with _NODE_ANNOTATION_PREFIX. These annotations should only be
# managed through GetNodeAnnotation and SetNodeAnnotation.
_NODE_ANNOTATION_PREFIX = '_yapf_annotation_'
def GetNodeAnnotation(node, annotation, default=None):
"""Get annotation value from a node.
Arguments:
node: the node.
annotation: annotation name - a string.
default: the default value to return if there's no annotation.
Returns:
Value of the annotation in the given node. If the node doesn't have this
particular annotation name yet, returns default.
"""
return getattr(node, _NODE_ANNOTATION_PREFIX + annotation, default)
def SetNodeAnnotation(node, annotation, value):
"""Set annotation value on a node.
Arguments:
node: the node.
annotation: annotation name - a string.
value: annotation value to set.
"""
setattr(node, _NODE_ANNOTATION_PREFIX + annotation, value)
def AppendNodeAnnotation(node, annotation, value):
"""Appends an annotation value to a list of annotations on the node.
Arguments:
node: the node.
annotation: annotation name - a string.
value: annotation value to set.
"""
attr = GetNodeAnnotation(node, annotation, set())
attr.add(value)
SetNodeAnnotation(node, annotation, attr)
def RemoveSubtypeAnnotation(node, value):
"""Removes an annotation value from the subtype annotations on the node.
Arguments:
node: the node.
value: annotation value to remove.
"""
attr = GetNodeAnnotation(node, Annotation.SUBTYPE)
if attr and value in attr:
attr.remove(value)
SetNodeAnnotation(node, Annotation.SUBTYPE, attr)
def DumpNodeToString(node):
"""Dump a string representation of the given node. For debugging.
Arguments:
node: the node.
Returns:
The string representation.
"""
if isinstance(node, pytree.Leaf):
fmt = '{name}({value}) [lineno={lineno}, column={column}, prefix={prefix}]'
return fmt.format(name=NodeName(node),
value=repr(node),
lineno=node.lineno,
column=node.column,
prefix=repr(node.prefix))
else:
fmt = '{node} [{len} children] [child_indent="{indent}"]'
return fmt.format(node=NodeName(node),
len=len(node.children),
indent=GetNodeAnnotation(node, Annotation.CHILD_INDENT))
def IsCommentStatement(node):
return (NodeName(node) == 'simple_stmt' and
NodeName(node.children[0]) == 'COMMENT')
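# Illustrative sketch (not part of the original module): round-tripping a tiny
# snippet through ParseCodeToTree and the annotation helpers defined above.
def _ExampleAnnotationRoundtrip():
  tree = ParseCodeToTree('x = 1\n')
  SetNodeAnnotation(tree, Annotation.NEWLINES, 2)
  assert GetNodeAnnotation(tree, Annotation.NEWLINES) == 2
  # DumpNodeToString is handy when debugging annotated trees.
  return DumpNodeToString(tree)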
|
|
"""Sensor platform for the GitHub integratiom."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from aiogithubapi import GitHubAPI, GitHubException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_NAME,
CONF_ACCESS_TOKEN,
CONF_NAME,
CONF_PATH,
CONF_URL,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
CONF_REPOS = "repositories"
ATTR_LATEST_COMMIT_MESSAGE = "latest_commit_message"
ATTR_LATEST_COMMIT_SHA = "latest_commit_sha"
ATTR_LATEST_RELEASE_TAG = "latest_release_tag"
ATTR_LATEST_RELEASE_URL = "latest_release_url"
ATTR_LATEST_OPEN_ISSUE_URL = "latest_open_issue_url"
ATTR_OPEN_ISSUES = "open_issues"
ATTR_LATEST_OPEN_PULL_REQUEST_URL = "latest_open_pull_request_url"
ATTR_OPEN_PULL_REQUESTS = "open_pull_requests"
ATTR_PATH = "path"
ATTR_STARGAZERS = "stargazers"
ATTR_FORKS = "forks"
ATTR_CLONES = "clones"
ATTR_CLONES_UNIQUE = "clones_unique"
ATTR_VIEWS = "views"
ATTR_VIEWS_UNIQUE = "views_unique"
DEFAULT_NAME = "GitHub"
SCAN_INTERVAL = timedelta(seconds=300)
REPO_SCHEMA = vol.Schema(
{vol.Required(CONF_PATH): cv.string, vol.Optional(CONF_NAME): cv.string}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_URL): cv.url,
vol.Required(CONF_REPOS): vol.All(cv.ensure_list, [REPO_SCHEMA]),
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the GitHub sensor platform."""
sensors = []
session = async_get_clientsession(hass)
for repository in config[CONF_REPOS]:
data = GitHubData(
repository=repository,
access_token=config[CONF_ACCESS_TOKEN],
session=session,
server_url=config.get(CONF_URL),
)
sensors.append(GitHubSensor(data))
async_add_entities(sensors, True)
class GitHubSensor(SensorEntity):
"""Representation of a GitHub sensor."""
_attr_icon = "mdi:github"
def __init__(self, github_data):
"""Initialize the GitHub sensor."""
self._attr_unique_id = github_data.repository_path
self._repository_path = None
self._latest_commit_message = None
self._latest_commit_sha = None
self._latest_release_tag = None
self._latest_release_url = None
self._open_issue_count = None
self._latest_open_issue_url = None
self._pull_request_count = None
self._latest_open_pr_url = None
self._stargazers = None
self._forks = None
self._clones = None
self._clones_unique = None
self._views = None
self._views_unique = None
self._github_data = github_data
async def async_update(self):
"""Collect updated data from GitHub API."""
await self._github_data.async_update()
self._attr_available = self._github_data.available
if not self.available:
return
self._attr_name = self._github_data.name
self._attr_native_value = self._github_data.last_commit.sha[0:7]
self._latest_commit_message = self._github_data.last_commit.commit.message
self._latest_commit_sha = self._github_data.last_commit.sha
self._stargazers = self._github_data.repository_response.data.stargazers_count
self._forks = self._github_data.repository_response.data.forks_count
self._pull_request_count = len(self._github_data.pulls_response.data)
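        # GitHub's open_issues_count includes pull requests, so subtract the
        # open pull requests to report only true issues.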
self._open_issue_count = (
self._github_data.repository_response.data.open_issues_count or 0
) - self._pull_request_count
if self._github_data.last_release:
self._latest_release_tag = self._github_data.last_release.tag_name
self._latest_release_url = self._github_data.last_release.html_url
if self._github_data.last_issue:
self._latest_open_issue_url = self._github_data.last_issue.html_url
if self._github_data.last_pull_request:
self._latest_open_pr_url = self._github_data.last_pull_request.html_url
if self._github_data.clones_response:
self._clones = self._github_data.clones_response.data.count
self._clones_unique = self._github_data.clones_response.data.uniques
if self._github_data.views_response:
self._views = self._github_data.views_response.data.count
self._views_unique = self._github_data.views_response.data.uniques
self._attr_extra_state_attributes = {
ATTR_PATH: self._github_data.repository_path,
ATTR_NAME: self.name,
ATTR_LATEST_COMMIT_MESSAGE: self._latest_commit_message,
ATTR_LATEST_COMMIT_SHA: self._latest_commit_sha,
ATTR_LATEST_RELEASE_URL: self._latest_release_url,
ATTR_LATEST_OPEN_ISSUE_URL: self._latest_open_issue_url,
ATTR_OPEN_ISSUES: self._open_issue_count,
ATTR_LATEST_OPEN_PULL_REQUEST_URL: self._latest_open_pr_url,
ATTR_OPEN_PULL_REQUESTS: self._pull_request_count,
ATTR_STARGAZERS: self._stargazers,
ATTR_FORKS: self._forks,
}
if self._latest_release_tag is not None:
self._attr_extra_state_attributes[
ATTR_LATEST_RELEASE_TAG
] = self._latest_release_tag
if self._clones is not None:
self._attr_extra_state_attributes[ATTR_CLONES] = self._clones
if self._clones_unique is not None:
self._attr_extra_state_attributes[ATTR_CLONES_UNIQUE] = self._clones_unique
if self._views is not None:
self._attr_extra_state_attributes[ATTR_VIEWS] = self._views
if self._views_unique is not None:
self._attr_extra_state_attributes[ATTR_VIEWS_UNIQUE] = self._views_unique
class GitHubData:
"""GitHub Data object."""
def __init__(self, repository, access_token, session, server_url=None):
"""Set up GitHub."""
self._repository = repository
self.repository_path = repository[CONF_PATH]
self._github = GitHubAPI(
token=access_token, session=session, **{"base_url": server_url}
)
self.available = False
self.repository_response = None
self.commit_response = None
self.issues_response = None
self.pulls_response = None
self.releases_response = None
self.views_response = None
self.clones_response = None
@property
def name(self):
"""Return the name of the sensor."""
return self._repository.get(CONF_NAME, self.repository_response.data.name)
@property
def last_commit(self):
"""Return the last issue."""
return self.commit_response.data[0] if self.commit_response.data else None
@property
def last_issue(self):
"""Return the last issue."""
return self.issues_response.data[0] if self.issues_response.data else None
@property
def last_pull_request(self):
"""Return the last pull request."""
return self.pulls_response.data[0] if self.pulls_response.data else None
@property
def last_release(self):
"""Return the last release."""
return self.releases_response.data[0] if self.releases_response.data else None
async def async_update(self):
"""Update GitHub data."""
try:
await asyncio.gather(
self._update_repository(),
self._update_commit(),
self._update_issues(),
self._update_pulls(),
self._update_releases(),
)
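            # The traffic endpoints (clones/views) require push access, so
            # only query them when the token has that permission.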
if self.repository_response.data.permissions.push:
await asyncio.gather(
self._update_clones(),
self._update_views(),
)
self.available = True
except GitHubException as err:
_LOGGER.error("GitHub error for %s: %s", self.repository_path, err)
self.available = False
async def _update_repository(self):
"""Update repository data."""
self.repository_response = await self._github.repos.get(self.repository_path)
async def _update_commit(self):
"""Update commit data."""
self.commit_response = await self._github.repos.list_commits(
self.repository_path, **{"params": {"per_page": 1}}
)
async def _update_issues(self):
"""Update issues data."""
self.issues_response = await self._github.repos.issues.list(
self.repository_path
)
async def _update_releases(self):
"""Update releases data."""
self.releases_response = await self._github.repos.releases.list(
self.repository_path
)
async def _update_clones(self):
"""Update clones data."""
self.clones_response = await self._github.repos.traffic.clones(
self.repository_path
)
async def _update_views(self):
"""Update views data."""
self.views_response = await self._github.repos.traffic.views(
self.repository_path
)
async def _update_pulls(self):
"""Update pulls data."""
response = await self._github.repos.pulls.list(
self.repository_path, **{"params": {"per_page": 100}}
)
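        # The first request returns page 1; if more pages exist, fetch the
        # remaining ones concurrently and merge their data into this response.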
if not response.is_last_page:
results = await asyncio.gather(
*(
self._github.repos.pulls.list(
self.repository_path,
**{"params": {"per_page": 100, "page": page_number}},
)
for page_number in range(
response.next_page_number, response.last_page_number + 1
)
)
)
for result in results:
response.data.extend(result.data)
self.pulls_response = response
|
|
import os
import subprocess
import sys
from PySide import QtGui, QtCore
from SearcherLite import *
from ListDelegate import *
if sys.platform not in ['win32', 'darwin']:
    from LinuxFileOpener import *
if getattr(sys, 'frozen', False):
# frozen
program_location = os.path.dirname(sys.executable)
else:
# unfrozen
program_location = os.path.dirname(os.path.realpath(__file__))
iconfolder = os.path.join(program_location,'Icons')
MUSIC_EXT = ['.mp3','.flac','.wav','.wma','.m4a','.aiff','.m4p']
IMAGE_EXT = ['.jpeg','.jpg','.bmp','.tiff','.png','.psd','.gif','.jfif','.exif']
TXT_LIST = ['.txt', '.ini']
SRC_LIST = ['.c','.cpp','.py','.java','.h','.hpp', '.php', '.sql']
VIDEO_EXT = ['.avi','.mp4','.mpeg','.mov','.wma','.wmv','.wmx','.ogm','.mkv']
MODEL_LIST = ['.max','.mb','.ma']
ARCHIVE_LIST = ['.zip', '.rar','.7z', '.tar', '.gz', '.tar.gz']
fileicon = os.path.join(iconfolder, 'file.png')
foldericon = os.path.join(iconfolder, 'folder.png')
musicicon = os.path.join(iconfolder, 'music.png')
archiveicon = os.path.join(iconfolder, 'archive.png')
texticon = os.path.join(iconfolder, 'txt.png')
dmgicon = os.path.join(iconfolder, 'dmg.png')
docicon = os.path.join(iconfolder, 'doc.png')
epubicon = os.path.join(iconfolder, 'epub.png')
mobiicon = os.path.join(iconfolder, 'mobi.png')
pdficon = os.path.join(iconfolder, 'PDF.png')
videoicon = os.path.join(iconfolder, 'video.png')
xmlicon = os.path.join(iconfolder, 'xml.png')
srcicon = os.path.join(iconfolder, 'src.png')
modelicon = os.path.join(iconfolder, '3D.png')
cssicon = os.path.join(iconfolder, 'css.png')
exeicon = os.path.join(iconfolder, 'exe.png')
htmlicon = os.path.join(iconfolder, 'html.png')
objicon = os.path.join(iconfolder, 'obj.png')
class ResultWindow(QtGui.QWidget):
def __init__(self, parent = None):
super(ResultWindow,self).__init__()
self.Searcher = parent
self.menu = QtGui.QMenu(self)
if sys.platform not in ['win32','darwin']:
self.lfo = LinuxFileOpener()
self.y =0
self.initUI()
def initUI(self):
self.is_preview_open = False
layout = QtGui.QVBoxLayout(self)
self.listwidget = QtGui.QListWidget(self)
layout.addWidget(self.listwidget)
layout.setContentsMargins(0, 0, 0, 0)
self.listwidget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.listwidget.itemSelectionChanged.connect(self.previewing)
self.listwidget.doubleClicked.connect(self.OpenFile)
# shortcut = QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Backspace), self.listwidget)
# shortcut.activated.connect(self.Searcher.delete)
# shortcut2 = QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Delete), self.listwidget)
# shortcut2.activated.connect(self.Searcher.delete)
action = QtGui.QAction('focus right',self)
action.setShortcut("R")
action.triggered.connect(self.focusTextRight)
action2 = QtGui.QAction('focus left', self)
action2.setShortcut("G")
action2.triggered.connect(self.focusTextLeft)
action3 = QtGui.QAction('focus right highlight', self)
action3.setShortcut("V")
action3.triggered.connect(self.focusTextLeftHigh)
CopyPathAction = QtGui.QAction('Copy Path',self)
CopyPathAction.setShortcut("C")
CopyPathAction.triggered.connect(self.copyPath)
self.addAction(action)
self.addAction(action2)
self.addAction(action3)
self.addAction(CopyPathAction)
self.setGeometry(self.Searcher.pos().x() + 21, self.Searcher.pos().y() + 125, 780, 50)
self.setAutoFillBackground(True)
self.setPalette(QtCore.Qt.white)
self.setWindowTitle("Searcher")
self.setMinimumHeight(40)
self.setWindowFlags(QtCore.Qt.FramelessWindowHint)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowOpacity(0.85)
self.createAction()
def focusTextLeftHigh(self):
self.Searcher.activateWindow()
self.Searcher.setFocus()
self.Searcher.line_edit.setFocus()
def focusTextRight(self):
self.Searcher.activateWindow()
self.Searcher.setFocus()
self.Searcher.deep_line.setFocus()
def focusTextLeft(self):
self.Searcher.activateWindow()
self.Searcher.setFocus()
self.Searcher.line_edit.setFocus()
self.Searcher.line_edit.deselect()
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Escape:
self.hide()
if event.key() == QtCore.Qt.Key_Control:
self.Searcher.activateWindow()
self.Searcher.setFocus()
self.Searcher.line_edit.setFocus()
self.Searcher.line_edit.deselect()
if event.key() == QtCore.Qt.Key_Return:
self.OpenFile()
def updateSize(self, checktextfromresult=False):
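        # Cap the window at roughly half the available screen height (rounded
        # down to the nearest 100 px); each result row takes 25 px, plus a
        # 5 px margin.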
height = QtGui.QDesktopWidget().availableGeometry().height()
height = (height - height%100)/2
height = height + 5
self.y = 5+len(self.Searcher.found)*25
if self.y > height: self.y=height
self.resize(self.Searcher.size().width()-20, self.y)
counter = 0
total = len(self.Searcher.found)
if not checktextfromresult:
for item in self.Searcher.found:
if not self.isVisible(): break
if counter%100==0:
self.Searcher.status_label.setText("Status: Drawing "+ str(counter) + ' of ' +str(total) + " Result Tiles...Please Wait ")
QtGui.QApplication.processEvents()
counter +=1
if self.Searcher.option !=2:
self.updateWidget(item)
else:
if item not in self.Searcher.hiddenlist:
self.updateWidget(item)
self.y = 5 + self.listwidget.count() * 25
def updateSizeChange(self):
self.resize(self.Searcher.size().width()-20, self.y)
def updateLoc(self):
if sys.platform =='win32':
self.move(QtCore.QPoint(self.Searcher.pos().x() + 10, self.Searcher.pos().y() + 55))
else:
self.move(QtCore.QPoint(self.Searcher.pos().x() + 10, self.Searcher.pos().y() + 67))
height = QtGui.QDesktopWidget().availableGeometry().height()
height = (height - height%100)/2
height = height + 5
if self.y > height: self.y = height
self.resize(self.Searcher.size().width()-20, self.y)
def createAction(self):
CopyAction = QtGui.QAction('Copy file to target location', self)
CopyAction.triggered.connect(self.Searcher.CopyFiles)
MoveAction = QtGui.QAction('Move file to target location', self)
MoveAction.triggered.connect(self.Searcher.MoveFiles)
ShowDirAction = QtGui.QAction('Show folders only', self)
ShowDirAction.triggered.connect(self.ShowDirOnly)
ShowFilesAction = QtGui.QAction('Show files only', self)
ShowFilesAction.triggered.connect(self.ShowFilesOnly)
self.ShowHiddenAction = QtGui.QAction('Show Hiddens only',self)
self.ShowHiddenAction.triggered.connect(self.showHiddenOnly)
self.HideHiddenAction = QtGui.QAction('Hide Hidden', self)
self.HideHiddenAction.triggered.connect(self.HideHidden)
ShowAllAction = QtGui.QAction('Show all', self)
ShowAllAction.triggered.connect(self.ShowAll)
OpenFileAction = QtGui.QAction('Open File', self)
OpenFileAction.triggered.connect(self.OpenFile)
# grepAction = QtGui.QAction('Grep',self)
# grepAction.triggered.connect(self.grep)
self.openHighlightAction = QtGui.QAction('Open and Highlight',self)
self.openHighlightAction.triggered.connect(self.openText)
self.previewAction = QtGui.QAction("Preview",self)
self.previewAction.triggered.connect(self.preview)
deleteAction = QtGui.QAction("&Delete", self)
deleteAction.triggered.connect(self.Searcher.delete)
OpenContainFolderAction = QtGui.QAction('Open containing folder', self)
OpenContainFolderAction.triggered.connect(self.OpenContainerFolder)
CopyPathAction = QtGui.QAction('Copy Path',self)
CopyPathAction.triggered.connect(self.copyPath)
CopyFolderPathAction = QtGui.QAction('Copy Folder Path',self)
CopyFolderPathAction.triggered.connect(self.copyFolderPath)
#self.menu.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.menu.addAction(self.previewAction)
#self.menu.addAction(playAction)
self.menu.addAction(OpenFileAction)
self.menu.addAction(OpenContainFolderAction)
self.menu.addAction(self.openHighlightAction)
#self.menu.addAction(grepAction)
self.menu.addSeparator()
self.menu.addAction(ShowAllAction)
self.menu.addAction(ShowDirAction)
self.menu.addAction(ShowFilesAction)
self.menu.addAction(self.ShowHiddenAction)
self.menu.addAction(self.HideHiddenAction)
self.menu.addSeparator()
self.menu.addAction(CopyPathAction)
self.menu.addAction(CopyFolderPathAction)
self.menu.addSeparator()
self.menu.addAction(CopyAction)
self.menu.addAction(MoveAction)
#self.menu.addAction(deleteAction)
def contextMenuEvent(self,event):
for item in self.listwidget.selectedItems():
item= item.text()
ext = os.path.splitext(item)[-1].lower()
if ext not in IMAGE_EXT:
self.previewAction.setVisible(False)
else:
self.previewAction.setVisible(True)
if self.Searcher.option ==1:
self.ShowHiddenAction.setVisible(False)
self.HideHiddenAction.setVisible(False)
else:
self.ShowHiddenAction.setVisible(True)
self.HideHiddenAction.setVisible(True)
if self.Searcher.deep_line.text()=='':
self.openHighlightAction.setVisible(False)
else:
self.openHighlightAction.setVisible(True)
self.menu.exec_(event.globalPos())
def updateWidget(self,item):
self.listwidget.setItemDelegate(ListDelegate(self.listwidget))
list_item = QtGui.QListWidgetItem()
if sys.platform =='win32':
list_item.setData(QtCore.Qt.UserRole, item[item.rfind('\\')+1:])
else:
list_item.setData(QtCore.Qt.UserRole, item[item.rfind('/')+1:])
list_item.setData(QtCore.Qt.DisplayRole,item)
if os.path.isdir(item):
list_item.setData(QtCore.Qt.DecorationRole,foldericon)
elif os.path.isfile(item):
ext = os.path.splitext(item)[-1].lower()
if ext in MUSIC_EXT:
list_item.setData(QtCore.Qt.DecorationRole,musicicon)
elif ext in TXT_LIST:
list_item.setData(QtCore.Qt.DecorationRole,texticon)
elif ext in ARCHIVE_LIST:
list_item.setData(QtCore.Qt.DecorationRole,archiveicon)
elif ext in VIDEO_EXT:
list_item.setData(QtCore.Qt.DecorationRole, videoicon)
elif ext in MODEL_LIST:
list_item.setData(QtCore.Qt.DecorationRole,modelicon)
elif ext in SRC_LIST:
list_item.setData(QtCore.Qt.DecorationRole,srcicon)
elif ext == '.mobi':
list_item.setData(QtCore.Qt.DecorationRole, mobiicon)
elif ext == '.epub':
list_item.setData(QtCore.Qt.DecorationRole, epubicon)
elif ext == '.xml':
list_item.setData(QtCore.Qt.DecorationRole, xmlicon)
elif ext == '.pdf':
list_item.setData(QtCore.Qt.DecorationRole, pdficon)
elif ext == '.dmg':
list_item.setData(QtCore.Qt.DecorationRole, dmgicon)
elif ext == '.obj':
list_item.setData(QtCore.Qt.DecorationRole,objicon)
elif ext == '.css':
list_item.setData(QtCore.Qt.DecorationRole,cssicon)
elif ext == '.exe':
list_item.setData(QtCore.Qt.DecorationRole,exeicon)
elif ext == '.html':
list_item.setData(QtCore.Qt.DecorationRole,htmlicon)
else:
list_item.setData(QtCore.Qt.DecorationRole,fileicon)
self.listwidget.addItem(list_item)
def HideHidden(self):
self.Searcher.status_label.setText('Status: Drawing Result Tiles... Please Wait')
counter = 0
self.listwidget.clear()
for item in self.Searcher.found:
if item not in self.Searcher.hiddenlist:
if counter%50==0:
QtGui.QApplication.processEvents()
self.updateWidget(item)
counter+=1
self.Searcher.status_label.setText('Status: Found ' + str(len(self.Searcher.found)) + ' items' + ' Showing: ' + str(self.listwidget.count()))
def ShowDirOnly(self):
self.Searcher.status_label.setText('Status: Drawing Result Tiles... Please Wait')
counter = 0
self.listwidget.clear()
for item in self.Searcher.found:
if counter%50==0:
QtGui.QApplication.processEvents()
counter+=1
if os.path.isdir(item):
if self.Searcher.option !=2:
self.updateWidget(item)
else:
if item not in self.Searcher.hiddenlist:
self.updateWidget(item)
self.Searcher.status_label.setText('Status: Found ' + str(len(self.Searcher.found)) + ' items' + ' Showing: ' + str(self.listwidget.count()))
def ShowFilesOnly(self):
self.Searcher.status_label.setText('Status: Drawing Result Tiles... Please Wait')
counter = 0
self.listwidget.clear()
for item in self.Searcher.found:
if counter%50==0:
QtGui.QApplication.processEvents()
counter+=1
if os.path.isfile(item):
if self.Searcher.option !=2:
self.updateWidget(item)
else:
if item not in self.Searcher.hiddenlist:
self.updateWidget(item)
self.Searcher.status_label.setText('Status: Found ' + str(len(self.Searcher.found)) + ' items' + ' Showing: ' + str(self.listwidget.count()))
def ShowAll(self):
self.Searcher.status_label.setText('Status: Drawing Result Tiles... Please Wait')
counter = 0
self.listwidget.clear()
for item in self.Searcher.found:
if counter%50==0:
QtGui.QApplication.processEvents()
self.updateWidget(item)
counter +=1
self.Searcher.status_label.setText('Status: Found ' + str(len(self.Searcher.found)) + ' items' + ' Showing: ' + str(self.listwidget.count()))
def showHiddenOnly(self):
self.Searcher.status_label.setText('Status: Drawing Result Tiles... Please Wait')
counter = 0
self.listwidget.clear()
for item in self.Searcher.found:
if item in self.Searcher.hiddenlist:
if counter%50==0:
QtGui.QApplication.processEvents()
self.updateWidget(item)
counter+=1
self.Searcher.status_label.setText('Status: Found ' + str(len(self.Searcher.found)) + ' items' + ' Showing: ' + str(self.listwidget.count()))
    def OpenContainerFolder(self):
        for item in self.listwidget.selectedItems():
            # os.path.dirname handles both platforms' separators and keeps
            # the root separator intact.
            folder = os.path.dirname(item.text())
            if sys.platform == 'win32':
                os.startfile(folder)
            else:
                subprocess.call(['xdg-open', folder])
def copyPath(self):
clipboard = QtGui.QApplication.clipboard()
for item in self.listwidget.selectedItems():
clipboard.setText(item.text())
    def copyFolderPath(self):
        clipboard = QtGui.QApplication.clipboard()
        for item in self.listwidget.selectedItems():
            # Copy the containing folder; os.path.dirname preserves the
            # root separator on POSIX paths.
            clipboard.setText(os.path.dirname(item.text()))
def previewing(self):
if self.is_preview_open:
for item in self.listwidget.selectedItems():
image = item.text()
if os.path.splitext(image)[-1].lower() in IMAGE_EXT:
self.imageview.open(image)
def preview(self):
if not self.is_preview_open:
self.is_preview_open = True
self.imageview = ImageView(self.filter())
self.imageview.destroyed.connect(self.previewClosed)
self.imageview.show()
for item in self.listwidget.selectedItems():
image = item.text()
if os.path.splitext(image)[-1].lower() in IMAGE_EXT:
self.imageview.open(image)
def filter(self):
templist = []
current = self.Searcher.found[self.listwidget.currentRow()]
for item in self.Searcher.found:
ext = os.path.splitext(item)[-1].lower()
if ext in IMAGE_EXT:
templist.append(item)
index = templist.index(current)
return (templist,index)
def previewClosed(self):
self.is_preview_open = False
def openText(self):
self.editor = TextEditor()
        if self.Searcher.deep_line.text() != '':
for item in self.listwidget.selectedItems():
f = open(item.text(), 'r')
for line in f:
self.editor.textedit.append(line.rstrip('\n'))
f.close()
self.editor.highlight(self.Searcher.deep_line.text())
self.editor.show()
def grep(self):
text, ok = QtGui.QInputDialog.getText(self, 'Grep', 'Enter Search Pattern')
if ok:
for item in self.listwidget.selectedItems():
f = open(item.text(), 'r')
string = []
for line in f:
string.append(line.rstrip('\n'))
x = 0
xlist = []
for item in string:
if text in item:
xlist.append(x)
x += 1
self.editor = TextEditor()
for x in xlist:
if x > 1:
self.editor.textedit.append(string[x - 1])
self.editor.textedit.append(string[x])
if x < len(string) - 1:
self.editor.textedit.append(string[x + 1])
self.editor.textedit.append('\n')
x += 1
self.editor.highlight(text)
self.editor.show()
def OpenFile(self):
for item in self.listwidget.selectedItems():
if sys.platform == 'win32':
os.startfile(item.text())
elif sys.platform == 'darwin':
command = "/usr/bin/open " + item.text().replace(' ','\\ ')
subprocess.Popen(command,shell=True)
else:
if self.lfo.checkList(item.text()):
self.lfo.openFile(item.text())
else:
file_type = self.lfo.xdgQuery('filetype',item.text())
app = self.inputDialog(file_type)
self.lfo.addToList(file_type,app)
self.lfo.openFile(item.text())
    def inputDialog(self, app_type):
        dia = ('no/invalid default app for: ' + app_type +
               ' -> set one below (ex: gedit)\n'
               '(refer to view->app table if you are not sure)')
        text, ok = QtGui.QInputDialog.getText(self, 'set default application', dia)
        if ok:
            if self.checkApps(text):
                return text
            else:
                # Recurse until a valid application name is entered and
                # propagate the result back to the original caller.
                return self.inputDialog(app_type)
def checkApps(self, text):
for item in os.listdir(r'/usr/share/applications'):
if text == item.split('.')[0]:
return True
return False
|
|
#!/usr/bin/env python
from __future__ import print_function
from mindmup_as_attack_trees import *
import sys,json
import re
from collections import OrderedDict
import math
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--only-severities", action='store_true', help="only generate markdown for the attacker objective severities, not the rest of the tree")
parser.add_argument('--safety-privacy-financial-operational', action='store_true', help="use this alternate ordering for the severities")
parser.add_argument('mupin', nargs='?', help="the mindmup file that will be processed -- transforming and augmenting the JSON")
args = parser.parse_args()
def info(type, value, tb):
    # Imported lazily so the post-mortem hook only needs ipdb installed
    # when an unhandled exception actually occurs.
    import ipdb
    ipdb.pm()
sys.excepthook = info
levels_count = dict()
nodes_lookup = dict()
objective_node = None
def clamp_to_json_values(val):
    # JSON has no infinities or NaN, but we can use strings as long as we
    # convert back when reading.
    if val == float('inf'):
        return "Infinity"
    elif val == float('-inf'):
        return "-Infinity"
    elif isinstance(val, float) and math.isnan(val):
        # NaN never compares equal to anything, including itself, so an
        # equality test against float('nan') can never match.
        return "NaN"
    return val
def parse_evita_raps(node):
if not 'EVITA::' in get_raw_description(node):
raise ValueError("couldn't find EVITA:: tag in attack vector node", node)
for line in get_unclean_description(node).splitlines():
if not 'EVITA::' in line:
continue
evita_line = line.strip().split('|')
if node.get('attr', None) is None:
node.update({'attr': dict()})
attr = node.get('attr')
        if len(evita_line) != 10:
            msg = ("EVITA:: tag should have exactly 9 elements in attack "
                   "vector node %s but it has %s" % (node, len(evita_line) - 1))
            print(msg)
            raise ValueError(msg)
attr.update({'evita_et': clamp_to_json_values(float(evita_line[5]))})
attr.update({'evita_e': clamp_to_json_values(float(evita_line[6]))})
attr.update({'evita_k': clamp_to_json_values(float(evita_line[7]))})
attr.update({'evita_wo': clamp_to_json_values(float(evita_line[8]))})
attr.update({'evita_eq': clamp_to_json_values(float(evita_line[9]))})
return
def get_evita_et_label(node):
et = node.get('attr').get('evita_et')
if et == 0:
return "**0**: < One Day"
elif et == 1:
return "**1**: < One Week"
elif et == 4:
return "**4**: < One Month"
elif et == 10:
return "**10**: < Three Months"
elif et == 17:
return "**17**: < Six Months"
elif et == 19:
return "**19**: > Six Months"
elif float(et) == float('inf'):
return "Not Practical"
else:
return "%d Unknown" % et
def get_evita_e_label(node):
e = node.get('attr').get('evita_e')
if e == 0:
return "**0**: Layman"
elif e == 3:
return "**3**: Proficient"
elif e == 6:
return "**6**: Expert"
elif e == 8:
return "**8**: Multiple Experts"
elif float(e) == float('inf'):
return "Not Practical"
else:
return "%d Unknown" % e
def get_evita_k_label(node):
k = node.get('attr').get('evita_k')
if k == 0:
return "**0**: Public"
elif k == 3:
return "**3**: Restricted"
elif k == 7:
return "**7**: Sensitive"
elif k == 11:
return "**11**: Critical"
elif float(k) == float('inf'):
return "Not Practical"
else:
return "%d Unknown" % k
def get_evita_wo_label(node):
wo = node.get('attr').get('evita_wo')
if wo == 0:
return "**0**: Unlimited"
elif wo == 1:
return "**1**: Easy"
elif wo == 4:
return "**4**: Moderate"
elif wo == 10:
return "**10**: Difficult"
elif float(wo) == float('inf'):
return "None"
else:
return "%d Unknown" % wo
def get_evita_eq_label(node):
eq = node.get('attr').get('evita_eq')
if eq == 0:
return "**0**: Standard"
elif eq == 4:
return "**4**: Specialized"
elif eq == 7:
return "**7**: Bespoke"
elif eq == 9:
return "**9**: Multiple Bespoke"
elif float(eq) == float('inf'):
return "Not Practical"
else:
return "%d Unknown" % eq
def append_evita_rap_table(node):
description = get_raw_description(node)
if description.endswith('|'):
print("warning. node %s. don't end node description in '|''" % get_node_title(node))
html = detect_html(description)
bookends = ("<div>", "</div>") if html else ('\n', '')
update_raw_description(node, description +
"%s%s" % bookends +
"%s| Elapsed Time | Expertise | Knowledge | Window of Opportunity | Equipment |%s" % bookends +
"%s|-----------------------------|-----------------------------|-----------------------------|-----------------------------|-----------------------------|%s" % bookends +
("%s| %%s | %%s | %%s | %%s | %%s |%s" % bookends ) % (
get_evita_et_label(node),
get_evita_e_label(node),
get_evita_k_label(node),
get_evita_wo_label(node),
get_evita_eq_label(node)
)
)
def derive_evita_apt(node):
attrs = node.get('attr')
total_rap = sum(map(lambda ev: float(attrs.get(ev)), ['evita_et', 'evita_e', 'evita_k', 'evita_wo', 'evita_eq']))
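    # EVITA maps total required attack potential to an attack probability
    # (APT) from 5 (easiest, RAP < 10) down to 1 (hardest, RAP >= 25): a
    # lower required potential means a more likely attack.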
if total_rap < 0:
raise ValueError('encountered negative Total Required Attack Potential', node.get('attr'))
elif total_rap < 10:
apt = 5
elif total_rap < 14:
apt = 4
elif total_rap < 20:
apt = 3
elif total_rap < 25:
apt = 2
else:
apt = 1
#TODO support non-zero controllability
attrs.update({'evita_apt': apt})
update_node_apt_colour(node, apt)
return
def get_evita_ss_label(node):
ss = node.get('attr').get('evita_ss')
ss = int(ss)
if ss == 0:
return "**S%s**: No injuries" % ss
elif ss == 1:
return "**S%s**: Light or moderate injuries" % ss
elif ss == 2:
return "**S%s**: Severe injuries (survival probable); light/moderate injuries for multiple vehicles" % ss
elif ss == 3:
return "**S%s**: Life threatening (survival uncertain) or fatal injuries; severe injuries for multiple vehicles" % ss
elif ss == 4:
return "**S%s**: Life threatening or fatal injuries for multiple vehicles" % ss
else:
return "**%d**: unknown" % ss
def get_evita_os_label(node):
os = node.get('attr').get('evita_os')
os = int(os)
if os == 0:
return "**S%s**: No impact on operational performance" % os
elif os == 1:
return "**S%s**: Impact not discernible to driver" % os
elif os == 2:
return "**S%s**: Driver aware of performance degradation; indiscernible impacts for multiple vehicles" % os
elif os == 3:
return "**S%s**: Significant impact on performance; noticeable impact for multiple vehicles" % os
elif os == 4:
return "**S%s**: Significant impact for multiple vehicles" % os
else:
return "**%d**: unknown" % os
def get_evita_ps_label(node):
ps = node.get('attr').get('evita_ps')
ps = int(ps)
if ps == 0:
return "**S%s**: No unauthorized access to data" % ps
elif ps == 1:
return "**S%s**: Anonymous data only (no specific driver of vehicle data)" % ps
elif ps == 2:
return "**S%s**: Identification of vehicle or driver; anonymous data for multiple vehicles" % ps
elif ps == 3:
return "**S%s**: Driver or vehicle tracking; identification of driver or vehicle for multiple vehicles" % ps
elif ps == 4:
return "**S%s**: Driver or vehicle tracking for multiple vehicles" % ps
else:
return "**%d**: unknown" % ps
def get_evita_fs_label(node):
fs = node.get('attr').get('evita_fs')
fs = int(fs)
if fs == 0:
return "**S%s**: No financial loss" % fs
elif fs == 1:
return "**S%s**: Low-level loss (~ 10EU)" % fs
elif fs == 2:
return "**S%s**: Moderate loss (~ 100EU); low losses for multiple vehicles" % fs
elif fs == 3:
return "**S%s**: Heavy loss (~ 1000EU); moderate losses for multiple vehicles" % fs
elif fs == 4:
return "**S%s**: Heavy losses for multiple vehicles" % fs
else:
return "**%d**: unknown" % fs
def append_evita_severity_table(node):
description = get_raw_description(node)
html = detect_html(description)
if description.endswith('|'):
print("warning. node %s. don't end node description in '|''" % get_node_title(node))
bookends = ("<div>", "</div>") if html else ('\n', '')
    update_raw_description(node, description +
"%s%s" % bookends +
"%s| Safety Severity | Privacy Severity | Financial Severity | Operational Severity |%s" % bookends +
"%s|-------------------------|-------------------------|-------------------------|-------------------------|%s" % bookends +
("%s| %%s | %%s | %%s | %%s |%s" % bookends ) % (
get_evita_ss_label(node),
get_evita_ps_label(node),
get_evita_fs_label(node),
get_evita_os_label(node)
)
)
def parse_evita_severities(node):
    if 'EVITA::' not in get_raw_description(node):
raise ValueError("couldn't find EVITA:: tag in attacker objective node", node)
for line in get_unclean_description(node).splitlines():
        if 'EVITA::' not in line:
continue
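        # Severity values are pipe-separated on the EVITA:: line (e.g.
        # "EVITA:: | 3 | 2 | 1 | 0"); the column order depends on the
        # safety_privacy_financial_operational flag handled below.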
evita_line = line.strip().split('|')
attr = node.get('attr')
if args.safety_privacy_financial_operational:
attr.update({'evita_ss': clamp_to_json_values(float(evita_line[1]))})
attr.update({'evita_ps': clamp_to_json_values(float(evita_line[2]))})
attr.update({'evita_fs': clamp_to_json_values(float(evita_line[3]))})
attr.update({'evita_os': clamp_to_json_values(float(evita_line[4]))})
else:
attr.update({'evita_fs': clamp_to_json_values(float(evita_line[1]))})
attr.update({'evita_os': clamp_to_json_values(float(evita_line[2]))})
attr.update({'evita_ps': clamp_to_json_values(float(evita_line[3]))})
attr.update({'evita_ss': clamp_to_json_values(float(evita_line[4]))})
return
def set_node_apts(node):
def evita_rap_apt_parser_deriver(node):
if is_attack_vector(node) and (not is_node_a_reference(node)):
parse_evita_raps(node)
derive_evita_apt(node)
if not is_outofscope(node):
append_evita_rap_table(node)
return
apply_each_node_below_objectives(node, evita_rap_apt_parser_deriver)
return
def set_node_severities(node, nodes_context):
if is_objective(node) and not is_outofscope(node):
parse_evita_severities(node)
append_evita_severity_table(node)
for child in get_node_children(node):
set_node_severities(child, nodes_context)
return
if args.mupin is None:
    fd_in = sys.stdin
else:
    fd_in = open(args.mupin, 'r')
data = json.load(fd_in)
if args.mupin is None:
    fd_out = sys.stdout
else:
    fd_in.close()
    fd_out = open(args.mupin, 'w')
nodes_context = list()
if 'id' in data and data['id'] == 'root':
    # MindMup version 2 format: the map proper lives under data['ideas']['1']
    root_node = data['ideas']['1']
else:
    root_node = data
nodes_lookup = build_nodes_lookup(root_node)
set_node_severities(root_node, nodes_context)
if not args.only_severities:
set_node_apts(root_node)
apply_each_node(root_node, remove_override_apt)
propagate_all_the_apts(root_node, nodes_lookup)
derive_node_risks(root_node)
normalize_nodes(root_node)
output = json.dumps(data, indent=2, sort_keys=False)
# Trim trailing whitespace from each line of the serialized JSON.
output = re.sub(r'\s+$', '', output, flags=re.M)
fd_out.write(output)
if args.mupin is not None:
    fd_out.close()
|
|
# Path hack: make the parent directory importable so cppstub can be found.
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import unittest
from cppstub import CppFile
class CppStubHeaderParsingTestSuite(unittest.TestCase):
def setUp(self):
self.cpp_file = CppFile("TestSuite")
def test_header_parse_namespace(self):
self.cpp_file.parse_header("namespace test{}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
def test_header_parse_namespace_in_namespace(self):
self.cpp_file.parse_header("namespace test{namespace test1{}}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].namespaces[0].name, "test1")
def test_header_parse_multiple_namespaces(self):
self.cpp_file.parse_header("namespace test{}namespace test1{}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[1].name, "test1")
def test_header_parse_function_in_namespace(self):
self.cpp_file.parse_header("namespace test{void test1();}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].methods[0].name, "test1")
def test_header_parse_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
def test_header_parse_class_with_inherited_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test : Test1{};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].inherited_classes[0], "Test1")
def test_header_parse_class_with_multiple_inherited_classes_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test : Test1, Test2{};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].inherited_classes[0], "Test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].inherited_classes[1], "Test2")
def test_header_parse_constructor_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test(class own_t *sink_);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "Test")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 1)
def test_header_parse_constructor_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test(){function();};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "Test")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 1)
#operators split and ordered via http://en.wikipedia.org/wiki/Operators_in_C_and_C%2B%2B
def test_header_parse_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test& operator=(const Test& rhs){function();}Test operator+(const Test& rhs) const {function();}Test operator-(const Test& rhs) const {function();}Test operator+() const {function();}Test operator-() const{function();}Test operator*(const Test& rhs) const {function();}Test operator/(const Test& rhs) const {function();}Test operator%(const Test& rhs) const {function();}Test& operator++(){function();}Test operator++(int){function();}Test& operator--(){function();}Test operator--(int){function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator+")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][1].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator-")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][2].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator+")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][3].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator-")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][4].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][5].name, "operator*")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][5].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][6].name, "operator/")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][6].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][7].name, "operator%")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][7].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][8].name, "operator++")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][9].name, "operator++")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][10].name, "operator--")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][11].name, "operator--")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 12)
def test_header_parse_comparison_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:bool operator==(const Test& rhs) const{function();};bool operator!=(const Test& rhs) const {function();}bool operator>(const Test& rhs) const {function();}bool operator<(const Test& rhs) const {function();}bool operator>=(const Test& rhs) const {function();}bool operator<=(const Test& rhs) const {function();};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator==")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][0].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator!=")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][1].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator>")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][2].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator<")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][3].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator>=")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][4].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][5].name, "operator<=")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][5].const)
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 6)
def test_header_parse_logical_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:bool operator!() const{function();}bool operator&&(const Test& rhs) const {function();}bool operator||(const Test& rhs) const {function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator!")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][0].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator&&")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][1].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator||")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][2].const)
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 3)
def test_header_parse_bitwise_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test operator~() const{function();}Test operator&(const Test& rhs) const {function();}Test operator|(const Test& rhs) const {function();}Test operator^(const Test& rhs) const {function();}Test operator<<(const Test& rhs) const {function();}Test operator>>(const Test& rhs) const {function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator~")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][0].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator&")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][1].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator|")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][2].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator^")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][3].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator<<")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][4].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][5].name, "operator>>")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][5].const)
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 6)
def test_header_parse_compound_assignment_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test& operator+=(const Test& rhs){function();}Test& operator-=(const Test& rhs){function();}Test& operator*=(const Test& rhs){function();}Test& operator/=(const Test& rhs){function();}Test& operator%=(const Test& rhs){function();}Test& operator&=(const Test& rhs){function();}Test& operator|=(const Test& rhs){function();}Test& operator^=(const Test& rhs){function();}Test& operator<<=(const Test& rhs){function();}Test& operator>>=(const Test& rhs){function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator+=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator-=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator*=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator/=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator%=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][5].name, "operator&=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][6].name, "operator|=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][7].name, "operator^=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][8].name, "operator<<=")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][9].name, "operator>>=")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 10)
def test_header_parse_member_and_pointer_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test2& operator[](const Test1& rhs){function();}Test2& operator*(){function();}Test* operator&(){function();}Test1* operator->(){function();}Test1 operator->*(){function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator[]")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator*")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator&")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator->")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator->*")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 5)
def test_header_parse_other_operators_with_function_in_implementation_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:Test1 operator()(Arg1 a1, Arg2 a2){function();}Test1& operator,(Test1& rhs) const {function();}void* operator new(size_t x){function();}void* operator new[](size_t x){function();}void operator delete(void* x){function();}void operator delete[](void* x){function();}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "operator()")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][1].name, "operator,")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["public"][1].const)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][2].name, "operator new")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][3].name, "operator new[]")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][4].name, "operator delete")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][5].name, "operator delete[]")
self.assertEquals(len(self.cpp_file.namespaces[0].classes[0].methods["public"]), 6)
def test_header_parse_default_access_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{void test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
def test_header_parse_default_access_const_return_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{const int test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][0].const_return_type)
def test_header_parse_default_access_virtual_const_return_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{virtual const int test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][0].const_return_type)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][0].virtual)
def test_header_parse_static_return_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{static int test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][0].static)
def test_header_parse_private_access_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{private:void test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
def test_header_parse_public_access_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:void test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "test1")
def test_header_parse_protected_access_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{protected:void test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["protected"][0].name, "test1")
def test_header_parse_method_with_return_type_and_arguments_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{int test1(int argument);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "int argument")
def test_header_parse_method_with_different_return_type_and_multiple_arguments_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{std::string test1(int argument1, std::string argument2);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "int argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[1], "std::string argument2")
def test_header_parse_multiple_methods_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{int test1(int argument1);std::string test2(std::string argument2);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "int argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[0], "std::string argument2")
def test_header_parse_multiple_access_multiple_methods_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{public:int test1(int argument1);private:std::string test2(std::string argument2);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].method_arguments[0], "int argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "std::string argument2")
def test_header_parse_multiple_access_including_default_multiple_methods_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{private:int test1(int argument1);public:std::string test2(std::string argument2);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "int argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["public"][0].method_arguments[0], "std::string argument2")
def test_header_parse_implemented_method_with_return_type_and_arguments_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{int test1(int argument1);std::string test2(std::string argument2){}};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "int argument1")
self.assertFalse(self.cpp_file.namespaces[0].classes[0].methods["private"][0].implemented)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[0], "std::string argument2")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][1].implemented)
def test_header_parse_default_access_class_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test1{class Test2{};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].classes["private"][0].name, "Test2")
def test_header_parse_private_access_class_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test1{private:class Test2{};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].classes["private"][0].name, "Test2")
def test_header_parse_public_access_class_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test1{public:class Test2{};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].classes["public"][0].name, "Test2")
def test_header_parse_protected_access_class_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test1{protected:class Test2{};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].classes["protected"][0].name, "Test2")
def test_header_parse_method_with_reference_return_type_and_reference_arguments_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{std::string& test1(std::string& argument1, std::string &argument2, std::string & argument3);char &test2(char& argument4, char &argument5,char & argument6);int & test3(int& argument7, int &argument8, int & argument9);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string&")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "std::string& argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[1], "std::string &argument2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[2], "std::string & argument3")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].return_type, "char &")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[0], "char& argument4")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[1], "char &argument5")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[2], "char & argument6")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].name, "test3")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].return_type, "int &")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[0], "int& argument7")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[1], "int &argument8")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[2], "int & argument9")
def test_header_parse_method_with_pointer_return_type_and_pointer_arguments_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{std::string* test1(std::string* argument1, std::string *argument2, std::string * argument3);char *test2(char* argument4, char *argument5,char * argument6);int * test3(int* argument7, int *argument8, int * argument9);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string*")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "std::string* argument1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[1], "std::string *argument2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[2], "std::string * argument3")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].return_type, "char *")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[0], "char* argument4")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[1], "char *argument5")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[2], "char * argument6")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].name, "test3")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].return_type, "int *")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[0], "int* argument7")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[1], "int *argument8")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[2], "int * argument9")
def test_header_parse_method_with_pointer_return_type_and_array_argument_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{std::string* test1(std::string argument[]);};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string*")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "std::string argument[]")
def test_header_parse_virtual_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test{std::string test1(std::string argument1);virtual int test2(int argument2);virtual int test3(int argument3){};};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].return_type, "std::string")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].method_arguments[0], "std::string argument1")
self.assertFalse(self.cpp_file.namespaces[0].classes[0].methods["private"][0].virtual)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].name, "test2")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][1].method_arguments[0], "int argument2")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][1].virtual)
self.assertFalse(self.cpp_file.namespaces[0].classes[0].methods["private"][1].implemented)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].name, "test3")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].return_type, "int")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][2].method_arguments[0], "int argument3")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][2].virtual)
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][2].implemented)
def test_header_parse_template_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{template <class Test>class Test1{};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].templated)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].template_type, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
def test_header_parse_template_method_in_class_in_namespace(self):
self.cpp_file.parse_header("namespace test{class Test1{template <class Test> Test& test1();};}")
self.assertEquals(self.cpp_file.namespaces[0].name, "test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].name, "Test1")
self.assertTrue(self.cpp_file.namespaces[0].classes[0].methods["private"][0].templated)
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].template_type, "Test")
self.assertEquals(self.cpp_file.namespaces[0].classes[0].methods["private"][0].name, "test1")
if __name__ == '__main__':
unittest.main()
|
|
"""diesel's async I/O event hub meets multiprocessing.
Lets you run CPU-intensive work in subprocesses without blocking the event
hub while doing so.
"""
import multiprocessing as mp
import traceback
from diesel import runtime
from diesel import core
from diesel.util.queue import Queue
def spawn(func):
"""Spawn a new process that will run func.
The returned Process instance can be called just like func.
The spawned OS process lives until it is term()ed or otherwise dies. Each
call to the returned Process instance results in another iteration of
the remote loop. This way a single process can handle multiple calls to
func.
"""
return Process(func)
def term(proc):
"""Terminate the given proc.
That is all.
"""
proc.cleanup()
proc.proc.terminate()
class ConflictingCall(Exception):
pass
class Process(object):
"""A subprocess that cooperates with diesel's event hub.
Communication with the spawned process happens over a pipe. Data that
is to be sent to or received from the process is dispatched by the
event hub. This makes it easy to run CPU intensive work in a non-blocking
fashion and utilize multiple CPU cores.
"""
def __init__(self, func):
"""Creates a new Process instance that will call func.
The returned instance can be called as if it were func. The following
code will run ``time.sleep`` in a subprocess and execution will resume
when the remote call completes. Other green threads can run in the
meantime.
>>> time_sleep = Process(time.sleep)
>>> time_sleep(4.2)
>>> do_other_stuff()
"""
self.func = func
self.proc = None
self.caller = None
self.args = None
self.params = None
self.pipe = None
self.in_call = False
self.launch()
def launch(self):
"""Starts a subprocess and connects it to diesel's plumbing.
A pipe is created, registered with the event hub and used to
communicate with the subprocess.
"""
self.pipe, remote_pipe = mp.Pipe()
runtime.current_app.hub.register(
self.pipe,
self.handle_return_value,
self.send_arguments_to_process,
runtime.current_app.global_bail('Process error!'),
)
def wrapper(pipe):
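            # Runs in the child process: each iteration services one call,
            # so a single subprocess can handle repeated invocations until
            # it is terminated.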
while True:
try:
args, params = pipe.recv()
pipe.send(self.func(*args, **params))
except (SystemExit, KeyboardInterrupt):
pipe.close()
break
except Exception, e:
e.original_traceback = traceback.format_exc()
pipe.send(e)
self.proc = mp.Process(target=wrapper, args=(remote_pipe,))
self.proc.daemon = True
self.proc.start()
def cleanup(self):
runtime.current_app.hub.unregister(self.pipe)
def handle_return_value(self):
"""Wakes up the caller with the return value of the subprocess func.
Called by the event hub when data is ready.
"""
try:
result = self.pipe.recv()
except EOFError:
self.pipe.close()
self.proc.terminate()
else:
self.in_call = False
self.caller.wake(result)
def send_arguments_to_process(self):
"""Sends the arguments to the function to the remote process.
Called by the event hub after the instance has been called.
"""
runtime.current_app.hub.disable_write(self.pipe)
self.pipe.send((self.args, self.params))
def __call__(self, *args, **params):
"""Trigger the execution of self.func in the subprocess.
Switches control back to the event hub, letting other loops run until
the subprocess finishes computation. Returns the result of the
subprocess's call to self.func.
"""
if self.in_call:
msg = "Another loop (%r) is executing this process." % self.caller
raise ConflictingCall(msg)
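        # Enabling writes makes the event hub invoke
        # send_arguments_to_process, which ships self.args/self.params
        # over the pipe to the child.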
runtime.current_app.hub.enable_write(self.pipe)
self.args = args
self.params = params
self.caller = core.current_loop
self.in_call = True
return self.caller.dispatch()
class NoSubProcesses(Exception):
pass
class ProcessPool(object):
"""A bounded pool of subprocesses.
An instance is callable, just like a Process, and will return the result
of executing the function in a subprocess. If all subprocesses are busy,
the caller will wait in a queue.
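
    A minimal usage sketch (``double`` stands in for any handler function;
    the pool must be started from within a running diesel application):

    >>> pool = ProcessPool(2, double)
    >>> pool.pool()        # spawn the subprocesses
    >>> result = pool(21)  # dispatch a call to an available subprocess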
"""
def __init__(self, concurrency, handler):
"""Creates a new ProcessPool with subprocesses that run the handler.
Args:
concurrency (int): The number of subprocesses to spawn.
handler (callable): A callable that the subprocesses will execute.
"""
self.concurrency = concurrency
self.handler = handler
self.available_procs = Queue()
self.all_procs = []
def __call__(self, *args, **params):
"""Gets a process from the pool, executes it, and returns the results.
This call will block until there is a process available to handle it.
"""
if not self.all_procs:
raise NoSubProcesses("Did you forget to start the pool?")
        # Fetch the process before entering the try block so that `p` is
        # always bound when the finally clause returns it to the queue.
        p = self.available_procs.get()
        try:
            return p(*args, **params)
        finally:
            self.available_procs.put(p)
def pool(self):
"""A callable that starts the processes in the pool.
This is useful as the callable to pass to a diesel.Loop when adding a
ProcessPool to your application.
"""
for i in xrange(self.concurrency):
proc = spawn(self.handler)
self.available_procs.put(proc)
self.all_procs.append(proc)
if __name__ == '__main__':
import diesel
def sleep_and_return(secs):
import time
start = time.time()
time.sleep(secs)
return time.time() - start
sleep_pool = ProcessPool(2, sleep_and_return)
def main():
def waiting(ident):
print ident, "waiting ..."
t = sleep_pool(4)
print ident, "woken up after", t
diesel.fork(waiting, 'a')
diesel.fork(waiting, 'b')
diesel.fork(waiting, 'c')
for i in xrange(11):
print "busy!"
diesel.sleep(1)
div = spawn(lambda x,y: x/y)
try:
div(1,0)
except ZeroDivisionError, e:
diesel.log.error(e.original_traceback)
print '^^ That was an intentional exception.'
term(div)
psleep = spawn(sleep_and_return)
diesel.fork(psleep, 0.5)
diesel.fork(psleep, 0.5)
diesel.sleep(1)
print '^^ That was an intentional exception.'
diesel.quickstop()
diesel.quickstart(sleep_pool.pool, main)
|
|
from copy import deepcopy
from unittest import TestCase
from mock import patch
from corehq.apps.es import filters
from corehq.apps.es import forms, users
from corehq.apps.es.es_query import HQESQuery
from corehq.apps.es.tests.utils import ElasticTestMixin
from corehq.elastic import SIZE_LIMIT
class TestESQuery(ElasticTestMixin, TestCase):
maxDiff = 1000
def test_basic_query(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"match_all": {}}
]
},
"query": {"match_all": {}}
}
},
"size": SIZE_LIMIT
}
self.checkQuery(HQESQuery('forms'), json_output)
def test_query_size(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"match_all": {}}
]
},
"query": {"match_all": {}}
}
},
"size": 0
}
        # Compare sizes with `is not None`, not truthiness: 0 or 1000000 ==
        # 1000000, so a size of 0 would wrongly fall back to SIZE_LIMIT.
self.checkQuery(HQESQuery('forms').size(0), json_output)
json_output['size'] = 123
self.checkQuery(HQESQuery('forms').size(123), json_output)
def test_form_query(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"not": {"missing": {
"field": "domain"}}},
{"term": {"doc_type": "xforminstance"}},
{"not": {"missing":
{"field": "xmlns"}}},
{"not": {"missing":
{"field": "form.meta.userID"}}},
]
},
"query": {"match_all": {}}
}
},
"size": SIZE_LIMIT
}
query = forms.FormES()
self.checkQuery(query, json_output)
def test_user_query(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"term": {"is_active": True}},
{"term": {"base_doc": "couchuser"}},
]
},
"query": {"match_all": {}}
}
},
"size": SIZE_LIMIT
}
query = users.UserES()
self.checkQuery(query, json_output)
def test_filtered_forms(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"term": {"domain.exact": "zombocom"}},
{"term": {"xmlns.exact": "banana"}},
{"not": {"missing": {
"field": "domain"}}},
{"term": {"doc_type": "xforminstance"}},
{"not": {"missing":
{"field": "xmlns"}}},
{"not": {"missing":
{"field": "form.meta.userID"}}},
]
},
"query": {"match_all": {}}
}
},
"size": SIZE_LIMIT
}
query = forms.FormES()\
.filter(filters.domain("zombocom"))\
.xmlns('banana')
self.checkQuery(query, json_output)
def test_remove_all_defaults(self):
# Elasticsearch fails if you pass it an empty list of filters
query = (users.UserES()
.remove_default_filter('not_deleted')
.remove_default_filter('active'))
filters = query.raw_query['query']['filtered']['filter']['and']
self.assertTrue(len(filters) > 0)
def test_values_list(self):
example_response = {
u'_shards': {u'failed': 0, u'successful': 5, u'total': 5},
u'hits': {u'hits': [{
u'_id': u'8063dff5-460b-46f2-b4d0-5871abfd97d4',
u'_index': u'xforms_1cce1f049a1b4d864c9c25dc42648a45',
u'_score': 1.0,
u'_type': u'xform',
u'_source': {
u'app_id': u'fe8481a39c3738749e6a4766fca99efd',
u'doc_type': u'xforminstance',
u'domain': u'mikesproject',
u'xmlns': u'http://openrosa.org/formdesigner/3a7cc07c-551c-4651-ab1a-d60be3017485'
}
},
{
u'_id': u'dc1376cd-0869-4c13-a267-365dfc2fa754',
u'_index': u'xforms_1cce1f049a1b4d864c9c25dc42648a45',
u'_score': 1.0,
u'_type': u'xform',
u'_source': {
u'app_id': u'3d622620ca00d7709625220751a7b1f9',
u'doc_type': u'xforminstance',
u'domain': u'jacksproject',
u'xmlns': u'http://openrosa.org/formdesigner/54db1962-b938-4e2b-b00e-08414163ead4'
}
}
],
u'max_score': 1.0,
u'total': 5247
},
u'timed_out': False,
u'took': 4
}
fields = [u'app_id', u'doc_type', u'domain']
query = forms.FormES()
with patch('corehq.apps.es.es_query.run_query', return_value=example_response):
response = query.values_list(*fields)
self.assertEqual(
[
(u'fe8481a39c3738749e6a4766fca99efd', u'xforminstance', u'mikesproject'),
(u'3d622620ca00d7709625220751a7b1f9', u'xforminstance', u'jacksproject')
],
response
)
response = query.values_list('domain', flat=True)
self.assertEqual([u'mikesproject', u'jacksproject'], response)
def test_sort(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"match_all": {}}
]
},
"query": {"match_all": {}}
}
},
"size": SIZE_LIMIT,
"sort": [{
"timeEnd": {
"order": "asc"
}
}],
}
query = (
HQESQuery('forms')
.sort('timeEnd')
)
self.checkQuery(query, json_output)
json_output['sort'] = [
{"timeStart": {"order": "asc"}},
]
self.checkQuery(query.sort('timeStart'), json_output)
json_output['sort'] = [
{"timeEnd": {"order": "asc"}},
{"timeStart": {"order": "asc"}},
]
self.checkQuery(query.sort('timeStart', reset_sort=False), json_output)
def test_cleanup_before_run(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{"match_all": {}}
]
},
"query": {"match_all": {}}
}
},
"aggs": {
"by_day": {
"date_histogram": {
"field": "date",
"interval": "day",
"time_zone": "-01:00"
}
}
},
"size": SIZE_LIMIT
}
expected_output = deepcopy(json_output)
expected_output['size'] = 0
query = HQESQuery('forms').date_histogram('by_day', 'date', 'day', '-01:00')
self.checkQuery(query, json_output)
self.checkQuery(query._clean_before_run(), expected_output)
def test_exclude_source(self):
json_output = {
"query": {
"filtered": {
"filter": {
"and": [
{
"term": {
"domain.exact": "test-exclude"
}
},
{
"match_all": {}
}
]
},
"query": {
"match_all": {}
}
}
},
"_source": False,
"size": SIZE_LIMIT,
}
query = HQESQuery('forms').domain('test-exclude').exclude_source()
self.checkQuery(query, json_output)
|
|
"""The IPython kernel implementation"""
import getpass
import sys
import traceback
from IPython.core import release
from IPython.utils.py3compat import builtin_mod, PY3
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from IPython.utils.traitlets import Instance, Type, Any, List
from IPython.utils.decorators import undoc
from ..comm import CommManager
from .kernelbase import Kernel as KernelBase
from .serialize import serialize_object, unpack_apply_message
from .zmqshell import ZMQInteractiveShell
def lazy_import_handle_comm_opened(*args, **kwargs):
from IPython.html.widgets import Widget
Widget.handle_comm_opened(*args, **kwargs)
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(ZMQInteractiveShell)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.shell.data_pub.session = self.session
self.shell.data_pub.pub_socket = self.iopub_socket
# TMP - hack while developing
self.shell._reply_content = None
self.comm_manager = CommManager(shell=self.shell, parent=self,
kernel=self)
self.comm_manager.register_target('ipython.widget', lazy_import_handle_comm_opened)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
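        # Route each comm message type to the CommManager method of the
        # same name.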
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
help_links = List([
{
'text': "Python",
'url': "http://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython",
'url': "http://ipython.org/documentation.html",
},
{
'text': "NumPy",
'url': "http://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy",
'url': "http://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib",
'url': "http://matplotlib.org/contents.html",
},
{
'text': "SymPy",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas",
'url': "http://pandas.pydata.org/pandas-docs/stable/",
},
])
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {'name': 'ipython',
'version': sys.version_info[0]},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
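            # Python 2's input() evaluates what the user types, so emulate
            # it with eval() around the frontend-forwarded raw_input().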
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
        # Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
# FIXME: the shell calls the exception handler itself.
shell._reply_content = None
try:
shell.run_cell(code, store_history=store_history, silent=silent)
except:
status = u'error'
# FIXME: this code right now isn't being used yet by default,
# because the run_cell() call above directly fires off exception
# reporting. This code, therefore, is only active in the scenario
# where runlines itself has an unhandled exception. We need to
# uniformize this, for all exception construction to come from a
            # single location in the codebase.
etype, evalue, tb = sys.exc_info()
tb_list = traceback.format_exception(etype, evalue, tb)
reply_content.update(shell._showtraceback(etype, evalue, tb_list))
else:
status = u'ok'
finally:
self._restore_input()
reply_content[u'status'] = status
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
# FIXME - fish exception info out of shell, possibly left there by
# runlines. We'll need to clean up this logic later.
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='execute')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and clear the payload system always.
reply_content[u'payload'] = shell.payload_manager.read_payload()
        # Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
info = self.shell.object_inspect(name)
reply_content = {'status' : 'ok'}
reply_content['data'] = data = {}
reply_content['metadata'] = {}
reply_content['found'] = info['found']
if info['found']:
info_text = self.shell.object_inspect_text(
name,
detail_level=detail_level,
)
data['text/plain'] = info_text
return reply_content
def do_history(self, hist_access_type, output, raw, session=None, start=None,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {'history' : list(hist)}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
status, indent_spaces = self.shell.input_transformer_manager.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
def do_apply(self, content, bufs, msg_id, reply_metadata):
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except:
# invoke IPython traceback formatting
shell.showtraceback()
# FIXME - fish exception info out of shell, possibly left there by
# run_code. We'll need to clean up this logic later.
reply_content = {}
if shell._reply_content is not None:
reply_content.update(shell._reply_content)
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
# reset after use
shell._reply_content = None
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
if reply_content['ename'] == 'UnmetDependency':
reply_metadata['dependencies_met'] = False
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
@undoc
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of IPython.kernel.zmq.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
|
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``BGP.py``
``IxNetwork BGP protocol emulation functionality``
Note:
TCL procedures::
::ixia::emulation_bgp_config
::ixia::emulation_bgp_control
::ixia::emulation_bgp_info
::ixia::emulation_bgp_route_config
"""
import copy
class BGP(object):
"""IxNet BGP configuration wrapper.
"""
def __init__(self, ixia):
"""BGP class initialization.
Args:
ixia(IxiaHLTMixin): Ixia traffic generator
"""
self.ixia = ixia
self.bgp_dict = {}
def configure_neighbour(self, port, *args, **kwargs):
"""Configure BGP neighbors.
Args:
port(tuple(int)): TG port in format tuple(chassisID, cardId, portId)
Raises:
AssertionError: error in executing tcl code
Returns:
dict: Neighbour handler names
Note:
See description of keyword arguments in ixia_bgp_api.tcl
Full path: /opt/ixos/lib/hltapi/library/ixia_bgp_api.tcl
"""
kwargs['port_handle'] = "/".join(map(str, port))
self.ixia.ixia_emulation_bgp_config(*args, **kwargs)
assert self.ixia.check_return_code() == ""
_port = "_".join(map(str, port))
cfg_name = "bgp_routers_status_{0}".format(_port)
if port not in self.bgp_dict:
self.bgp_dict[port] = {}
self.bgp_dict[port]['cfg_name'] = cfg_name
if "n_handler" not in self.bgp_dict:
self.bgp_dict[port]['n_handler'] = {}
self.ixia.set_var(**{cfg_name: "$return_code"})
# Create bgp neighbors handles list
_rlist = self.ixia.tcl("keylget {0} handles".format(cfg_name))
_rlist = _rlist.split(" ")
for _index, item in enumerate(_rlist):
# item example: 'ixNet::OBJ-/vport:1/protocols/bgp/neighborRange:1'
pos = item.rfind(":")
_id = item[pos + 1:]
self.bgp_dict[port]['n_handler'][_id] = "bgp_neighbour_{0}_{1}".format(_port, _id)
self.ixia.set_var(**{self.bgp_dict[port]['n_handler'][_id]: "[lindex [keylget {0} handles] {1}]".format(cfg_name, _index)})
return self.bgp_dict[port]['n_handler'].copy()
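# Usage sketch (keyword names follow ixia_bgp_api.tcl, so treat these as
# illustrative):
#   bgp = BGP(ixia)
#   neighbours = bgp.configure_neighbour((1, 2, 3), mode="enable",
#                                        remote_as=65001)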
def control(self, *args, **kwargs):
"""Turning BGP on/off, enabling statistics.
Raises:
AssertionError: error in executing tcl code
Returns:
None
Note:
See description of keyword arguments in ixia_bgp_api.tcl
Full path: /opt/ixos/lib/hltapi/library/ixia_bgp_api.tcl
"""
if "port" in kwargs:
kwargs['port_handle'] = "/".join(map(str, kwargs.pop("port")))
if "router" in kwargs:
kwargs['handle'] = "$" + kwargs.pop("router")
self.ixia.ixia_emulation_bgp_control(*args, **kwargs)
assert self.ixia.check_return_code() == ""
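# Usage sketch: bgp.control(mode="start", port=(1, 2, 3)) acts on every
# neighbour on the port, while bgp.control(mode="start", router=handler_name)
# targets a single neighbour handler created by configure_neighbour().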
def emulation_bgp_info(self, *args, **kwargs):
"""Command to retrieve BGP statistics.
Raises:
AssertionError: error in executing tcl code
Returns:
dict: BGP statistics
Note:
See description of keyword arguments in ixia_bgp_api.tcl
Full path: /opt/ixos/lib/hltapi/library/ixia_bgp_api.tcl
"""
is_neighbour_handler = False
neighbour_key_list = []
if 'neighbour_keys' in kwargs:
is_neighbour_handler = True
neighbour_key_list = kwargs.pop('neighbour_keys')
the_port = None
for port in args:
self.bgp_dict[port]["info"] = {}
self.bgp_dict[port]["bgp_info"] = {}
if is_neighbour_handler:
key_list = neighbour_key_list[args.index(port)]
else:
key_list = list(self.bgp_dict[port]["n_handler"].keys())
# create bgp info dictionary:
for key in key_list:
n_handle = self.bgp_dict[port]['n_handler'][key]
self.bgp_dict[port]["info"][n_handle] = {}
cfg_name = "bgp_info_{0}".format(n_handle.replace("bgp_neighbour_", ""))
self.ixia.puts("${0}".format(n_handle))
kwargs["handle"] = "${0}".format(n_handle)
self.ixia.ixia_emulation_bgp_info(**kwargs)
assert self.ixia.check_return_code() == ""
self.ixia.set_var(**{cfg_name: "$return_code"})
self.bgp_dict[port]["bgp_info"][key] = cfg_name
self.ixia.puts("$return_code")
# create list of info objects keys:
_rlist = self.ixia.tcl("keylkeys {0}".format(cfg_name))
_rlist = _rlist.split(" ")
for key_item in _rlist:
self.bgp_dict[port]["info"][n_handle][key_item] = self.ixia.tcl("keylget {0} {1}".format(cfg_name, key_item))
the_port = self.bgp_dict[port]['info']
if the_port is not None:
return copy.deepcopy(the_port)
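# Note that only the info dictionary of the last port in ``args`` is
# returned; the per-port results stay cached in self.bgp_dict[port]['info'].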
def configure_route(self, *args, **kwargs):
"""Create a route range associated with neighbor.
Raises:
AssertionError: error in executing tcl code
Returns:
dict: Route handler names
Note:
See description of keyword arguments in ixia_bgp_api.tcl
Full path: /opt/ixos/lib/hltapi/library/ixia_bgp_api.tcl
"""
if "neighbor" in kwargs:
kwargs['handle'] = "$" + kwargs.pop("neighbor")
self.ixia.ixia_emulation_bgp_route_config(*args, **kwargs)
assert self.ixia.check_return_code() == ""
# Get IxNet port name and neighbor id from handler name
port = tuple([int(x) for x in kwargs['handle'].split("_")[-4:-1]])
_port = "_".join(map(str, port))
neighbor_id = kwargs['handle'].split("_")[-1]
if "r_handler" not in self.bgp_dict:
self.bgp_dict[port]['r_handler'] = {}
# Create bgp routers handles list
# return_code example:
# {bgp_routes {::ixNet::OBJ-/vport:1/protocols/bgp/neighborRange:2/routeRange:3
# ::ixNet::OBJ-/vport:1/protocols/bgp/neighborRange:2/routeRange:4} }
# {status 1}
_rlist = self.ixia.tcl("keylget return_code bgp_routes")
_rlist = _rlist.split(" ")
for _index, item in enumerate(_rlist):
_id = item.split(":")[-1]
self.bgp_dict[port]['r_handler'][_id] = "bgp_routes_{0}_n{1}_{2}".format(_port, neighbor_id, _id)
self.ixia.set_var(**{self.bgp_dict[port]['r_handler'][_id]: "[lindex [keylget return_code bgp_routes] {0}]".format(_index)})
return self.bgp_dict[port]['r_handler'].copy()
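# Usage sketch (keyword names follow ixia_bgp_api.tcl):
#   routes = bgp.configure_route(neighbor=neighbours['1'], mode="add",
#                                prefix="10.1.0.0", num_routes=10)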
|
|
"""
The custom manager for Scripts.
"""
from django.db.models import Q
from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager
from evennia.typeclasses.managers import returns_typeclass_list
from evennia.utils.utils import make_iter
__all__ = ("ScriptManager",)
_GA = object.__getattribute__
VALIDATE_ITERATION = 0
class ScriptDBManager(TypedObjectManager):
"""
This Scriptmanager implements methods for searching
and manipulating Scripts directly from the database.
Evennia-specific search methods will return Typeclasses or
lists of Typeclasses, whereas Django-general methods will
return Querysets or database objects.
dbref (converter)
get_id (or dbref_search)
get_dbref_range
object_totals
typeclass_search
get_all_scripts_on_obj
get_all_scripts
delete_script
remove_non_persistent
validate
script_search (equivalent to evennia.search_script)
copy_script
"""
@returns_typeclass_list
def get_all_scripts_on_obj(self, obj, key=None):
"""
Returns as result all the Scripts related to a particular object.
key can be given as a dbref or name string. If given, only scripts
matching the key on the object will be returned.
"""
if not obj:
return []
player = _GA(_GA(obj, "__dbclass__"), "__name__") == "PlayerDB"
if key:
dbref = self.dbref(key)
if dbref or dbref == 0:
if player:
return self.filter(db_player=obj, id=dbref)
else:
return self.filter(db_obj=obj, id=dbref)
elif player:
return self.filter(db_player=obj, db_key=key)
else:
return self.filter(db_obj=obj, db_key=key)
elif player:
return self.filter(db_player=obj)
else:
return self.filter(db_obj=obj)
@returns_typeclass_list
def get_all_scripts(self, key=None):
"""
Return all scripts, or alternatively only
scripts with a certain key/dbref.
"""
if key:
script = []
dbref = self.dbref(key)
if dbref or dbref == 0:
script = [self.dbref_search(dbref)]
if not script:
script = self.filter(db_key=key)
return script
return self.all()
def delete_script(self, dbref):
"""
This stops and deletes a specific script directly
from the script database. This might be
needed for global scripts not tied to
a specific game object.
"""
scripts = self.get_id(dbref)
for script in make_iter(scripts):
script.stop()
def remove_non_persistent(self, obj=None):
"""
This cleans up the script database of all non-persistent
scripts, or only those on obj. It is called every time the server
restarts.
"""
if obj:
to_stop = self.filter(db_obj=obj, db_persistent=False, db_is_active=True)
to_delete = self.filter(db_obj=obj, db_persistent=False, db_is_active=False)
else:
to_stop = self.filter(db_persistent=False, db_is_active=True)
to_delete = self.filter(db_persistent=False, db_is_active=False)
nr_deleted = to_stop.count() + to_delete.count()
for script in to_stop:
script.stop()
for script in to_delete:
script.delete()
return nr_deleted
def validate(self, scripts=None, obj=None, key=None, dbref=None,
init_mode=False):
"""
This will step through the script database and make sure
all objects run scripts that are still valid in the context
they are in. This is called by the game engine at regular
intervals but can also be initiated by player scripts.
If key and/or obj is given, only update the related
script/object.
Only one of the arguments is supposed to be supplied
at a time, since they are mutually exclusive.
scripts = a list of scripts objects obtained somewhere.
obj = validate only scripts defined on a special object.
key = validate only scripts with a particular key
dbref = validate only the single script with this particular id.
init_mode - This is used during server startup and can have
three values:
False (no init mode). Called during run.
"reset" - server reboot. Kill non-persistent scripts
"reload" - server reload. Keep non-persistent scripts.
This method also makes sure to start any scripts it validates;
this should be harmless, since already-active scripts
have the property 'is_running' set and will be skipped.
"""
# we store a variable that tracks if we are calling a
# validation from within another validation (avoids
# loops).
global VALIDATE_ITERATION
if VALIDATE_ITERATION > 0:
# we are in a nested validation. Exit.
VALIDATE_ITERATION -= 1
return None, None
VALIDATE_ITERATION += 1
# not in a nested validation - validate as normal.
nr_started = 0
nr_stopped = 0
if init_mode:
if init_mode == 'reset':
# special mode when server starts or object logs in.
# This deletes all non-persistent scripts from database
nr_stopped += self.remove_non_persistent(obj=obj)
# turn off the activity flag for all remaining scripts
scripts = self.get_all_scripts()
for script in scripts:
script.is_active = False
elif not scripts:
# normal operation
if dbref and self.dbref(dbref, reqhash=False):
scripts = self.get_id(dbref)
elif obj:
#print "calling get_all_scripts_on_obj", obj, key, VALIDATE_ITERATION
scripts = self.get_all_scripts_on_obj(obj, key=key)
else:
scripts = self.get_all_scripts(key=key) #self.model.get_all_cached_instances()
if not scripts:
# no scripts available to validate
VALIDATE_ITERATION -= 1
return None, None
#print "scripts to validate: [%s]" % (", ".join(script.key for script in scripts))
for script in scripts:
#print "validating %s (%i) (init_mode=%s)" % (script.key, id(script), init_mode)
if script.is_valid():
nr_started += script.start(force_restart=init_mode)
#print "back from start. nr_started=", nr_started
else:
script.stop()
nr_stopped += 1
VALIDATE_ITERATION -= 1
return nr_started, nr_stopped
@returns_typeclass_list
def script_search(self, ostring, obj=None, only_timed=False):
"""
Search for a particular script.
ostring - search criterion - a script ID or key
obj - limit search to scripts defined on this object
only_timed - limit search only to scripts that run
on a timer.
"""
ostring = ostring.strip()
dbref = self.dbref(ostring)
if dbref or dbref == 0:
# this is a dbref, try to find the script directly
dbref_match = self.dbref_search(dbref)
if dbref_match and not ((obj and obj != dbref_match.obj)
or (only_timed and dbref_match.interval)):
return [dbref_match]
# not a dbref; normal search
obj_restriction = Q(db_obj=obj) if obj else Q()
timed_restriction = Q(interval__gt=0) if only_timed else Q()
scripts = self.filter(timed_restriction & obj_restriction & Q(db_key__iexact=ostring))
return scripts
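# Usage sketch: script_search("#24") resolves a dbref directly, while
# script_search("ticker", obj=myobj, only_timed=True) restricts the match
# to timed scripts defined on myobj.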
def copy_script(self, original_script, new_key=None, new_obj=None, new_locks=None):
"""
Make an identical copy of the original_script
"""
typeclass = original_script.typeclass_path
new_key = new_key if new_key is not None else original_script.key
new_obj = new_obj if new_obj is not None else original_script.obj
new_locks = new_locks if new_locks is not None else original_script.db_lock_storage
from evennia.utils import create
new_script = create.create_script(typeclass, key=new_key, obj=new_obj,
locks=new_locks, autostart=True)
return new_script
class ScriptManager(ScriptDBManager, TypeclassManager):
pass
|
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from model_utils import FieldTracker
from fit4school.core.notifications import notify_tracker_changes
from django.conf import settings
import logging
import pendulum
logger = logging.getLogger(__name__)
class Program(models.Model):
class Meta:
verbose_name = _('program')
verbose_name_plural = _('programs')
name = models.CharField(max_length=50, verbose_name=_('name'))
email_account = models.EmailField(verbose_name=_('email'))
email_token = models.CharField(max_length=250, verbose_name=_('token'))
account_template = models.CharField(max_length=250,
verbose_name=_('template'))
account_password = models.CharField(max_length=250,
verbose_name=_('password'))
def __str__(self):
return self.name
def verbose_name(self, r2l=False):
return self.name
class School(models.Model):
class Meta:
verbose_name = _('school')
verbose_name_plural = _('schools')
name = models.CharField(max_length=50, verbose_name=_('name'))
city = models.CharField(max_length=50, blank=True, null=True,
verbose_name=_('city'))
program = models.ForeignKey(Program,
on_delete=models.CASCADE,
related_name="schools",
verbose_name=_('program'))
def __str__(self):
return self.name
class Classroom(models.Model):
class Meta:
verbose_name = _('classroom')
verbose_name_plural = _('classrooms')
name = models.CharField(max_length=50, verbose_name=_('name'))
school = models.ForeignKey(School, on_delete=models.CASCADE,
related_name="classrooms",
verbose_name=_('school'))
teacher = models.CharField(max_length=50, verbose_name=_('teacher'))
email = models.EmailField(verbose_name=_('email'))
telephone = models.CharField(max_length=50, blank=True,
verbose_name=_('telephone'))
verified = models.BooleanField(default=False, verbose_name=_('verified'))
last_alert_sent = models.DateField(null=True,
verbose_name=_('last notified'))
is_active = models.BooleanField(default=True, verbose_name=_('active'))
start_date = models.DateField(null=True, verbose_name=_('start date'))
def __str__(self):
return self.name
def verbose_name(self, r2l=False):
return "%s - %s - %s" % (self.name, self.teacher, self.school.name)
def school_name(self):
return self.school.name
school_name.short_description = _('school')
class Student(models.Model):
"""
We are mixing the Student, Token and Device models as there is no need
to support multiple relationships and the model is currently very simple
"""
class Meta:
verbose_name = _('student')
verbose_name_plural = _('students')
name = models.CharField(max_length=50, verbose_name=_('name'))
classroom = models.ForeignKey(Classroom,
on_delete=models.CASCADE,
related_name="students",
verbose_name=_('classroom'))
def __str__(self):
return self.name
def verbose_name(self, r2l=False):
return "%s - %s" % (self.classroom.verbose_name(), self.name)
def program(self):
return self.classroom.school.program
def add_tracker(self, tracker_id, assign_date=None):
try:
tracker = Tracker.objects.get(program=self.program(),
tracker_id=tracker_id)
except Tracker.DoesNotExist:
msg = "Unable to find tracker {} in program {}".format(
tracker_id, self.program().name)
logger.error(msg)
raise models.ObjectDoesNotExist(msg)
if tracker.student:
msg = "Tracker {}/{} is already assigned to {}".format(
tracker.tracker_id,
tracker.program.name,
tracker.student.verbose_name(),
)
logger.error(msg)
raise KeyError(msg)
tracker.student = self
# there seem to be issues with pendulum
tz = pendulum.timezone(timezone.get_current_timezone_name())
tracker.date_assigned = assign_date or tz.convert(timezone.now())
tracker.save()
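# Usage sketch: student.add_tracker(42) binds tracker 42 of the student's
# own program to this student and stamps date_assigned with the current
# (timezone-aware) time unless assign_date is given.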
class TrackerChange(models.Model):
timestamp = models.DateTimeField(auto_now=True, verbose_name=_('timestamp'))
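# Note: auto_now=True rewrites this field on every save, so the explicit
# timestamp passed when creating TrackerChange rows in Tracker.save() below
# is effectively ignored.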
field_name = models.CharField(max_length=50, verbose_name=_('field name'))
value_old = models.CharField(max_length=50, verbose_name=_('old value'))
value_new = models.CharField(max_length=50, verbose_name=_('new value'))
tracker_id = models.IntegerField(verbose_name=_('tracker id'))
program_name = models.CharField(max_length=50, verbose_name=_('program'))
student_name = models.CharField(max_length=50, null=True, verbose_name=_('student'))
school_name = models.CharField(max_length=50, null=True, verbose_name=_('school'))
classroom_name = models.CharField(max_length=50, null=True, verbose_name=_('classroom'))
class Tracker(models.Model):
"""
We are mixing the Tracker, Token and Device models as there is no need to support multiple
relationships and the model is currently very simple
"""
BATTERY_HIGH = "H"
BATTERY_MEDIUM = "M"
BATTERY_LOW = "L"
BATTERY_EMPTY = "E"
BATTERY_UNKNOWN = "U"
DEFAULT_BATTERY_STATUS = BATTERY_UNKNOWN
BATTERY_STATUS = (
(BATTERY_HIGH, _('High')),
(BATTERY_MEDIUM, _('Medium')),
(BATTERY_LOW, _('Low')),
(BATTERY_EMPTY, _('Empty')),
(BATTERY_UNKNOWN, _('Unknown')),
)
STATUS_OK = "O"
STATUS_LOST = "L"
STATUS_BROKEN = "B"
TRACKER_STATUS = (
(STATUS_OK, _('Ok')),
(STATUS_LOST, _('Lost')),
(STATUS_BROKEN, _('Broken')),
)
class Meta:
verbose_name = _('tracker')
verbose_name_plural = _('trackers')
unique_together = ('tracker_id', 'program',)
tracker_id = models.IntegerField(verbose_name=_('tracker id'))
program = models.ForeignKey(Program,
on_delete=models.CASCADE,
related_name="trackers",
verbose_name=_('program'))
student = models.ForeignKey(Student,
on_delete=models.SET_NULL,
related_name="trackers",
verbose_name=_('student'),
null=True,
blank=True,
default=None,
# limit_choices_to={'classroom__school__program': program},
)
fitbit_user = models.CharField(max_length=32,
unique=True,
null=True,
verbose_name=_('Fitbit User ID'),
help_text=_('the fitbit user id'),
blank=True)
date_assigned = models.DateTimeField(verbose_name=_('assigned date'), null=True, blank=True)
oauth_access_token = models.TextField(verbose_name=_('access token'), null=True, blank=True)
oauth_refresh_token = models.TextField(verbose_name=_('refresh token'), null=True, blank=True)
oauth_expires_at = models.FloatField(verbose_name=_('expires at'), null=True, blank=True)
oauth_expires_in = models.FloatField(verbose_name=_('expires in'), null=True, blank=True)
tracker_count = models.IntegerField(verbose_name=_('tracker count'), default=0)
battery_status = models.CharField(max_length=1,
verbose_name=_('battery status'),
default=DEFAULT_BATTERY_STATUS,
choices=BATTERY_STATUS)
tracker_status = models.CharField(max_length=1,
verbose_name=_('tracker status'),
default=STATUS_OK,
choices=TRACKER_STATUS)
mac = models.CharField(max_length=16, null=True, blank=True, verbose_name=_('mac'))
last_sync = models.DateTimeField(verbose_name=_('last sync'), null=True, blank=True)
timezone = models.CharField(max_length=30, verbose_name=_('timezone'), null=True, blank=True)
last_checked = models.DateTimeField(verbose_name=_('last checked'), null=True, blank=True)
_tracked_fields = settings.TRACKER_TRACKED_FIELDS
field_tracker = FieldTracker(fields=_tracked_fields)
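# FieldTracker (from django-model-utils) remembers the previous values of
# the tracked fields so that save() below can diff old vs. new and record a
# TrackerChange row per modified field.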
# def save(force_insert=False, force_update=False, using=DEFAULT_DB_ALIAS, update_fields=None):
def save(self, *args, **kwargs):
# print("save", args, kwargs)
super(Tracker, self).save(*args, **kwargs)
try:
changes = self.field_tracker.changed()
now = timezone.now()
if changes and list(changes.values()) != [None] * len(self._tracked_fields):
changes['_student'] = ""
changes['_tracker_id'] = self.tracker_id
student_name = classroom_name = school_name = None
if self.date_assigned:
changes['_student'] = self.student.verbose_name()
student_name = self.student.name
classroom_name = self.student.classroom.name
school_name = self.student.classroom.school.name
for k in changes.keys():
if not k.startswith('_'):
TrackerChange.objects.create(
timestamp=now,
field_name=k,
value_old=str(changes[k]),
value_new=str(getattr(self, k)),
tracker_id=self.tracker_id,
program_name=self.program.name,
student_name=student_name,
school_name=school_name,
classroom_name=classroom_name,
)
changes[k] = (changes[k], getattr(self, k))
notify_tracker_changes.delay(changes)
except Exception as ex:
logger.error("Save failed with error {}".format(str(ex)))
def is_linked(self):
return self.oauth_access_token is not None
is_linked.short_description = _('linked to fitbit')
is_linked.boolean = True
def refresh_cb(self, token):
""" Called when the OAuth token has been refreshed """
self.oauth_access_token = token['access_token']
self.oauth_refresh_token = token['refresh_token']
self.oauth_expires_at = token['expires_at']
self.save()
def get_oauth_data(self):
return {
'user_id': self.fitbit_user,
'access_token': self.oauth_access_token,
'refresh_token': self.oauth_refresh_token,
'expires_at': self.oauth_expires_at,
'refresh_cb': self.refresh_cb,
}
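# Sketch: this dict is shaped for an OAuth2 Fitbit API client (e.g. the
# python-fitbit package), with refresh_cb writing refreshed tokens back to
# the model whenever the client renews them.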
def get_program_name(self):
return self.program.name
def __str__(self):
return "{} {}".format(_('tracker'), self.tracker_id)
def verbose_name(self, r2l=False):
return "{} ({} {})".format(self.program.name, _('tracker'),
self.tracker_id)
def get_email(self):
try:
if not hasattr(self, 'program'):
return "undefined"
return self.program.account_template.format(
tracker_id=self.tracker_id)
except Exception as ex:
return "Bad format, error is{}".format(str(ex))
class TrackerActivity(models.Model):
timestamp = models.DateField()
steps = models.IntegerField()
minutesSedentary = models.IntegerField()
minutesLightlyActive = models.IntegerField()
minutesFairlyActive = models.IntegerField()
minutesVeryActive = models.IntegerField()
tracker = models.ForeignKey(Tracker,
on_delete=models.CASCADE,
related_name="activity",
verbose_name=_('tracker')
)
|
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import utils
import ambari_simplejson as json # simplejson is much faster compared to the Python 2.6 json module and has the same function set.
import os
import re
from ambari_commons.os_check import OSCheck
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import hdp_select
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_hdp_stack_version
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.get_lzo_packages import get_lzo_packages
from resource_management.libraries.functions.is_empty import is_empty
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_name = default("/hostLevelParams/stack_name", None)
upgrade_direction = default("/commandParams/upgrade_direction", None)
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
hdp_stack_version = format_hdp_stack_version(stack_version_unformatted)
# New Cluster Stack Version that is defined during the RESTART of a Rolling Upgrade
version = default("/commandParams/version", None)
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = status_params.hdfs_user
root_user = "root"
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default('/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default('/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site']['dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False
# hadoop default parameters
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = hdp_select.get_hadoop_dir("libexec")
hadoop_bin = hdp_select.get_hadoop_dir("sbin")
hadoop_bin_dir = hdp_select.get_hadoop_dir("bin")
hadoop_home = hdp_select.get_hadoop_dir("home")
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = conf_select.get_hadoop_conf_dir()
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = hdp_select.get_hadoop_dir("lib")
# hadoop parameters for 2.2+
if Script.is_hdp_stack_greater_or_equal("2.2"):
mapreduce_libs_path = "/usr/hdp/current/hadoop-mapreduce-client/*"
if not security_enabled:
hadoop_secure_dn_user = '""'
else:
dfs_dn_port = utils.get_port(dfs_dn_addr)
dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
# If the datanode binds privileged (root-owned) ports it cannot be started as a plain user, so detect that case here
if dfs_http_policy == "HTTPS_ONLY":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
elif dfs_http_policy == "HTTP_AND_HTTPS":
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
else: # params.dfs_http_policy == "HTTP_ONLY" or not defined:
secure_dn_ports_are_in_use = utils.is_secure_port(dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
if secure_dn_ports_are_in_use:
hadoop_secure_dn_user = hdfs_user
else:
hadoop_secure_dn_user = '""'
ambari_libs_dir = "/var/lib/ambari-agent/lib"
limits_conf_dir = "/etc/security/limits.d"
hdfs_user_nofile_limit = default("/configurations/hadoop-env/hdfs_user_nofile_limit", "128000")
hdfs_user_nproc_limit = default("/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
create_lib_snappy_symlinks = not Script.is_hdp_stack_greater_or_equal("2.2")
if Script.is_hdp_stack_greater_or_equal("2.0") and Script.is_hdp_stack_less_than("2.1") and not OSCheck.is_suse_family():
# deprecated rhel jsvc_path
jsvc_path = "/usr/libexec/bigtop-utils"
else:
jsvc_path = "/usr/lib/bigtop-utils"
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
ulimit_cmd = "ulimit -c unlimited ; "
snappy_so = "libsnappy.so"
so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
so_src_dir_x86 = format("{hadoop_home}/lib")
so_src_dir_x64 = format("{hadoop_home}/lib64")
so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
#security params
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
falcon_user = config['configurations']['falcon-env']['falcon_user']
#exclude file
hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
update_exclude_file_only = default("/commandParams/update_exclude_file_only",False)
command_phase = default("/commandParams/phase","")
klist_path_local = get_klist_path(default('/configurations/kerberos-env/executable_search_paths', None))
kinit_path_local = get_kinit_path(default('/configurations/kerberos-env/executable_search_paths', None))
#hosts
hostname = config["hostname"]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
nm_host = default("/clusterHostInfo/nm_host", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
has_ganglia_server = bool(ganglia_server_hosts)
has_namenodes = bool(namenode_host)
has_jobtracker = bool(jtnode_host)
has_resourcemanager = bool(rm_host)
has_histroryserver = bool(hs_host)
has_hbase_masters = bool(hbase_master_hosts)
has_slaves = bool(slave_hosts)
has_oozie_server = bool(oozie_servers)
has_hcat_server_host = bool(hcat_server_hosts)
has_hive_server_host = bool(hive_server_host)
has_journalnode_hosts = bool(journalnode_hosts)
has_zkfc_hosts = bool(zkfc_hosts)
has_falcon_host = bool(falcon_host)
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
ganglia_server_host = ganglia_server_hosts[0]
#users and groups
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_user = config['configurations']['hbase-env']['hbase_user']
oozie_user = config['configurations']['oozie-env']['oozie_user']
webhcat_user = config['configurations']['hive-env']['hcat_user']
hcat_user = config['configurations']['hive-env']['hcat_user']
hive_user = config['configurations']['hive-env']['hive_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env']['smokeuser_principal_name']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name', None)
user_group = config['configurations']['cluster-env']['user_group']
root_group = "root"
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
#hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env']['hadoop_root_logger']
dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
namenode_dirs_stub_filename = "namenode_dirs_created"
smoke_hdfs_user_dir = format("/user/{smoke_user}")
smoke_hdfs_user_mode = 0770
hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
namenode_formatted_old_mark_dirs = ["/var/run/hadoop/hdfs/namenode-formatted",
format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
"/var/lib/hdfs/namenode/formatted"]
dfs_name_dirs = dfs_name_dir.split(",")
namenode_formatted_mark_dirs = []
for dn_dir in dfs_name_dirs:
tmp_mark_dir = format("{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
namenode_formatted_mark_dirs.append(tmp_mark_dir)
# Use the namenode RPC address if configured, otherwise, fallback to the default file system
namenode_address = None
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
namenode_rpcaddress = config['configurations']['hdfs-site']['dfs.namenode.rpc-address']
namenode_address = format("hdfs://{namenode_rpcaddress}")
else:
namenode_address = config['configurations']['core-site']['fs.defaultFS']
fs_checkpoint_dirs = default("/configurations/hdfs-site/dfs.namenode.checkpoint.dir", "").split(',')
dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']
dfs_data_dir = ",".join([re.sub(r'^\[.+\]', '', dfs_dir.strip()) for dfs_dir in dfs_data_dir.split(",")])
data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
dfs_ha_automatic_failover_enabled = default("/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
# hostname of the active HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_active = default("/configurations/hadoop-env/dfs_ha_initial_namenode_active", None)
# hostname of the standby HDFS HA Namenode (only used when HA is enabled)
dfs_ha_namenode_standby = default("/configurations/hadoop-env/dfs_ha_initial_namenode_standby", None)
# Values for the current Host
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None
if dfs_ha_namenode_ids:
dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
if dfs_ha_namenode_ids_array_len > 1:
dfs_ha_enabled = True
if dfs_ha_enabled:
for nn_id in dfs_ha_namemodes_ids_list:
nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
if hostname in nn_host:
namenode_id = nn_id
namenode_rpc = nn_host
# With HA enabled namenode_address is recomputed
namenode_address = format('hdfs://{dfs_ha_nameservices}')
# Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
other_namenode_id = list(set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
https_only = True
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.https-address', None)
else:
https_only = False
journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
if journalnode_address:
journalnode_port = journalnode_address.split(":")[1]
if security_enabled:
dn_principal_name = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
dn_keytab = config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
dn_principal_name = dn_principal_name.replace('_HOST',hostname.lower())
dn_kinit_cmd = format("{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
nn_principal_name = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
nn_keytab = config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
nn_principal_name = nn_principal_name.replace('_HOST',hostname.lower())
nn_kinit_cmd = format("{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
jn_principal_name = default("/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
if jn_principal_name:
jn_principal_name = jn_principal_name.replace('_HOST', hostname.lower())
jn_keytab = default("/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
jn_kinit_cmd = format("{kinit_path_local} -kt {jn_keytab} {jn_principal_name};")
else:
dn_kinit_cmd = ""
nn_kinit_cmd = ""
jn_kinit_cmd = ""
hdfs_site = config['configurations']['hdfs-site']
default_fs = config['configurations']['core-site']['fs.defaultFS']
dfs_type = default("/commandParams/dfs_type", "")
import functools
#create partial functions with common arguments for every HdfsResource call
#to create/delete/copyfromlocal hdfs directories/files we need to call params.HdfsResource in code
HdfsResource = functools.partial(
HdfsResource,
user=hdfs_user,
security_enabled = security_enabled,
keytab = hdfs_user_keytab,
kinit_path_local = kinit_path_local,
hadoop_bin_dir = hadoop_bin_dir,
hadoop_conf_dir = hadoop_conf_dir,
principal_name = hdfs_principal_name,
hdfs_site = hdfs_site,
default_fs = default_fs,
dfs_type = dfs_type
)
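# Usage sketch: with the partial above, recipe code only needs e.g.
#   HdfsResource("/user/foo", type="directory", action="create_on_execute")
# and the security/keytab/filesystem arguments are supplied automatically.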
# The logic for LZO also exists in OOZIE's params.py
io_compression_codecs = default("/configurations/core-site/io.compression.codecs", None)
lzo_enabled = io_compression_codecs is not None and "com.hadoop.compression.lzo" in io_compression_codecs.lower()
lzo_packages = get_lzo_packages(stack_version_unformatted)
exclude_packages = []
if not lzo_enabled:
exclude_packages += lzo_packages
name_node_params = default("/commandParams/namenode", None)
java_home = config['hostLevelParams']['java_home']
java_version = int(config['hostLevelParams']['java_version'])
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
# ranger host
ranger_admin_hosts = default("/clusterHostInfo/ranger_admin_hosts", [])
has_ranger_admin = bool(ranger_admin_hosts)
xml_configurations_supported = config['configurations']['ranger-env']['xml_configurations_supported']
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
#ranger hdfs properties
policymgr_mgr_url = config['configurations']['admin-properties']['policymgr_external_url']
sql_connector_jar = config['configurations']['admin-properties']['SQL_CONNECTOR_JAR']
xa_audit_db_name = config['configurations']['admin-properties']['audit_db_name']
xa_audit_db_user = config['configurations']['admin-properties']['audit_db_user']
xa_db_host = config['configurations']['admin-properties']['db_host']
repo_name = str(config['clusterName']) + '_hadoop'
hadoop_security_authentication = config['configurations']['core-site']['hadoop.security.authentication']
hadoop_security_authorization = config['configurations']['core-site']['hadoop.security.authorization']
fs_default_name = config['configurations']['core-site']['fs.defaultFS']
hadoop_security_auth_to_local = config['configurations']['core-site']['hadoop.security.auth_to_local']
hadoop_rpc_protection = config['configurations']['ranger-hdfs-plugin-properties']['hadoop.rpc.protection']
common_name_for_certificate = config['configurations']['ranger-hdfs-plugin-properties']['common.name.for.certificate']
repo_config_username = config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_USERNAME']
if security_enabled:
sn_principal_name = default("/configurations/hdfs-site/dfs.secondary.namenode.kerberos.principal", "nn/_HOST@EXAMPLE.COM")
sn_principal_name = sn_principal_name.replace('_HOST',hostname.lower())
ranger_env = config['configurations']['ranger-env']
ranger_plugin_properties = config['configurations']['ranger-hdfs-plugin-properties']
policy_user = config['configurations']['ranger-hdfs-plugin-properties']['policy_user']
#For curl command in ranger plugin to get db connector
jdk_location = config['hostLevelParams']['jdk_location']
java_share_dir = '/usr/share/java'
is_https_enabled = config['configurations']['hdfs-site']['dfs.https.enable'] if \
not is_empty(config['configurations']['hdfs-site']['dfs.https.enable']) else False
if has_ranger_admin:
enable_ranger_hdfs = (config['configurations']['ranger-hdfs-plugin-properties']['ranger-hdfs-plugin-enabled'].lower() == 'yes')
xa_audit_db_password = unicode(config['configurations']['admin-properties']['audit_db_password'])
repo_config_password = unicode(config['configurations']['ranger-hdfs-plugin-properties']['REPOSITORY_CONFIG_PASSWORD'])
xa_audit_db_flavor = (config['configurations']['admin-properties']['DB_FLAVOR']).lower()
if xa_audit_db_flavor == 'mysql':
jdbc_symlink_name = "mysql-jdbc-driver.jar"
jdbc_jar_name = "mysql-connector-java.jar"
audit_jdbc_url = format('jdbc:mysql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "com.mysql.jdbc.Driver"
elif xa_audit_db_flavor == 'oracle':
jdbc_jar_name = "ojdbc6.jar"
jdbc_symlink_name = "oracle-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:oracle:thin:@//{xa_db_host}')
jdbc_driver = "oracle.jdbc.OracleDriver"
elif xa_audit_db_flavor == 'postgres':
jdbc_jar_name = "postgresql.jar"
jdbc_symlink_name = "postgres-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:postgresql://{xa_db_host}/{xa_audit_db_name}')
jdbc_driver = "org.postgresql.Driver"
elif xa_audit_db_flavor == 'mssql':
jdbc_jar_name = "sqljdbc4.jar"
jdbc_symlink_name = "mssql-jdbc-driver.jar"
audit_jdbc_url = format('jdbc:sqlserver://{xa_db_host};databaseName={xa_audit_db_name}')
jdbc_driver = "com.microsoft.sqlserver.jdbc.SQLServerDriver"
elif xa_audit_db_flavor == 'sqla':
jdbc_jar_name = "sajdbc4.jar"
jdbc_symlink_name = "sqlanywhere-jdbc-driver.tar.gz"
audit_jdbc_url = format('jdbc:sqlanywhere:database={xa_audit_db_name};host={xa_db_host}')
jdbc_driver = "sap.jdbc4.sqlanywhere.IDriver"
downloaded_custom_connector = format("{tmp_dir}/{jdbc_jar_name}")
driver_curl_source = format("{jdk_location}/{jdbc_symlink_name}")
driver_curl_target = format("{hadoop_lib_home}/{jdbc_jar_name}")
hdfs_ranger_plugin_config = {
'username': repo_config_username,
'password': repo_config_password,
'hadoop.security.authentication': hadoop_security_authentication,
'hadoop.security.authorization': hadoop_security_authorization,
'fs.default.name': fs_default_name,
'hadoop.security.auth_to_local': hadoop_security_auth_to_local,
'hadoop.rpc.protection': hadoop_rpc_protection,
'commonNameForCertificate': common_name_for_certificate,
'dfs.datanode.kerberos.principal': dn_principal_name if security_enabled else '',
'dfs.namenode.kerberos.principal': nn_principal_name if security_enabled else '',
'dfs.secondary.namenode.kerberos.principal': sn_principal_name if security_enabled else ''
}
hdfs_ranger_plugin_repo = {
'isActive': 'true',
'config': json.dumps(hdfs_ranger_plugin_config),
'description': 'hdfs repo',
'name': repo_name,
'repositoryType': 'hdfs',
'assetType': '1'
}
ranger_audit_solr_urls = config['configurations']['ranger-admin-site']['ranger.audit.solr.urls']
xa_audit_db_is_enabled = config['configurations']['ranger-hdfs-audit']['xasecure.audit.destination.db'] if xml_configurations_supported else None
ssl_keystore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.keystore.password']) if xml_configurations_supported else None
ssl_truststore_password = unicode(config['configurations']['ranger-hdfs-policymgr-ssl']['xasecure.policymgr.clientssl.truststore.password']) if xml_configurations_supported else None
credential_file = format('/etc/ranger/{repo_name}/cred.jceks') if xml_configurations_supported else None
#For SQLA explicitly disable audit to DB for Ranger
if xa_audit_db_flavor == 'sqla':
xa_audit_db_is_enabled = False
|
|
from __future__ import absolute_import
from sentry.utils.compat import zip
__all__ = (
"TestCase",
"TransactionTestCase",
"APITestCase",
"TwoFactorAPITestCase",
"AuthProviderTestCase",
"RuleTestCase",
"PermissionTestCase",
"PluginTestCase",
"CliTestCase",
"AcceptanceTestCase",
"IntegrationTestCase",
"SnubaTestCase",
"BaseIncidentsTest",
"IntegrationRepositoryTestCase",
"ReleaseCommitPatchTest",
"SetRefsTestCase",
"OrganizationDashboardWidgetTestCase",
)
import os
import os.path
import pytest
import requests
import six
import time
import inspect
from uuid import uuid4
from contextlib import contextmanager
from sentry.utils.compat import mock
from click.testing import CliRunner
from datetime import datetime
from django.conf import settings
from django.contrib.auth import login
from django.contrib.auth.models import AnonymousUser
from django.core import signing
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import connections, DEFAULT_DB_ALIAS
from django.http import HttpRequest
from django.test import override_settings, TestCase, TransactionTestCase
from django.test.utils import CaptureQueriesContext
from django.utils import timezone
from django.utils.functional import cached_property
from exam import before, fixture, Exam
from sentry.utils.compat.mock import patch
from pkg_resources import iter_entry_points
from rest_framework.test import APITestCase as BaseAPITestCase
from six.moves.urllib.parse import urlencode
from sentry import auth
from sentry import eventstore
from sentry.auth.authenticators import TotpInterface
from sentry.auth.providers.dummy import DummyProvider
from sentry.auth.superuser import (
Superuser,
COOKIE_SALT as SU_COOKIE_SALT,
COOKIE_NAME as SU_COOKIE_NAME,
ORG_ID as SU_ORG_ID,
COOKIE_SECURE as SU_COOKIE_SECURE,
COOKIE_DOMAIN as SU_COOKIE_DOMAIN,
COOKIE_PATH as SU_COOKIE_PATH,
)
from sentry.constants import MODULE_ROOT
from sentry.eventstream.snuba import SnubaEventStream
from sentry.models import (
GroupMeta,
ProjectOption,
Repository,
DeletedOrganization,
Organization,
Dashboard,
DashboardWidgetQuery,
)
from sentry.plugins.base import plugins
from sentry.rules import EventState
from sentry.tagstore.snuba import SnubaTagStorage
from sentry.utils import json
from sentry.utils.auth import SSO_SESSION_KEY
from sentry.testutils.helpers.datetime import iso_format
from sentry.utils.retries import TimedRetryPolicy
from .fixtures import Fixtures
from .factories import Factories
from .skips import requires_snuba
from .helpers import AuthProvider, Feature, TaskRunner, override_options, parse_queries
DEFAULT_USER_AGENT = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
class BaseTestCase(Fixtures, Exam):
def assertRequiresAuthentication(self, path, method="GET"):
resp = getattr(self.client, method.lower())(path)
assert resp.status_code == 302
assert resp["Location"].startswith("http://testserver" + reverse("sentry-login"))
@before
def setup_dummy_auth_provider(self):
auth.register("dummy", DummyProvider)
self.addCleanup(auth.unregister, "dummy", DummyProvider)
def tasks(self):
return TaskRunner()
@classmethod
@contextmanager
def capture_on_commit_callbacks(cls, using=DEFAULT_DB_ALIAS, execute=False):
"""
Context manager to capture transaction.on_commit() callbacks.
Backported from Django:
https://github.com/django/django/pull/12944
"""
callbacks = []
start_count = len(connections[using].run_on_commit)
try:
yield callbacks
finally:
run_on_commit = connections[using].run_on_commit[start_count:]
callbacks[:] = [func for sids, func in run_on_commit]
if execute:
for callback in callbacks:
callback()
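# Usage sketch (trigger_on_commit is a hypothetical stand-in):
#   with self.capture_on_commit_callbacks(execute=True) as callbacks:
#       trigger_on_commit()
#   assert len(callbacks) == 1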
def feature(self, names):
"""
>>> with self.feature({'feature:name': True}):
>>> # ...
"""
return Feature(names)
def auth_provider(self, name, cls):
"""
>>> with self.auth_provider('name', Provider):
>>> # ...
"""
return AuthProvider(name, cls)
def save_session(self):
self.session.save()
self.save_cookie(
name=settings.SESSION_COOKIE_NAME,
value=self.session.session_key,
max_age=None,
path="/",
domain=settings.SESSION_COOKIE_DOMAIN,
secure=settings.SESSION_COOKIE_SECURE or None,
expires=None,
)
def save_cookie(self, name, value, **params):
self.client.cookies[name] = value
self.client.cookies[name].update({k.replace("_", "-"): v for k, v in six.iteritems(params)})
def make_request(self, user=None, auth=None, method=None):
request = HttpRequest()
if method:
request.method = method
request.META["REMOTE_ADDR"] = "127.0.0.1"
request.META["SERVER_NAME"] = "testserver"
request.META["SERVER_PORT"] = 80
request.GET = {}
request.POST = {}
# order matters here, session -> user -> other things
request.session = self.session
request.auth = auth
request.user = user or AnonymousUser()
request.superuser = Superuser(request)
request.is_superuser = lambda: request.superuser.is_active
request.successful_authenticator = None
return request
# TODO(dcramer): ideally superuser_sso would be False by default, but that would require
# a lot of tests changing
@TimedRetryPolicy.wrap(timeout=5)
def login_as(
self, user, organization_id=None, organization_ids=None, superuser=False, superuser_sso=True
):
user.backend = settings.AUTHENTICATION_BACKENDS[0]
request = self.make_request()
login(request, user)
request.user = user
if organization_ids is None:
organization_ids = set()
else:
organization_ids = set(organization_ids)
if superuser and superuser_sso is not False:
if SU_ORG_ID:
organization_ids.add(SU_ORG_ID)
if organization_id:
organization_ids.add(organization_id)
# TODO(dcramer): ideally this would get abstracted
if organization_ids:
request.session[SSO_SESSION_KEY] = ",".join(six.text_type(o) for o in organization_ids)
# logging in implicitly binds superuser, but for test cases we
# want that action to be explicit to avoid accidentally testing
# superuser-only code
if not superuser:
# XXX(dcramer): we're calling the internal method to avoid logging
request.superuser._set_logged_out()
elif request.user.is_superuser and superuser:
request.superuser.set_logged_in(request.user)
# XXX(dcramer): awful hack to ensure future attempts to instantiate
# the Superuser object are successful
self.save_cookie(
name=SU_COOKIE_NAME,
value=signing.get_cookie_signer(salt=SU_COOKIE_NAME + SU_COOKIE_SALT).sign(
request.superuser.token
),
max_age=None,
path=SU_COOKIE_PATH,
domain=SU_COOKIE_DOMAIN,
secure=SU_COOKIE_SECURE or None,
expires=None,
)
# Save the session values.
self.save_session()
def load_fixture(self, filepath):
filepath = os.path.join(MODULE_ROOT, os.pardir, os.pardir, "tests", "fixtures", filepath)
with open(filepath, "rb") as fp:
return fp.read()
def _pre_setup(self):
super(BaseTestCase, self)._pre_setup()
cache.clear()
ProjectOption.objects.clear_local_cache()
GroupMeta.objects.clear_local_cache()
def _post_teardown(self):
super(BaseTestCase, self)._post_teardown()
def options(self, options):
"""
A context manager that temporarily sets a global option and reverts
back to the original value when exiting the context.
"""
return override_options(options)
def assert_valid_deleted_log(self, deleted_log, original_object):
assert deleted_log is not None
assert deleted_log.name == original_object.name
assert deleted_log.slug == original_object.slug
if not isinstance(deleted_log, DeletedOrganization):
assert deleted_log.organization_id == original_object.organization.id
assert deleted_log.organization_name == original_object.organization.name
assert deleted_log.organization_slug == original_object.organization.slug
assert deleted_log.date_created == original_object.date_added
assert deleted_log.date_deleted >= deleted_log.date_created
def assertWriteQueries(self, queries, debug=False, *args, **kwargs):
func = kwargs.pop("func", None)
using = kwargs.pop("using", DEFAULT_DB_ALIAS)
conn = connections[using]
context = _AssertQueriesContext(self, queries, debug, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
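# Usage sketch (save_group is a hypothetical stand-in):
#   self.assertWriteQueries({"sentry_groupedmessage": 1}, func=save_group)
# or, without func=, as a context manager around the writing code.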
class _AssertQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, queries, debug, connection):
self.test_case = test_case
self.queries = queries
self.debug = debug
super(_AssertQueriesContext, self).__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super(_AssertQueriesContext, self).__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
parsed_queries = parse_queries(self.captured_queries)
if self.debug:
import pprint
pprint.pprint("====================== Raw Queries ======================")
pprint.pprint(self.captured_queries)
pprint.pprint("====================== Table writes ======================")
pprint.pprint(parsed_queries)
for table, num in parsed_queries.items():
expected = self.queries.get(table, 0)
if expected == 0:
import pprint
pprint.pprint(
"WARNING: no query against %s emitted, add debug=True to see all the queries"
% (table)
)
else:
self.test_case.assertTrue(
num == expected,
"%d write queries expected on `%s`, got %d, add debug=True to see all the queries"
% (expected, table, num),
)
for table, num in self.queries.items():
executed = parsed_queries.get(table, None)
self.test_case.assertFalse(
executed is None,
"no query against %s emitted, add debug=True to see all the queries" % (table),
)
@override_settings(ROOT_URLCONF="sentry.web.urls")
class TestCase(BaseTestCase, TestCase):
pass
class TransactionTestCase(BaseTestCase, TransactionTestCase):
pass
class APITestCase(BaseTestCase, BaseAPITestCase):
endpoint = None
method = "get"
def get_response(self, *args, **params):
if self.endpoint is None:
raise Exception("Implement self.endpoint to use this method.")
url = reverse(self.endpoint, args=args)
# In some cases we want to pass querystring params to put/post, handle
# this here.
if "qs_params" in params:
query_string = urlencode(params.pop("qs_params"), doseq=True)
url = u"{}?{}".format(url, query_string)
method = params.pop("method", self.method)
return getattr(self.client, method)(url, format="json", data=params)
def get_valid_response(self, *args, **params):
status_code = params.pop("status_code", 200)
resp = self.get_response(*args, **params)
assert resp.status_code == status_code, (resp.status_code, resp.content)
return resp
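# Usage sketch: a subclass sets endpoint = "sentry-api-0-organization-details"
# (see TwoFactorAPITestCase below) and then calls
# self.get_valid_response(org.slug, status_code=200).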
class TwoFactorAPITestCase(APITestCase):
@fixture
def path_2fa(self):
return reverse("sentry-account-settings-security")
def enable_org_2fa(self, organization):
organization.flags.require_2fa = True
organization.save()
def api_enable_org_2fa(self, organization, user):
self.login_as(user)
url = reverse(
"sentry-api-0-organization-details", kwargs={"organization_slug": organization.slug}
)
return self.client.put(url, data={"require2FA": True})
def api_disable_org_2fa(self, organization, user):
url = reverse(
"sentry-api-0-organization-details", kwargs={"organization_slug": organization.slug}
)
return self.client.put(url, data={"require2FA": False})
def assert_can_enable_org_2fa(self, organization, user, status_code=200):
self.__helper_enable_organization_2fa(organization, user, status_code)
def assert_cannot_enable_org_2fa(self, organization, user, status_code, err_msg=None):
self.__helper_enable_organization_2fa(organization, user, status_code, err_msg)
def __helper_enable_organization_2fa(self, organization, user, status_code, err_msg=None):
response = self.api_enable_org_2fa(organization, user)
assert response.status_code == status_code
if err_msg:
assert err_msg.encode("utf-8") in response.content
organization = Organization.objects.get(id=organization.id)
if status_code >= 200 and status_code < 300:
assert organization.flags.require_2fa
else:
assert not organization.flags.require_2fa
def add_2fa_users_to_org(self, organization, num_of_users=10, num_with_2fa=5):
non_compliant_members = []
for num in range(0, num_of_users):
user = self.create_user("foo_%s@example.com" % num)
self.create_member(organization=organization, user=user)
if num_with_2fa:
TotpInterface().enroll(user)
num_with_2fa -= 1
else:
non_compliant_members.append(user.email)
return non_compliant_members
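# Hedged usage sketch: enroll the acting user in TOTP, then verify they can
# enable org-wide 2FA (the 200 status code is the helper's default):
#
#     def test_owner_can_enable_2fa(self):
#         TotpInterface().enroll(self.user)
#         self.assert_can_enable_org_2fa(self.organization, self.user)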
class AuthProviderTestCase(TestCase):
provider = DummyProvider
provider_name = "dummy"
def setUp(self):
super(AuthProviderTestCase, self).setUp()
# TestCase automatically sets up dummy provider
if self.provider_name != "dummy" or self.provider != DummyProvider:
auth.register(self.provider_name, self.provider)
self.addCleanup(auth.unregister, self.provider_name, self.provider)
class RuleTestCase(TestCase):
rule_cls = None
def get_event(self):
return self.event
def get_rule(self, **kwargs):
kwargs.setdefault("project", self.project)
kwargs.setdefault("data", {})
return self.rule_cls(**kwargs)
def get_state(self, **kwargs):
kwargs.setdefault("is_new", True)
kwargs.setdefault("is_regression", True)
kwargs.setdefault("is_new_group_environment", True)
kwargs.setdefault("has_reappeared", True)
return EventState(**kwargs)
def assertPasses(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is True
def assertDoesNotPass(self, rule, event=None, **kwargs):
if event is None:
event = self.event
state = self.get_state(**kwargs)
assert rule.passes(event, state) is False
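# Hedged usage sketch for RuleTestCase (FirstSeenEventCondition is an
# illustrative rule class; substitute the rule under test):
#
#     class FirstSeenEventConditionTest(RuleTestCase):
#         rule_cls = FirstSeenEventCondition
#
#         def test_applies_correctly(self):
#             rule = self.get_rule()
#             self.assertPasses(rule, self.event, is_new=True)
#             self.assertDoesNotPass(rule, self.event, is_new=False)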
class PermissionTestCase(TestCase):
def setUp(self):
super(PermissionTestCase, self).setUp()
self.owner = self.create_user(is_superuser=False)
self.organization = self.create_organization(
owner=self.owner, flags=0 # disable default allow_joinleave access
)
self.team = self.create_team(organization=self.organization)
def assert_can_access(self, user, path, method="GET", **kwargs):
self.login_as(user, superuser=user.is_superuser)
resp = getattr(self.client, method.lower())(path, **kwargs)
assert resp.status_code >= 200 and resp.status_code < 300
def assert_cannot_access(self, user, path, method="GET", **kwargs):
self.login_as(user, superuser=user.is_superuser)
resp = getattr(self.client, method.lower())(path, **kwargs)
assert resp.status_code >= 300
def assert_member_can_access(self, path, **kwargs):
return self.assert_role_can_access(path, "member", **kwargs)
def assert_teamless_member_can_access(self, path, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member", teams=[])
self.assert_can_access(user, path, **kwargs)
def assert_member_cannot_access(self, path, **kwargs):
return self.assert_role_cannot_access(path, "member", **kwargs)
def assert_manager_cannot_access(self, path, **kwargs):
return self.assert_role_cannot_access(path, "manager", **kwargs)
def assert_teamless_member_cannot_access(self, path, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member", teams=[])
self.assert_cannot_access(user, path, **kwargs)
def assert_team_admin_can_access(self, path, **kwargs):
return self.assert_role_can_access(path, "admin", **kwargs)
def assert_teamless_admin_can_access(self, path, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="admin", teams=[])
self.assert_can_access(user, path, **kwargs)
def assert_team_admin_cannot_access(self, path, **kwargs):
return self.assert_role_cannot_access(path, "admin", **kwargs)
def assert_teamless_admin_cannot_access(self, path, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="admin", teams=[])
self.assert_cannot_access(user, path, **kwargs)
def assert_team_owner_can_access(self, path, **kwargs):
return self.assert_role_can_access(path, "owner", **kwargs)
def assert_owner_can_access(self, path, **kwargs):
return self.assert_role_can_access(path, "owner", **kwargs)
def assert_owner_cannot_access(self, path, **kwargs):
return self.assert_role_cannot_access(path, "owner", **kwargs)
def assert_non_member_cannot_access(self, path, **kwargs):
user = self.create_user(is_superuser=False)
self.assert_cannot_access(user, path, **kwargs)
def assert_role_can_access(self, path, role, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role=role, teams=[self.team])
self.assert_can_access(user, path, **kwargs)
def assert_role_cannot_access(self, path, role, **kwargs):
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role=role, teams=[self.team])
self.assert_cannot_access(user, path, **kwargs)
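# Hedged usage sketch for PermissionTestCase (the path is hypothetical and
# the expected outcomes depend on the view under test):
#
#     class OrganizationSettingsPermissionTest(PermissionTestCase):
#         def setUp(self):
#             super(OrganizationSettingsPermissionTest, self).setUp()
#             self.path = u"/organizations/{}/settings/".format(self.organization.slug)
#
#         def test_permissions(self):
#             self.assert_owner_can_access(self.path)
#             self.assert_member_cannot_access(self.path)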
class PluginTestCase(TestCase):
plugin = None
def setUp(self):
super(PluginTestCase, self).setUp()
# For old-style plugins, `plugin` is a class; for new-style plugins it is
# an instance. New-style plugins don't need to be registered.
if inspect.isclass(self.plugin):
plugins.register(self.plugin)
self.addCleanup(plugins.unregister, self.plugin)
def assertAppInstalled(self, name, path):
for ep in iter_entry_points("sentry.apps"):
if ep.name == name:
ep_path = ep.module_name
if ep_path == path:
return
self.fail(
"Found app in entry_points, but wrong class. Got %r, expected %r"
% (ep_path, path)
)
self.fail("Missing app from entry_points: %r" % (name,))
def assertPluginInstalled(self, name, plugin):
path = type(plugin).__module__ + ":" + type(plugin).__name__
for ep in iter_entry_points("sentry.plugins"):
if ep.name == name:
ep_path = ep.module_name + ":" + ".".join(ep.attrs)
if ep_path == path:
return
self.fail(
"Found plugin in entry_points, but wrong class. Got %r, expected %r"
% (ep_path, path)
)
self.fail("Missing plugin from entry_points: %r" % (name,))
class CliTestCase(TestCase):
runner = fixture(CliRunner)
command = None
default_args = []
def invoke(self, *args):
args += tuple(self.default_args)
return self.runner.invoke(self.command, args, obj={})
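# Hedged usage sketch for CliTestCase (`config` stands in for a real click
# command object; the assertion is illustrative):
#
#     class ConfigGetTest(CliTestCase):
#         command = config
#         default_args = ["get"]
#
#         def test_unknown_key(self):
#             rv = self.invoke("idontexist")
#             assert rv.exit_code != 0, rv.output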
@pytest.mark.usefixtures("browser")
class AcceptanceTestCase(TransactionTestCase):
def setUp(self):
patcher = patch(
"django.utils.timezone.now",
return_value=(datetime(2013, 5, 18, 15, 13, 58, 132928, tzinfo=timezone.utc)),
)
patcher.start()
self.addCleanup(patcher.stop)
super(AcceptanceTestCase, self).setUp()
def save_cookie(self, name, value, **params):
self.browser.save_cookie(name=name, value=value, **params)
def save_session(self):
self.session.save()
self.save_cookie(name=settings.SESSION_COOKIE_NAME, value=self.session.session_key)
# Forward session cookie to django client.
self.client.cookies[settings.SESSION_COOKIE_NAME] = self.session.session_key
def dismiss_assistant(self, which=None):
if which is None:
which = ("issue", "issue_stream")
if isinstance(which, six.string_types):
which = [which]
for item in which:
res = self.client.put(
"/api/0/assistant/?v2",
content_type="application/json",
data=json.dumps({"guide": item, "status": "viewed", "useful": True}),
)
assert res.status_code == 201, res.content
class IntegrationTestCase(TestCase):
provider = None
def setUp(self):
from sentry.integrations.pipeline import IntegrationPipeline
super(IntegrationTestCase, self).setUp()
self.organization = self.create_organization(name="foo", owner=self.user)
self.login_as(self.user)
self.request = self.make_request(self.user)
# XXX(dcramer): this is a bit of a hack, but it helps contain this test
self.pipeline = IntegrationPipeline(
request=self.request, organization=self.organization, provider_key=self.provider.key
)
self.init_path = reverse(
"sentry-organization-integrations-setup",
kwargs={"organization_slug": self.organization.slug, "provider_id": self.provider.key},
)
self.setup_path = reverse(
"sentry-extension-setup", kwargs={"provider_id": self.provider.key}
)
self.configure_path = u"/extensions/{}/configure/".format(self.provider.key)
self.pipeline.initialize()
self.save_session()
def assertDialogSuccess(self, resp):
assert b"window.opener.postMessage(" in resp.content
@pytest.mark.snuba
@requires_snuba
class SnubaTestCase(BaseTestCase):
"""
Mixin for enabling test case classes to talk to snuba.
Useful when you are working on acceptance tests or integration
tests that require snuba.
"""
def setUp(self):
super(SnubaTestCase, self).setUp()
self.init_snuba()
@pytest.fixture(autouse=True)
def initialize(self, reset_snuba, call_snuba):
self.call_snuba = call_snuba
def init_snuba(self):
self.snuba_eventstream = SnubaEventStream()
self.snuba_tagstore = SnubaTagStorage()
def store_event(self, *args, **kwargs):
with mock.patch("sentry.eventstream.insert", self.snuba_eventstream.insert):
stored_event = Factories.store_event(*args, **kwargs)
stored_group = stored_event.group
if stored_group is not None:
self.store_group(stored_group)
return stored_event
def wait_for_event_count(self, project_id, total, attempts=2):
"""
Wait until the event count reaches the provided value or until attempts is reached.
Useful when you're storing several events and need to ensure that snuba/clickhouse
state has settled.
"""
# Verify that events have settled in snuba's storage.
# While snuba is synchronous, clickhouse isn't entirely synchronous.
attempt = 0
snuba_filter = eventstore.Filter(project_ids=[project_id])
while attempt < attempts:
events = eventstore.get_events(snuba_filter)
if len(events) >= total:
break
attempt += 1
time.sleep(0.05)
if attempt == attempts:
assert False, "Could not ensure event was persisted within {} attempt(s)".format(
attempt
)
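# Hedged usage sketch: store an event through the snuba eventstream and
# wait for it to become queryable (`timestamp` is assumed to be a recent
# datetime; the payload fields are illustrative):
#
#     event = self.store_event(
#         data={
#             "event_id": uuid4().hex,
#             "fingerprint": ["group1"],
#             "timestamp": iso_format(timestamp),
#         },
#         project_id=self.project.id,
#     )
#     self.wait_for_event_count(self.project.id, 1)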
def store_session(self, session):
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/sessions/insert", data=json.dumps([session])
).status_code
== 200
)
def store_group(self, group):
data = [self.__wrap_group(group)]
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/groupedmessage/insert", data=json.dumps(data)
).status_code
== 200
)
def to_snuba_time_format(self, datetime_value):
date_format = "%Y-%m-%d %H:%M:%S%z"
return datetime_value.strftime(date_format)
def __wrap_group(self, group):
return {
"event": "change",
"kind": "insert",
"table": "sentry_groupedmessage",
"columnnames": [
"id",
"logger",
"level",
"message",
"status",
"times_seen",
"last_seen",
"first_seen",
"data",
"score",
"project_id",
"time_spent_total",
"time_spent_count",
"resolved_at",
"active_at",
"is_public",
"platform",
"num_comments",
"first_release_id",
"short_id",
],
"columnvalues": [
group.id,
group.logger,
group.level,
group.message,
group.status,
group.times_seen,
self.to_snuba_time_format(group.last_seen),
self.to_snuba_time_format(group.first_seen),
group.data,
group.score,
group.project.id,
group.time_spent_total,
group.time_spent_count,
group.resolved_at,
self.to_snuba_time_format(group.active_at),
group.is_public,
group.platform,
group.num_comments,
group.first_release.id if group.first_release else None,
group.short_id,
],
}
def snuba_insert(self, events):
"Write a (wrapped) event (or events) to Snuba."
if not isinstance(events, list):
events = [events]
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/events/insert", data=json.dumps(events)
).status_code
== 200
)
class BaseIncidentsTest(SnubaTestCase):
def create_event(self, timestamp, fingerprint=None, user=None):
event_id = uuid4().hex
if fingerprint is None:
fingerprint = event_id
data = {
"event_id": event_id,
"fingerprint": [fingerprint],
"timestamp": iso_format(timestamp),
"type": "error",
# This is necessary because an event of type "error" should not exist
# without an exception in the payload
"exception": [{"type": "Foo"}],
}
if user:
data["user"] = user
return self.store_event(data=data, project_id=self.project.id)
@cached_property
def now(self):
return timezone.now().replace(minute=0, second=0, microsecond=0)
@pytest.mark.snuba
@requires_snuba
class OutcomesSnubaTest(TestCase):
def setUp(self):
super(OutcomesSnubaTest, self).setUp()
assert requests.post(settings.SENTRY_SNUBA + "/tests/outcomes/drop").status_code == 200
def __format(self, org_id, project_id, outcome, timestamp, key_id):
return {
"project_id": project_id,
"timestamp": timestamp.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"org_id": org_id,
"reason": None,
"key_id": key_id,
"outcome": outcome,
}
def store_outcomes(self, org_id, project_id, outcome, timestamp, key_id, num_times):
outcomes = []
for _ in range(num_times):
outcomes.append(self.__format(org_id, project_id, outcome, timestamp, key_id))
assert (
requests.post(
settings.SENTRY_SNUBA + "/tests/outcomes/insert", data=json.dumps(outcomes)
).status_code
== 200
)
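# Hedged usage sketch (Outcome.ACCEPTED is assumed to be an integer outcome
# code from the surrounding codebase; key_id and num_times are illustrative):
#
#     self.store_outcomes(
#         self.organization.id, self.project.id,
#         Outcome.ACCEPTED, timezone.now(), key_id=1, num_times=3,
#     )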
class IntegrationRepositoryTestCase(APITestCase):
def setUp(self):
super(IntegrationRepositoryTestCase, self).setUp()
self.login_as(self.user)
def add_create_repository_responses(self, repository_config):
raise NotImplementedError
def create_repository(
self, repository_config, integration_id, organization_slug=None, add_responses=True
):
if add_responses:
self.add_create_repository_responses(repository_config)
if not integration_id:
data = {"provider": self.provider_name, "identifier": repository_config["id"]}
else:
data = {
"provider": self.provider_name,
"installation": integration_id,
"identifier": repository_config["id"],
}
response = self.client.post(
path=reverse(
"sentry-api-0-organization-repositories",
args=[organization_slug or self.organization.slug],
),
data=data,
)
return response
def assert_error_message(self, response, error_type, error_message):
assert response.data["error_type"] == error_type
assert error_message in response.data["errors"]["__all__"]
class ReleaseCommitPatchTest(APITestCase):
def setUp(self):
user = self.create_user(is_staff=False, is_superuser=False)
self.org = self.create_organization()
self.org.save()
team = self.create_team(organization=self.org)
self.project = self.create_project(name="foo", organization=self.org, teams=[team])
self.create_member(teams=[team], user=user, organization=self.org)
self.login_as(user=user)
@fixture
def url(self):
raise NotImplementedError
def assert_commit(self, commit, repo_id, key, author_id, message):
assert commit.organization_id == self.org.id
assert commit.repository_id == repo_id
assert commit.key == key
assert commit.author_id == author_id
assert commit.message == message
def assert_file_change(self, file_change, type, filename, commit_id):
assert file_change.type == type
assert file_change.filename == filename
assert file_change.commit_id == commit_id
class SetRefsTestCase(APITestCase):
def setUp(self):
super(SetRefsTestCase, self).setUp()
self.user = self.create_user(is_staff=False, is_superuser=False)
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.project = self.create_project(name="foo", organization=self.org, teams=[self.team])
self.create_member(teams=[self.team], user=self.user, organization=self.org)
self.login_as(user=self.user)
self.group = self.create_group(project=self.project)
self.repo = Repository.objects.create(organization_id=self.org.id, name="test/repo")
def assert_fetch_commits(self, mock_fetch_commit, prev_release_id, release_id, refs):
assert len(mock_fetch_commit.method_calls) == 1
kwargs = mock_fetch_commit.method_calls[0][2]["kwargs"]
assert kwargs == {
"prev_release_id": prev_release_id,
"refs": refs,
"release_id": release_id,
"user_id": self.user.id,
}
def assert_head_commit(self, head_commit, commit_key, release_id=None):
assert self.org.id == head_commit.organization_id
assert self.repo.id == head_commit.repository_id
if release_id:
assert release_id == head_commit.release_id
else:
assert self.release.id == head_commit.release_id
self.assert_commit(head_commit.commit, commit_key)
def assert_commit(self, commit, key):
assert self.org.id == commit.organization_id
assert self.repo.id == commit.repository_id
assert commit.key == key
class OrganizationDashboardWidgetTestCase(APITestCase):
def setUp(self):
super(OrganizationDashboardWidgetTestCase, self).setUp()
self.login_as(self.user)
self.dashboard = Dashboard.objects.create(
title="Dashboard 1", created_by=self.user, organization=self.organization
)
self.anon_users_query = {
"name": "Anonymous Users",
"fields": ["count()"],
"conditions": "!has:user.email",
}
self.known_users_query = {
"name": "Known Users",
"fields": ["count_unique(user.email)"],
"conditions": "has:user.email",
}
self.geo_errors_query = {
"name": "Errors by Geo",
"fields": ["count()", "geo.country_code"],
"conditions": "has:geo.country_code",
}
def assert_widget_queries(self, widget_id, data):
result_queries = DashboardWidgetQuery.objects.filter(widget_id=widget_id).order_by("order")
for ds, expected_ds in zip(result_queries, data):
assert ds.name == expected_ds["name"]
assert ds.fields == expected_ds["fields"]
assert ds.conditions == expected_ds["conditions"]
def assert_widget(self, widget, order, title, display_type, queries=None):
assert widget.order == order
assert widget.display_type == display_type
assert widget.title == title
if not queries:
return
self.assert_widget_queries(widget.id, queries)
def assert_widget_data(self, data, title, display_type, queries=None):
assert data["displayType"] == display_type
assert data["title"] == title
if not queries:
return
self.assert_widget_queries(data["id"], queries)
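# Hedged usage sketch for the widget assertions (the response shape and
# display type are illustrative; `widget_id` is hypothetical):
#
#     def test_widget_data(self):
#         data = {"id": widget_id, "title": "Anonymous Users", "displayType": "line"}
#         self.assert_widget_data(
#             data, title="Anonymous Users", display_type="line",
#             queries=[self.anon_users_query],
#         )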
|
|
"""
HTML Widget classes
"""
try:
set # Only available in Python 2.4+
except NameError:
from sets import Set as set # Python 2.3 fallback
from itertools import chain
from django.utils.datastructures import MultiValueDict
from django.utils.html import escape
from django.utils.translation import gettext
from django.utils.encoding import StrAndUnicode, smart_unicode
from util import flatatt
__all__ = (
'Widget', 'TextInput', 'PasswordInput',
'HiddenInput', 'MultipleHiddenInput',
'FileInput', 'Textarea', 'CheckboxInput',
'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect',
'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget',
)
class Widget(object):
is_hidden = False # Determines whether this corresponds to an <input type="hidden">.
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
def render(self, name, value, attrs=None):
"""
Returns this Widget rendered as HTML, as a Unicode string.
The 'value' given is not guaranteed to be valid input, so subclass
implementations should program defensively.
"""
raise NotImplementedError
def build_attrs(self, extra_attrs=None, **kwargs):
"Helper function for building an attribute dictionary."
attrs = dict(self.attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
return attrs
def value_from_datadict(self, data, name):
"""
Given a dictionary of data and this widget's name, returns the value
of this widget. Returns None if it's not provided.
"""
return data.get(name, None)
def id_for_label(self, id_):
"""
Returns the HTML ID attribute of this Widget for use by a <label>,
given the ID of the field. Returns None if no ID is available.
This hook is necessary because some widgets have multiple HTML
elements and, thus, multiple IDs. In that case, this method should
return an ID value that corresponds to the first ID in the widget's
tags.
"""
return id_
id_for_label = classmethod(id_for_label)
class Input(Widget):
"""
Base class for all <input> widgets (except type='checkbox' and
type='radio', which are special).
"""
input_type = None # Subclasses must define this.
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '': final_attrs['value'] = smart_unicode(value) # Only add the 'value' attribute if a value is non-empty.
return u'<input%s />' % flatatt(final_attrs)
class TextInput(Input):
input_type = 'text'
class PasswordInput(Input):
input_type = 'password'
def __init__(self, attrs=None, render_value=True):
self.attrs = attrs or {}
self.render_value = render_value
def render(self, name, value, attrs=None):
if not self.render_value: value=None
return super(PasswordInput, self).render(name, value, attrs)
class HiddenInput(Input):
input_type = 'hidden'
is_hidden = True
class MultipleHiddenInput(HiddenInput):
"""
A widget that handles <input type="hidden"> for fields that have a list
of values.
"""
def __init__(self, attrs=None, choices=()):
# choices can be any iterable
self.attrs = attrs or {}
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
return u'\n'.join([(u'<input%s />' % flatatt(dict(value=smart_unicode(v), **final_attrs))) for v in value])
def value_from_datadict(self, data, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name, None)
class FileInput(Input):
input_type = 'file'
class Textarea(Widget):
def __init__(self, attrs=None):
# The 'rows' and 'cols' attributes are required for HTML correctness.
self.attrs = {'cols': '40', 'rows': '10'}
if attrs:
self.attrs.update(attrs)
def render(self, name, value, attrs=None):
if value is None: value = ''
value = smart_unicode(value)
final_attrs = self.build_attrs(attrs, name=name)
return u'<textarea%s>%s</textarea>' % (flatatt(final_attrs), escape(value))
class CheckboxInput(Widget):
def __init__(self, attrs=None, check_test=bool):
# check_test is a callable that takes a value and returns True
# if the checkbox should be checked for that value.
self.attrs = attrs or {}
self.check_test = check_test
def render(self, name, value, attrs=None):
final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
try:
result = self.check_test(value)
except: # Silently catch exceptions
result = False
if result:
final_attrs['checked'] = 'checked'
if value not in ('', True, False, None):
final_attrs['value'] = smart_unicode(value) # Only add the 'value' attribute if a value is non-empty.
return u'<input%s />' % flatatt(final_attrs)
class Select(Widget):
def __init__(self, attrs=None, choices=()):
self.attrs = attrs or {}
# choices can be any iterable, but we may need to render this widget
# multiple times. Thus, collapse it into a list so it can be consumed
# more than once.
self.choices = list(choices)
def render(self, name, value, attrs=None, choices=()):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select%s>' % flatatt(final_attrs)]
str_value = smart_unicode(value) # Normalize to string.
for option_value, option_label in chain(self.choices, choices):
option_value = smart_unicode(option_value)
selected_html = (option_value == str_value) and u' selected="selected"' or ''
output.append(u'<option value="%s"%s>%s</option>' % (escape(option_value), selected_html, escape(smart_unicode(option_label))))
output.append(u'</select>')
return u'\n'.join(output)
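# A hedged rendering sketch for this old widget API (doctest-style; the
# output shown is indicative, derived from the render() logic above):
#
#     >>> w = Select(choices=[('1', 'One'), ('2', 'Two')])
#     >>> print w.render('num', '2')
#     <select name="num">
#     <option value="1">One</option>
#     <option value="2" selected="selected">Two</option>
#     </select>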
class NullBooleanSelect(Select):
"""
A Select Widget intended to be used with NullBooleanField.
"""
def __init__(self, attrs=None):
choices = ((u'1', gettext('Unknown')), (u'2', gettext('Yes')), (u'3', gettext('No')))
super(NullBooleanSelect, self).__init__(attrs, choices)
def render(self, name, value, attrs=None, choices=()):
try:
value = {True: u'2', False: u'3', u'2': u'2', u'3': u'3'}[value]
except KeyError:
value = u'1'
return super(NullBooleanSelect, self).render(name, value, attrs, choices)
def value_from_datadict(self, data, name):
value = data.get(name, None)
return {u'2': True, u'3': False, True: True, False: False}.get(value, None)
class SelectMultiple(Widget):
def __init__(self, attrs=None, choices=()):
# choices can be any iterable
self.attrs = attrs or {}
self.choices = choices
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)]
str_values = set([smart_unicode(v) for v in value]) # Normalize to strings.
for option_value, option_label in chain(self.choices, choices):
option_value = smart_unicode(option_value)
selected_html = (option_value in str_values) and ' selected="selected"' or ''
output.append(u'<option value="%s"%s>%s</option>' % (escape(option_value), selected_html, escape(smart_unicode(option_label))))
output.append(u'</select>')
return u'\n'.join(output)
def value_from_datadict(self, data, name):
if isinstance(data, MultiValueDict):
return data.getlist(name)
return data.get(name, None)
class RadioInput(StrAndUnicode):
"An object used by RadioFieldRenderer that represents a single <input type='radio'>."
def __init__(self, name, value, attrs, choice, index):
self.name, self.value = name, value
self.attrs = attrs
self.choice_value = smart_unicode(choice[0])
self.choice_label = smart_unicode(choice[1])
self.index = index
def __unicode__(self):
return u'<label>%s %s</label>' % (self.tag(), self.choice_label)
def is_checked(self):
return self.value == self.choice_value
def tag(self):
if 'id' in self.attrs:
self.attrs['id'] = '%s_%s' % (self.attrs['id'], self.index)
final_attrs = dict(self.attrs, type='radio', name=self.name, value=self.choice_value)
if self.is_checked():
final_attrs['checked'] = 'checked'
return u'<input%s />' % flatatt(final_attrs)
class RadioFieldRenderer(StrAndUnicode):
"An object used by RadioSelect to enable customization of radio widgets."
def __init__(self, name, value, attrs, choices):
self.name, self.value, self.attrs = name, value, attrs
self.choices = choices
def __iter__(self):
for i, choice in enumerate(self.choices):
yield RadioInput(self.name, self.value, self.attrs.copy(), choice, i)
def __getitem__(self, idx):
choice = self.choices[idx] # Let the IndexError propagate
return RadioInput(self.name, self.value, self.attrs.copy(), choice, idx)
def __unicode__(self):
"Outputs a <ul> for this set of radio fields."
return u'<ul>\n%s\n</ul>' % u'\n'.join([u'<li>%s</li>' % w for w in self])
class RadioSelect(Select):
def render(self, name, value, attrs=None, choices=()):
"Returns a RadioFieldRenderer instance rather than a Unicode string."
if value is None: value = ''
str_value = smart_unicode(value) # Normalize to string.
final_attrs = self.build_attrs(attrs)
return RadioFieldRenderer(name, str_value, final_attrs, list(chain(self.choices, choices)))
def id_for_label(self, id_):
# RadioSelect is represented by multiple <input type="radio"> fields,
# each of which has a distinct ID. The IDs are made distinct by a "_X"
# suffix, where X is the zero-based index of the radio field. Thus,
# the label for a RadioSelect should reference the first one ('_0').
if id_:
id_ += '_0'
return id_
id_for_label = classmethod(id_for_label)
class CheckboxSelectMultiple(SelectMultiple):
def render(self, name, value, attrs=None, choices=()):
if value is None: value = []
has_id = attrs and 'id' in attrs
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<ul>']
str_values = set([smart_unicode(v) for v in value]) # Normalize to strings.
for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
# If an ID attribute was given, add a numeric index as a suffix,
# so that the checkboxes don't all have the same ID attribute.
if has_id:
final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
cb = CheckboxInput(final_attrs, check_test=lambda value: value in str_values)
option_value = smart_unicode(option_value)
rendered_cb = cb.render(name, option_value)
output.append(u'<li><label>%s %s</label></li>' % (rendered_cb, escape(smart_unicode(option_label))))
output.append(u'</ul>')
return u'\n'.join(output)
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
id_for_label = classmethod(id_for_label)
class MultiWidget(Widget):
"""
A widget that is composed of multiple widgets.
Its render() method takes a "decompressed" list of values, not a single
value. Each value in this list is rendered in the corresponding widget --
the first value is rendered in the first widget, the second value is
rendered in the second widget, etc.
Subclasses should implement decompress(), which specifies how a single
value should be converted to a list of values. Subclasses should not
have to implement clean().
Subclasses may implement format_output(), which takes the list of rendered
widgets and returns HTML that formats them any way you'd like.
You'll probably want to use this with MultiValueField.
"""
def __init__(self, widgets, attrs=None):
self.widgets = [isinstance(w, type) and w() or w for w in widgets]
super(MultiWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
return self.format_output(output)
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
id_for_label = classmethod(id_for_label)
def value_from_datadict(self, data, name):
return [widget.value_from_datadict(data, name + '_%s' % i) for i, widget in enumerate(self.widgets)]
def format_output(self, rendered_widgets):
return u''.join(rendered_widgets)
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
class SplitDateTimeWidget(MultiWidget):
"""
A Widget that splits datetime input into two <input type="text"> boxes.
"""
def __init__(self, attrs=None):
widgets = (TextInput(attrs=attrs), TextInput(attrs=attrs))
super(SplitDateTimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return [value.date(), value.time()]
return [None, None]
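# A hedged sketch of the MultiWidget contract via SplitDateTimeWidget:
# decompress() splits one compressed value into per-widget values, which
# render() then feeds to the sub-widgets in order:
#
#     >>> import datetime
#     >>> w = SplitDateTimeWidget()
#     >>> w.decompress(datetime.datetime(2009, 1, 1, 12, 30))
#     [datetime.date(2009, 1, 1), datetime.time(12, 30)]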
|
|
# Copyright (C) 2009-2014 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
r"""usb.core - Core USB features.
This module exports:
Device - a class representing a USB device.
Configuration - a class representing a configuration descriptor.
Interface - a class representing an interface descriptor.
Endpoint - a class representing an endpoint descriptor.
find() - a function to find USB devices.
show_devices() - a function to show the devices present.
"""
__author__ = 'Wander Lairson Costa'
__all__ = [ 'Device', 'Configuration', 'Interface', 'Endpoint', 'find',
'show_devices' ]
import usb.util as util
import copy
import operator
import usb._interop as _interop
import usb._objfinalizer as _objfinalizer
import usb._lookup as _lu
import logging
import array
_logger = logging.getLogger('usb.core')
_DEFAULT_TIMEOUT = 1000
def _set_attr(input, output, fields):
for f in fields:
setattr(output, f, getattr(input, f))
def _try_get_string(dev, index, langid = None, default_str_i0 = "",
default_access_error = "Error Accessing String"):
""" try to get a string, but return a string no matter what
"""
if index == 0:
string = default_str_i0
else:
try:
if langid is None:
string = util.get_string(dev, index)
else:
string = util.get_string(dev, index, langid)
except:
string = default_access_error
return string
def _try_lookup(table, value, default = ""):
""" try to get a string from the lookup table, return "" instead of key
error
"""
try:
string = table[ value ]
except KeyError:
string = default
return string
class _DescriptorInfo(str):
""" this class is used so that when a descriptor is shown on the
terminal it is properly formatted """
def __repr__(self):
return self
class _ResourceManager(object):
def __init__(self, dev, backend):
self.backend = backend
self._active_cfg_index = None
self.dev = dev
self.handle = None
self._claimed_intf = _interop._set()
self._ep_info = {}
def managed_open(self):
if self.handle is None:
self.handle = self.backend.open_device(self.dev)
return self.handle
def managed_close(self):
if self.handle is not None:
self.backend.close_device(self.handle)
self.handle = None
def managed_set_configuration(self, device, config):
if config is None:
cfg = device[0]
elif isinstance(config, Configuration):
cfg = config
elif config == 0: # unconfigured state
class MockConfiguration(object):
def __init__(self):
self.index = None
self.bConfigurationValue = 0
cfg = MockConfiguration()
else:
cfg = util.find_descriptor(device, bConfigurationValue=config)
self.managed_open()
self.backend.set_configuration(self.handle, cfg.bConfigurationValue)
# cache the index instead of the object to avoid cyclic references
# of the device and Configuration (Device tracks the _ResourceManager,
# which tracks the Configuration, which tracks the Device)
self._active_cfg_index = cfg.index
self._ep_info.clear()
def managed_claim_interface(self, device, intf):
self.managed_open()
if isinstance(intf, Interface):
i = intf.bInterfaceNumber
else:
i = intf
if i not in self._claimed_intf:
self.backend.claim_interface(self.handle, i)
self._claimed_intf.add(i)
def managed_release_interface(self, device, intf):
if intf is None:
cfg = self.get_active_configuration(device)
i = cfg[(0,0)].bInterfaceNumber
elif isinstance(intf, Interface):
i = intf.bInterfaceNumber
else:
i = intf
if i in self._claimed_intf:
try:
self.backend.release_interface(self.handle, i)
finally:
self._claimed_intf.remove(i)
def managed_set_interface(self, device, intf, alt):
if isinstance(intf, Interface):
i = intf
else:
cfg = self.get_active_configuration(device)
if intf is None:
intf = cfg[(0,0)].bInterfaceNumber
if alt is not None:
i = util.find_descriptor(cfg, bInterfaceNumber=intf, bAlternateSetting=alt)
else:
i = util.find_descriptor(cfg, bInterfaceNumber=intf)
self.managed_claim_interface(device, i)
if alt is None:
alt = i.bAlternateSetting
self.backend.set_interface_altsetting(self.handle, i.bInterfaceNumber, alt)
def setup_request(self, device, endpoint):
# we need the endpoint address, but the "endpoint" parameter
# can be either an Endpoint object or the endpoint address itself
if isinstance(endpoint, Endpoint):
endpoint_address = endpoint.bEndpointAddress
else:
endpoint_address = endpoint
intf, ep = self.get_interface_and_endpoint(device, endpoint_address)
self.managed_claim_interface(device, intf)
return (intf, ep)
# Find the interface and endpoint objects which endpoint address belongs to
def get_interface_and_endpoint(self, device, endpoint_address):
try:
return self._ep_info[endpoint_address]
except KeyError:
for intf in self.get_active_configuration(device):
ep = util.find_descriptor(intf, bEndpointAddress=endpoint_address)
if ep is not None:
self._ep_info[endpoint_address] = (intf, ep)
return intf, ep
raise ValueError('Invalid endpoint address ' + hex(endpoint_address))
def get_active_configuration(self, device):
if self._active_cfg_index is None:
self.managed_open()
cfg = util.find_descriptor(
device,
bConfigurationValue=self.backend.get_configuration(self.handle)
)
if cfg is None:
raise USBError('Configuration not set')
self._active_cfg_index = cfg.index
return cfg
return device[self._active_cfg_index]
def release_all_interfaces(self, device):
claimed = copy.copy(self._claimed_intf)
for i in claimed:
try:
self.managed_release_interface(device, i)
except USBError:
# Ignore errors when releasing the interfaces
# When the device is disconnected, the call may fail
pass
def dispose(self, device, close_handle = True):
self.release_all_interfaces(device)
if close_handle:
self.managed_close()
self._ep_info.clear()
self._active_cfg_index = None
class USBError(IOError):
r"""Exception class for USB errors.
Backends must raise this exception when USB related errors occur. The
backend specific error code is available through the 'backend_error_code'
member variable.
"""
def __init__(self, strerror, error_code = None, errno = None):
r"""Initialize the object.
This initializes the USBError object. The strerror and errno are passed
to the parent object. The error_code parameter is attributed to the
backend_error_code member variable.
"""
IOError.__init__(self, errno, strerror)
self.backend_error_code = error_code
class Endpoint(object):
r"""Represent an endpoint object.
This class contains all fields of the Endpoint Descriptor according to the
USB Specification. You can access them as class properties. For example, to
access the field bEndpointAddress of the endpoint descriptor, you can do so:
>>> import usb.core
>>> dev = usb.core.find()
>>> for cfg in dev:
>>> for i in cfg:
>>> for e in i:
>>> print e.bEndpointAddress
"""
def __init__(self, device, endpoint, interface = 0,
alternate_setting = 0, configuration = 0):
r"""Initialize the Endpoint object.
The device parameter is the device object returned by the find()
function. endpoint is the endpoint logical index (not the endpoint
address). The configuration parameter is the logical index of the
configuration (not the bConfigurationValue field). The interface
parameter is the interface logical index (not the bInterfaceNumber
field) and alternate_setting is the alternate setting logical index
(not the bAlternateSetting value). An interface may have only one
alternate setting. In this case, the alternate_setting parameter
should be zero. By "logical index" we mean the relative order of the
configurations returned by the peripheral as a result of GET_DESCRIPTOR
request.
"""
self.device = device
self.index = endpoint
backend = device._ctx.backend
desc = backend.get_endpoint_descriptor(
device._ctx.dev,
endpoint,
interface,
alternate_setting,
configuration
)
_set_attr(
desc,
self,
(
'bLength',
'bDescriptorType',
'bEndpointAddress',
'bmAttributes',
'wMaxPacketSize',
'bInterval',
'bRefresh',
'bSynchAddress',
'extra_descriptors'
)
)
def __repr__(self):
return "<" + self._str() + ">"
def __str__(self):
headstr = " " + self._str() + " "
if util.endpoint_direction(self.bEndpointAddress) == util.ENDPOINT_IN:
direction = "IN"
else:
direction = "OUT"
return "%s%s\n" % (headstr, "=" * (60 - len(headstr))) + \
" %-17s:%#7x (7 bytes)\n" % (
"bLength", self.bLength) + \
" %-17s:%#7x %s\n" % (
"bDescriptorType", self.bDescriptorType,
_try_lookup(_lu.descriptors, self.bDescriptorType)) + \
" %-17s:%#7x %s\n" % (
"bEndpointAddress", self.bEndpointAddress, direction) + \
" %-17s:%#7x %s\n" % (
"bmAttributes", self.bmAttributes,
_lu.ep_attributes[(self.bmAttributes & 0x3)]) + \
" %-17s:%#7x (%d bytes)\n" % (
"wMaxPacketSize", self.wMaxPacketSize, self.wMaxPacketSize) + \
" %-17s:%#7x" % ("bInterval", self.bInterval)
def write(self, data, timeout = None):
r"""Write data to the endpoint.
The parameter data contains the data to be sent to the endpoint and
timeout is the time limit of the operation. The transfer type and
endpoint address are automatically inferred.
The method returns the number of bytes written.
For details, see the Device.write() method.
"""
return self.device.write(self, data, timeout)
def read(self, size_or_buffer, timeout = None):
r"""Read data from the endpoint.
The parameter size_or_buffer is either the number of bytes to
read or an array object into which the data will be read, and timeout is the
time limit of the operation. The transfer type and endpoint address
are automatically inferred.
The method returns either an array object or the number of bytes
actually read.
For details, see the Device.read() method.
"""
return self.device.read(self, size_or_buffer, timeout)
def clear_halt(self):
r"""Clear the halt/status condition of the endpoint."""
self.device.clear_halt(self.bEndpointAddress)
def _str(self):
if util.endpoint_direction(self.bEndpointAddress) == util.ENDPOINT_IN:
direction = "IN"
else:
direction = "OUT"
return (
"ENDPOINT 0x%X: %s %s" % (self.bEndpointAddress,
_lu.ep_attributes[(self.bmAttributes & 0x3)],
direction))
class Interface(object):
r"""Represent an interface object.
This class contains all fields of the Interface Descriptor
according to the USB Specification. You may access them as class
properties. For example, to access the field bInterfaceNumber
of the interface descriptor, you can do so:
>>> import usb.core
>>> dev = usb.core.find()
>>> for cfg in dev:
>>> for i in cfg:
>>> print i.bInterfaceNumber
"""
def __init__(self, device, interface = 0,
alternate_setting = 0, configuration = 0):
r"""Initialize the interface object.
The device parameter is the device object returned by the find()
function. The configuration parameter is the logical index of the
configuration (not the bConfigurationValue field). The interface
parameter is the interface logical index (not the bInterfaceNumber
field) and alternate_setting is the alternate setting logical index
(not the bAlternateSetting value). An interface may have only one
alternate setting. In this case, the alternate_setting parameter
should be zero. By "logical index" we mean the relative order of
the configurations returned by the peripheral as a result of
GET_DESCRIPTOR request.
"""
self.device = device
self.alternate_index = alternate_setting
self.index = interface
self.configuration = configuration
backend = device._ctx.backend
desc = backend.get_interface_descriptor(
self.device._ctx.dev,
interface,
alternate_setting,
configuration
)
_set_attr(
desc,
self,
(
'bLength',
'bDescriptorType',
'bInterfaceNumber',
'bAlternateSetting',
'bNumEndpoints',
'bInterfaceClass',
'bInterfaceSubClass',
'bInterfaceProtocol',
'iInterface',
'extra_descriptors'
)
)
def __repr__(self):
return "<" + self._str() + ">"
def __str__(self):
"""Show all information for the interface."""
string = self._get_full_descriptor_str()
for endpoint in self:
string += "\n" + str(endpoint)
return string
def endpoints(self):
r"""Return a tuple of the interface endpoints."""
return tuple(self)
def set_altsetting(self):
r"""Set the interface alternate setting."""
self.device.set_interface_altsetting(
self.bInterfaceNumber,
self.bAlternateSetting)
def __iter__(self):
r"""Iterate over all endpoints of the interface."""
for i in range(self.bNumEndpoints):
yield Endpoint(
self.device,
i,
self.index,
self.alternate_index,
self.configuration)
def __getitem__(self, index):
r"""Return the Endpoint object in the given position."""
return Endpoint(
self.device,
index,
self.index,
self.alternate_index,
self.configuration)
def _str(self):
if self.bAlternateSetting:
alt_setting = ", %d" % self.bAlternateSetting
else:
alt_setting = ""
return "INTERFACE %d%s: %s" % (self.bInterfaceNumber, alt_setting,
_try_lookup(_lu.interface_classes, self.bInterfaceClass,
default = "Unknown Class"))
def _get_full_descriptor_str(self):
headstr = " " + self._str() + " "
return "%s%s\n" % (headstr, "=" * (60 - len(headstr))) + \
" %-19s:%#7x (9 bytes)\n" % (
"bLength", self.bLength) + \
" %-19s:%#7x %s\n" % (
"bDescriptorType", self.bDescriptorType,
_try_lookup(_lu.descriptors, self.bDescriptorType)) + \
" %-19s:%#7x\n" % (
"bInterfaceNumber", self.bInterfaceNumber) + \
" %-19s:%#7x\n" % (
"bAlternateSetting", self.bAlternateSetting) + \
" %-19s:%#7x\n" % (
"bNumEndpoints", self.bNumEndpoints) + \
" %-19s:%#7x %s\n" % (
"bInterfaceClass", self.bInterfaceClass,
_try_lookup(_lu.interface_classes, self.bInterfaceClass)) + \
" %-19s:%#7x\n" % (
"bInterfaceSubClass", self.bInterfaceSubClass) + \
" %-19s:%#7x\n" % (
"bInterfaceProtocol", self.bInterfaceProtocol) + \
" %-19s:%#7x %s" % (
"iInterface", self.iInterface,
_try_get_string(self.device, self.iInterface))
class Configuration(object):
r"""Represent a configuration object.
This class contains all fields of the Configuration Descriptor according to
the USB Specification. You may access them as class properties. For
example, to access the field bConfigurationValue of the configuration
descriptor, you can do so:
>>> import usb.core
>>> dev = usb.core.find()
>>> for cfg in dev:
>>> print cfg.bConfigurationValue
"""
def __init__(self, device, configuration = 0):
r"""Initialize the configuration object.
The device parameter is the device object returned by the find()
function. The configuration parameter is the logical index of the
configuration (not the bConfigurationValue field). By "logical index"
we mean the relative order of the configurations returned by the
peripheral as a result of GET_DESCRIPTOR request.
"""
self.device = device
self.index = configuration
backend = device._ctx.backend
desc = backend.get_configuration_descriptor(
self.device._ctx.dev,
configuration
)
_set_attr(
desc,
self,
(
'bLength',
'bDescriptorType',
'wTotalLength',
'bNumInterfaces',
'bConfigurationValue',
'iConfiguration',
'bmAttributes',
'bMaxPower',
'extra_descriptors'
)
)
def __repr__(self):
return "<" + self._str() + ">"
def __str__(self):
string = self._get_full_descriptor_str()
for interface in self:
string += "\n%s" % str(interface)
return string
def interfaces(self):
r"""Return a tuple of the configuration interfaces."""
return tuple(self)
def set(self):
r"""Set this configuration as the active one."""
self.device.set_configuration(self.bConfigurationValue)
def __iter__(self):
r"""Iterate over all interfaces of the configuration."""
for i in range(self.bNumInterfaces):
alt = 0
try:
while True:
yield Interface(self.device, i, alt, self.index)
alt += 1
except (USBError, IndexError):
pass
def __getitem__(self, index):
r"""Return the Interface object in the given position.
index is a tuple of two values with interface index and
alternate setting index, respectively. Example:
>>> interface = config[(0, 0)]
"""
return Interface(self.device, index[0], index[1], self.index)
def _str(self):
return "CONFIGURATION %d: %d mA" % (
self.bConfigurationValue,
_lu.MAX_POWER_UNITS_USB2p0 * self.bMaxPower)
def _get_full_descriptor_str(self):
headstr = " " + self._str() + " "
if self.bmAttributes & (1<<6):
powered = "Self"
else:
powered = "Bus"
if self.bmAttributes & (1<<5):
remote_wakeup = ", Remote Wakeup"
else:
remote_wakeup = ""
return "%s%s\n" % (headstr, "=" * (60 - len(headstr))) + \
" %-21s:%#7x (9 bytes)\n" % (
"bLength", self.bLength) + \
" %-21s:%#7x %s\n" % (
"bDescriptorType", self.bDescriptorType,
_try_lookup(_lu.descriptors, self.bDescriptorType)) + \
" %-21s:%#7x (%d bytes)\n" % (
"wTotalLength", self.wTotalLength, self.wTotalLength) + \
" %-21s:%#7x\n" % (
"bNumInterfaces", self.bNumInterfaces) + \
" %-21s:%#7x\n" % (
"bConfigurationValue", self.bConfigurationValue) + \
" %-21s:%#7x %s\n" % (
"iConfiguration", self.iConfiguration,
_try_get_string(self.device, self.iConfiguration)) + \
" %-21s:%#7x %s Powered%s\n" % (
"bmAttributes", self.bmAttributes, powered, remote_wakeup
# bit 7 is high, bit 4..0 are 0
) + \
" %-21s:%#7x (%d mA)" % (
"bMaxPower", self.bMaxPower,
_lu.MAX_POWER_UNITS_USB2p0 * self.bMaxPower)
# FIXME : add a check for superspeed vs usb 2.0
class Device(_objfinalizer.AutoFinalizedObject):
r"""Device object.
This class contains all fields of the Device Descriptor according to the
USB Specification. You may access them as class properties. For example,
to access the field bDescriptorType of the device descriptor, you can
do so:
>>> import usb.core
>>> dev = usb.core.find()
>>> dev.bDescriptorType
Additionally, the class provides methods to communicate with the hardware.
Typically, an application will first call the set_configuration() method to
put the device in a known configured state, optionally call the
set_interface_altsetting() to select the alternate setting (if there is
more than one) of the interface used, and call the write() and read()
methods to send and receive data, respectively.
When working with new hardware, a first attempt could look like this:
>>> import usb.core
>>> dev = usb.core.find(idVendor=myVendorId, idProduct=myProductId)
>>> dev.set_configuration()
>>> dev.write(1, 'test')
This sample finds the device of interest (myVendorId and myProductId should
be replaced by the corresponding values of your device), then configures
the device (by default, the configuration value is 1, which is a typical
value for most devices) and then writes some data to the endpoint 0x01.
Timeout values for the write, read and ctrl_transfer methods are specified
in milliseconds. If the parameter is omitted, the Device.default_timeout value
will be used instead. This property can be set by the user at any time.
"""
def __repr__(self):
return "<" + self._str() + ">"
def __str__(self):
string = self._get_full_descriptor_str()
try:
for configuration in self:
string += "\n%s" % str(configuration)
except USBError:
try:
configuration = self.get_active_configuration()
string += "\n%s" % str(configuration)
except USBError:
string += " USBError Accessing Configurations"
return string
def configurations(self):
r"""Return a tuple of the device configurations."""
return tuple(self)
def __init__(self, dev, backend):
r"""Initialize the Device object.
Library users should normally get a Device instance through
the find function. The dev parameter is the identification
of a device to the backend and its meaning is opaque outside
of it. The backend parameter is an instance of a backend
object.
"""
self._ctx = _ResourceManager(dev, backend)
self.__default_timeout = _DEFAULT_TIMEOUT
self._serial_number, self._product, self._manufacturer = None, None, None
desc = backend.get_device_descriptor(dev)
_set_attr(
desc,
self,
(
'bLength',
'bDescriptorType',
'bcdUSB',
'bDeviceClass',
'bDeviceSubClass',
'bDeviceProtocol',
'bMaxPacketSize0',
'idVendor',
'idProduct',
'bcdDevice',
'iManufacturer',
'iProduct',
'iSerialNumber',
'bNumConfigurations',
'address',
'bus',
'port_number',
'port_numbers',
'speed',
)
)
if desc.bus is not None:
self.bus = int(desc.bus)
else:
self.bus = None
if desc.address is not None:
self.address = int(desc.address)
else:
self.address = None
if desc.port_number is not None:
self.port_number = int(desc.port_number)
else:
self.port_number = None
if desc.speed is not None:
self.speed = int(desc.speed)
else:
self.speed = None
@property
def serial_number(self):
""" Return the USB device's serial number string descriptor.
This property will cause some USB traffic the first time it is accessed
and cache the resulting value for future use.
"""
if self._serial_number is None:
self._serial_number = util.get_string(self, self.iSerialNumber)
return self._serial_number
@property
def product(self):
""" Return the USB device's product string descriptor.
This property will cause some USB traffic the first time it is accessed
and cache the resulting value for future use.
"""
if self._product is None:
self._product = util.get_string(self, self.iProduct)
return self._product
@property
def manufacturer(self):
""" Return the USB device's manufacturer string descriptor.
This property will cause some USB traffic the first time it is accessed
and cache the resulting value for future use.
"""
if self._manufacturer is None:
self._manufacturer = util.get_string(self, self.iManufacturer)
return self._manufacturer
@property
def backend(self):
"""Return the backend being used by the device."""
return self._ctx.backend
def set_configuration(self, configuration = None):
r"""Set the active configuration.
The configuration parameter is the bConfigurationValue field of the
configuration you want to set as active. If you call this method
without parameter, it will use the first configuration found. As a
device hardly ever has more than one configuration, calling the method
without arguments is enough to get the device ready.
"""
self._ctx.managed_set_configuration(self, configuration)
def get_active_configuration(self):
r"""Return a Configuration object representing the current
configuration set.
"""
return self._ctx.get_active_configuration(self)
def set_interface_altsetting(self, interface = None, alternate_setting = None):
r"""Set the alternate setting for an interface.
When you want to use an interface and it has more than one alternate
setting, you should call this method to select the appropriate
alternate setting. If you call the method without one or both
parameters, the first one found in the Device will be selected, in
the same way as the set_configuration method.
Commonly, an interface has only one alternate setting and this call is
not necessary. For most devices, whether they have more than one
alternate setting or not, it is not harmful to call this method with
no arguments: devices will usually silently ignore the request when
there is only one alternate setting, though the USB Spec allows
devices with no additional alternate settings to return an error to
the Host in response to a SET_INTERFACE request.
If you are in doubt, you may want to call it with no arguments wrapped
by a try/except clause:
>>> try:
>>> dev.set_interface_altsetting()
>>> except usb.core.USBError:
>>> pass
"""
self._ctx.managed_set_interface(self, interface, alternate_setting)
def clear_halt(self, ep):
r""" Clear the halt/stall condition for the endpoint ep."""
if isinstance(ep, Endpoint):
ep = ep.bEndpointAddress
self._ctx.managed_open()
self._ctx.backend.clear_halt(self._ctx.handle, ep)
def reset(self):
r"""Reset the device."""
self._ctx.managed_open()
self._ctx.dispose(self, False)
self._ctx.backend.reset_device(self._ctx.handle)
self._ctx.dispose(self, True)
def write(self, endpoint, data, timeout = None):
r"""Write data to the endpoint.
This method is used to send data to the device. The endpoint parameter
corresponds to the bEndpointAddress member whose endpoint you want to
communicate with.
The data parameter should be a sequence like type convertible to
the array type (see array module).
The timeout is specified in milliseconds.
The method returns the number of bytes written.
"""
backend = self._ctx.backend
fn_map = {
util.ENDPOINT_TYPE_BULK:backend.bulk_write,
util.ENDPOINT_TYPE_INTR:backend.intr_write,
util.ENDPOINT_TYPE_ISO:backend.iso_write
}
intf, ep = self._ctx.setup_request(self, endpoint)
fn = fn_map[util.endpoint_type(ep.bmAttributes)]
return fn(
self._ctx.handle,
ep.bEndpointAddress,
intf.bInterfaceNumber,
_interop.as_array(data),
self.__get_timeout(timeout)
)
def read(self, endpoint, size_or_buffer, timeout = None):
r"""Read data from the endpoint.
This method is used to receive data from the device. The endpoint
parameter corresponds to the bEndpointAddress member whose endpoint
you want to communicate with. The size_or_buffer parameter either
tells how many bytes you want to read or supplies the buffer to
receive the data (it *must* be an object of the type array).
The timeout is specified in milliseconds.
If the size_or_buffer parameter is the number of bytes to read, the
method returns an array object with the data read. If the
size_or_buffer parameter is an array object, it returns the number
of bytes actually read.
"""
backend = self._ctx.backend
fn_map = {
util.ENDPOINT_TYPE_BULK:backend.bulk_read,
util.ENDPOINT_TYPE_INTR:backend.intr_read,
util.ENDPOINT_TYPE_ISO:backend.iso_read
}
intf, ep = self._ctx.setup_request(self, endpoint)
fn = fn_map[util.endpoint_type(ep.bmAttributes)]
if isinstance(size_or_buffer, array.array):
buff = size_or_buffer
else: # otherwise we assume it is an integer (the number of bytes to read)
buff = util.create_buffer(size_or_buffer)
ret = fn(
self._ctx.handle,
ep.bEndpointAddress,
intf.bInterfaceNumber,
buff,
self.__get_timeout(timeout))
if isinstance(size_or_buffer, array.array):
return ret
elif ret != len(buff) * buff.itemsize:
return buff[:ret]
else:
return buff
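# A minimal usage sketch showing both call forms; 0x81 is a hypothetical IN
# endpoint address:
#
#   data = dev.read(0x81, 64, timeout=1000)   # returns an array.array
#   buf = array.array('B', [0] * 64)
#   n = dev.read(0x81, buf)                   # fills buf, returns byte count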
def ctrl_transfer(self, bmRequestType, bRequest, wValue=0, wIndex=0,
data_or_wLength = None, timeout = None):
r"""Do a control transfer on the endpoint 0.
This method is used to issue a control transfer over the endpoint 0
(endpoint 0 is required to always be a control endpoint).
The parameters bmRequestType, bRequest, wValue and wIndex are the same
as in the USB Standard Control Request format.
Control requests may or may not have a data payload to write/read.
When they do, the direction bit of the bmRequestType field is used to
infer the desired request direction. For host to device requests (OUT),
the data_or_wLength parameter is the data payload to send, and it must
be a sequence type convertible to an array object. In this case, the
return value is the number of bytes written in the data payload. For
device to host requests (IN), data_or_wLength is either the wLength
parameter of the control request, specifying the number of bytes to
read in the data payload (in which case the return value is an array
object with the data read), or an array object into which the data
will be read (in which case the return value is the number of bytes
read).
"""
try:
buff = util.create_buffer(data_or_wLength)
except TypeError:
buff = _interop.as_array(data_or_wLength)
self._ctx.managed_open()
# Thanks to Johannes Stezenbach for pointing out that we need to
# claim the recipient interface
recipient = bmRequestType & 3
if recipient == util.CTRL_RECIPIENT_INTERFACE:
interface_number = wIndex & 0xff
self._ctx.managed_claim_interface(self, interface_number)
ret = self._ctx.backend.ctrl_transfer(
self._ctx.handle,
bmRequestType,
bRequest,
wValue,
wIndex,
buff,
self.__get_timeout(timeout))
if isinstance(data_or_wLength, array.array) \
or util.ctrl_direction(bmRequestType) == util.CTRL_OUT:
return ret
elif ret != len(buff) * buff.itemsize:
return buff[:ret]
else:
return buff
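# A minimal usage sketch: fetch the 18-byte device descriptor with a
# standard IN control request (bmRequestType 0x80; bRequest 0x06 is
# GET_DESCRIPTOR; the high byte of wValue selects descriptor type 1,
# i.e. DEVICE):
#
#   desc = dev.ctrl_transfer(0x80, 0x06, wValue=0x0100, wIndex=0,
#                            data_or_wLength=18)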
def is_kernel_driver_active(self, interface):
r"""Determine if there is kernel driver associated with the interface.
If a kernel driver is active, the object will be unable to perform
I/O.
The interface parameter is the device interface number to check.
"""
self._ctx.managed_open()
return self._ctx.backend.is_kernel_driver_active(
self._ctx.handle,
interface)
def detach_kernel_driver(self, interface):
r"""Detach a kernel driver.
If successful, you will then be able to perform I/O.
The interface parameter is the device interface number to detach the
driver from.
"""
self._ctx.managed_open()
self._ctx.backend.detach_kernel_driver(
self._ctx.handle,
interface)
def attach_kernel_driver(self, interface):
r"""Re-attach an interface's kernel driver, which was previously
detached using detach_kernel_driver().
The interface parameter is the device interface number to attach the
driver to.
"""
self._ctx.managed_open()
self._ctx.backend.attach_kernel_driver(
self._ctx.handle,
interface)
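# A minimal usage sketch: temporarily take interface 0 away from the kernel
# driver for raw I/O, then hand it back (only meaningful on backends that
# support kernel driver management):
#
#   if dev.is_kernel_driver_active(0):
#       dev.detach_kernel_driver(0)
#   try:
#       pass  # ... perform I/O ...
#   finally:
#       dev.attach_kernel_driver(0)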
def __iter__(self):
r"""Iterate over all configurations of the device."""
for i in range(self.bNumConfigurations):
yield Configuration(self, i)
def __getitem__(self, index):
r"""Return the Configuration object in the given position."""
return Configuration(self, index)
def _finalize_object(self):
self._ctx.dispose(self)
def __get_timeout(self, timeout):
if timeout is not None:
return timeout
return self.__default_timeout
def __set_def_tmo(self, tmo):
if tmo < 0:
raise ValueError('Timeout cannot be a negative value')
self.__default_timeout = tmo
def __get_def_tmo(self):
return self.__default_timeout
def _str(self):
return "DEVICE ID %04x:%04x on Bus %03d Address %03d" % (
self.idVendor, self.idProduct, self.bus, self.address)
def _get_full_descriptor_str(self):
headstr = self._str() + " "
if self.bcdUSB & 0xf:
low_bcd_usb = str(self.bcdUSB & 0xf)
else:
low_bcd_usb = ""
if self.bcdDevice & 0xf:
low_bcd_device = str(self.bcdDevice & 0xf)
else:
low_bcd_device = ""
return "%s%s\n" % (headstr, "=" * (60 - len(headstr))) + \
" %-23s:%#7x (18 bytes)\n" % (
"bLength", self.bLength) + \
" %-23s:%#7x %s\n" % (
"bDescriptorType", self.bDescriptorType,
_try_lookup(_lu.descriptors, self.bDescriptorType)) + \
" %-23s:%#7x USB %d.%d%s\n" % (
"bcdUSB", self.bcdUSB, (self.bcdUSB & 0xff00)>>8,
(self.bcdUSB & 0xf0) >> 4, low_bcd_usb) + \
" %-23s:%#7x %s\n" % (
"bDeviceClass", self.bDeviceClass,
_try_lookup(_lu.device_classes, self.bDeviceClass)) + \
" %-23s:%#7x\n" % (
"bDeviceSubClass", self.bDeviceSubClass) + \
" %-23s:%#7x\n" % (
"bDeviceProtocol", self.bDeviceProtocol) + \
" %-23s:%#7x (%d bytes)\n" % (
"bMaxPacketSize0", self.bMaxPacketSize0, self.bMaxPacketSize0) + \
" %-23s: %#06x\n" % (
"idVendor", self.idVendor) + \
" %-23s: %#06x\n" % (
"idProduct", self.idProduct) + \
" %-23s:%#7x Device %d.%d%s\n" % (
"bcdDevice", self.bcdDevice, (self.bcdDevice & 0xff00)>>8,
(self.bcdDevice & 0xf0) >> 4, low_bcd_device) + \
" %-23s:%#7x %s\n" % (
"iManufacturer", self.iManufacturer,
_try_get_string(self, self.iManufacturer)) + \
" %-23s:%#7x %s\n" % (
"iProduct", self.iProduct,
_try_get_string(self, self.iProduct)) + \
" %-23s:%#7x %s\n" % (
"iSerialNumber", self.iSerialNumber,
_try_get_string(self, self.iSerialNumber)) + \
" %-23s:%#7x" % (
"bNumConfigurations", self.bNumConfigurations)
default_timeout = property(
__get_def_tmo,
__set_def_tmo,
doc = 'Default timeout for transfer I/O functions'
)
def find(find_all=False, backend = None, custom_match = None, **args):
r"""Find an USB device and return it.
find() is the function used to discover USB devices. You can pass as
arguments any combination of the USB Device Descriptor fields to match a
device. For example:
find(idVendor=0x3f4, idProduct=0x2009)
will return the Device object for the device with the idVendor field equal
to 0x3f4 and idProduct equal to 0x2009.
If there is more than one device which matches the criteria, the first one
found will be returned. If a matching device cannot be found the function
returns None. If you want to get all devices, you can set the parameter
find_all to True, then find will return an iterator with all matched devices.
If no matching device is found, it will return an empty iterator. Example:
for printer in find(find_all=True, bDeviceClass=7):
print (printer)
This call will get all the USB printers connected to the system (actually
maybe not, because some devices put their class information in the
Interface Descriptor).
You can also use a customized match criterion:
dev = find(custom_match = lambda d: d.idVendor == 0x3f4 and d.idProduct == 0x2009)
A more accurate printer finder using a customized match would be like
so:
def is_printer(dev):
import usb.util
if dev.bDeviceClass == 7:
return True
for cfg in dev:
if usb.util.find_descriptor(cfg, bInterfaceClass=7) is not None:
return True
for printer in find(find_all=True, custom_match = is_printer):
print (printer)
Now even if the device class code is in the interface descriptor the
printer will be found.
You can combine a customized match with device descriptor fields. In this
case, the fields must match and the custom_match must return True. In
our previous example, if we wanted to get all printers belonging to the
manufacturer 0x3f4, the code would be like so:
printers = list(find(find_all=True, idVendor=0x3f4, custom_match=is_printer))
If you want to use find as a 'list all devices' function, just call
it with find_all = True:
devices = list(find(find_all=True))
Finally, you can pass a custom backend to the find function:
find(backend = MyBackend())
PyUSB has built-in backends for libusb 0.1, libusb 1.0 and OpenUSB. If you
do not supply a backend explicitly, the find() function will select one of
the predefined backends according to system availability.
Backends are explained in the usb.backend module.
"""
def device_iter(k, v):
for dev in backend.enumerate_devices():
d = Device(dev, backend)
if _interop._reduce(
lambda a, b: a and b,
map(
operator.eq,
v,
map(lambda i: getattr(d, i), k)
),
True
) and (custom_match is None or custom_match(d)):
yield d
if backend is None:
import usb.backend.libusb1 as libusb1
import usb.backend.libusb0 as libusb0
import usb.backend.openusb as openusb
for m in (libusb1, openusb, libusb0):
backend = m.get_backend()
if backend is not None:
_logger.info('find(): using backend "%s"', m.__name__)
break
else:
raise ValueError('No backend available')
k, v = args.keys(), args.values()
if find_all:
return device_iter(k, v)
else:
try:
return _interop._next(device_iter(k, v))
except StopIteration:
return None
def show_devices(verbose=False, **kwargs):
"""Show information about connected devices.
If verbose is True, the full descriptor is shown for each device;
otherwise a one-line summary with the device class is shown.
**kwargs are passed directly to the find() function.
"""
kwargs["find_all"] = True
devices = find(**kwargs)
strings = ""
for device in devices:
if not verbose:
strings += "%s, %s\n" % (device._str(), _try_lookup(
_lu.device_classes, device.bDeviceClass))
else:
strings += "%s\n\n" % str(device)
return _DescriptorInfo(strings)
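# A minimal usage sketch:
#
#   import usb.core
#   print(usb.core.show_devices())              # one-line summaries
#   print(usb.core.show_devices(verbose=True))  # full descriptors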
|
|
# toontown.golf.PhysicsWorldBase
import sys
if sys.platform != 'android':
from panda3d.ode import OdeBody, OdeBoxGeom, OdeHingeJoint, OdeJoint, OdeJointGroup, OdeMass, OdePlaneGeom, OdeSimpleSpace, OdeSliderJoint, OdeSphereGeom, OdeTriMeshData, OdeTriMeshGeom, OdeWorld
from panda3d.core import BitMask32, NodePath, Point3, Quat, Vec3, Vec4
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from math import *
import math
from direct.fsm.FSM import FSM
from toontown.minigame import ArrowKeys
from direct.showbase import PythonUtil
from direct.task import Task
from direct.distributed.ClockDelta import *
import BuildGeometry
from toontown.golf import GolfGlobals
import random, time
def scalp(vec, scal):
# Return a copy of vec scaled by scal.
return Vec3(vec[0] * scal, vec[1] * scal, vec[2] * scal)
def length(vec):
return sqrt(vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2)
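# PhysicsWorldBase owns the ODE simulation for the golf minigame: an
# OdeWorld/OdeSimpleSpace/OdeJointGroup triple, bookkeeping lists for
# bodies, geoms and masses, and (when canRender is set) a parallel list
# pairing each OdeBody with the Panda3D NodePath that placeBodies() keeps
# in sync with it.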
class PhysicsWorldBase:
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPhysicsWorld')
def __init__(self, canRender = 0):
self.canRender = canRender
if sys.platform != 'android':
self.world = OdeWorld()
self.space = OdeSimpleSpace()
self.contactgroup = OdeJointGroup()
else:
self.world = None
self.space = None
self.contactgroup = None
self.bodyList = []
self.geomList = []
self.massList = []
self.rayList = []
self.showContacts = 0
self.jointMarkers = []
self.jointMarkerCount = 64
self.meshDataList = []
self.geomDataList = []
self.commonObjectInfoDict = {}
self.maxColCount = 0
if self.canRender:
self.odePandaRelationList = self.bodyList
self.root = render.attachNewNode('physics root node')
else:
self.root = NodePath('physics root node')
self.placerNode = self.root.attachNewNode('Placer')
self.subPlacerNode = self.placerNode.attachNewNode('Placer Sub Node')
self.commonObjectDict = {}
self.commonId = 0
self.worldAttach = self.root.attachNewNode('physics geom attach point')
self.timingCycleLength = 10.0
self.timingCycleOffset = 0.0
self.timingSimTime = 0.0
self.FPS = 90.0
self.refFPS = 60.0
self.DTAStep = 1.0 / self.FPS
self.refCon = 1.2
self.collisionEventName = 'ode-collision-%d' % id(self)
self.space.setCollisionEvent(self.collisionEventName)
self.accept(self.collisionEventName, self.__collisionHandler)
return
def delete(self):
self.notify.debug('Max Collision Count was %s' % self.maxColCount)
self.stopSim()
self.commonObjectDict = None
if self.canRender:
for pair in self.odePandaRelationList:
pair[0].removeNode()
pair[1].destroy()
self.odePandaRelationList = None
else:
for body in self.bodyList:
body[1].destroy()
self.bodyList = None
for mass in self.massList:
mass = None
for geom in self.geomList:
geom.destroy()
geom = None
for ray in self.rayList:
ray.destroy()
ray = None
self.placerNode.removeNode()
self.root.removeNode()
for marker in self.jointMarkers:
marker.removeNode()
self.jointMarkers = None
for data in self.geomDataList:
data.destroy()
for data in self.meshDataList:
data.destroy()
self.floor.destroy()
self.floor = None
if self.contactgroup:
self.contactgroup.empty()
if self.world:
self.world.destroy()
if self.space:
self.space.destroy()
self.world = None
self.space = None
self.ignore(self.collisionEventName)
return
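# setupSimulation configures gravity, the global solver constants (ERP and
# CFM) and a 5x5 surface table: each setSurfaceEntry call supplies
# (mu, bounce, bounce_vel, soft_erp, soft_cfm, slip, dampen) for one pair
# of surface-type ids, with the dampen term normalized by self.refCon.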
def setupSimulation(self):
self.world.setAutoDisableFlag(0)
self.world.setAutoDisableLinearThreshold(0.15)
self.world.setAutoDisableAngularThreshold(0.15)
self.world.setAutoDisableSteps(2)
self.world.setGravity(0, 0, -25)
self.world.setErp(0.8)
self.world.setCfm(1e-05)
self.world.initSurfaceTable(5)
self.world.setSurfaceEntry(0, 0, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 1, 1500, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(2, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 2, 150, 0.05, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(0, 3, 150, 0.0, 0.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 3, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 1.0 / self.refCon)
self.world.setSurfaceEntry(2, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(3, 3, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(4, 4, 150, 0.0, 9.1, 0.9, 1e-05, 0.0, 0.4 / self.refCon)
self.world.setSurfaceEntry(1, 4, 150, 0.0, 99.1, 0.9, 1e-05, 0.0, 0.001 / self.refCon)
self.world.setSurfaceEntry(pos1=0, pos2=1, mu=80, bounce=0.15, bounce_vel=0.1, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.35 / self.refCon)
self.world.setSurfaceEntry(pos1=2, pos2=1, mu=1500, bounce=0.9, bounce_vel=0.01, soft_erp=0.9, soft_cfm=1e-05, slip=0.0, dampen=0.001 / self.refCon)
self.floor = OdePlaneGeom(self.space, Vec4(0.0, 0.0, 1.0, -20.0))
self.floor.setCollideBits(BitMask32(0))
self.floor.setCategoryBits(BitMask32(3840))
self.space.setAutoCollideWorld(self.world)
self.space.setAutoCollideJointGroup(self.contactgroup)
self.world.setQuickStepNumIterations(8)
self.DTA = 0.0
self.frameCounter = 0
if self.canRender:
for count in xrange(self.jointMarkerCount):
testMarker = render.attachNewNode('Joint Marker')
ballmodel = loader.loadModel('phase_3/models/misc/sphere')
ballmodel.reparentTo(testMarker)
ballmodel.setScale(0.1)
testMarker.setPos(0.0, 0.0, -100.0)
self.jointMarkers.append(testMarker)
def setTimingCycleLength(self, time):
self.timingCycleLength = time
def getTimingCycleLength(self):
return self.timingCycleLength
def getCycleTime(self, doprint = 0):
cycleTime = (globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength
if doprint:
print 'Get Cycle Time %s' % cycleTime
return cycleTime
def setTimeIntoCycle(self, time, doprint = 0):
trueCycleTime = globalClock.getRealTime() % self.timingCycleLength
self.timingCycleOffset = time - trueCycleTime
if doprint:
self.notify.debug('Set Cycle Time %s' % self.timingCycleOffset)
self.notify.debug('SET cycle time %s' % ((globalClock.getRealTime() + self.timingCycleOffset) % self.timingCycleLength))
def getSimCycleTime(self):
return self.timingSimTime % self.timingCycleLength
def startSim(self):
taskMgr.add(self.__simulationTask, 'simulation task')
def stopSim(self):
taskMgr.remove('simulation task')
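# The simulation task below is a fixed-timestep accumulator: real frame
# time piles up in self.DTA and is consumed in DTAStep-sized slices, so
# the physics always advances at self.FPS steps per second regardless of
# the render frame rate.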
def __simulationTask(self, task):
self.DTA += globalClock.getDt()
self.frameCounter += 1
if self.frameCounter >= 10:
self.frameCounter = 0
startTime = globalClock.getRealTime()
colCount = 0
while self.DTA >= self.DTAStep:
self.DTA -= self.DTAStep
self.preStep()
self.simulate()
self.postStep()
if self.canRender:
self.placeBodies()
if self.frameCounter == 0:
endTime = globalClock.getRealTime() - startTime
return task.cont
def __collisionHandler(self, entry):
self.colEntries.append(entry)
def simulate(self):
self.colEntries = []
self.space.autoCollide()
eventMgr.doEvents()
self.colCount = len(self.colEntries)
if self.maxColCount < self.colCount:
self.maxColCount = self.colCount
self.notify.debug('New Max Collision Count %s' % self.maxColCount)
self.world.quickStep(self.DTAStep)
for bodyPair in self.bodyList:
self.world.applyDampening(self.DTAStep, bodyPair[1])
self.contactgroup.empty()
self.commonObjectControl()
self.timingSimTime = self.timingSimTime + self.DTAStep
def placeBodies(self):
for pair in self.odePandaRelationList:
pandaNodePathGeom = pair[0]
odeBody = pair[1]
if pandaNodePathGeom:
pandaNodePathGeom.setPos(odeBody.getPosition())
rotation = odeBody.getRotation() * (180.0 / math.pi)
pandaNodePathGeom.setQuat(Quat(odeBody.getQuaternion()[0], odeBody.getQuaternion()[1], odeBody.getQuaternion()[2], odeBody.getQuaternion()[3]))
def preStep(self):
pass
def postStep(self):
if self.showContacts and self.canRender:
for count in xrange(self.jointMarkerCount):
pandaNodePathGeom = self.jointMarkers[count]
if count < self.colCount:
pandaNodePathGeom.setPos(self.space.getContactData(count * 3 + 0), self.space.getContactData(count * 3 + 1), self.space.getContactData(count * 3 + 2))
else:
pandaNodePathGeom.setPos(0.0, 0.0, -100.0)
def commonObjectControl(self):
time = self.getCycleTime()
for key in self.commonObjectDict:
if key not in self.commonObjectInfoDict:
self.commonObjectInfoDict[key] = None
entry = self.commonObjectDict[key]
if entry[1] in (2, 4):
type = entry[1]
body = entry[2]
motor = entry[3]
timeData = entry[4]
forceData = entry[5]
eventData = entry[6]
model = entry[7]
force = 0.0
for index in xrange(len(timeData)):
if (index == len(timeData) - 1 and timeData[index] < time) or (timeData[index] < time and timeData[index + 1] > time):
force = forceData[index]
event = eventData[index]
if event != self.commonObjectInfoDict[key]:
self.commonObjectEvent(key, model, type, force, event)
self.commonObjectInfoDict[key] = event
motor.setParamVel(force)
return
def commonObjectEvent(self, key, model, type, force, event):
self.notify.debug('commonObjectForceEvent %s %s %s %s %s' % (key,
model,
type,
force,
event))
def getCommonObjectData(self):
objectStream = [(0,
0,
self.getCycleTime(),
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0)]
for key in self.commonObjectDict:
objectPair = self.commonObjectDict[key]
object = objectPair[2]
pos3 = object.getPosition()
quat4 = object.getQuaternion()
anV3 = object.getAngularVel()
lnV3 = object.getLinearVel()
data = (objectPair[0],
objectPair[1],
pos3[0],
pos3[1],
pos3[2],
quat4[0],
quat4[1],
quat4[2],
quat4[3],
anV3[0],
anV3[1],
anV3[2],
lnV3[0],
lnV3[1],
lnV3[2])
objectStream.append(data)
if len(objectStream) <= 1:
data = (0, 99, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
objectStream.append(data)
return objectStream
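# getCommonObjectData above flattens each common object into a 15-tuple:
# (id, type, position x/y/z, quaternion (4 floats), angular velocity
# x/y/z, linear velocity x/y/z), preceded by a timing record and padded
# with a (0, 99, ...) sentinel when no objects exist; useCommonObjectData
# below replays such a stream onto the local bodies.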
def useCommonObjectData(self, objectData, enable = 1):
if not objectData:
return
if objectData[1][1] == 99:
return
time = objectData[0]
self.setTimeIntoCycle(time[2])
for dataIndex in xrange(1, len(objectData)):
data = objectData[dataIndex]
commonObject = self.commonObjectDict[data[0]]
commonObject[2].setPosition(data[2], data[3], data[4])
commonObject[2].setQuaternion(Quat(data[5], data[6], data[7], data[8]))
commonObject[2].setAngularVel(data[9], data[10], data[11])
commonObject[2].setLinearVel(data[12], data[13], data[14])
if enable:
commonObject[2].enable()
else:
commonObject[2].disable()
def createCommonObject(self, type, commonId, pos, hpr, sizeX = 0, sizeY = 0, moveDistance = 0):
if commonId is None:
commonId = self.commonId
self.commonId += 1
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
rHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
if type == 0:
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0)
box.setPosition(vPos)
self.placerNode.setHpr(vHpr)
box.setQuaternion(self.placerNode.getQuat())
self.commonObjectDict[commonId] = (commonId, type, box)
elif type == 1:
model, cross = self.createCross(self.world, self.space, 1.0, 3.0, 12.0, 2.0, 2)
motor = OdeHingeJoint(self.world)
cross.setPosition(vPos)
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
motor.setParamVel(1.5)
motor.setParamFMax(500000000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attachBody(cross, 0)
motor.setAnchor(vPos)
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 2:
ourAxis = render.getRelativeVector(self.placerNode, Vec3(0, 0, 1))
model, box = self.createBox(self.world, self.space, 10.0, 5.0, 5.0, 5.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attachBody(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(3.0)
motor.setParamFMax(5000000.0)
motor.setParamHiStop(10.0)
motor.setParamLoStop(-10.0)
timeData = (0.0, 5.0)
forceData = (3.0, -3.0)
eventData = (1, 2)
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model)
elif type == 3:
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]))
vHpr = Vec3(float(hpr[0]), float(hpr[1]), float(hpr[2]))
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(0, 0, 0)
myModel = loader.loadModel('phase_6/models/golf/golf_windmill_b')
myModel.reparentTo(self.root)
myModel.setPos(vPos)
myModel.setHpr(vHpr)
millFan = myModel.find('**/windmillFan0')
millBase = myModel.find('**/arm')
rod = myModel.find('**/rod')
rod.wrtReparentTo(millBase)
self.windmillFanNodePath = millFan
self.windmillBaseNodePath = millBase
millData = OdeTriMeshData(millBase)
millGeom = OdeTriMeshGeom(self.space, millData)
self.meshDataList.append(millData)
millGeom.setPosition(self.subPlacerNode.getPos(self.root))
millGeom.setQuaternion(self.subPlacerNode.getQuat())
millGeom.setCollideBits(BitMask32(251658240))
millGeom.setCategoryBits(BitMask32(8388608))
self.space.setCollideId(millGeom, 8)
vPos = Point3(float(pos[0]), float(pos[1]), float(pos[2]) + 5)
vHpr = Vec3(float(hpr[0]), float(hpr[1] + 90), float(hpr[2]) - 90)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
model, cross = self.createPinWheel(self.world, self.space, 10.0, 1.6, 4.0, 0.6, 5, 3.7, 1.2, 1, millFan, (0, 0, 90), (-4.6, -0.5, -0.25), 20)
self.placerNode.setHpr(vHpr)
self.placerNode.setPos(vPos)
self.subPlacerNode.setPos(-1, 0, 0.0)
motor = OdeHingeJoint(self.world)
cross.setPosition(self.subPlacerNode.getPos(self.root))
cross.setQuaternion(self.placerNode.getQuat())
ourAxis = self.root.getRelativeVector(self.subPlacerNode, Vec3(0, 0, 1))
motor.setParamVel(1.0)
motor.setParamFMax(50000.0)
boxsize = Vec3(1.0, 1.0, 1.0)
motor.attachBody(cross, 0)
motor.setAnchor(self.subPlacerNode.getPos(self.root))
motor.setAxis(ourAxis)
self.cross = cross
cross.enable()
self.commonObjectDict[commonId] = (commonId, type, cross)
elif type == 4:
ourAxis = self.root.getRelativeVector(self.placerNode, Vec3(0, 1, 0))
model, box = self.createBox(self.world, self.space, 50.0, sizeX, sizeY, 1.0, 2)
box.setPosition(vPos)
box.setQuaternion(self.placerNode.getQuat())
motor = OdeSliderJoint(self.world)
motor.attachBody(box, 0)
motor.setAxis(ourAxis)
motor.setParamVel(moveDistance / 4.0)
motor.setParamFMax(25000.0)
motor.setParamHiStop(moveDistance)
motor.setParamLoStop(0)
timeData = (0.0, 1.0, 5.0, 6.0)
forceData = (-moveDistance / 4.0,
moveDistance / 4.0,
moveDistance / 4.0,
-moveDistance / 4.0)
eventData = (-1, 1, -2, 2)
radius = moveDistance + sizeY * 0.5
self.commonObjectDict[commonId] = (commonId,
type,
box,
motor,
timeData,
forceData,
eventData,
model,
radius)
return [type,
commonId,
(pos[0], pos[1], pos[2]),
(hpr[0], hpr[1], hpr[2]),
sizeX,
sizeY,
moveDistance]
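# The create* helpers below share one pattern: build an OdeBody plus one or
# more geoms, register the mass and geom objects in massList/geomList so
# delete() can destroy them later, set collide/category bitmasks (the
# colOnlyBall variants collide only with the ball's category), and, when
# rendering, append a (NodePath, body) pair to odePandaRelationList.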
def createSphere(self, world, space, density, radius, ballIndex = None):
self.notify.debug('create sphere index %s' % ballIndex)
body = OdeBody(world)
M = OdeMass()
M.setSphere(density, radius)
body.setMass(M)
body.setPosition(0, 0, -100)
geom = OdeSphereGeom(space, radius)
self.space.setSurfaceType(geom, 1)
self.notify.debug('collide ID is %s' % self.space.setCollideId(geom, 42))
self.massList.append(M)
self.geomList.append(geom)
if ballIndex == 1:
self.notify.debug('1')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 2:
self.notify.debug('2')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 3:
self.notify.debug('3')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
elif ballIndex == 4:
self.notify.debug('4')
geom.setCollideBits(BitMask32(16777215))
geom.setCategoryBits(BitMask32(4278190080L))
else:
geom.setCollideBits(BitMask32(4294967295L))
geom.setCategoryBits(BitMask32(4294967295L))
geom.setBody(body)
if self.notify.getDebug():
self.notify.debug('golf ball geom id')
geom.write()
self.notify.debug(' -')
self.notify.debug('Collide Bits %s' % geom.getCollideBits())
if self.canRender:
testball = render.attachNewNode('Ball Holder')
ballmodel = loader.loadModel('phase_6/models/golf/golf_ball')
ballmodel.reparentTo(testball)
ballmodel.setColor(*GolfGlobals.PlayerColors[ballIndex - 1])
testball.setPos(0, 0, -100)
self.odePandaRelationList.append((testball, body))
else:
testball = None
self.bodyList.append((None, body))
return (testball, body, geom)
def createBox(self, world, space, density, lx, ly, lz, colOnlyBall = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setSphere(density, 0.3 * (lx + ly + lz))
body.setMass(M)
boxsize = Vec3(lx, ly, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 7)
self.massList.append(M)
self.geomList.append(geom)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if self.canRender:
color = random.choice([Vec4(1.0, 0.0, 0.5, 1.0), Vec4(0.5, 0.5, 1.0, 1.0), Vec4(0.5, 1.0, 0.5, 1.0)])
boxsize = Vec3(lx, ly, lz)
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, color, 1)
boxNodePathGeom.setPos(0, 0, -100)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross(self, world, space, density, lx, ly, lz, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly, lz)
boxsize2 = Vec3(ly, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 26)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
if self.canRender:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(self.worldAttach, lx, ly, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(0, 0, -100)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(boxNodePathGeom, ly, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(0, 0, 0)
if attachedGeo:
attachedGeo.reparentTo(boxNodePathGeom)
attachedGeo.setHpr(0, 0, 90)
attachedGeo.setPos(-4.8, 0, -2.0)
self.odePandaRelationList.append((boxNodePathGeom, body))
else:
boxNodePathGeom = None
self.bodyList.append((None, body))
return (boxNodePathGeom, body)
def createCross2(self, world, space, density, lx, ly, lz, latSlide, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(-latSlide, ly * 0.25, 0)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
geom2 = OdeBoxGeom(space, boxsize2)
geom2.setBody(body)
geom2.setOffsetPosition(ly * 0.25, latSlide, 0)
self.space.setSurfaceType(geom2, 0)
self.space.setCollideId(geom2, 13)
geom3 = OdeBoxGeom(space, boxsize)
geom3.setBody(body)
geom3.setOffsetPosition(latSlide, -ly * 0.25, 0)
self.space.setSurfaceType(geom3, 0)
self.space.setCollideId(geom3, 13)
geom4 = OdeBoxGeom(space, boxsize2)
geom4.setBody(body)
geom4.setOffsetPosition(-ly * 0.25, -latSlide, 0)
self.space.setSurfaceType(geom4, 0)
self.space.setCollideId(geom4, 13)
self.massList.append(M)
self.geomList.append(geom)
self.geomList.append(geom2)
self.geomList.append(geom3)
self.geomList.append(geom4)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(251658240))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(251658240))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(251658240))
geom4.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
geom2.setCollideBits(BitMask32(0))
geom2.setCategoryBits(BitMask32(0))
geom3.setCollideBits(BitMask32(0))
geom3.setCategoryBits(BitMask32(0))
geom4.setCollideBits(BitMask32(0))
geom4.setCategoryBits(BitMask32(0))
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
if attachedGeo:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(-latSlide, ly * 0.25, 0)
boxNodePathGeom2, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom2.setPos(ly * 0.25, latSlide, 0)
boxNodePathGeom3, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom3.setPos(latSlide, -ly * 0.25, 0)
boxNodePathGeom4, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, ly * 0.5, lx, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom4.setPos(-ly * 0.25, -latSlide, 0)
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
def createPinWheel(self, world, space, density, lx, ly, lz, numBoxes, disV, disH, colOnlyBall = 0, attachedGeo = None, aHPR = None, aPos = None, offRot = 0):
body = OdeBody(self.world)
M = OdeMass()
M.setBox(density, lx, ly, lz)
body.setMass(M)
body.setFiniteRotationMode(1)
boxsize = Vec3(lx, ly * 0.5, lz)
boxsize2 = Vec3(ly * 0.5, lx, lz)
self.massList.append(M)
self.placerNode.setPos(0, 0, 0)
self.placerNode.setHpr(0, 0, 0)
self.subPlacerNode.setHpr(0, 0, 0)
self.subPlacerNode.setPos(disH, disV, 0)
if self.canRender:
someNodePathGeom = render.attachNewNode('pinwheel')
else:
someNodePathGeom = self.root.attachNewNode('pinwheel')
for num in xrange(numBoxes):
spin = 360.0 * float(num) / float(numBoxes) + float(offRot)
self.placerNode.setH(spin)
geom = OdeBoxGeom(space, boxsize)
geom.setBody(body)
geom.setOffsetPosition(self.subPlacerNode.getPos(self.root))
geom.setOffsetQuaternion(self.subPlacerNode.getQuat(self.root))
self.geomList.append(geom)
self.space.setSurfaceType(geom, 0)
self.space.setCollideId(geom, 13)
if colOnlyBall == 1:
geom.setCollideBits(BitMask32(251658240))
geom.setCategoryBits(BitMask32(0))
elif colOnlyBall == 2:
geom.setCollideBits(BitMask32(0))
geom.setCategoryBits(BitMask32(0))
if not attachedGeo:
boxNodePathGeom, t1, t2 = BuildGeometry.addBoxGeom(someNodePathGeom, lx, ly * 0.5, lz, Vec4(1.0, 1.0, 1.0, 1.0), 1)
boxNodePathGeom.setPos(self.subPlacerNode.getPos(self.root))
boxNodePathGeom.setHpr(self.subPlacerNode.getHpr(self.root))
if attachedGeo and self.canRender:
attachedGeo.reparentTo(someNodePathGeom)
attachedGeo.setHpr(aHPR[0], aHPR[1], aHPR[2])
attachedGeo.setPos(aPos[0], aPos[1], aPos[2])
if self.canRender:
self.odePandaRelationList.append((someNodePathGeom, body))
else:
someNodePathGeom = None
self.bodyList.append((None, body))
return (someNodePathGeom, body)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Building Blocks of the TensorFlow Debugger CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
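# RichTextLines, the main fixture below, models screen output as a list of
# text lines plus two dicts keyed (mostly) by line index: font_attr_segs
# maps a line index to a list of (start_col, end_col, attribute) tuples,
# and annotations maps line indices or arbitrary string keys to metadata.
# The tests exercise construction, merging, slicing, wrapping and regex
# highlighting of these objects.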
class CommandLineExitTest(test_util.TensorFlowTestCase):
def testConstructionWithoutToken(self):
exit_exc = debugger_cli_common.CommandLineExit()
self.assertTrue(isinstance(exit_exc, Exception))
def testConstructionWithToken(self):
exit_exc = debugger_cli_common.CommandLineExit(exit_token={"foo": "bar"})
self.assertTrue(isinstance(exit_exc, Exception))
self.assertEqual({"foo": "bar"}, exit_exc.exit_token)
class RichTextLinesTest(test_util.TensorFlowTestCase):
def testRichTextLinesConstructorComplete(self):
# Test RichTextLines constructor.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual(2, len(screen_output.annotations))
self.assertEqual(2, screen_output.num_lines())
def testRichTextLinesConstructorWithInvalidType(self):
with self.assertRaisesRegexp(ValueError, "Unexpected type in lines"):
debugger_cli_common.RichTextLines(123)
def testRichTextLinesConstructorWithString(self):
# Test constructing a RichTextLines object with a string, instead of a list
# of strings.
screen_output = debugger_cli_common.RichTextLines(
"Roses are red",
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
self.assertEqual(1, len(screen_output.lines))
self.assertEqual(1, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.annotations))
def testRichTextLinesConstructorIncomplete(self):
# Test RichTextLines constructor, with incomplete keyword arguments.
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
self.assertEqual(2, len(screen_output.lines))
self.assertEqual(2, len(screen_output.font_attr_segs))
self.assertEqual(1, len(screen_output.font_attr_segs[0]))
self.assertEqual(1, len(screen_output.font_attr_segs[1]))
self.assertEqual({}, screen_output.annotations)
def testModifyRichTextLinesObject(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
self.assertEqual(2, len(screen_output.lines))
screen_output.lines.append("Sugar is sweet")
self.assertEqual(3, len(screen_output.lines))
def testMergeRichTextLines(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines(
["Lilies are white", "Sunflowers are yellow"],
font_attr_segs={0: [(0, 6, "white")],
1: [(0, 7, "yellow")]},
annotations={
"metadata": "foo",
0: "full spectrum",
1: "medium wavelength"
})
screen_output_1.extend(screen_output_2)
self.assertEqual(4, screen_output_1.num_lines())
self.assertEqual([
"Roses are red", "Violets are blue", "Lilies are white",
"Sunflowers are yellow"
], screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
2: [(0, 6, "white")],
3: [(0, 7, "yellow")]
}, screen_output_1.font_attr_segs)
self.assertEqual({
"metadata": "foo",
0: "longer wavelength",
1: "shorter wavelength",
2: "full spectrum",
3: "medium wavelength"
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptyOther(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_2 = debugger_cli_common.RichTextLines([])
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testMergeRichTextLinesEmptySelf(self):
screen_output_1 = debugger_cli_common.RichTextLines([])
screen_output_2 = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={0: "longer wavelength",
1: "shorter wavelength"})
screen_output_1.extend(screen_output_2)
self.assertEqual(2, screen_output_1.num_lines())
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
self.assertEqual({
0: "longer wavelength",
1: "shorter wavelength",
}, screen_output_1.annotations)
def testAppendALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.append("Violets are blue", [(0, 7, "blue")])
self.assertEqual(["Roses are red", "Violets are blue"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 5, "red")],
1: [(0, 7, "blue")],
}, screen_output_1.font_attr_segs)
def testPrependALineWithAttributeSegmentsWorks(self):
screen_output_1 = debugger_cli_common.RichTextLines(
["Roses are red"],
font_attr_segs={0: [(0, 5, "red")]},
annotations={0: "longer wavelength"})
screen_output_1.prepend("Violets are blue", font_attr_segs=[(0, 7, "blue")])
self.assertEqual(["Violets are blue", "Roses are red"],
screen_output_1.lines)
self.assertEqual({
0: [(0, 7, "blue")],
1: [(0, 5, "red")],
}, screen_output_1.font_attr_segs)
def testWriteToFileSucceeds(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = tempfile.mktemp()
screen_output.write_to_file(file_path)
with gfile.Open(file_path, "r") as f:
self.assertEqual(b"Roses are red\nViolets are blue\n", f.read())
# Clean up.
gfile.Remove(file_path)
def testAttemptToWriteToADirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
with self.assertRaises(Exception):
screen_output.write_to_file("/")
def testAttemptToWriteToFileInNonexistentDirectoryFails(self):
screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]})
file_path = os.path.join(tempfile.mkdtemp(), "foo", "bar.txt")
with self.assertRaises(Exception):
screen_output.write_to_file(file_path)
class CommandHandlerRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._intentional_error_msg = "Intentionally raised exception"
def _noop_handler(self, argv, screen_info=None):
# A handler that does nothing other than returning "Done."
return debugger_cli_common.RichTextLines(["Done."])
def _handler_raising_exception(self, argv, screen_info=None):
# A handler that intentionally raises an exception.
raise RuntimeError(self._intentional_error_msg)
def _handler_returning_wrong_type(self, argv, screen_info=None):
# A handler that returns a wrong type, instead of the correct type
# (RichTextLines).
return "Hello"
def _echo_screen_cols(self, argv, screen_info=None):
# A handler that uses screen_info.
return debugger_cli_common.RichTextLines(
["cols = %d" % screen_info["cols"]])
def _exiting_handler(self, argv, screen_info=None):
"""A handler that exits with an exit token."""
if argv:
exit_token = argv[0]
else:
exit_token = None
raise debugger_cli_common.CommandLineExit(exit_token=exit_token)
def testRegisterEmptyCommandPrefix(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempting to register an empty string as a command prefix should
# trigger an exception.
with self.assertRaisesRegexp(ValueError, "Empty command prefix"):
registry.register_command_handler("", self._noop_handler, "")
def testRegisterAndInvokeHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
self.assertTrue(registry.is_registered("noop"))
self.assertFalse(registry.is_registered("beep"))
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
# Attempting to invoke an unregistered command prefix should trigger an
# exception.
with self.assertRaisesRegexp(ValueError, "No handler is registered"):
registry.dispatch_command("beep", [])
# Empty command prefix should trigger an exception.
with self.assertRaisesRegexp(ValueError, "Prefix is empty"):
registry.dispatch_command("", [])
def testExitingHandler(self):
"""Test that exit exception is correctly raised."""
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("exit", self._exiting_handler, "")
self.assertTrue(registry.is_registered("exit"))
exit_token = None
try:
registry.dispatch_command("exit", ["foo"])
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
self.assertEqual("foo", exit_token)
def testInvokeHandlerWithScreenInfo(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Register and invoke a command handler that uses screen_info.
registry.register_command_handler("cols", self._echo_screen_cols, "")
cmd_output = registry.dispatch_command(
"cols", [], screen_info={"cols": 100})
self.assertEqual(["cols = 100"], cmd_output.lines)
def testRegisterAndInvokeHandlerWithAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n", "NOOP"])
# is_registered() should work for full prefix and aliases.
self.assertTrue(registry.is_registered("noop"))
self.assertTrue(registry.is_registered("n"))
self.assertTrue(registry.is_registered("NOOP"))
cmd_output = registry.dispatch_command("n", [])
self.assertEqual(["Done."], cmd_output.lines)
cmd_output = registry.dispatch_command("NOOP", [])
self.assertEqual(["Done."], cmd_output.lines)
def testHandlerWithWrongReturnType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("wrong_return",
self._handler_returning_wrong_type, "")
# If the command handler fails to return a RichTextLines instance, an error
# should be triggered.
with self.assertRaisesRegexp(
ValueError,
"Return value from command handler.*is not None or a RichTextLines "
"instance"):
registry.dispatch_command("wrong_return", [])
def testRegisterDuplicateHandlers(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("noop", self._noop_handler, "")
# Registering the same command prefix more than once should trigger an
# exception.
with self.assertRaisesRegexp(
ValueError, "A handler is already registered for command prefix"):
registry.register_command_handler("noop", self._noop_handler, "")
cmd_output = registry.dispatch_command("noop", [])
self.assertEqual(["Done."], cmd_output.lines)
def testRegisterDuplicateAliases(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop", self._noop_handler, "", prefix_aliases=["n"])
# Clash with existing alias.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["n"])
# The name clash should have prevented the handler from being registered.
self.assertFalse(registry.is_registered("cols"))
# Aliases can also clash with command prefixes.
with self.assertRaisesRegexp(ValueError,
"clashes with existing prefixes or aliases"):
registry.register_command_handler(
"cols", self._echo_screen_cols, "", prefix_aliases=["noop"])
self.assertFalse(registry.is_registered("cols"))
def testDispatchHandlerRaisingException(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler("raise_exception",
self._handler_raising_exception, "")
# The registry should catch and wrap exceptions that occur during command
# handling.
cmd_output = registry.dispatch_command("raise_exception", [])
# The error output contains a stack trace.
# So the line count should be >= 2.
self.assertGreater(len(cmd_output.lines), 2)
self.assertTrue(cmd_output.lines[0].startswith(
"Error occurred during handling of command"))
self.assertTrue(cmd_output.lines[1].endswith(self._intentional_error_msg))
def testRegisterNonCallableHandler(self):
registry = debugger_cli_common.CommandHandlerRegistry()
# Attempting to register a non-callable handler should fail.
with self.assertRaisesRegexp(ValueError, "handler is not callable"):
registry.register_command_handler("non_callable", 1, "")
def testRegisterHandlerWithInvalidHelpInfoType(self):
registry = debugger_cli_common.CommandHandlerRegistry()
with self.assertRaisesRegexp(ValueError, "help_info is not a str"):
registry.register_command_handler("noop", self._noop_handler, ["foo"])
def testGetHelpFull(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
help_lines = registry.get_help().lines
# The help info should list commands in alphabetically sorted order,
# regardless of the order in which the commands are registered.
self.assertEqual("cols", help_lines[0])
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
self.assertFalse(help_lines[4])
self.assertFalse(help_lines[5])
# The default help command should appear in the help output.
self.assertEqual("help", help_lines[6])
self.assertEqual("noop", help_lines[12])
self.assertTrue(help_lines[13].endswith("Aliases: n, NOOP"))
self.assertFalse(help_lines[14])
self.assertTrue(help_lines[15].endswith("No operation."))
self.assertTrue(help_lines[16].endswith("I.e., do nothing."))
def testGetHelpSingleCommand(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help info for one of the two commands, using full prefix.
help_lines = registry.get_help("cols").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for one of the two commands, using alias.
help_lines = registry.get_help("c").lines
self.assertTrue(help_lines[0].endswith("cols"))
self.assertTrue(help_lines[1].endswith("Aliases: c"))
self.assertFalse(help_lines[2])
self.assertTrue(help_lines[3].endswith(
"Show screen width in number of columns."))
# Get help info for a nonexistent command.
help_lines = registry.get_help("foo").lines
self.assertEqual("Invalid command prefix: \"foo\"", help_lines[0])
def testHelpCommandWithoutIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
registry.register_command_handler(
"cols",
self._echo_screen_cols,
"Show screen width in number of columns.",
prefix_aliases=["c"])
# Get help for all commands.
output = registry.dispatch_command("help", [])
self.assertEqual(["cols", " Aliases: c", "",
" Show screen width in number of columns.", "", "",
"help", " Aliases: h", "", " Print this help message.",
"", "", "noop", " Aliases: n, NOOP", "",
" No operation.", " I.e., do nothing.", "", ""],
output.lines)
# Get help for one specific command prefix.
output = registry.dispatch_command("help", ["noop"])
self.assertEqual(["noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing."], output.lines)
# Get help for a nonexistent command prefix.
output = registry.dispatch_command("help", ["foo"])
self.assertEqual(["Invalid command prefix: \"foo\""], output.lines)
def testHelpCommandWithIntro(self):
registry = debugger_cli_common.CommandHandlerRegistry()
registry.register_command_handler(
"noop",
self._noop_handler,
"No operation.\nI.e., do nothing.",
prefix_aliases=["n", "NOOP"])
help_intro = debugger_cli_common.RichTextLines(
["Introductory comments.", ""])
registry.set_help_intro(help_intro)
output = registry.dispatch_command("help", [])
self.assertEqual(help_intro.lines + [
"help", " Aliases: h", "", " Print this help message.", "", "",
"noop", " Aliases: n, NOOP", "", " No operation.",
" I.e., do nothing.", "", ""
], output.lines)
class RegexFindTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"])
def testRegexFindWithoutExistingFontAttrSegs(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow")], new_screen_output.font_attr_segs[0])
self.assertEqual([(8, 11, "yellow")], new_screen_output.font_attr_segs[1])
# Check field in annotations carrying a list of matching line indices.
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithExistingFontAttrSegs(self):
# Add a font attribute segment first.
self._orig_screen_output.font_attr_segs[0] = [(9, 12, "red")]
self.assertEqual(1, len(self._orig_screen_output.font_attr_segs))
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"are", "yellow")
self.assertEqual(2, len(new_screen_output.font_attr_segs))
self.assertEqual([(6, 9, "yellow"), (9, 12, "red")],
new_screen_output.font_attr_segs[0])
self.assertEqual([0, 1], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testRegexFindWithNoMatches(self):
new_screen_output = debugger_cli_common.regex_find(self._orig_screen_output,
"infrared", "yellow")
self.assertEqual({}, new_screen_output.font_attr_segs)
self.assertEqual([], new_screen_output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY])
def testInvalidRegex(self):
with self.assertRaisesRegexp(ValueError, "Invalid regular expression"):
debugger_cli_common.regex_find(self._orig_screen_output, "[", "yellow")
class WrapScreenOutputTest(test_util.TensorFlowTestCase):
def setUp(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 5, "red"), (6, 9, "gray"), (10, 12, "red"),
(12, 13, "crimson")],
2: [(0, 7, "blue"), (8, 11, "gray"), (12, 14, "blue"),
(14, 16, "indigo")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
def testNoActualWrapping(self):
# Large column limit should lead to no actual wrapping.
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 100)
self.assertEqual(self._orig_screen_output.lines, out.lines)
self.assertEqual(self._orig_screen_output.font_attr_segs,
out.font_attr_segs)
self.assertEqual(self._orig_screen_output.annotations, out.annotations)
self.assertEqual(new_line_indices, [0, 1, 2])
def testWrappingWithAttrCutoff(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 11)
# Add non-row-index field to out.
out.annotations["metadata"] = "foo"
# Check wrapped text.
self.assertEqual(5, len(out.lines))
self.assertEqual("Folk song:", out.lines[0])
self.assertEqual("Roses are r", out.lines[1])
self.assertEqual("ed", out.lines[2])
self.assertEqual("Violets are", out.lines[3])
self.assertEqual(" blue", out.lines[4])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertEqual([(0, 5, "red"), (6, 9, "gray"), (10, 11, "red")],
out.font_attr_segs[1])
self.assertEqual([(0, 1, "red"), (1, 2, "crimson")], out.font_attr_segs[2])
self.assertEqual([(0, 7, "blue"), (8, 11, "gray")], out.font_attr_segs[3])
self.assertEqual([(1, 3, "blue"), (3, 5, "indigo")], out.font_attr_segs[4])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[1])
self.assertFalse(2 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[3])
self.assertFalse(4 in out.annotations)
# Check that the non-row-index field is present in the output.
self.assertEqual("foo", out.annotations["metadata"])
self.assertEqual(new_line_indices, [0, 1, 3])
def testWrappingWithMultipleAttrCutoff(self):
self._orig_screen_output = debugger_cli_common.RichTextLines(
["Folk song:", "Roses are red", "Violets are blue"],
font_attr_segs={1: [(0, 12, "red")],
2: [(1, 16, "blue")]},
annotations={1: "longer wavelength",
2: "shorter wavelength"})
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
self._orig_screen_output, 5)
# Check wrapped text.
self.assertEqual(9, len(out.lines))
self.assertEqual("Folk ", out.lines[0])
self.assertEqual("song:", out.lines[1])
self.assertEqual("Roses", out.lines[2])
self.assertEqual(" are ", out.lines[3])
self.assertEqual("red", out.lines[4])
self.assertEqual("Viole", out.lines[5])
self.assertEqual("ts ar", out.lines[6])
self.assertEqual("e blu", out.lines[7])
self.assertEqual("e", out.lines[8])
# Check wrapped font_attr_segs.
self.assertFalse(0 in out.font_attr_segs)
self.assertFalse(1 in out.font_attr_segs)
self.assertEqual([(0, 5, "red")], out.font_attr_segs[2])
self.assertEqual([(0, 5, "red")], out.font_attr_segs[3])
self.assertEqual([(0, 2, "red")], out.font_attr_segs[4])
self.assertEqual([(1, 5, "blue")], out.font_attr_segs[5])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[6])
self.assertEqual([(0, 5, "blue")], out.font_attr_segs[7])
self.assertEqual([(0, 1, "blue")], out.font_attr_segs[8])
# Check annotations.
self.assertFalse(0 in out.annotations)
self.assertFalse(1 in out.annotations)
self.assertEqual("longer wavelength", out.annotations[2])
self.assertFalse(3 in out.annotations)
self.assertFalse(4 in out.annotations)
self.assertEqual("shorter wavelength", out.annotations[5])
self.assertFalse(6 in out.annotations)
self.assertFalse(7 in out.annotations)
self.assertFalse(8 in out.annotations)
self.assertEqual(new_line_indices, [0, 2, 5])
def testWrappingInvalidArguments(self):
with self.assertRaisesRegexp(ValueError,
"Invalid type of input screen_output"):
debugger_cli_common.wrap_rich_text_lines("foo", 12)
with self.assertRaisesRegexp(ValueError, "Invalid type of input cols"):
debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines(["foo", "bar"]), "12")
def testWrappingEmptyInput(self):
out, new_line_indices = debugger_cli_common.wrap_rich_text_lines(
debugger_cli_common.RichTextLines([]), 10)
self.assertEqual([], out.lines)
self.assertEqual([], new_line_indices)
class SliceRichTextLinesTest(test_util.TensorFlowTestCase):
def setUp(self):
self._original = debugger_cli_common.RichTextLines(
["Roses are red", "Violets are blue"],
font_attr_segs={0: [(0, 5, "red")],
1: [(0, 7, "blue")]},
annotations={
0: "longer wavelength",
1: "shorter wavelength",
"foo_metadata": "bar"
})
def testSliceBeginning(self):
sliced = self._original.slice(0, 1)
self.assertEqual(["Roses are red"], sliced.lines)
self.assertEqual({0: [(0, 5, "red")]}, sliced.font_attr_segs)
    # Non-line-number metadata should be preserved.
self.assertEqual({
0: "longer wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testSliceEnd(self):
sliced = self._original.slice(1, 2)
self.assertEqual(["Violets are blue"], sliced.lines)
# The line index should have changed from 1 to 0.
self.assertEqual({0: [(0, 7, "blue")]}, sliced.font_attr_segs)
self.assertEqual({
0: "shorter wavelength",
"foo_metadata": "bar"
}, sliced.annotations)
self.assertEqual(1, sliced.num_lines())
def testAttemptSliceWithNegativeIndex(self):
with self.assertRaisesRegexp(ValueError, "Encountered negative index"):
self._original.slice(0, -1)
class TabCompletionRegistryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tc_reg = debugger_cli_common.TabCompletionRegistry()
# Register the items in an unsorted order deliberately, to test the sorted
# output from get_completions().
self._tc_reg.register_tab_comp_context(
["print_tensor", "pt"],
["node_b:1", "node_b:2", "node_a:1", "node_a:2"])
self._tc_reg.register_tab_comp_context(["node_info"],
["node_c", "node_b", "node_a"])
def testTabCompletion(self):
# The returned completions should have sorted order.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a:1", "node_a:2", "node_b:1", "node_b:2"],
"node_"), self._tc_reg.get_completions("pt", ""))
self.assertEqual((["node_a:1", "node_a:2"], "node_a:"),
self._tc_reg.get_completions("print_tensor", "node_a"))
self.assertEqual((["node_a:1"], "node_a:1"),
self._tc_reg.get_completions("pt", "node_a:1"))
self.assertEqual(([], ""),
self._tc_reg.get_completions("print_tensor", "node_a:3"))
self.assertEqual((None, None), self._tc_reg.get_completions("foo", "node_"))
def testExtendCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.extend_comp_items("print_tensor", ["node_A:1", "node_A:2"])
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
# Extending the completions for one of the context's context words should
# have taken effect on other context words of the same context as well.
self.assertEqual((["node_A:1", "node_A:2", "node_a:1", "node_a:2",
"node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testExtendCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.extend_comp_items("foo", ["node_A:1", "node_A:2"])
def testRemoveCompletionItems(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.remove_comp_items("pt", ["node_a:1", "node_a:2"])
self.assertEqual((["node_b:1", "node_b:2"], "node_b:"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
def testRemoveCompletionItemsNonexistentContext(self):
with self.assertRaisesRegexp(
KeyError, "Context word \"foo\" has not been registered"):
self._tc_reg.remove_comp_items("foo", ["node_a:1", "node_a:2"])
def testDeregisterContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
self.assertEqual((None, None),
self._tc_reg.get_completions("print_tensor", "node_"))
# The alternative context word should be unaffected.
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("pt", "node_"))
def testDeregisterNonexistentContext(self):
self.assertEqual(
(["node_a:1", "node_a:2", "node_b:1", "node_b:2"], "node_"),
self._tc_reg.get_completions("print_tensor", "node_"))
self.assertEqual((["node_a", "node_b", "node_c"], "node_"),
self._tc_reg.get_completions("node_info", "node_"))
self._tc_reg.deregister_context(["print_tensor"])
with self.assertRaisesRegexp(
KeyError,
"Cannot deregister unregistered context word \"print_tensor\""):
self._tc_reg.deregister_context(["print_tensor"])
class CommandHistoryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._cmd_hist = debugger_cli_common.CommandHistory(limit=3)
def testLookUpMostRecent(self):
self.assertEqual([], self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self._cmd_hist.add_command("node_info node_b")
self.assertEqual(["node_info node_b"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(["list_tensors", "node_info node_a", "node_info node_b"],
self._cmd_hist.most_recent_n(4))
# Go over the limit.
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_a"], self._cmd_hist.most_recent_n(1))
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(2))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(3))
self.assertEqual(
["node_info node_a", "node_info node_b", "node_info node_a"],
self._cmd_hist.most_recent_n(4))
def testLookUpPrefix(self):
self._cmd_hist.add_command("node_info node_b")
self._cmd_hist.add_command("list_tensors")
self._cmd_hist.add_command("node_info node_a")
self.assertEqual(["node_info node_b", "node_info node_a"],
self._cmd_hist.lookup_prefix("node_info", 10))
self.assertEqual(["node_info node_a"], self._cmd_hist.lookup_prefix(
"node_info", 1))
self.assertEqual([], self._cmd_hist.lookup_prefix("print_tensor", 10))
def testAddNonStrCommand(self):
with self.assertRaisesRegexp(
TypeError, "Attempt to enter non-str entry to command history"):
self._cmd_hist.add_command(["print_tensor node_a:0"])
class MenuNodeTest(test_util.TensorFlowTestCase):
def testCommandTypeConstructorSucceeds(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertEqual("water flower", menu_node.caption)
self.assertEqual("water_flower", menu_node.content)
def testDisableWorks(self):
menu_node = debugger_cli_common.MenuItem("water flower", "water_flower")
self.assertTrue(menu_node.is_enabled())
menu_node.disable()
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
def testConstructAsDisabledWorks(self):
menu_node = debugger_cli_common.MenuItem(
"water flower", "water_flower", enabled=False)
self.assertFalse(menu_node.is_enabled())
menu_node.enable()
self.assertTrue(menu_node.is_enabled())
class MenuTest(test_util.TensorFlowTestCase):
def setUp(self):
self.menu = debugger_cli_common.Menu()
self.assertEqual(0, self.menu.num_items())
self.node1 = debugger_cli_common.MenuItem("water flower", "water_flower")
self.node2 = debugger_cli_common.MenuItem(
"measure wavelength", "measure_wavelength")
self.menu.append(self.node1)
self.menu.append(self.node2)
self.assertEqual(2, self.menu.num_items())
def testFormatAsSingleLineWithStrItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs="underline")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithListItemAttrsWorks(self):
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", enabled_item_attrs=["underline", "bold"])
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1, "underline", "bold"]),
output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2, "underline", "bold"]),
output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testFormatAsSingleLineWithNoneItemAttrsWorks(self):
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, measure wavelength, "], output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual({}, output.annotations)
def testInsertNode(self):
self.assertEqual(["water flower", "measure wavelength"],
self.menu.captions())
node2 = debugger_cli_common.MenuItem("write poem", "write_poem")
self.menu.insert(1, node2)
self.assertEqual(["water flower", "write poem", "measure wavelength"],
self.menu.captions())
output = self.menu.format_as_single_line(prefix="Menu: ", divider=", ")
self.assertEqual(["Menu: water flower, write poem, measure wavelength, "],
output.lines)
def testFormatAsSingleLineWithDisabledNode(self):
node2 = debugger_cli_common.MenuItem(
"write poem", "write_poem", enabled=False)
self.menu.append(node2)
output = self.menu.format_as_single_line(
prefix="Menu: ", divider=", ", disabled_item_attrs="bold")
self.assertEqual(["Menu: water flower, measure wavelength, write poem, "],
output.lines)
self.assertEqual((6, 18, [self.node1]), output.font_attr_segs[0][0])
self.assertEqual((20, 38, [self.node2]), output.font_attr_segs[0][1])
self.assertEqual((40, 50, ["bold"]), output.font_attr_segs[0][2])
if __name__ == "__main__":
googletest.main()
|
|
import sys
import inspect
from django import template
from django.conf import settings
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from django.utils.html import escape
from django.utils.safestring import mark_safe, SafeData
register = template.Library()
#
# {% chart %}
#
@register.tag
def chart(parser, token):
bits = iter(token.split_contents())
name = bits.next()
varname = None
saveas = None
extends = None
for bit in bits:
if bit == "as":
varname = bits.next()
elif bit == "saveas":
raise template.TemplateSyntaxError("Sorry, 'saveas' isn't implemented yet!")
saveas = template.Variable(bits.next())
elif bit == "extends":
extends = template.Variable(bits.next())
else:
raise template.TemplateSyntaxError("Unknown argument to '%s': '%s'" % (name, bit))
nodelist = parser.parse("end%s" % name)
parser.delete_first_token()
return ChartNode(nodelist, varname, saveas, extends)
class ChartNode(template.Node):
def __init__(self, nodelist, varname, saveas, extends):
self.nodelist = nodelist
self.saveas = saveas
self.varname = varname
self.extends = extends
def render(self, context):
c = Chart()
if self.extends:
try:
parent = self.extends.resolve(context)
except template.VariableDoesNotExist:
pass
else:
c = parent.clone()
for node in self.nodelist:
if isinstance(node, ChartDataNode):
c.datasets.extend(node.resolve(context))
elif isinstance(node, ChartOptionNode):
node.update_chart(c, context)
elif isinstance(node, AxisNode):
c.axes.append(node.resolve(context))
if self.varname:
context[self.varname] = c
return ""
else:
return c.img()
class Chart(object):
BASE = "http://chart.apis.google.com/chart"
defaults = {
"chs": "200x200",
"cht": "lc"
}
def __init__(self):
        # Use a SortedDict for the options so they are added in a
# deterministic manner; this eases things like dealing with cache keys
# or writing unit tests.
self.options = SortedDict()
self.datasets = []
self.axes = []
self.datarange = None
self.alt = None
def clone(self):
clone = self.__class__()
clone.options = self.options.copy()
clone.datasets = self.datasets[:]
clone.axes = self.axes[:]
return clone
def img(self):
url = self.url()
width, height = self.options["chs"].split("x")
if self.alt:
alt = 'alt="%s" ' % escape(self.alt)
else:
alt = ''
s = mark_safe('<img src="%s" width="%s" height="%s" %s/>' % (escape(url), width, height, alt))
return s
def url(self):
if self.options.get('cht', None) == 't':
self.datasets.append(self.options.pop("_mapdata"))
# Figure out the chart's data range
if not self.datarange:
if self.datasets == [[]]:
maxvalue = 0
minvalue = 0
else:
maxvalue = max(max(d) for d in self.datasets if d)
minvalue = min(min(d) for d in self.datasets if d)
self.datarange = (minvalue, maxvalue)
# Encode data
if "chds" in self.options or self.options.get('cht', None) == 'gom':
# text encoding if scaling provided, or for google-o-meter type
data = "|".join(encode_text(d) for d in self.datasets)
encoded_data = "t:%s" % data
else:
# extended encoding otherwise
data = extended_separator.join(encode_extended(d, self.datarange) for d in self.datasets)
encoded_data = "e:%s" % data
# Update defaults
for k in self.defaults:
if k not in self.options:
self.options[k] = self.defaults[k]
        # Start to calculate the URL
url = "%s?%s&chd=%s" % (self.BASE, urlencode(self.options), encoded_data)
# Calculate axis options
if self.axes:
axis_options = SortedDict()
axis_sides = []
for i, axis in enumerate(self.axes):
axis_sides.append(axis.side)
for opt in axis.options:
axis_options.setdefault(opt, []).append(axis.options[opt] % i)
# Turn the option lists into strings
axis_sides = smart_join(",", *axis_sides)
for opt in axis_options:
axis_options[opt] = smart_join("|", *axis_options[opt])
url += "&chxt=%s&%s" % (axis_sides, urlencode(axis_options))
return url
#
# {% chart-data %}
#
@register.tag(name="chart-data")
def chart_data(parser, token):
bits = iter(token.split_contents())
name = bits.next()
datasets = map(parser.compile_filter, bits)
return ChartDataNode(datasets)
class ChartDataNode(template.Node):
def __init__(self, datasets):
self.datasets = datasets
def resolve(self, context):
resolved = []
for data in self.datasets:
try:
data = data.resolve(context)
except template.VariableDoesNotExist:
data = []
# XXX need different ways of representing pre-encoded data, data with
# different separators, etc.
if isinstance(data, basestring):
data = filter(None, map(safefloat, data.split(",")))
else:
data = filter(None, map(safefloat, data))
resolved.append(data)
return resolved
def render(self, context):
return ""
#
# Chart options
#
class OptionNode(template.Node):
def __init__(self, callback, args, multi=None):
self.callback = callback
self.args = args
self.multi = multi
def render(self, context):
return ""
def resolve_arguments(self, context):
for arg in self.args:
try:
yield arg.resolve(context)
except template.VariableDoesNotExist:
yield None
def update_options(self, options, context):
data = self.callback(*self.resolve_arguments(context))
if self.multi:
for key in data:
if key in options:
options[key] = options[key] + self.multi + data[key]
else:
options[key] = data[key]
else:
options.update(data)
class ChartOptionNode(OptionNode):
def update_chart(self, chart, context):
self.update_options(chart.options, context)
class AxisOptionNode(OptionNode):
pass
def option(tagname, multi=None, nodeclass=ChartOptionNode):
"""
Decorator-helper to register a chart-foo option tag. The decorated function
will be called at resolution time with the proper arity (determined from
inspecting the decorated function). This callback should return a dictionary
which will be used as arguments in the chart URL.
"""
def decorator(func):
# Figure out how to validate the args to the tag
args, varargs, varkw, defaults = inspect.getargspec(func)
max_args = min_args = 0
if args:
max_args = len(args)
if defaults:
min_args = max_args - len(defaults)
unlimited = bool(varargs)
def template_tag_callback(parser, token):
bits = iter(token.split_contents())
name = bits.next()
args = map(template.Variable, bits)
if not unlimited and len(args) < min_args:
raise template.TemplateSyntaxError("Too few arguments to '%s'" % name)
if not unlimited and len(args) > max_args:
raise template.TemplateSyntaxError("Too many arguments to '%s'" % name)
return nodeclass(func, args, multi)
template_tag_callback.__name__ = func.__name__
template_tag_callback.__doc__ = func.__doc__
register.tag(tagname, template_tag_callback)
return func
return decorator
@option("chart-type")
def chart_type(arg):
"""
Set the chart type. Valid arguments are anything the chart API understands,
or the following human-readable alternates:
* 'line'
    * 'xy' / 'line-xy'
* 'bar' / 'bar-grouped'
* 'column' / 'column-grouped'
* 'bar-stacked'
* 'column-stacked'
* 'pie'
* 'pie-3d'
* 'venn'
* 'scatter'
    * 'google-o-meter'
    * 'map'
"""
types = {
'line': 'lc',
'xy': 'lxy',
'line-xy': 'lxy',
'bar': 'bhg',
'column': 'bvg',
'bar-stacked': 'bhs',
'column-stacked': 'bvs',
'bar-grouped': 'bhg',
'column-grouped': 'bvg',
'pie': 'p',
'pie-3d': 'p3',
'venn': 'v',
'scatter': 's',
'google-o-meter': 'gom',
'map': 't',
}
return {"cht": types.get(arg, arg)}
@option("chart-data-scale", multi=",")
def chart_data_scale(*args):
return {"chds": smart_join(",", *args)}
@option("chart-colors", multi=",")
def chart_colors(*args):
return {"chco": smart_join(",", *args)}
@option("chart-size")
def chart_size(arg1, arg2=None):
if arg2:
return {"chs": smart_join("x", arg1, arg2)}
else:
return {"chs": arg1}
@option("chart-background", multi="|")
def chart_background(color):
return _solid("bg", color)
@option("chart-fill", multi="|")
def chart_fill(color):
return _solid("c", color)
def _solid(type, color):
return {"chf": "%s,s,%s" % (type, color)}
@option("chart-background-gradient", multi="|")
def chart_background_gradient(angle, *colors):
return _fancy_background("bg", "lg", angle, colors)
@option("chart-fill-gradient", multi="|")
def chart_fill_gradient(angle, *colors):
return _fancy_background("c", "lg", angle, colors)
@option("chart-background-stripes", multi="|")
def chart_background_stripes(angle, *colors):
return _fancy_background("bg", "ls", angle, colors)
@option("chart-fill-stripes", multi="|")
def chart_fill_stripes(angle, *colors):
return _fancy_background("c", "ls", angle, colors)
def _fancy_background(bgtype, fancytype, angle, colors):
return {"chf": smart_join(",", bgtype, fancytype, angle, *colors)}
@option("chart-title")
def chart_title(title, fontsize=None, color="000000"):
title = title.replace("\n", "|")
if fontsize:
return {"chtt":title, "chts":"%s,%s" % (color, fontsize)}
else:
return {"chtt": title}
@option("chart-legend", multi="|")
def chart_legend(*labels):
return {"chdl": smart_join("|", *flatten(labels))}
@option("chart-labels", multi="|")
def chart_labels(*labels):
return {"chl": smart_join("|", *flatten(labels))}
@option("chart-bar-width")
def chart_bar_width(width, barspace=None, groupspace=None):
return {"chbh": smart_join(",", width, barspace, groupspace)}
@option("chart-line-style", multi="|")
def chart_line_style(thickness, line_length=None, space_length=None):
return {"chls": smart_join(",", thickness, line_length, space_length)}
@option("chart-grid")
def chart_grid(xstep, ystep, line_length=None, space_length=None):
return {"chg": smart_join(",", xstep, ystep, line_length, space_length)}
rangetypes = {
"h": "r",
"horiz": "r",
"horizontal": "r",
"v": "R",
"vert": "R",
"vertical": "R",
}
@option("chart-range-marker", multi="|")
def chart_range_marker(range_type, color, start, end):
rt = rangetypes.get(range_type, range_type)
return {"chm": smart_join(",", rt, color, "0", start, end)}
@option("chart-fill-area", multi="|")
def chart_fill_area(color, startindex=0, endindex=0):
if startindex or endindex:
filltype = "b"
else:
filltype = "B"
return {"chm": smart_join(",", filltype, color, startindex, endindex, "0")}
marker_types = {
'arrow': 'a',
'cross': 'c',
'diamond': 'd',
'circle': 'o',
'square': 's',
'line': 'v',
'full-line': 'V',
'h-line': 'h',
'horiz-line': 'h',
'horizontal-line': 'h',
}
@option("chart-marker", multi="|")
def chart_marker(marker, color, dataset_index, data_point, size):
marker = marker_types.get(marker, marker)
return {"chm": smart_join(",", marker, color, dataset_index, data_point, size)}
@option("chart-makers", multi="|")
def chart_markers(dataset_index, iterable):
"""Provide an iterable yielding (type, color, point, size)"""
try:
it = iter(iterable)
except TypeError:
return {}
markers = []
for m in it:
try:
marker, color, point, size = m
except ValueError:
continue
        marker = marker_types.get(marker, marker)
        markers.append(smart_join(",", marker, color, dataset_index, point, size))
    return {"chm": smart_join("|", *markers)}
@option("chart-map-area")
def chart_map_area(where):
return {'chtm': where}
@option("chart-map-data")
def chart_map_data(data):
place_list = []
value_list = []
for (k, v) in data.items():
place_list.append(k)
value_list.append(v)
return {
"chld": smart_join("", *place_list),
"_mapdata": value_list
}
#
# {% axis %}
#
@register.tag
def axis(parser, token):
bits = token.split_contents()
if len(bits) == 2:
# {% axis <side> %} ... {% endaxis %}
name, side = bits
nodelist = parser.parse("end%s" % name)
parser.delete_first_token()
return AxisNode(template.Variable(side), nodelist)
elif len(bits) == 3:
# {% axis <side> hide %}
name, side = bits[0:2]
if bits[2].lower() != "hide":
raise template.TemplateSyntaxError("%s tag expected 'hide' as last argument" % name)
return NoAxisNode(template.Variable(side))
else:
raise template.TemplateSyntaxError("axis tag takes one or two arguments")
class AxisNode(template.Node):
sides = {
'left': 'y',
'right': 'r',
'top': 't',
'bottom': 'x',
}
def __init__(self, side, nodelist=None):
self.side = side
self.nodelist = nodelist
def render(self, context):
return ''
def resolve(self, context):
axis = self.get_axis(context)
for node in self.nodelist:
if isinstance(node, AxisOptionNode):
node.update_options(axis.options, context)
return axis
def get_axis(self, context):
try:
side = self.side.resolve(context)
except template.VariableDoesNotExist:
return None
side = self.sides.get(side, side)
return Axis(side)
class NoAxisNode(AxisNode):
def resolve(self, context):
axis = self.get_axis(context)
axis.options["chxs"] = "%s,000000,11,0,_"
axis.options["chxl"] = "%s:||"
return axis
class Axis(object):
def __init__(self, side):
self.side = side
self.options = SortedDict()
# Axis options use %s placeholders for the axis index; this gets
# filled in by Chart.url()
@option("axis-labels", nodeclass=AxisOptionNode)
def axis_labels(*labels):
return {"chxl": "%s:|" + smart_join("|", *flatten(labels))}
@option("axis-label-positions", nodeclass=AxisOptionNode)
def axis_label_position(*positions):
return {"chxp": smart_join(",", "%s", *flatten(positions))}
@option("axis-range", nodeclass=AxisOptionNode)
def axis_range(start, end):
return {"chxr": "%%s,%s,%s" % (start, end)}
alignments = {
'left': -1,
'right': 1,
'center': 0,
}
@option("axis-style", nodeclass=AxisOptionNode)
def axis_style(color, font_size=None, alignment=None):
alignment = alignments.get(alignment, alignment)
return {"chxs": smart_join(",", "%s", color, font_size, alignment)}
#
# "Metadata" nodes
#
class MetadataNode(ChartOptionNode):
def update_chart(self, chart, context):
self.callback(chart, *self.resolve_arguments(context))
@option("chart-data-range", nodeclass=MetadataNode)
def chart_data_range(chart, lower=None, upper=None):
if lower and upper:
try:
map(float, (lower, upper))
except ValueError:
return
chart.datarange = (lower, upper)
elif lower == "auto":
chart.datarange = None
@option("chart-alt", nodeclass=MetadataNode)
def chart_alt(chart, alt=None):
chart.alt = alt
#
# Helper functions
#
extended_separator = ","
def encode_text(values):
return extended_separator.join(str(v) for v in values)
def encode_extended(values, value_range):
"""Encode data using Google's "extended" encoding for the most granularity."""
return "".join(num2chars(v, value_range) for v in values)
_encoding_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-."
_num2chars = [a+b for a in _encoding_chars for b in _encoding_chars]
def num2chars(n, value_range):
if n is not None:
return _num2chars[norm(n, value_range)]
else:
return '__'
def norm(n, value_range):
minvalue, maxvalue = value_range
if minvalue >= 0:
return int(round(float(n) / maxvalue * 4095, 0))
elif maxvalue <= 0:
return 4095 - int(round(float(n) * 4095 / minvalue))
else:
return int(round((n - minvalue) * (float(4095) / (maxvalue - minvalue))))
def safefloat(n):
try:
return float(n)
except (TypeError, ValueError):
return None
def smart_join(sep, *args):
return sep.join(smart_str(s, errors="ignore") for s in args if s is not None)
# I'm annoyed that urllib.urlencode doesn't allow specifying "safe"
# characters -- specifically ":", ",", and "|", since those characters
# make reading gchart URLs much easier.
from urllib import quote_plus
def urlencode(query, safe="/:,|"):
q = lambda v: quote_plus(v, safe=safe)
query = getattr(query, 'items', lambda: query)()
qlist = ["%s=%s" % (q(k), q(v)) for (k,v) in query]
return "&".join(qlist)
def flatten(iterator):
for i in iterator:
if hasattr(i, "__iter__"):
for j in flatten(iter(i)):
yield j
else:
yield i
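# Illustrative template usage (a sketch: the tag names match the registrations
# above; `values` is assumed to be a list of numbers in the template context):
#
#   {% chart as my_chart %}
#       {% chart-type "line" %}
#       {% chart-size "300x150" %}
#       {% chart-data values %}
#   {% endchart %}
#   {{ my_chart.img }}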
|
|
import json
import os
import random
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from functools import partial
from urlparse import SplitResult, urlsplit, urlunsplit
from django import forms, test
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
from django.core.urlresolvers import reverse
from django.test.client import Client, RequestFactory
from django.utils import translation
from django.utils.translation import trans_real
import caching
import elasticsearch
import mock
import tower
from dateutil.parser import parse as dateutil_parser
from django_browserid.tests import mock_browserid
from nose.exc import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
from redisutils import mock_redis, reset_redis
from waffle import cache_sample, cache_switch
from waffle.models import Flag, Sample, Switch
import mkt
from lib.es.management.commands import reindex
from lib.post_request_task import task as post_request_task
from mkt.access.acl import check_ownership
from mkt.access.models import Group, GroupUser
from mkt.constants import regions
from mkt.constants.payments import PROVIDER_REFERENCE
from mkt.files.helpers import copyfileobj
from mkt.prices.models import AddonPremium, Price, PriceCurrency
from mkt.search.indexers import BaseIndexer
from mkt.site.fixtures import fixture
from mkt.site.utils import app_factory
from mkt.translations.hold import clean_translations
from mkt.translations.models import delete_translation, Translation
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
from mkt.webapps.tasks import unindex_webapps
# We might not have gettext available in jinja2.env.globals when running tests.
# It's only added to the globals when activating a language with tower (which
# is usually done in the middlewares). During tests, however, we might not be
# running middlewares, and thus not activating a language, and thus not
# installing gettext in the globals, and thus not have it in the context when
# rendering templates.
tower.activate('en-us')
class DynamicBoolFieldsTestMixin(object):
def setUp(self):
"""
        In the inheriting setUp(), call super() and create an instance of
        the DynamicBoolFields model
        (e.g. RatingDescriptors.objects.create(addon=self.app)).
"""
self.app = app_factory()
self.model = None
self.related_name = '' # Related name of the bool table on the Webapp.
self.BOOL_DICT = []
self.flags = [] # Flag names.
self.expected = [] # Translation names.
def _get_related_bool_obj(self):
return getattr(self.app, self.related_name)
def _flag(self):
"""Flag app with a handful of flags for testing."""
self._get_related_bool_obj().update(
**dict(('has_%s' % f.lower(), True) for f in self.flags))
def _check(self, obj=None):
if not obj:
obj = self._get_related_bool_obj()
for bool_name in self.BOOL_DICT:
field = 'has_%s' % bool_name.lower()
value = bool_name in self.flags
if isinstance(obj, dict):
eq_(obj[field], value,
u'Unexpected value for field: %s' % field)
else:
eq_(getattr(obj, field), value,
u'Unexpected value for field: %s' % field)
def to_unicode(self, items):
"""
Force unicode evaluation of lazy items in the passed list, for set
comparison to a list of already-evaluated unicode strings.
"""
return [unicode(i) for i in items]
def test_bools_set(self):
self._flag()
self._check()
def test_to_dict(self):
self._flag()
self._check(self._get_related_bool_obj().to_dict())
def test_default_false(self):
obj = self.model(addon=self.app)
eq_(getattr(obj, 'has_%s' % self.flags[0].lower()), False)
def formset(*args, **kw):
"""
Build up a formset-happy POST.
*args is a sequence of forms going into the formset.
prefix and initial_count can be set in **kw.
"""
prefix = kw.pop('prefix', 'form')
total_count = kw.pop('total_count', len(args))
initial_count = kw.pop('initial_count', len(args))
data = {prefix + '-TOTAL_FORMS': total_count,
prefix + '-INITIAL_FORMS': initial_count}
for idx, d in enumerate(args):
data.update(('%s-%s-%s' % (prefix, idx, k), v)
for k, v in d.items())
data.update(kw)
return data
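# Illustrative call (the form dicts are hypothetical; key order may vary):
#
#   formset({'name': 'a'}, {'name': 'b'}, prefix='apps')
#   # -> {'apps-TOTAL_FORMS': 2, 'apps-INITIAL_FORMS': 2,
#   #     'apps-0-name': 'a', 'apps-1-name': 'b'}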
def initial(form):
"""Gather initial data from the form into a dict."""
data = {}
for name, field in form.fields.items():
if form.is_bound:
data[name] = form[name].data
else:
data[name] = form.initial.get(name, field.initial)
# The browser sends nothing for an unchecked checkbox.
if isinstance(field, forms.BooleanField):
val = field.to_python(data[name])
if not val:
del data[name]
return data
def check_links(expected, elements, selected=None, verify=True):
"""Useful for comparing an `expected` list of links against PyQuery
`elements`. Expected format of links is a list of tuples, like so:
[
('Home', '/'),
('Extensions', reverse('browse.extensions')),
...
]
If you'd like to check if a particular item in the list is selected,
pass as `selected` the title of the link.
Links are verified by default.
"""
for idx, item in enumerate(expected):
# List item could be `(text, link)`.
if isinstance(item, tuple):
text, link = item
# Or list item could be `link`.
elif isinstance(item, basestring):
text, link = None, item
e = elements.eq(idx)
if text is not None:
eq_(e.text(), text)
if link is not None:
# If we passed an <li>, try to find an <a>.
if not e.filter('a'):
e = e.find('a')
eq_(e.attr('href'), link)
if verify and link != '#':
eq_(Client().head(link, follow=True).status_code, 200,
'%r is dead' % link)
if text is not None and selected is not None:
e = e.filter('.selected, .sel') or e.parents('.selected, .sel')
eq_(bool(e.length), text == selected)
class RedisTest(object):
"""Mixin for when you need to mock redis for testing."""
def _pre_setup(self):
self._redis = mock_redis()
super(RedisTest, self)._pre_setup()
def _post_teardown(self):
super(RedisTest, self)._post_teardown()
reset_redis(self._redis)
class TestClient(Client):
def __getattr__(self, name):
"""
        Provides get_ajax, post_ajax, head_ajax, etc. methods on the test
        client so that you don't need to specify the XMLHttpRequest header.
if name.endswith('_ajax'):
method = getattr(self, name.split('_')[0])
return partial(method, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
else:
raise AttributeError
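# Usage sketch (the URL is hypothetical): self.client.get_ajax('/foo/') is
# equivalent to self.client.get('/foo/', HTTP_X_REQUESTED_WITH='XMLHttpRequest').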
class _JSONifiedResponse(object):
def __init__(self, response):
self._orig_response = response
def __getattr__(self, n):
return getattr(self._orig_response, n)
def __getitem__(self, n):
return self._orig_response[n]
def __iter__(self):
return iter(self._orig_response)
@property
def json(self):
"""Will return parsed JSON on response if there is any."""
if self.content and 'application/json' in self['Content-Type']:
if not hasattr(self, '_content_json'):
self._content_json = json.loads(self.content)
return self._content_json
class JSONClient(TestClient):
def _with_json(self, response):
if hasattr(response, 'json'):
return response
else:
return _JSONifiedResponse(response)
def get(self, *args, **kw):
return self._with_json(super(JSONClient, self).get(*args, **kw))
def delete(self, *args, **kw):
return self._with_json(super(JSONClient, self).delete(*args, **kw))
def post(self, *args, **kw):
return self._with_json(super(JSONClient, self).post(*args, **kw))
def put(self, *args, **kw):
return self._with_json(super(JSONClient, self).put(*args, **kw))
def patch(self, *args, **kw):
return self._with_json(super(JSONClient, self).patch(*args, **kw))
def options(self, *args, **kw):
return self._with_json(super(JSONClient, self).options(*args, **kw))
ES_patchers = [mock.patch('elasticsearch.Elasticsearch'),
mock.patch('mkt.webapps.indexers.WebappIndexer', spec=True),
mock.patch('mkt.search.indexers.index', spec=True),
mock.patch('mkt.search.indexers.Reindexing', spec=True,
side_effect=lambda i: [i])]
def start_es_mock():
for patch in ES_patchers:
patch.start()
def stop_es_mock():
for patch in ES_patchers:
patch.stop()
# Reset cached Elasticsearch objects.
BaseIndexer._es = {}
def days_ago(days):
return datetime.now().replace(microsecond=0) - timedelta(days=days)
class MockEsMixin(object):
mock_es = True
@classmethod
def setUpClass(cls):
if cls.mock_es:
start_es_mock()
try:
super(MockEsMixin, cls).setUpClass()
except Exception:
# We need to unpatch here because tearDownClass will not be
# called.
if cls.mock_es:
stop_es_mock()
raise
@classmethod
def tearDownClass(cls):
try:
super(MockEsMixin, cls).tearDownClass()
finally:
if cls.mock_es:
stop_es_mock()
class MockBrowserIdMixin(object):
def mock_browser_id(self):
cache.clear()
# Override django-cache-machine caching.base.TIMEOUT because it's
# computed too early, before settings_test.py is imported.
caching.base.TIMEOUT = settings.CACHE_COUNT_TIMEOUT
real_login = self.client.login
def fake_login(email, password=None):
with mock_browserid(email=email):
return real_login(email=email, assertion='test',
audience='test')
self.client.login = fake_login
def login(self, profile):
email = getattr(profile, 'email', profile)
if '@' not in email:
email += '@mozilla.com'
assert self.client.login(email=email, password='password')
JINJA_INSTRUMENTED = False
class TestCase(MockEsMixin, RedisTest, MockBrowserIdMixin, test.TestCase):
"""Base class for all mkt tests."""
client_class = TestClient
def shortDescription(self):
        # Stop nose from using the test docstring; use the test method name instead.
pass
def _pre_setup(self):
super(TestCase, self)._pre_setup()
# Clean the slate.
cache.clear()
post_request_task._discard_tasks()
trans_real.deactivate()
trans_real._translations = {} # Django fails to clear this cache.
trans_real.activate(settings.LANGUAGE_CODE)
self.mock_browser_id()
global JINJA_INSTRUMENTED
if not JINJA_INSTRUMENTED:
import jinja2
old_render = jinja2.Template.render
def instrumented_render(self, *args, **kwargs):
context = dict(*args, **kwargs)
test.signals.template_rendered.send(sender=self, template=self,
context=context)
return old_render(self, *args, **kwargs)
jinja2.Template.render = instrumented_render
JINJA_INSTRUMENTED = True
def _post_teardown(self):
mkt.set_user(None)
clean_translations(None) # Make sure queued translations are removed.
super(TestCase, self)._post_teardown()
@contextmanager
def activate(self, locale=None):
"""Active a locale."""
old_locale = translation.get_language()
if locale:
translation.activate(locale)
yield
translation.activate(old_locale)
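    # Usage sketch for activate() (the locale and URL are illustrative):
    #
    #   with self.activate('fr'):
    #       res = self.client.get('/some-url/')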
def assertNoFormErrors(self, response):
"""Asserts that no form in the context has errors.
If you add this check before checking the status code of the response
you'll see a more informative error.
"""
# TODO(Kumar) liberate upstream to Django?
if response.context is None:
# It's probably a redirect.
return
if len(response.templates) == 1:
tpl = [response.context]
else:
            # There are multiple contexts, so iterate over all of them.
tpl = response.context
for ctx in tpl:
for k, v in ctx.iteritems():
if isinstance(v, (forms.BaseForm, forms.formsets.BaseFormSet)):
if isinstance(v, forms.formsets.BaseFormSet):
# Concatenate errors from each form in the formset.
msg = '\n'.join(f.errors.as_text() for f in v.forms)
else:
# Otherwise, just return the errors for this form.
msg = v.errors.as_text()
msg = msg.strip()
if msg != '':
self.fail('form %r had the following error(s):\n%s'
% (k, msg))
if hasattr(v, 'non_field_errors'):
self.assertEquals(v.non_field_errors(), [])
if hasattr(v, 'non_form_errors'):
self.assertEquals(v.non_form_errors(), [])
def assertLoginRedirects(self, response, to, status_code=302):
# Not using urlparams, because that escapes the variables, which
# is good, but bad for assertRedirects which will fail.
self.assert3xx(response,
'%s?to=%s' % (reverse('users.login'), to), status_code)
def assert3xx(self, response, expected_url, status_code=302,
target_status_code=200):
"""Asserts redirect and final redirect matches expected URL.
Similar to Django's `assertRedirects` but skips the final GET
verification for speed.
"""
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(len(response.redirect_chain) > 0,
"Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url, status_code = response.redirect_chain[-1]
self.assertEqual(response.status_code, target_status_code,
"Response didn't redirect as expected: Final"
" Response code was %d (expected %d)" %
(response.status_code, target_status_code))
else:
# Not a followed redirect
self.assertEqual(response.status_code, status_code,
"Response didn't redirect as expected: Response"
" code was %d (expected %d)" %
(response.status_code, status_code))
url = response['Location']
scheme, netloc, path, query, fragment = urlsplit(url)
e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
expected_url)
if (scheme and not e_scheme) and (netloc and not e_netloc):
expected_url = urlunsplit(('http', 'testserver', e_path, e_query,
e_fragment))
self.assertEqual(
url, expected_url,
"Response redirected to '%s', expected '%s'" % (url, expected_url))
def assertLoginRequired(self, response, status_code=302):
"""
A simpler version of assertLoginRedirects that just checks that we
get the matched status code and bounced to the correct login page.
"""
assert response.status_code == status_code, (
'Response returned: %s, expected: %s'
% (response.status_code, status_code))
path = urlsplit(response['Location'])[2]
assert path == reverse('users.login'), (
'Redirected to: %s, expected: %s'
% (path, reverse('users.login')))
def assertSetEqual(self, a, b, message=None):
"""
        unittest has this in Python 2.7, but until we're on 2.7
        everywhere, this is the thing.
Oh, and Django's `assertSetEqual` is lame and requires actual sets:
http://bit.ly/RO9sTr
"""
eq_(set(a), set(b), message)
eq_(len(a), len(b), message)
def assertCloseToNow(self, dt, now=None):
"""
Make sure the datetime is within a minute from `now`.
"""
# Try parsing the string if it's not a datetime.
if isinstance(dt, basestring):
try:
dt = dateutil_parser(dt)
except ValueError, e:
raise AssertionError(
'Expected valid date; got %s\n%s' % (dt, e))
if not dt:
raise AssertionError('Expected datetime; got %s' % dt)
dt_later_ts = time.mktime((dt + timedelta(minutes=1)).timetuple())
dt_earlier_ts = time.mktime((dt - timedelta(minutes=1)).timetuple())
if not now:
now = datetime.now()
now_ts = time.mktime(now.timetuple())
assert dt_earlier_ts < now_ts < dt_later_ts, (
'Expected datetime to be within a minute of %s. Got %r.' % (now,
dt))
def assertCORS(self, res, *verbs, **kw):
"""
Determines if a response has suitable CORS headers. Appends 'OPTIONS'
on to the list of verbs.
"""
headers = kw.pop('headers', None)
if not headers:
headers = ['X-HTTP-Method-Override', 'Content-Type']
eq_(res['Access-Control-Allow-Origin'], '*')
assert 'API-Status' in res['Access-Control-Expose-Headers']
assert 'API-Version' in res['Access-Control-Expose-Headers']
verbs = map(str.upper, verbs) + ['OPTIONS']
actual = res['Access-Control-Allow-Methods'].split(', ')
self.assertSetEqual(verbs, actual)
eq_(res['Access-Control-Allow-Headers'], ', '.join(headers))
def assertApiUrlEqual(self, *args, **kwargs):
"""
Allows equality comparison of two or more URLs agnostic of API version.
This is done by prepending '/api/vx' (where x is equal to the `version`
keyword argument or API_CURRENT_VERSION) to each string passed as a
positional argument if that URL doesn't already start with that string.
Also accepts 'netloc' and 'scheme' optional keyword arguments to
compare absolute URLs.
Example usage:
url = '/api/v1/apps/app/bastacorp/'
self.assertApiUrlEqual(url, '/apps/app/bastacorp1/')
# settings.API_CURRENT_VERSION = 2
url = '/api/v1/apps/app/bastacorp/'
self.assertApiUrlEqual(url, '/apps/app/bastacorp/', version=1)
"""
# Constants for the positions of the URL components in the tuple
# returned by urlsplit. Only here for readability purposes.
SCHEME = 0
NETLOC = 1
PATH = 2
version = kwargs.get('version', settings.API_CURRENT_VERSION)
scheme = kwargs.get('scheme', None)
netloc = kwargs.get('netloc', None)
urls = list(args)
prefix = '/api/v%d' % version
for idx, url in enumerate(urls):
urls[idx] = list(urlsplit(url))
if not urls[idx][PATH].startswith(prefix):
urls[idx][PATH] = prefix + urls[idx][PATH]
if scheme and not urls[idx][SCHEME]:
urls[idx][SCHEME] = scheme
if netloc and not urls[idx][NETLOC]:
urls[idx][NETLOC] = netloc
urls[idx] = SplitResult(*urls[idx])
eq_(*urls)
def make_price(self, price='1.00'):
price_obj, created = Price.objects.get_or_create(price=price,
name='1')
for region in [regions.USA.id, regions.RESTOFWORLD.id]:
PriceCurrency.objects.create(region=region, currency='USD',
price=price, tier=price_obj,
provider=PROVIDER_REFERENCE)
# Call Price transformer in order to repopulate _currencies cache.
Price.transformer([])
return price_obj
def make_premium(self, addon, price='1.00'):
price_obj = self.make_price(price=Decimal(price))
addon.update(premium_type=mkt.ADDON_PREMIUM)
addon._premium = AddonPremium.objects.create(addon=addon,
price=price_obj)
if hasattr(Price, '_currencies'):
del Price._currencies
return addon._premium
def create_sample(self, name=None, db=False, **kw):
if name is not None:
kw['name'] = name
kw.setdefault('percent', 100)
sample = Sample(**kw)
sample.save() if db else cache_sample(instance=sample)
return sample
def create_switch(self, name=None, db=False, **kw):
kw.setdefault('active', True)
if name is not None:
kw['name'] = name
switch = Switch(**kw)
switch.save() if db else cache_switch(instance=switch)
return switch
def create_flag(self, name=None, **kw):
if name is not None:
kw['name'] = name
kw.setdefault('everyone', True)
return Flag.objects.create(**kw)
@staticmethod
def grant_permission(user_obj, rules, name='Test Group'):
"""Creates group with rule, and adds user to group."""
group = Group.objects.create(name=name, rules=rules)
GroupUser.objects.create(group=group, user=user_obj)
return group
def remove_permission(self, user_obj, rules):
"""Remove a permission from a user."""
group = Group.objects.get(rules=rules)
GroupUser.objects.filter(user=user_obj, group=group).delete()
def days_ago(self, days):
return days_ago(days)
def trans_eq(self, trans, locale, localized_string):
eq_(Translation.objects.get(id=trans.id,
locale=locale).localized_string,
localized_string)
def extract_script_template(self, html, template_selector):
"""Extracts the inner JavaScript text/template from a html page.
Example::
>>> template = extract_script_template(res.content, '#template-id')
>>> template('#my-jquery-selector')
Returns a PyQuery object that you can refine using jQuery selectors.
"""
return pq(pq(html)(template_selector).html())
class MktPaths(object):
"""Mixin for getting common Marketplace Paths."""
def manifest_path(self, name):
return os.path.join(settings.ROOT,
'mkt/submit/tests/webapps/%s' % name)
def manifest_copy_over(self, dest, name):
with storage.open(dest, 'wb') as f:
copyfileobj(open(self.manifest_path(name)), f)
@staticmethod
def sample_key():
return os.path.join(settings.ROOT,
'mkt/webapps/tests/sample.key')
def sample_packaged_key(self):
return os.path.join(settings.ROOT,
'mkt/webapps/tests/sample.packaged.pem')
def mozball_image(self):
return os.path.join(settings.ROOT,
'mkt/developers/tests/addons/mozball-128.png')
def packaged_app_path(self, name):
return os.path.join(
settings.ROOT, 'mkt/submit/tests/packaged/%s' % name)
def packaged_copy_over(self, dest, name):
with storage.open(dest, 'wb') as f:
copyfileobj(open(self.packaged_app_path(name)), f)
def assert_no_validation_errors(validation):
"""Assert that the validation (JSON) does not contain a traceback.
Note that this does not test whether the addon passed
validation or not.
"""
if hasattr(validation, 'task_error'):
# FileUpload object:
error = validation.task_error
else:
# Upload detail - JSON output
error = validation['error']
if error:
print '-' * 70
print error
print '-' * 70
raise AssertionError("Unexpected task error: %s" %
error.rstrip().split("\n")[-1])
def _get_created(created):
"""
Returns a datetime.
If `created` is "now", it returns `datetime.datetime.now()`. If `created`
    is set, use that. Otherwise, generate a random datetime in the year 2011.
"""
if created == 'now':
return datetime.now()
elif created:
return created
else:
return datetime(2011,
random.randint(1, 12), # Month
random.randint(1, 28), # Day
random.randint(0, 23), # Hour
random.randint(0, 59), # Minute
random.randint(0, 59)) # Seconds
def req_factory_factory(url='', user=None, post=False, data=None, **kwargs):
"""Creates a request factory, logged in with the user."""
req = RequestFactory()
if post:
req = req.post(url, data or {})
else:
req = req.get(url, data or {})
if user:
req.user = UserProfile.objects.get(id=user.id)
req.groups = user.groups.all()
else:
req.user = AnonymousUser()
req.check_ownership = partial(check_ownership, req)
req.REGION = kwargs.pop('region', mkt.regions.REGIONS_CHOICES[0][1])
req.API_VERSION = 2
for key in kwargs:
setattr(req, key, kwargs[key])
return req
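# Illustrative call (the user and URL are hypothetical):
#
#   req = req_factory_factory('/api/v2/apps/', user=some_user, post=True,
#                             data={'q': 'hello'}, region=regions.USA)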
user_factory_counter = 0
def user_factory(**kw):
"""
If not provided, email will be 'factoryuser<number>@mozilla.com'.
    If the email has no '@', '@mozilla.com' is appended.
"""
global user_factory_counter
email = kw.pop('email', 'factoryuser%d' % user_factory_counter)
if '@' not in email:
email = '%s@mozilla.com' % email
user = UserProfile.objects.create(email=email, **kw)
if 'email' not in kw:
user_factory_counter = user.id + 1
return user
class ESTestCase(TestCase):
"""Base class for tests that require elasticsearch."""
# ES is slow to set up so this uses class setup/teardown. That happens
# outside Django transactions so be careful to clean up afterwards.
test_es = True
mock_es = False
exempt_from_fixture_bundling = True # ES doesn't support bundling (yet?)
@classmethod
def setUpClass(cls):
if not settings.RUN_ES_TESTS:
raise SkipTest('ES disabled')
cls.es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)
        # The ES settings are set before we call super()
        # because indexing may occur in parent classes.
for key, index in settings.ES_INDEXES.items():
if not index.startswith('test_'):
settings.ES_INDEXES[key] = 'test_%s' % index
super(ESTestCase, cls).setUpClass()
try:
cls.es.cluster.health()
except Exception, e:
e.args = tuple([u'%s (it looks like ES is not running, '
'try starting it or set RUN_ES_TESTS=False)'
% e.args[0]] + list(e.args[1:]))
raise
cls._SEARCH_ANALYZER_MAP = mkt.SEARCH_ANALYZER_MAP
mkt.SEARCH_ANALYZER_MAP = {
'english': ['en-us'],
'spanish': ['es'],
}
for index in set(settings.ES_INDEXES.values()):
# Get the index that's pointed to by the alias.
try:
indices = cls.es.indices.get_aliases(index=index)
assert indices[index]['aliases']
except (KeyError, AssertionError):
# There's no alias, just use the index.
print 'Found no alias for %s.' % index
except elasticsearch.NotFoundError:
pass
# Remove any alias as well.
try:
cls.es.indices.delete(index=index)
except elasticsearch.NotFoundError as e:
print 'Could not delete index %r: %s' % (index, e)
for index, indexer, batch in reindex.INDEXES:
indexer.setup_mapping()
@classmethod
def tearDownClass(cls):
try:
if hasattr(cls, '_addons'):
addons = Webapp.objects.filter(
pk__in=[a.id for a in cls._addons])
# First delete all the translations.
for addon in addons:
for field in addon._meta.translated_fields:
delete_translation(addon, field.name)
addons.delete()
unindex_webapps([a.id for a in cls._addons])
mkt.SEARCH_ANALYZER_MAP = cls._SEARCH_ANALYZER_MAP
finally:
# Make sure we're calling super's tearDownClass even if something
# went wrong in the code above, as otherwise we'd run into bug
# 960598.
super(ESTestCase, cls).tearDownClass()
def tearDown(self):
post_request_task._send_tasks()
super(ESTestCase, self).tearDown()
@classmethod
def setUpIndex(cls):
cls.refresh()
@classmethod
def refresh(cls, doctype='webapp', timesleep=0):
post_request_task._send_tasks()
index = settings.ES_INDEXES[doctype]
try:
cls.es.indices.refresh(index=index)
except elasticsearch.NotFoundError as e:
print "Could not refresh index '%s': %s" % (index, e)
@classmethod
def reindex(cls, model, index='default'):
# Emit post-save signal so all of the objects get reindexed.
[o.save() for o in model.objects.all()]
cls.refresh(index)
class WebappTestCase(TestCase):
fixtures = fixture('webapp_337141')
def setUp(self):
self.app = self.get_app()
def get_app(self):
return Webapp.objects.get(id=337141)
def make_game(self, app=None, rated=False):
        return make_game(app or self.app, rated)
def make_game(app, rated):
app.update(categories=['games'])
if rated:
make_rated(app)
app = app.reload()
return app
def make_rated(app):
app.set_content_ratings(
dict((body, body.ratings[0]) for body in
mkt.ratingsbodies.ALL_RATINGS_BODIES))
app.set_iarc_info(123, 'abc')
app.set_descriptors([])
app.set_interactives([])
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it.
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd or "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 4:
continue
name = pkg_infos[5]
name = name.lstrip('[').rstrip(']')
print "name is: %s" % name
if pkg_name == name:
test_pkg_id = pkg_infos[3]
test_pkg_id = test_pkg_id.lstrip('[').rstrip(']')
print test_pkg_id
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    if return_code != 0:
        return False
    else:
        return True
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t xpk -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".xpk"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
    if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
#########################################################################
# Copyright 2011 Cloud Sidekick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#########################################################################
# This file is a derived work from Eucalyptus
# euca2ools/euca2ools/commands/eucacommands.py
# released under the BSD license
# original copyright:
# (c) 2009-2011, Eucalyptus Systems, Inc.
# original authors:
# Neil Soman neil@eucalyptus.com
# Mitch Garnaat mgarnaat@eucalyptus.com
import getopt
import os
import sys
import textwrap
import hashlib
import base64
import hmac
import urllib
import urllib2
import httplib
import json
from urlparse import urlparse
from datetime import datetime
from catoclient.param import Param
try:
import xml.etree.cElementTree as ET
except (AttributeError, ImportError):
import xml.etree.ElementTree as ET
class CatoCommand(object):
Description = ''
API = ''
Examples = ''
Info = ''
StandardOptions = [
Param(name='access_key',
short_name='A', long_name='access-key',
doc="A defined username.",
optional=True),
Param(name='secret_key',
short_name='S', long_name='secret-key',
doc="A valid password.",
optional=True),
Param(name='config_file',
short_name='C', long_name='config',
doc="""Read credentials and URL from the specified json formatted config file. If a config file and
              -A, -U or -S flags are used on the same command, the flag option parameters take precedence""",
optional=True),
Param(name='output_format', short_name='F', long_name='format',
doc='The output format. (default=text, values=xml/json.)',
optional=True, ptype='string', choices=['text', 'json', 'xml']),
Param(name='output_delimiter', short_name='L', long_name='output_delimiter',
doc='Delimiter for "Text" output format. (Default is TAB)',
optional=True, ptype='string'),
Param(name='debug', short_name='D', long_name='debug',
doc='Turn on debugging output.',
optional=True, ptype='boolean'),
Param(name='help', short_name='H', long_name='help',
doc='Display this help message.',
optional=True, ptype='boolean'),
Param(name='url', short_name='U', long_name='url',
doc='URL of the REST API endpoint. E.g.: http://address:port',
optional=True),
Param(name='force', long_name='force',
doc='Force "yes" on "Are you sure?" prompts.',
optional=True, ptype='boolean'),
Param(name='noheader', long_name='noheader',
doc='For "text" output format, omit the column header.',
optional=True, ptype='boolean'),
Param(name='dumpdoc', long_name='dumpdoc',
doc='Writes documentation for the command in Markdown format.',
optional=True, ptype='boolean'),
Param(name='api', long_name='api',
doc='Identifies the API endpoint associated with this command.',
optional=True, ptype='boolean')
]
Options = []
Args = []
def __init__(self, debug=False):
self.access_key = None
self.secret_key = None
self.url = None
self.config_file_name = None
self.debug = 0
self.force = False
self.set_debug(debug)
self.cmd_name = os.path.basename(sys.argv[0])
self.process_cli_args()
# if there's a config file, we read it.
# any required values not explicitly specified on the command line,
# are read from the config file.
# there's a default file ".catoclient.conf", and you can override with the "config_file" argument
configargs = None
cfn = None
if self.config_file_name:
cfn = self.config_file_name
else:
cfn = "%s/.catoclient.conf" % os.path.expanduser("~")
try:
with open(cfn, 'r') as f_in:
if f_in:
configargs = json.loads(f_in.read())
except IOError:
# if the file doesn't exist, warn and exit (but continue if there's no default config file).
if cfn != "%s/.catoclient.conf" % os.path.expanduser("~"):
print("The specified config file (%s) could not be found." % cfn)
self.error_exit()
else:
if self.debug:
print("The default config file (%s) could not be found." % cfn)
except ValueError:
# if the format of either file is bad, bark about it
print("The specified config file (%s) json format is invalid." % cfn)
self.error_exit()
if configargs:
# loop through the settings
for k, v in configargs.items():
if hasattr(self, k):
if not getattr(self, k):
setattr(self, k, v)
# since the args can come from different sources, we have to explicitly check the required ones.
if not self.url:
print("URL is required, either via --url or in a config file.")
self.error_exit()
if not self.access_key:
print("Access Key is required, either via --access-key or in a config file.")
self.error_exit()
if not self.secret_key:
print("Secret Key is required, either via --secret-key or in a config file.")
self.error_exit()
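    # A minimal ~/.catoclient.conf, for illustration only (the keys mirror the
    # attribute names checked above; the values here are made up):
    #
    #   {
    #       "url": "http://cato.example.com:8081",
    #       "access_key": "myuser",
    #       "secret_key": "mypassword"
    #   }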
def set_debug(self, debug=False):
if debug:
self.debug = 2
def set_force(self, force=True):
if force:
self.force = True
def process_cli_args(self):
try:
(opts, args) = getopt.gnu_getopt(sys.argv[1:],
self.short_options(),
self.long_options())
except getopt.GetoptError, e:
print(e)
sys.exit(1)
for (name, value) in opts:
if name in ('-H', '--help'):
self.usage()
sys.exit()
elif name == '--dumpdoc':
self.dumpdoc()
sys.exit()
elif name == '--api':
print self.API
sys.exit()
elif name in ('-D', '--debug'):
self.set_debug(True)
elif name in ('-C', '--config'):
self.config_file_name = value
else:
option = self.find_option(name)
if option:
try:
value = option.convert(value)
except:
msg = '%s should be of type %s' % (option.long_name,
option.ptype)
self.display_error_and_exit(msg)
if option.choices:
if value not in option.choices:
msg = '%s value must be one of: %s' % (option.long_name, '|'.join(["%s" % str(x) for x in option.choices]))
self.display_error_and_exit(msg)
if option.cardinality in ('*', '+'):
if not hasattr(self, option.name):
setattr(self, option.name, [])
getattr(self, option.name).append(value)
else:
setattr(self, option.name, value)
self.handle_defaults()
self.check_required_options()
for arg in self.Args:
if not arg.optional and len(args) == 0:
self.usage()
msg = 'Argument (%s) was not provided' % arg.name
self.display_error_and_exit(msg)
if arg.cardinality in ('*', '+'):
setattr(self, arg.name, args)
elif arg.cardinality == 1:
if len(args) == 0 and arg.optional:
continue
                try:
                    value = arg.convert(args[0])
                except:
                    msg = '%s should be of type %s' % (arg.name,
                                                       arg.ptype)
                    self.display_error_and_exit(msg)
                setattr(self, arg.name, value)
if len(args) > 1:
msg = 'Only 1 argument (%s) permitted' % arg.name
self.display_error_and_exit(msg)
# def check_for_conflict(self):
# for option in self.Options:
# if option.short_name == 'a' or option.short_name == 's':
# self.access_key_short_name = '-A'
# self.secret_key_short_name = '-S'
# opt = self.find_option('--access-key')
# opt.short_name = 'A'
# opt = self.find_option('--secret-key')
# opt.short_name = 'S'
def find_option(self, op_name):
for option in self.StandardOptions + self.Options:
if option.synopsis_short_name == op_name or option.synopsis_long_name == op_name:
return option
return None
def short_options(self):
s = ''
for option in self.StandardOptions + self.Options:
if option.short_name:
s += option.getopt_short_name
return s
def long_options(self):
l = []
for option in self.StandardOptions + self.Options:
if option.long_name:
l.append(option.getopt_long_name)
return l
def required(self):
return [ opt for opt in self.StandardOptions + self.Options if not opt.optional ]
def required_args(self):
return [ arg for arg in self.Args if not arg.optional ]
def optional(self):
return [ opt for opt in self.StandardOptions + self.Options if opt.optional ]
def optional_args(self):
return [ arg for arg in self.Args if arg.optional ]
def handle_defaults(self):
for option in self.Options + self.Args:
if not hasattr(self, option.name):
value = option.default
if value is None and option.cardinality in ('+', '*'):
value = []
elif value is None and option.ptype == 'boolean':
value = False
elif value is None and option.ptype == 'integer':
value = 0
setattr(self, option.name, value)
def check_required_options(self):
missing = []
for option in self.required():
if not hasattr(self, option.name) or getattr(self, option.name) is None:
missing.append(option.long_name)
if missing:
msg = 'These required options are missing: %s' % ','.join(missing)
self.display_error_and_exit(msg)
def param_usage(self, plist, label, n=25):
nn = 80 - n - 13
if plist:
print(' %s' % label)
for opt in plist:
names = []
if opt.short_name:
names.append(opt.synopsis_short_name)
if opt.long_name:
names.append(opt.synopsis_long_name)
if not names:
names.append(opt.name)
doc = textwrap.dedent(opt.doc)
doclines = textwrap.wrap(doc, nn)
if opt.choices:
vv = 'Valid Values: %s' % '|'.join(["%s" % str(x) for x in opt.choices])
doclines += textwrap.wrap(vv, nn)
if doclines:
print(' %s%s' % (','.join(names).ljust(n), doclines[0]))
for line in doclines[1:]:
print('%s%s' % (' ' * (n + 13), line))
def option_synopsis(self, options):
s = ''
for option in options:
names = []
if option.short_name:
names.append(option.synopsis_short_name)
if option.long_name:
names.append(option.synopsis_long_name)
if option.optional:
s += '['
s += ', '.join(names)
if option.ptype != 'boolean':
if option.metavar:
n = option.metavar
elif option.name:
n = option.name
else:
n = option.long_name
s += ' <%s> ' % n
if option.optional:
s += ']'
return s
def synopsis(self):
s = '%s ' % self.cmd_name
n = len(s) + 1
t = ''
t += self.option_synopsis(self.required())
t += self.option_synopsis(self.optional())
if self.Args:
t += ' '
arg_names = []
for arg in self.Args:
name = arg.name
if arg.optional:
name = '[ %s ]' % name
arg_names.append(name)
t += ' '.join(arg_names)
lines = textwrap.wrap(t, 80 - n)
print s, lines[0]
for line in lines[1:]:
print '%s%s' % (' ' * n, line)
def usage(self):
print ' %s\n' % self.Description
# self.synopsis()
self.param_usage([ opt for opt in self.Options if not opt.optional ],
'REQUIRED PARAMETERS')
self.param_usage([ opt for opt in self.Options if opt.optional ],
'OPTIONAL PARAMETERS')
self.param_usage([ opt for opt in self.StandardOptions ],
'STANDARD PARAMETERS')
if self.Info:
print self.Info
def dumpdoc(self):
print '## %s' % self.cmd_name
print '{:#%s}' % self.cmd_name
print '\n%s\n' % self.Description
self.param_usage([ opt for opt in self.Options if not opt.optional ],
'REQUIRED PARAMETERS')
self.param_usage([ opt for opt in self.Options if opt.optional ],
'OPTIONAL PARAMETERS')
if self.Info:
print self.Info
if self.Examples:
print "**Examples**"
print self.Examples
def display_error_and_exit(self, exc):
try:
            print '%s: %s (%s)' % (exc.error_code, exc.error_message, exc.error_detail)
except:
print '%s' % exc
finally:
sys.exit(1)
def error_exit(self):
sys.exit(1)
def http_get(self, url, timeout=10):
try:
if not url:
return "URL not provided."
if self.debug:
print "Trying an HTTP GET to %s" % url
# for now, just use the url directly
u = urlparse(url)
if u.scheme.lower() == "https":
conn = httplib.HTTPSConnection(u.netloc, timeout=timeout)
else:
conn = httplib.HTTPConnection(u.netloc, timeout=timeout)
conn.request("GET", u.path + "?" + u.query)
response = conn.getresponse()
result = response.read()
if result:
return result
# except httplib.HTTPException as ex:
# if hasattr(ex, "reason"):
# print "HTTPGet: failed to reach a server."
# return ex.reason
# elif hasattr(ex, "code"):
# print "HTTPGet: The server couldn\'t fulfill the request."
# return ex.__str__()
# if all was well, we won't get here.
return "No results from request."
        except httplib.ssl.SSLError:
            # a friendlier message if it was a protocol error.
            raise Exception("The protocol specified in the API url property is 'https'. Is the API really running in SSL mode?")
        except Exception:
            # re-raise as-is to preserve the original traceback
            raise
def call_api(self, method, parameters):
host = self.url
key = self.access_key
pw = self.secret_key
outfmt = "text"
outdel = ""
noheader = None
# was a different output format specified?
# we limit the values to xml or json.
if hasattr(self, "output_format"):
x = getattr(self, "output_format")
if x:
if x == "xml" or x == "json":
outfmt = x
# are we using a custom delimiter?
if hasattr(self, "output_delimiter"):
x = getattr(self, "output_delimiter")
if x is not None:
outdel = x
# hide the headers in text mode?
if outfmt == "text":
noheader = getattr(self, "noheader", None)
args = {}
for param in parameters:
#if hasattr(self, param):
if getattr(self, param, None):
args[param] = getattr(self, param)
if len(args):
arglst = ["&%s=%s" % (k, urllib.quote_plus(str(v))) for k, v in args.items()]
argstr = "".join(arglst)
else:
argstr = ""
#timestamp
ts = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
ts = ts.replace(":", "%3A")
#string to sign
        string_to_sign = "{0}?key={1}&timestamp={2}".format(method, key, ts)
#encoded signature
sig = base64.b64encode(hmac.new(str(pw), msg=string_to_sign, digestmod=hashlib.sha256).digest())
sig = "&signature=" + urllib.quote_plus(sig)
of = "&output_format=%s" % outfmt
od = "&output_delimiter=%s" % urllib.quote_plus(outdel)
nh = "&header=false" if noheader else ""
url = "%s/%s%s%s%s%s%s" % (host, string_to_sign, sig, argstr, of, od, nh)
response = self.http_get(url)
if self.debug:
print(response)
if response:
if outfmt == "json":
try:
d = json.loads(response)
if d["ErrorCode"]:
code = d["ErrorCode"]
detail = d["ErrorDetail"]
message = d["ErrorMessage"]
msg = "%s, %s, %s" % (code, message, detail)
self.display_error_and_exit(msg)
else:
# JSON is a bit confusing...
# the entire 'payload' is json formatted, so by using json.loads above,
# we've converted THE WHOLE PAYLOAD to a python object
# However, we need to return a JSON *string* of the stuff *inside* the 'Response' property.
return json.dumps(d["Response"], indent=4)
except ValueError:
print("Response JSON could not be parsed.")
return response
except Exception as ex:
raise ex
elif outfmt == "xml":
try:
xRoot = ET.fromstring(response)
if xRoot.findtext("error/code", None):
code = xRoot.findtext("error/code", "")
detail = xRoot.findtext("error/detail", "")
message = xRoot.findtext("error/message", "")
msg = "%s, %s, %s" % (code, message, detail)
self.display_error_and_exit(msg)
else:
# the response might have inner content, or it might have just text
try:
innercontent = list(xRoot.find("response"))[0]
return ET.tostring(innercontent)
except IndexError:
return xRoot.findtext("response", "")
except ValueError:
print("Response XML could not be parsed.")
except Exception as ex:
raise ex
else:
return response
def get_relative_filename(self, filename):
return os.path.split(filename)[-1]
def get_file_path(self, filename):
# relative_filename = self.get_relative_filename(filename)
file_path = os.path.dirname(filename)
if len(file_path) == 0:
file_path = '.'
return file_path
|
|
#!/usr/bin/env python3
import csv
import ctypes
import functools
import os.path
import numpy as np
from scipy.integrate import odeint, ode
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import system
# -------------------------------------------------------------------------
# Controller implementations
class Controller (object):
    ''' Temperature controller base class; takes care of storing the heater
    output for later plotting.
:param setpoint: The desired temperature in degrees centigrade.
'''
def __init__(self, setpoint):
self.setpoint = setpoint
self.power = []
def heater_power(self, t, T):
raise NotImplementedError()
def __call__(self, t, T):
# At T == 140 the second bimetallic thermostat (T2) disconnects
if T >= 140:
return 0
power = self.heater_power(t, T)
self.power.append((t, power))
if t >= 3600:
return 0
return power
class ControllerContext (ctypes.Structure):
_fields_ = [
('setpoint', ctypes.c_float),
('Kp', ctypes.c_float),
('Ki', ctypes.c_float),
('Kd', ctypes.c_float),
('integral', ctypes.c_float),
('previous_error', ctypes.c_float),
]
class RealPidController (Controller):
def __init__(self, setpoint, Kp, Ki, Kd):
super().__init__(setpoint)
self.lib = ctypes.cdll.LoadLibrary('./libcontroller.so')
self.lib.controller_new.restype = ControllerContext
self.lib.controller_power.restype = ctypes.c_float
self._last_t = 0
self._ctx = self.lib.controller_new(
ctypes.c_float(setpoint),
ctypes.c_float(Kp),
ctypes.c_float(Ki),
ctypes.c_float(Kd))
def heater_power(self, t, T):
dt = t - self._last_t
self._last_t = t
ratio = self.lib.controller_power(
ctypes.byref(self._ctx),
ctypes.c_float(dt), ctypes.c_float(T))
return system.heater_power * min(1, max(0, ratio))
class PidController (Controller):
def __init__(self, setpoint, Kp, Ki, Kd):
super().__init__(setpoint)
self.Kp = Kp
self.Ki = Ki
self.Kd = Kd
self._previous_t = 0
self._integral = 0
self._previous_error = 0
def heater_power(self, t, T):
dt = t - self._previous_t
self._previous_t = t
error = self.setpoint - T
self._integral += error * dt
if dt == 0:
derivative = 0
else:
derivative = (error - self._previous_error) / dt
output = self.Kp * error + \
self.Ki * self._integral + \
self.Kd * derivative
self._previous_error = error
return system.heater_power * min(1, max(0, output))
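# For reference, the discrete PID law implemented by PidController above is
#
#   u_k = Kp * e_k + Ki * sum_{i<=k} e_i * dt_i + Kd * (e_k - e_{k-1}) / dt_k
#
# with error e_k = setpoint - T_k; the output is clamped to [0, 1] and scaled
# by the heater's maximum power.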
class ThresholdController (Controller):
def __init__(self, setpoint):
super().__init__(setpoint)
def heater_power(self, t, T):
return system.heater_power * (T < self.setpoint)
# -------------------------------------------------------------------------
# Solver for stiff systems
def odebdf(model, y0, t):
result = np.zeros((len(t), len(y0)))
    rhs = lambda t, y: model(y, t)  # ode() wants f(t, y); model follows odeint's f(y, t)
    r = ode(rhs).set_integrator('zvode', method='bdf')
r.set_initial_value(y0, t[0])
result[0] = y0
step = 1
while r.successful() and (step < len(t)):
r.integrate(t[step])
result[step] = np.real(r.y)
step += 1
return result
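# Usage sketch: odebdf mirrors odeint's calling convention for a right-hand
# side with odeint-style signature f(y, t), e.g.
#   result = odebdf(model, y0, t)   # result[i] approximates y(t[i])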
# -------------------------------------------------------------------------
# Tweakable simulation parameters
t = np.arange(0, 7200, 0.05)
setpoint = 100
Kp = 0.065
Ki = 0
Kd = 0
# Uncomment the controller to use
# controller = RealPidController(setpoint, Kp, Ki, Kd)
controller = PidController(setpoint, Kp, Ki, Kd)
# controller = ThresholdController(setpoint)
# -------------------------------------------------------------------------
# Simulate and plot
# initial conditions.
yW = 20
yM = 20
y0 = [yW, yM]
model = functools.partial(system.model, controller)
result, info_dict = odeint(model, y0, t, full_output=True)
plt.figure()
plt.plot(t, result[:, 1], label='Tm', color=(0, 0.5, 1), linewidth=1)
# plt.plot(t, result[:, 0], label='Tw', color=(1, 0.5, 0), linewidth=1.5)
# _t, _p = zip(*controller.power)
# plt.plot(_t, [_ / system.heater_power * 100 for _ in _p],
# label='Duty Cycle (%)', color=(1, 0, 1), linewidth=1.5)
with open(os.path.expanduser(('~/Dropbox/silvia_pid_upgrade/data/eerste run/'
'teensy-output_Kp=0.065_Ki=0_Kd=0_2014-11-16_14,04.log'))) as fp:
reader = csv.reader(fp, delimiter='\t')
params = dict(_.split('=') for _ in next(reader)[1:])
headers = next(reader)
index_t = headers.index('t')
index_T = headers.index('T')
t_, T_ = zip(*((row[index_t], row[index_T]) for row in reader))
t = [float(_) / 1000 for _ in t_]
    T = [float(_) for _ in T_]
    index = [_ >= 7200 for _ in t].index(True)
    t = t[:index]
    T = T[:index]
plt.plot(t, list(T), label='Tm (Kp={})'.format(params['Kp']),
color=(1, 0, 0), linewidth=1)
plt.xlabel('time')
font_properties = FontProperties()
font_properties.set_size('x-small')
legend = plt.legend(loc=0, prop=font_properties)
plt.setp(legend.get_title(), fontsize='x-small')
plt.savefig("imbabimbaresult_pid.png", dpi=150)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## @package TMS1mmX19Tuner
# Human interface for tuning the Topmetal-S 1mm version chip x19 array
#
from __future__ import print_function
from __future__ import division
import math,sys,time,os,shutil
from datetime import datetime
import array,copy
import ctypes
import socket
import argparse
import json
from command import *
from sigproc import *
import TMS1mmX19Config
from PyDE import *
if sys.version_info[0] < 3:
import Tkinter as tk
else:
import tkinter as tk
import threading
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler # for the default matplotlib key bindings
from matplotlib.figure import Figure
from matplotlib.ticker import FormatStrFormatter
from matplotlib import artist
class CommonData(object):
def __init__(self, cmd=Cmd(), dataSocket=None, ctrlSocket=None, nSamples=16384,
tms1mmReg=TMS1mmX19Config.TMS1mmReg(), sigproc=None):
self.cmd = cmd
self.dataSocket = dataSocket
self.ctrlSocket = ctrlSocket
self.dataFName = ["adc.dat", "sdm.dat"]
# number of chips
self.nCh = 19
self.nAdcCh = 20
self.adcSdmCycRatio = 5
self.nSamples = nSamples
self.nWords = 512//32 * self.nSamples
# signal processor
        if not sigproc:
            self.sigproc = SigProc(self.nSamples, self.nAdcCh, self.nCh, self.adcSdmCycRatio)
        else:
            self.sigproc = sigproc
# adc sampling interval in us
self.adcDt = 0.2
self.adcData = self.sigproc.generate_adcDataBuf() # ((ctypes.c_float * self.nSamples) * self.nAdcCh)()
self.sdmData = self.sigproc.generate_sdmDataBuf() # ((ctypes.c_byte * (self.nSamples*self.adcSdmCycRatio)) * (self.nCh*2))()
# size equals FPGA internal data fifo size
self.sampleBuf = bytearray(4 * self.nWords)
# number of voltages in a sensor to control
self.nVolts = 6
# update time interval (second)
self.tI = 0.5
#
self.x2gain = 2
self.bufferTest = 0
self.sdmMode = 0 # 0 : disabled, 1 : normal operation, 2 : test with signal injection
self.aoutBuf = 0 # 0 : AOUT1, 1 : AOUT2, >1 : disable both
#
self.voltsNames = ['VBIASN', 'VBIASP', 'VCASN', 'VCASP', 'VDIS', 'VREF']
# auto tune
self.atCalled = 0
#self.atBounds = [(1.3, 1.4), (1.5, 1.6), (1.45, 1.6), (1.1, 1.35), (1.1, 1.6), (2.4, 2.5)]
self.atBounds = [(0.8, 2.0), (0.8, 2.0), (0.8, 2.0), (0.8, 2.0), (0.8, 2.0), (2.2, 2.8)]
#self.atBounds = [(1.0, 1.8), (1.0, 1.8), (1.0, 1.8), (1.0, 1.8), (1.0, 1.8)]
self.atTbounds = (3000, 3500) # time of pulse bounds
self.atMeasNavg = 10 # number of measurements for average
self.atMaxIters = 100
self.atBestRet = 0.0
self.atBestVolts = [0.0 for i in range(len(self.voltsNames))]
#
self.cv = threading.Condition() # condition variable
########################################< cv protected <
self.quit = False
self.vUpdated = False
#
self.inputVs = [1.379, 1.546, 1.626, 1.169, 1.357, 2.458]
self.inputVcodes = [tms1mmReg.dac_volt2code(v) for v in self.inputVs]
# measured and returned values, not used but displayed
self.voltsOutput = [0.0 for i in range(self.nVolts)]
self.inputIs = [0.0 for i in range(self.nVolts)]
#
self.currentSensor = 0
self.sensorVcodes = [[v for v in self.inputVcodes] for i in range(self.nCh)]
########################################> cv protected >
self.tms1mmReg = tms1mmReg
class DataPanelGUI(object):
##
# @param [in] dataFigSize (w, h) in inches for the data plots figure assuming dpi=72
def __init__(self, master, cd, dataFigSize=(13, 12.5), visibleChannels=None):
self.master = master
self.cd = cd
self.nAdcCh = self.cd.nAdcCh
self.nSdmCh = self.cd.nCh
self.adcSdmCycRatio = self.cd.adcSdmCycRatio
self.master.wm_title("Topmetal-S 1mm version x19 array data")
# appropriate quitting
self.master.wm_protocol("WM_DELETE_WINDOW", self.quit)
# frame for plotting
self.dataPlotsFrame = tk.Frame(self.master)
self.dataPlotsFrame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.dataPlotsFrame.bind("<Configure>", self.on_resize)
self.dataPlotsFigure = Figure(figsize=dataFigSize, dpi=72)
self.dataPlotsFigure.subplots_adjust(left=0.1, right=0.98, top=0.98, bottom=0.05, hspace=0, wspace=0)
        if visibleChannels is None or len(visibleChannels) == 0:
visibleChannels = [i for i in range(self.nAdcCh-1)]
# x-axis is shared
dataPlotsSubplotN = self.dataPlotsFigure.add_subplot(
len(visibleChannels)+1, 1, len(visibleChannels)+1, xlabel='t [us]', ylabel='[V]')
self.dataPlotsSubplots = {}
for i in range(len(visibleChannels)):
self.dataPlotsSubplots[visibleChannels[i]] = self.dataPlotsFigure.add_subplot(
len(visibleChannels)+1, 1, i+1, sharex=dataPlotsSubplotN)
for i,a in self.dataPlotsSubplots.items():
artist.setp(a.get_xticklabels(), visible=False)
self.dataPlotsSubplots[self.nAdcCh-1] = dataPlotsSubplotN
self.dataPlotsCanvas = FigureCanvasTkAgg(self.dataPlotsFigure, master=self.dataPlotsFrame)
self.dataPlotsCanvas.show()
self.dataPlotsCanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.dataPlotsToolbar = NavigationToolbar2TkAgg(self.dataPlotsCanvas, self.dataPlotsFrame)
self.dataPlotsToolbar.update()
self.dataPlotsCanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.dataPlotsCanvas.mpl_connect('key_press_event', self.on_key_event)
#
self.buttonFrame = tk.Frame(self.master)
self.buttonFrame.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.resampleButton = tk.Button(master=self.buttonFrame, text='Re-sample', command=self.get_and_plot_data)
self.resampleButton.pack(side=tk.LEFT, fill=tk.X, expand=True)
self.refreshButton = tk.Button(master=self.buttonFrame, text='Refresh', command=self.plot_data)
self.refreshButton.pack(side=tk.RIGHT, fill=tk.X)
#
self.plot_data()
def on_key_event(self, event):
print('You pressed {:s}'.format(event.key))
key_press_handler(event, self.dataPlotsCanvas, self.dataPlotsToolbar)
def on_resize(self, event):
# print(event.width, event.height)
return
def quit(self):
with self.cd.cv:
self.cd.quit = True
self.cd.cv.notify()
self.master.quit() # stops mainloop
self.master.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
def get_and_plot_data(self):
# reset data fifo
        self.cd.dataSocket.sendall(self.cd.cmd.send_pulse(1<<2))
time.sleep(0.1)
buf = self.cd.cmd.acquire_from_datafifo(self.cd.dataSocket, self.cd.nWords, self.cd.sampleBuf)
self.cd.sigproc.demux_fifodata(buf, self.cd.adcData, self.cd.sdmData)
# self.demux_fifodata(buf, self.cd.adcData, self.cd.sdmData)
self.plot_data()
# self.save_data(self.cd.dataFName)
self.cd.sigproc.measure_pulse(self.cd.adcData)
self.cd.sigproc.save_data(self.cd.dataFName, self.cd.adcData, self.cd.sdmData)
def plot_data(self):
# self.dataPlotsFigure.clf(keep_observers=True)
for i,a in self.dataPlotsSubplots.items():
a.cla()
for i,a in self.dataPlotsSubplots.items():
if i == self.nAdcCh-1:
a.set_xlabel(u't [us]')
a.set_ylabel('[V]')
continue
artist.setp(a.get_xticklabels(), visible=False)
a.set_ylabel("#{:d}".format(i), rotation=0)
nSamples = len(self.cd.adcData[0])
x = [self.cd.adcDt * i for i in range(nSamples)]
for i,a in self.dataPlotsSubplots.items():
a.locator_params(axis='y', tight=True, nbins=4)
a.yaxis.set_major_formatter(FormatStrFormatter('%7.4f'))
a.set_xlim([0.0, self.cd.adcDt * nSamples])
a.step(x, array.array('f', self.cd.adcData[i]), where='post')
self.dataPlotsCanvas.show()
self.dataPlotsToolbar.update()
return
def demux_fifodata(self, fData, adcData=None, sdmData=None, adcVoffset=1.024, adcLSB=62.5e-6):
wWidth = 512
bytesPerSample = wWidth // 8
        if isinstance(fData[0], str):
            fD = bytearray(fData)
        else:
            fD = fData
        if len(fD) % bytesPerSample != 0:
            return []
        nSamples = len(fD) // bytesPerSample
        if adcData is None:
            adcData = ((ctypes.c_float * nSamples) * self.nAdcCh)()
        if sdmData is None:
            sdmData = ((ctypes.c_byte * (nSamples*self.adcSdmCycRatio)) * (self.nSdmCh*2))()
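        # Layout implied by the indexing below: each 512-bit FIFO word carries
        # the 20 ADC channels as 16-bit big-endian samples packed from the top
        # of the word, with the SDM bit-stream bits packed underneath.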
for i in range(nSamples):
for j in range(self.nAdcCh):
idx0 = bytesPerSample - 1 - j*2
v = ( fD[i * bytesPerSample + idx0 - 1] << 8
| fD[i * bytesPerSample + idx0])
# convert to signed int
v = (v ^ 0x8000) - 0x8000
# convert to actual volts
adcData[j][i] = v * adcLSB + adcVoffset
b0 = self.nAdcCh*2
for j in range(self.adcSdmCycRatio*self.nSdmCh*2):
bi = bytesPerSample - 1 - b0 - int(j / 8)
bs = j % 8
ss = int(j / (self.nSdmCh*2))
ch = j % (self.nSdmCh*2)
sdmData[ch][i*self.adcSdmCycRatio + ss] = (fD[i * bytesPerSample + bi] >> bs) & 0x1
#
return adcData
def save_data(self, fNames):
timeStamp = int(time.time())
with open(fNames[0], 'w') as fp:
fp.write("# TimeStamp: 0x{:016x} 5Msps ADC\n".format(timeStamp))
nSamples = len(self.cd.adcData[0])
for i in range(nSamples):
for j in range(len(self.cd.adcData)):
fp.write(" {:9.6f}".format(self.cd.adcData[j][i]))
fp.write("\n")
with open(fNames[1], 'w') as fp:
fp.write("# TimeStamp: 0x{:016x} 25Msps SDM\n".format(timeStamp))
nSamples = len(self.cd.sdmData[0])
for i in range(nSamples):
for j in range(len(self.cd.sdmData)):
fp.write(" {:1d}".format(self.cd.sdmData[j][i]))
fp.write("\n")
class ControlPanelGUI(object):
def __init__(self, master, cd):
self.master = master
self.cd = cd
self.nVolts = self.cd.nVolts
self.nCh = self.cd.nCh
# appropriate quitting
master.wm_protocol("WM_DELETE_WINDOW", self.quit)
# frame for selecting a sensor to operate on
self.sensorsFrame = tk.Frame(master)
self.sensorsFrame.pack(side=tk.TOP)
# sensor location approximated on a grid (row, col)
self.sensorLocOnGrid = {0 : [4,2], 1 : [2,2], 2 : [3,1], 3 : [5,1], 4 : [6,2], 5 : [5,3],
6 : [3,3], 7 : [0,2], 8 : [1,1], 9 : [2,0], 10 : [4,0], 11 : [6,0],
12 : [7,1], 13 : [8,2], 14: [7,3], 15 : [6,4], 16 : [4,4],
17 : [2,4], 18 : [1,3]}
self.sensorSelVar = tk.IntVar()
self.sensorSelRadioButtons = [tk.Radiobutton(self.sensorsFrame, text="{:d}".format(i),
variable=self.sensorSelVar, value=i,
command=self.select_current_sensor)
for i in range(self.nCh)]
for i in range(len(self.sensorSelRadioButtons)):
b = self.sensorSelRadioButtons[i]
b.grid(row=self.sensorLocOnGrid[i][0], column=self.sensorLocOnGrid[i][1])
# frame for controls
self.voltagesFrame = tk.Frame(master)
self.voltagesFrame.pack(side=tk.TOP)
# GUI widgets
self.voltsNameLabels = [tk.Label(self.voltagesFrame, text=self.cd.voltsNames[i])
for i in range(self.nVolts)]
self.voltsILabels = [tk.Label(self.voltagesFrame, font="Courier 10", text="0.0 A")
for i in range(self.nVolts)]
self.voltsOutputLabels = [tk.Label(self.voltagesFrame, font="Courier 10", text="0.0 V")
for i in range(self.nVolts)]
# update to latest display values
with self.cd.cv:
for i in range(self.nVolts):
self.cd.inputVcodes[i] = self.cd.sensorVcodes[self.cd.currentSensor][i]
self.cd.inputVs[i] = self.cd.tms1mmReg.dac_code2volt(self.cd.inputVcodes[i])
#
self.voltsSetVars = [tk.DoubleVar() for i in range(self.nVolts)]
for i in range(self.nVolts):
self.voltsSetVars[i].set(self.cd.inputVs[i])
self.voltsSetEntries = [tk.Spinbox(self.voltagesFrame, width=8, justify=tk.RIGHT,
textvariable=self.voltsSetVars[i],
from_=0.0, to=3.3, increment=0.001,
format_="%6.4f",
command=self.set_voltage_update)
for i in range(self.nVolts)]
for v in self.voltsSetEntries:
v.bind('<Return>', self.set_voltage_update)
self.voltsSetCodeVars = [tk.IntVar() for i in range(self.nVolts)]
for i in range(self.nVolts):
self.voltsSetCodeVars[i].set(self.cd.inputVcodes[i])
self.voltsSetCodeEntries = [tk.Spinbox(self.voltagesFrame, width=8, justify=tk.RIGHT,
textvariable=self.voltsSetCodeVars[i],
from_=0, to=65535, increment=1,
command=self.set_voltage_dac_code_update)
for i in range(self.nVolts)]
for v in self.voltsSetCodeEntries:
v.bind('<Return>', self.set_voltage_dac_code_update)
# caption
tk.Label(self.voltagesFrame, text="Name", width=15,
fg="white", bg="black").grid(row=0, column=0, sticky=tk.W+tk.E)
tk.Label(self.voltagesFrame, text="Set Voltage [V]", width=20,
fg="white", bg="black").grid(row=0, column=1, sticky=tk.W+tk.E)
tk.Label(self.voltagesFrame, text="Set Volt DAC code", width=20,
fg="white", bg="black").grid(row=0, column=2, sticky=tk.W+tk.E)
tk.Label(self.voltagesFrame, text="Measured Voltage [V]",
fg="white", bg="black").grid(row=0, column=3, sticky=tk.W+tk.E)
# placing widgets
for i in range(self.nVolts):
self.voltsNameLabels[i].grid(row=i+1,column=0)
self.voltsSetEntries[i].grid(row=i+1, column=1)
self.voltsSetCodeEntries[i].grid(row=i+1, column=2)
self.voltsOutputLabels[i].grid(row=i+1, column=3)
# buttons
self.buttonFrame = tk.Frame(self.master)
self.buttonFrame.pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.autoTuneButton = tk.Button(master=self.buttonFrame, text='AutoTune', command=self.auto_tune)
self.autoTuneButton.pack(side=tk.LEFT, fill=tk.X, expand=True)
# self-updating functions
self.update_values_display()
def quit(self):
with self.cd.cv:
self.cd.quit = True
self.cd.cv.notify()
self.master.destroy()
def update_values_display(self):
for i in range(self.nVolts):
self.voltsILabels[i].configure(text="{:7.3f}".format(self.cd.inputIs[i]))
self.voltsOutputLabels[i].configure(text="{:7.3f}".format(self.cd.voltsOutput[i]))
self.master.after(int(1000*self.cd.tI), self.update_values_display)
def select_current_sensor(self, *args):
with self.cd.cv:
self.cd.currentSensor = self.sensorSelVar.get()
# load Vcodes for the specific sensor
for i in range(self.nVolts):
self.voltsSetCodeVars[i].set(self.cd.sensorVcodes[self.cd.currentSensor][i])
self.set_voltage_dac_code_update()
def set_voltage_update(self, *args):
with self.cd.cv:
for i in range(self.nVolts):
self.cd.inputVs[i] = self.voltsSetVars[i].get()
self.cd.inputVcodes[i] = self.cd.tms1mmReg.dac_volt2code(self.cd.inputVs[i])
self.voltsSetCodeVars[i].set(self.cd.inputVcodes[i])
# update info for the array
self.cd.sensorVcodes[self.cd.currentSensor][i] = self.cd.inputVcodes[i]
self.cd.vUpdated = True
print("Set volts: ", self.cd.inputVs)
print("Set volt codes: ", self.cd.inputVcodes)
return True
def set_voltage_dac_code_update(self, *args):
with self.cd.cv:
for i in range(self.nVolts):
self.cd.inputVcodes[i] = self.voltsSetCodeVars[i].get()
self.cd.inputVs[i] = self.cd.tms1mmReg.dac_code2volt(self.cd.inputVcodes[i])
self.voltsSetVars[i].set(round(self.cd.inputVs[i],4))
# update info for the array
self.cd.sensorVcodes[self.cd.currentSensor][i] = self.cd.inputVcodes[i]
self.cd.vUpdated = True
print(self.cd.inputVcodes)
return True
def auto_tune(self, *args):
startTime = datetime.now()
self.cd.atCalled = 0
de = DE(self.auto_tune_fun, self.cd.atBounds, maxiters=self.cd.atMaxIters)
ret = de.solve()
print(ret)
print("AutoTune: best ret: {:}".format(self.cd.atBestRet), self.cd.atBestVolts)
for i in range(min(self.nVolts, len(self.cd.atBestVolts))):
self.voltsSetVars[i].set(self.cd.atBestVolts[i])
self.set_voltage_update()
stopTime = datetime.now()
runTime = stopTime - startTime
print("AutoTune: run time: {:}, fun called: {:d}".format(str(runTime), self.cd.atCalled))
self.auto_tune_fun(self.cd.atBestVolts)
return ret
def auto_tune_fun(self, x):
print("AutoTune: called: {:d}".format(self.cd.atCalled))
self.cd.atCalled += 1
for i in range(min(self.nVolts, len(x))):
self.voltsSetVars[i].set(x[i])
self.set_voltage_update()
time.sleep(2.0)
meas = [[] for i in range(self.cd.sigproc.nParamMax)]
for i in range(self.cd.atMeasNavg):
# reset data fifo
            self.cd.dataSocket.sendall(self.cd.cmd.send_pulse(1<<2))
time.sleep(0.05)
buf = self.cd.cmd.acquire_from_datafifo(self.cd.dataSocket, self.cd.nWords, self.cd.sampleBuf)
self.cd.sigproc.demux_fifodata(buf, self.cd.adcData, self.cd.sdmData)
currMeasP = self.cd.sigproc.measure_pulse(self.cd.adcData)[self.cd.currentSensor]
if currMeasP[2] < self.cd.atTbounds[0] or currMeasP[2] > self.cd.atTbounds[1] or currMeasP[3] < 0:
return 0
for j in range(len(currMeasP)):
meas[j].append(currMeasP[j])
        currMeasP = [sum(m) / len(m) for m in meas]
print("AutoTune: meas after {:d} avgs : {:}".format(self.cd.atMeasNavg, currMeasP))
ret = -currMeasP[3]/currMeasP[1]
print("AutoTune: ret : ", ret, currMeasP[1], currMeasP[3])
if ret < self.cd.atBestRet:
self.cd.atBestRet = ret
for i in range(len(self.cd.atBestVolts)):
self.cd.atBestVolts[i] = self.cd.inputVs[i]
return ret
class SensorConfig(threading.Thread):
# Do not try to access tk.IntVar etc. here. Since after
# master.destroy(), those variables associated with tk seem to be
# destroyed as well and accessing them would result in this thread
# to hang.
def __init__(self, cd, configFName):
threading.Thread.__init__(self)
self.cd = cd
self.s = self.cd.ctrlSocket
self.dac8568 = TMS1mmX19Config.DAC8568(self.cd.cmd)
self.tms1mmReg = cd.tms1mmReg
self.tms1mmX19sensorInChain = {0 : 2, 1 : 2, 2 : 1, 3 : 1, 4 : 2, 5 : 3, 6 : 3, 7 : 2,
8 : 1, 9 : 0, 10 : 0, 11 : 0, 12 : 1, 13 : 2, 14 : 3,
15 : 4, 16 : 4, 17 : 4, 18 : 3}
self.tms1mmX19chainSensors = {0 : [9, 10, 11],
1 : [8, 2, 3, 12],
2 : [7, 1, 0, 4, 13],
3 : [18, 6, 5, 14],
4 : [17, 16, 15]}
self.configFName = configFName
self.read_config_file()
#
self.set_global_defaults()
def run(self):
with self.cd.cv:
while not self.cd.quit:
self.cd.cv.wait(self.cd.tI)
if self.cd.vUpdated:
self.update_sensor(self.cd.currentSensor)
self.write_config_file()
self.cd.vUpdated = False
self.get_inputs()
def set_global_defaults(self):
tms_pwr_on = 1
tms_sdm_clk_src_sel = 0 # 0: FPGA, 1: external
if self.cd.sdmMode:
tms_sdm_clkff_div = 2 # /2**x, 0 disables clock
else:
tms_sdm_clkff_div = 0
adc_clk_src_sel = 0
adc_clkff_div = 0
adc_sdrn_ddr = 0 # 0: sdr, 1: ddr
cmdStr = self.cd.cmd.write_register(0, adc_clkff_div <<12 |
tms_sdm_clkff_div << 8 |
adc_sdrn_ddr << 3 |
adc_clk_src_sel << 2 |
tms_sdm_clk_src_sel << 1 |
tms_pwr_on)
self.s.sendall(cmdStr)
time.sleep(0.001)
# tms sdm idelay
cmdStr = self.cd.cmd.write_register(14, 38<<8 | 1) # clk loopback
cmdStr += self.cd.cmd.send_pulse(1<<4)
cmdStr += self.cd.cmd.write_register(14, 0<<8 | 0)
cmdStr += self.cd.cmd.send_pulse(1<<4)
self.s.sendall(cmdStr)
# adc idelay
cmdStr = self.cd.cmd.write_register(14, 20<<8 | 1) # clk loopback
cmdStr += self.cd.cmd.send_pulse(1<<5)
cmdStr += self.cd.cmd.write_register(14, 19<<8 | 0)
cmdStr += self.cd.cmd.send_pulse(1<<5)
self.s.sendall(cmdStr)
# DAC provided ref voltages
self.s.sendall(self.dac8568.turn_on_2V5_ref())
self.s.sendall(self.dac8568.set_voltage(0, 1.207))
self.s.sendall(self.dac8568.set_voltage(1, 1.024))
self.s.sendall(self.dac8568.set_voltage(2, 1.65))
time.sleep(0.001)
for c,l in self.tms1mmX19chainSensors.items():
self.update_sensor(l[0])
time.sleep(0.001)
def get_config_vector_for_sensor(self, iSensor):
if self.cd.bufferTest:
self.tms1mmReg.set_k(0, 0) # 0 - K1 is open, disconnect CSA output
self.tms1mmReg.set_k(1, 1) # 1 - K2 is closed, allow BufferX2_testIN to inject signal
self.tms1mmReg.set_k(4, 0) # 0 - K5 is open, disconnect SDM loads
self.tms1mmReg.set_k(6, 1) # 1 - K7 is closed, BufferX2 output to AOUT_BufferX2
self.tms1mmReg.set_power_down(3, 0) # Power on AOUT_BufferX2
else:
self.tms1mmReg.set_k(0, 1) # 1 - K1 is closed, connect CSA output to buffer
self.tms1mmReg.set_k(1, 0) # 0 - K2 is open,
self.tms1mmReg.set_k(6, 0) # 0 - K7 is open
self.tms1mmReg.set_power_down(3, 1) # Power down AOUT_BufferX2
if self.cd.x2gain == 2:
self.tms1mmReg.set_k(2, 1) # 1 - K3 is closed, K4 is open, setting gain to X2
self.tms1mmReg.set_k(3, 0)
else:
self.tms1mmReg.set_k(2, 0)
self.tms1mmReg.set_k(3, 1)
if self.cd.sdmMode == 2: # test mode
self.tms1mmReg.set_k(4, 0)
self.tms1mmReg.set_k(5, 1)
elif self.cd.sdmMode == 1: # normal operation
self.tms1mmReg.set_k(0, 1) # 1 - K1 is closed, connect CSA output to buffer
self.tms1mmReg.set_k(4, 1)
self.tms1mmReg.set_k(5, 0)
else: # disabled
self.tms1mmReg.set_k(4, 0)
self.tms1mmReg.set_k(5, 0)
if self.cd.aoutBuf == 0:
self.tms1mmReg.set_power_down(0, 0) # AOUT1_CSA PD
self.tms1mmReg.set_power_down(1, 1) # AOUT2_CSA PD
self.tms1mmReg.set_k(7, 1) # 1 - K8 CSA out to AOUT1_CSA
elif self.cd.aoutBuf == 1:
self.tms1mmReg.set_power_down(0, 1) # AOUT1_CSA PD
self.tms1mmReg.set_power_down(1, 0) # AOUT2_CSA PD
self.tms1mmReg.set_k(8, 1) # 1 - K9 CSA out to AOUT2_CSA that drives 50Ohm
self.tms1mmReg.set_power_down(2, 1)
self.tms1mmReg.set_dac(0, self.cd.sensorVcodes[iSensor][0]) # VBIASN
self.tms1mmReg.set_dac(1, self.cd.sensorVcodes[iSensor][1]) # VBIASP
self.tms1mmReg.set_dac(2, self.cd.sensorVcodes[iSensor][2]) # VCASN
self.tms1mmReg.set_dac(3, self.cd.sensorVcodes[iSensor][3]) # VCASP
self.tms1mmReg.set_dac(4, self.cd.sensorVcodes[iSensor][4]) # VDIS
self.tms1mmReg.set_dac(5, self.cd.sensorVcodes[iSensor][5]) # VREF
#
return self.tms1mmReg.get_config_vector()
def update_sensor(self, iSensor):
colAddr = self.tms1mmX19sensorInChain[iSensor]
sensorsInChain = self.tms1mmX19chainSensors[colAddr]
print("Updating chain {:d} with sensors {:}".format(colAddr, sensorsInChain))
for i in sensorsInChain:
data = self.get_config_vector_for_sensor(i)
print("Send : 0x{:0x}".format(data))
ret = TMS1mmX19Config.tms_sio_rw(self.s, self.cd.cmd, colAddr, data)
print("Return: 0x{:0x}".format(ret) + " equal = {:}".format(data == ret))
# tms reset and load register
self.s.sendall(self.cd.cmd.send_pulse(1<<0))
def get_inputs(self):
return
def read_config_file(self, fName=None):
if fName:
self.configFName = fName
if os.path.isfile(self.configFName):
with open(self.configFName, 'r') as fp:
config = json.load(fp)
for i in range(len(config)):
for j in range(len(self.cd.voltsNames)):
self.cd.sensorVcodes[i][j] = config[repr(i)][self.cd.voltsNames[j]]
else:
return self.cd.sensorVcodes
def write_config_file(self, fName=None):
if fName:
self.configFName = fName
config = {}
for i in range(self.cd.nCh):
config[i] = dict(zip(self.cd.voltsNames, self.cd.sensorVcodes[i]))
with open(self.configFName, 'w') as fp:
fp.write(json.dumps(config, sort_keys=True, indent=4))
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-a", "--aout-buf", type=int, default="1", help="AOUT buffer select, 0:AOUT1, 1:AOUT2, >1:disable both")
parser.add_argument("-c", "--control-ip-port", type=str, default="192.168.2.3:1025", help="control system ipaddr and port")
parser.add_argument("-d", "--data-ip-port", type=str, default="192.168.2.3:1024", help="data source ipaddr and port")
parser.add_argument("-f", "--config-file", type=str, default="config.json", help="configuration file, will be overwritten")
parser.add_argument("-g", "--bufferx2-gain", type=int, default="2", help="BufferX2 gain")
parser.add_argument("-l", "--visible-channels", type=str, default="None", help="List of ADC channels to plot (made visible). None or [] means all channels")
parser.add_argument("-s", "--sdm-mode", type=int, default="0", help="SDM working mode, 0:disabled, 1:normal operation, 2:test with signal injection")
parser.add_argument("-t", "--buffer-test", type=int, default="0", help="Buffer test")
#
args = parser.parse_args()
dataIpPort = args.data_ip_port.split(':')
sD = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sD.connect((dataIpPort[0],int(dataIpPort[1])))
ctrlIpPort = args.control_ip_port.split(':')
sC = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sC.connect((ctrlIpPort[0],int(ctrlIpPort[1])))
cmd = Cmd()
cd = CommonData(cmd, dataSocket=sD, ctrlSocket=sC)
cd.aoutBuf = args.aout_buf
cd.x2gain = args.bufferx2_gain
cd.sdmMode = args.sdm_mode
cd.bufferTest = args.buffer_test
#
sensorConfig = SensorConfig(cd, configFName=args.config_file)
sensorConfig.start()
#
root = tk.Tk()
root.wm_title("Topmetal-S 1mm version x19 array Tuner")
controlPanel = ControlPanelGUI(root, cd)
#
dataPanelMaster = tk.Toplevel(root)
dataPanel = DataPanelGUI(dataPanelMaster, cd, visibleChannels=eval(args.visible_channels))
root.mainloop()
# If you put root.destroy() here, it will cause an error if
# the window is closed with the window manager.
sensorConfig.join()
sC.close()
sD.close()
|
|
from flask import Flask
from passlib.hash import bcrypt
from sixquiprend.config import *
from sixquiprend.models.card import Card
from sixquiprend.models.chosen_card import ChosenCard
from sixquiprend.models.column import Column
from sixquiprend.models.game import Game
from sixquiprend.models.hand import Hand
from sixquiprend.models.heap import Heap
from sixquiprend.models.user import User
from sixquiprend.sixquiprend import app, db
from sixquiprend.utils import *
import json
import random
import unittest
class GamesTurnTestCase(unittest.TestCase):
USERNAME = 'User'
PASSWORD = 'Password'
ADMIN_USERNAME = 'Admin'
ADMIN_PASSWORD = 'Password'
def setUp(self):
app.config['SERVER_NAME'] = 'localhost'
app.config['WTF_CSRF_ENABLED'] = False
app.config['DATABASE_NAME'] = 'sixquiprend_test'
db_path = app.config['DATABASE_USER'] + ':' + app.config['DATABASE_PASSWORD']
db_path += '@' + app.config['DATABASE_HOST'] + '/' + app.config['DATABASE_NAME']
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + db_path
app.config['TESTING'] = True
self.app = app.test_client()
ctx = app.app_context()
ctx.push()
create_db()
db.create_all()
user = User(username=self.USERNAME,
password=bcrypt.hash(self.PASSWORD),
active=True)
admin = User(username=self.ADMIN_USERNAME,
password=bcrypt.hash(self.ADMIN_PASSWORD),
active=True,
urole=User.ROLE_ADMIN)
db.session.add(user)
db.session.add(admin)
db.session.commit()
def tearDown(self):
db.session.remove()
db.drop_all()
def login(self):
rv = self.app.post('/login', data=json.dumps(dict(
username=self.USERNAME,
password=self.PASSWORD,
)), content_type='application/json')
assert rv.status_code == 201
def get_current_user(self):
rv = self.app.get('/users/current')
assert rv.status_code == 200
result = json.loads(rv.data)
if result['user'] != {}:
return User.find(result['user']['id'])
def create_user(self, active=True, urole=User.ROLE_PLAYER):
username = 'User #'+str(User.query.count())
password = 'Password'
user = User(username=username,
password=bcrypt.hash(password),
active=active,
urole=urole)
db.session.add(user)
db.session.commit()
return user
def create_game(self, status=Game.STATUS_CREATED, users=[], owner_id=None):
game = Game(status=status)
for user in users:
game.users.append(user)
game.owner_id = owner_id
db.session.add(game)
db.session.commit()
return game
def create_column(self, game_id, cards=[]):
column = Column(game_id=game_id)
for card in cards:
column.cards.append(card)
db.session.add(column)
db.session.commit()
return column
def create_heap(self, game_id, user_id, cards=[]):
heap = Heap(game_id=game_id, user_id=user_id)
for card in cards:
heap.cards.append(card)
db.session.add(heap)
db.session.commit()
return heap
def create_hand(self, game_id, user_id, cards=[]):
hand = Hand(game_id=game_id, user_id=user_id)
for card in cards:
hand.cards.append(card)
db.session.add(hand)
db.session.commit()
return hand
    def create_card(self, number=None, cow_value=None):
        # Default argument values are evaluated once at definition time, so
        # draw the random numbers inside the call to get fresh values per card.
        if number is None:
            number = random.randint(1, 1000)
        if cow_value is None:
            cow_value = random.randint(1, 1000)
        card = Card(number=number, cow_value=cow_value)
        db.session.add(card)
        db.session.commit()
        return card
def create_chosen_card(self, game_id, user_id, card_id=None):
        if card_id is None:
card = self.create_card()
card_id = card.id
chosen_card = ChosenCard(game_id=game_id, user_id=user_id,
card_id=card_id)
db.session.add(chosen_card)
db.session.commit()
return chosen_card
################################################################################
## Routes
################################################################################
def test_get_game_status(self):
self.login()
user = self.get_current_user()
game = self.create_game(status=Game.STATUS_STARTED, users=[user],
owner_id=user.id)
card = self.create_card()
hand = self.create_hand(game_id=game.id, user_id=user.id, cards=[card])
rv = self.app.get('/games/'+str(game.id)+'/status')
assert rv.status_code == 200
response_game_status = json.loads(rv.data)
assert response_game_status['can_place_card'] == False
assert response_game_status['can_choose_cards_for_bots'] == False
def test_choose_card_for_game(self):
self.login()
user = self.get_current_user()
game = self.create_game(status=Game.STATUS_STARTED, users=[user])
card = self.create_card()
hand = self.create_hand(game_id=game.id, user_id=user.id, cards=[card])
rv = self.app.post('/games/'+str(game.id)+'/card/'+str(card.id))
assert rv.status_code == 201
response_chosen_card = json.loads(rv.data)['chosen_card']
assert response_chosen_card['game_id'] == game.id
assert response_chosen_card['user_id'] == user.id
assert response_chosen_card['card']['id'] == card.id
def test_choose_card_for_bots(self):
self.login()
card = self.create_card(1, 1)
bot = self.create_user(urole=User.ROLE_BOT)
user = self.get_current_user()
game = self.create_game(status=Game.STATUS_STARTED, users=[user,
bot], owner_id = user.id)
card = self.create_card()
bot_hand = self.create_hand(game.id, bot.id, [card])
rv = self.app.post('/games/'+str(game.id)+'/bots/choose_cards')
assert rv.status_code == 201
assert bot_hand.cards == []
def test_place_card(self):
self.login()
user = self.get_current_user()
user2 = self.create_user()
game = self.create_game(status=Game.STATUS_STARTED)
game.users.append(user)
game.users.append(user2)
game.owner_id = user.id
db.session.add(game)
db.session.commit()
card = self.create_card(1, 1)
card2 = self.create_card(2, 2)
card3 = self.create_card(3, 3)
user_hand = self.create_hand(game.id, user.id)
user2_hand = self.create_hand(game.id, user2.id)
user_heap = self.create_heap(game.id, user.id)
user_chosen_card = self.create_chosen_card(game.id, user.id, card2.id)
user2_chosen_card = self.create_chosen_card(game.id, user2.id, card3.id)
column = self.create_column(game.id, [card])
rv = self.app.post('/games/'+str(game.id)+'/cards/place')
assert rv.status_code == 201
response = json.loads(rv.data)
assert response['chosen_column']['id'] == column.id
assert response['user_heap']['user_id'] == user.id
assert len(response['user_heap']['cards']) == 0
def test_choose_column_for_card(self):
self.login()
game = self.create_game(status=Game.STATUS_STARTED)
user = self.get_current_user()
game.users.append(user)
db.session.add(game)
db.session.commit()
card = self.create_card(1, 1)
card2 = self.create_card(2, 2)
user_chosen_card = self.create_chosen_card(game.id, user.id, card.id)
user_heap = self.create_heap(game.id, user.id)
column = self.create_column(game.id, [card2])
rv = self.app.post('/games/'+str(game.id)+'/columns/'+str(column.id)+'/choose')
assert rv.status_code == 201
response = json.loads(rv.data)
assert len(response['chosen_column']['cards']) == 1
assert response['chosen_column']['cards'][0]['id'] == card.id
assert len(response['user_heap']['cards']) == 1
assert response['user_heap']['cards'][0]['id'] == card2.id
if __name__ == '__main__':
unittest.main()
|
|
# ===============================================================================
# Copyright 2016 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
from datetime import datetime, timedelta
from numpy import array, nan, loadtxt, append, zeros
from pandas import DataFrame, notnull, to_numeric, concat
from app.paths import paths
def load_df(path):
"""
:param path: path to csv file
:returns dataframe of etrm time series input
"""
print 'reading in csv: {}'.format(path)
csv = loadtxt(path, dtype=str, delimiter=',')
# extracts should have headers
csv = csv[1:]
    try:
        new_ind = [datetime.strptime(row[0], '%Y-%m-%d') for row in csv]
    except ValueError:
        # same rows, alternate date format; the header row was already dropped above
        new_ind = [datetime.strptime(row[0], '%Y/%m/%d') for row in csv]
arr = array(csv[:, 1:], dtype=float)
cols = ['kcb', 'rg', 'etrs', 'min_temp', 'max_temp', 'temp', 'precip']
df = DataFrame(arr, index=new_ind, columns=cols)
return df
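# For reference, load_df expects rows shaped like (header line first; values
# here are made up, only the column order is taken from the code above):
#   date,kcb,rg,etrs,min_temp,max_temp,temp,precip
#   2000-01-01,0.32,155.0,1.1,-3.0,10.0,3.5,0.0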
def get_etrm_time_series(input_root, dict_):
"""
Read pre-extracted data out of a formatted csv. Use recharge.point_extract_utility.py to do extract.
:param input_root: path to a folder of csv files.
:param dict_: dict of point locations
csv will be in the following format
['kcb', 'rg', 'etrs', 'min_temp', 'max_temp', 'temp', 'precip']
"""
csv_list = [filename for filename in os.listdir(input_root) if filename.endswith('.csv')]
print 'etrm extract csv list: {}'.format(csv_list)
for path in csv_list:
name = os.path.splitext(path)[0]
df = load_df(os.path.join(input_root, path))
if dict_:
for key, val in dict_.iteritems():
if val['Name'] == name:
print 'updating {} number {} with etrm inputs df'.format(name, key)
# print 'your df: \n{}'.format(df)
dict_[key]['etrm'] = df
def amf_obs_time_series(dict_, save_cleaned_data_path=False, complete_days_only=False,
close_threshold=0.20, return_low_err=False):
""" Analyze, clean and return dict of AmeriFlux sites and relevant data.
:param dict_: dict object of Ameriflux IDs as keys, nested dict with 'Name', 'Coords' etc
    :param return_low_err: return dataframe of only low energy balance closure error
:param close_threshold: threshold of error tolerated
:param complete_days_only: return only full days of valid data
:param save_cleaned_data_path: path to save location of output
# (year, dtime, H, LE, FG, RN, RG, RGin, RGout)
# H = sensible heat flux
# LE = latent heat flux
# FG = soil heat flux
# RN = net radiation
# RG = incoming shortwave
# RGout = outgoing shortwave
# RGL = incoming longwave
# RGLout = outgoing longwave
"""
    path = paths.amf_sites  # note: unused; overridden by a hard-coded path inside the loop below
def day_fraction_to_hr_min(fractional_day, year):
"""
:param fractional_day: 100.134
:param year:
:return:
"""
def ext(d, scalar):
d = str(d)
a, b = d.split('.')
aa, bb = int(a), float('.{}'.format(b))
return aa, bb * scalar
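        # e.g. ext('100.75', 24) -> (100, 18.0): day 100 with an 18.0-hour
        # remainder; a second pass with scalar 60 then yields the minutes.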
dec = str(fractional_day)
day, timepart = ext(dec, 24)
hour, minpart = ext(timepart, 60)
minutes = int(minpart)
# day_part_str = '.{}'.format(dec_split[1])
# day, day_part_flt = int(dec_split[0]), float(day_part_str)
# hour_dec = day_part_flt * 24
# hour_split = str(hour_dec).split('.')
# hour, hour_part = int(hour_split[0]), float('.{}'.format(hour_split[1]))
# min_part = str(hour_part * 60).split('.')
#
if minutes == 29:
minutes = 30
elif minutes == 59:
minutes = 0
tup = datetime(year, 1, 1) + timedelta(days=day, hours=hour, minutes=minutes)
return tup
arr_cols = [0, 2, 12, 14, 28, 30, 33, 34, 35]
subset = ['year', 'day', 'H', 'LE', 'RN', 'RG', 'RGout', 'RGL', 'RGLout']
# ncols = len(arr_cols)
columns = ['H', 'LE', 'RN', 'RG', 'RGout', 'RGL', 'RGLout']
    for key, val in dict_.iteritems():
amf_name = val['Name']
        path = os.path.join(r'C:\Users\Mike\PyRANA',
                            'ETRM_inputs_Ameriflux', 'ameriflux_sites')
        folder = os.path.join(path, 'AMF_Data', amf_name)
folder_contents = os.listdir(folder)
print "this is the folder contents: {}".format(folder_contents)
csv_list = [os.path.join(folder, item) for item in folder_contents]
print "this is the new list: {}".format(csv_list)
# amf_data = array([]).reshape(0, ncols)
print 'attempting to fetch headers: {}'.format(subset)
amf_data = None
for item in csv_list:
p = os.path.join(folder, item)
# if i == 0:
# col_check = loadtxt(p, dtype=str, skiprows=0, delimiter=',', usecols=arr_cols)
# print 'headers being read: \n {}'.format(col_check[:1, :])
csv = loadtxt(p, dtype=str, skiprows=3, delimiter=',', usecols=arr_cols)
if amf_data is None:
amf_data = csv
else:
amf_data = append(amf_data, csv, axis=0)
new_ind = [day_fraction_to_hr_min(float(row[1]), int(row[0])) for row in amf_data]
amf_data = amf_data[:, 2:]
df = DataFrame(amf_data, index=new_ind, columns=columns)
print 'You have {} rows of --RAW-- data from {}'.format(df.shape[0], amf_name)
# drop rows with NA values in 'subset'
df[df == '-9999'] = nan
df = df[notnull(df)]
df = df.apply(to_numeric)
df.dropna(axis=0, how='any', inplace=True)
        print 'You have {} rows of FLUX data from {}'.format(df.shape[0], amf_name)
# Find all complete days (48) records with no NULL values,
if complete_days_only:
df = df.groupby(lambda xx: xx.date())
print 'df grouped: {}'.format(df)
df = df.aggregate(lambda xx: sum(xx) if len(xx) > 23 else nan)
df.dropna(axis=0, how='any', inplace=True)
# and convert energy to MJ
for name, series in df.iteritems():
if name in columns:
series *= 0.0864 / 48
# calculate energy balance error
calculated_cols = ['rad_err', 'en_bal_err', 'rad_minus_sens_heat', 'amf_ET']
empty = zeros((df.shape[0], len(calculated_cols)), dtype=float)
new_df = DataFrame(empty, index=df.index, columns=calculated_cols)
df = concat([df, new_df], axis=1, join='outer')
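        # Energy-balance closure per day: residual = RN - (LE + H). For a
        # well-closed site en_bal_err = residual / RN is near zero, while
        # rad_err compares RN against the four-component radiation sum.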
        # iterrows() yields row copies, so per-row assignments would be
        # silently discarded; compute the derived columns vectorised instead.
        rn = df['RN']
        residual = rn - (df['LE'] + df['H'])
        df['rad_err'] = (rn - (df['RG'] - df['RGout'] + df['RGL'] - df['RGLout'])).abs() / rn
        df['en_bal_err'] = residual / rn
        df['rad_minus_sens_heat'] = residual * 0.0864 / 48
        df['amf_ET'] = df['LE'] / 2.45  # convert from MJ/(step * m**2) to mm water
df_low_err = df[df['en_bal_err'] <= close_threshold]
print 'You have {} DAYS of CLEAN RN/LE/H/RAD data from {}'.format(df.shape[0], amf_name)
print 'The mean energy balance closure error is: {}'.format(df['en_bal_err'].mean())
print 'You have {} DAYS of [0.0 < CLOSURE ERROR < {}] data from {}'.format(len(df_low_err), close_threshold,
amf_name)
if save_cleaned_data_path:
p = os.path.join(save_cleaned_data_path, 'ameriflux_sites', 'AMF_ETRM_output', '{}_cleaned_all.csv'.format(amf_name))
df.to_csv(p)
p = os.path.join(save_cleaned_data_path, '{}_cleaned_lowErr.csv'.format(amf_name))
df_low_err.to_csv(p)
        if return_low_err:
            val['AMF_Data'] = df_low_err
        else:
            val['AMF_Data'] = df
return dict_
if __name__ == '__main__':
pass
# ============= EOF =============================================
|
|
"""
byceps.blueprints.admin.site.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
import dataclasses
from typing import Iterable, Iterator
from flask import abort, request
from flask_babel import gettext
from ....permissions.site import SitePermission
from ....services.board import board_service
from ....services.brand import service as brand_service
from ....services.brand.transfer.models import Brand
from ....services.news import channel_service as news_channel_service
from ....services.party import service as party_service
from ....services.shop.shop import service as shop_service
from ....services.shop.storefront import service as storefront_service
from ....services.shop.storefront.transfer.models import (
Storefront,
StorefrontID,
)
from ....services.site import (
service as site_service,
settings_service as site_settings_service,
)
from ....services.site.transfer.models import Site, SiteWithBrand
from ....util.framework.blueprint import create_blueprint
from ....util.framework.flash import flash_error, flash_success
from ....util.framework.templating import templated
from ....util.views import permission_required, redirect_to, respond_no_content
from .forms import AddNewsChannelForm, CreateForm, UpdateForm
blueprint = create_blueprint('site_admin', __name__)
@blueprint.get('/')
@permission_required(SitePermission.view)
@templated
def index():
"""List all sites."""
brands = brand_service.get_all_brands()
brands.sort(key=lambda brand: brand.title)
sites = site_service.get_all_sites()
sites = list(_sites_to_sites_with_brand(sites, brands))
sites.sort(key=lambda site: (site.title, site.party_id))
parties = party_service.get_all_parties()
party_titles_by_id = {p.id: p.title for p in parties}
storefronts_by_id = _get_storefronts_by_id(sites)
return {
'sites': sites,
'brands': brands,
'party_titles_by_id': party_titles_by_id,
'storefronts_by_id': storefronts_by_id,
}
@blueprint.get('/for_brand/<brand_id>')
@permission_required(SitePermission.view)
@templated
def index_for_brand(brand_id):
"""List sites for this brand."""
brand = brand_service.find_brand(brand_id)
if brand is None:
abort(404)
sites = site_service.get_sites_for_brand(brand.id)
sites = [_site_to_site_with_brand(site, brand) for site in sites]
sites.sort(key=lambda site: (site.title, site.party_id))
parties = party_service.get_all_parties()
party_titles_by_id = {p.id: p.title for p in parties}
storefronts_by_id = _get_storefronts_by_id(sites)
return {
'sites': sites,
'brand': brand,
'party_titles_by_id': party_titles_by_id,
'storefronts_by_id': storefronts_by_id,
}
def _sites_to_sites_with_brand(
sites: Iterable[Site], brands: Iterable[Brand]
) -> Iterator[SiteWithBrand]:
brands_by_id = {brand.id: brand for brand in brands}
for site in sites:
brand = brands_by_id[site.brand_id]
yield _site_to_site_with_brand(site, brand)
def _site_to_site_with_brand(site: Site, brand: Brand) -> SiteWithBrand:
site_tuple = dataclasses.astuple(site)
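    # NOTE: dataclasses.astuple() recurses into nested dataclass fields and
    # converts them to tuples too; this flat unpacking assumes Site has no
    # nested dataclasses that must stay intact.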
brand_tuple = (brand,)
return SiteWithBrand(*(site_tuple + brand_tuple))
def _get_storefronts_by_id(sites) -> dict[StorefrontID, Storefront]:
storefront_ids = {
site.storefront_id for site in sites if site.storefront_id is not None
}
storefronts = storefront_service.find_storefronts(storefront_ids)
return {storefront.id: storefront for storefront in storefronts}
@blueprint.get('/sites/<site_id>')
@permission_required(SitePermission.view)
@templated
def view(site_id):
"""Show a site's settings."""
site = site_service.find_site(site_id)
if site is None:
abort(404)
brand = brand_service.find_brand(site.brand_id)
news_channels = news_channel_service.get_channels(site.news_channel_ids)
if site.board_id:
board = board_service.find_board(site.board_id)
else:
board = None
if site.storefront_id:
storefront = storefront_service.get_storefront(site.storefront_id)
shop = shop_service.get_shop(storefront.shop_id)
else:
storefront = None
shop = None
settings = site_settings_service.get_settings(site.id)
return {
'site': site,
'brand': brand,
'news_channels': news_channels,
'board': board,
'shop': shop,
'storefront': storefront,
'settings': settings,
}
@blueprint.get('/sites/create/for_brand/<brand_id>')
@permission_required(SitePermission.create)
@templated
def create_form(brand_id, erroneous_form=None):
"""Show form to create a site."""
brand = _get_brand_or_404(brand_id)
form = erroneous_form if erroneous_form else CreateForm()
_fill_in_common_form_choices(form, brand.id)
return {
'brand': brand,
'form': form,
}
@blueprint.post('/sites/for_brand/<brand_id>')
@permission_required(SitePermission.create)
def create(brand_id):
"""Create a site."""
brand = _get_brand_or_404(brand_id)
form = CreateForm(request.form)
_fill_in_common_form_choices(form, brand.id)
if not form.validate():
return create_form(brand_id, form)
site_id = form.id.data.strip().lower()
title = form.title.data.strip()
server_name = form.server_name.data.strip()
party_id = form.party_id.data
enabled = form.enabled.data
user_account_creation_enabled = form.user_account_creation_enabled.data
login_enabled = form.login_enabled.data
board_id = form.board_id.data.strip() or None
storefront_id = form.storefront_id.data.strip() or None
if party_id:
party = party_service.find_party(party_id)
if not party:
flash_error(
gettext(
'Party ID "%(party_id)s" is unknown.',
party_id=party_id,
)
)
return create_form(brand_id, form)
else:
party_id = None
site = site_service.create_site(
site_id,
title,
server_name,
brand.id,
enabled=enabled,
user_account_creation_enabled=user_account_creation_enabled,
login_enabled=login_enabled,
party_id=party_id,
board_id=board_id,
storefront_id=storefront_id,
)
flash_success(
gettext('Site "%(title)s" has been created.', title=site.title)
)
return redirect_to('.view', site_id=site.id)
@blueprint.get('/sites/<site_id>/update')
@permission_required(SitePermission.update)
@templated
def update_form(site_id, erroneous_form=None):
"""Show form to update the site."""
site = _get_site_or_404(site_id)
form = erroneous_form if erroneous_form else UpdateForm(obj=site)
form.set_brand_choices()
_fill_in_common_form_choices(form, site.brand_id)
return {
'site': site,
'form': form,
}
@blueprint.post('/sites/<site_id>')
@permission_required(SitePermission.update)
def update(site_id):
"""Update the site."""
site = _get_site_or_404(site_id)
form = UpdateForm(request.form)
form.set_brand_choices()
_fill_in_common_form_choices(form, site.brand_id)
if not form.validate():
return update_form(site.id, form)
title = form.title.data.strip()
server_name = form.server_name.data.strip()
brand_id = form.brand_id.data
party_id = form.party_id.data
enabled = form.enabled.data
user_account_creation_enabled = form.user_account_creation_enabled.data
login_enabled = form.login_enabled.data
board_id = form.board_id.data.strip() or None
storefront_id = form.storefront_id.data.strip() or None
archived = form.archived.data
if party_id:
party = party_service.find_party(party_id)
if not party:
flash_error(
gettext(
'Party ID "%(party_id)s" is unknown.',
party_id=party_id,
)
)
return update_form(site.id, form)
else:
party_id = None
try:
site = site_service.update_site(
site.id,
title,
server_name,
brand_id,
party_id,
enabled,
user_account_creation_enabled,
login_enabled,
board_id,
storefront_id,
archived,
)
except site_service.UnknownSiteId:
abort(404, f'Unknown site ID "{site_id}".')
flash_success(
gettext('Site "%(title)s" has been updated.', title=site.title)
)
return redirect_to('.view', site_id=site.id)
def _fill_in_common_form_choices(form, brand_id):
form.set_party_choices(brand_id)
form.set_board_choices(brand_id)
form.set_storefront_choices()
# -------------------------------------------------------------------- #
# news channels
@blueprint.get('/sites/<site_id>/news_channels/add')
@permission_required(SitePermission.update)
@templated
def add_news_channel_form(site_id, erroneous_form=None):
"""Show form to add a news channel to the site."""
site = _get_site_or_404(site_id)
form = erroneous_form if erroneous_form else AddNewsChannelForm()
form.set_news_channel_choices(site.brand_id)
return {
'site': site,
'form': form,
}
@blueprint.post('/sites/<site_id>/news_channels')
@permission_required(SitePermission.update)
def add_news_channel(site_id):
"""Add a news channel to the site."""
site = _get_site_or_404(site_id)
form = AddNewsChannelForm(request.form)
form.set_news_channel_choices(site.brand_id)
if not form.validate():
return add_news_channel_form(site.id, form)
news_channel_id = form.news_channel_id.data
news_channel = news_channel_service.get_channel(news_channel_id)
site_service.add_news_channel(site.id, news_channel.id)
flash_success(
gettext(
'News channel "%(news_channel_id)s" has been added to site "%(site_title)s".',
news_channel_id=news_channel.id,
site_title=site.title,
)
)
return redirect_to('.view', site_id=site.id)
@blueprint.delete('/sites/<site_id>/news_channels/<news_channel_id>')
@permission_required(SitePermission.update)
@respond_no_content
def remove_news_channel(site_id, news_channel_id):
"""Remove the news channel from the site."""
site = _get_site_or_404(site_id)
news_channel = news_channel_service.find_channel(news_channel_id)
if news_channel is None:
abort(404)
site_service.remove_news_channel(site.id, news_channel.id)
flash_success(
gettext(
'News channel "%(news_channel_id)s" has been removed from site "%(site_title)s".',
news_channel_id=news_channel.id,
site_title=site.title,
)
)
# -------------------------------------------------------------------- #
def _get_site_or_404(site_id):
site = site_service.find_site(site_id)
if site is None:
abort(404)
return site
def _get_brand_or_404(brand_id):
brand = brand_service.find_brand(brand_id)
if brand is None:
abort(404)
return brand
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
class CSSChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def RunChecks(self):
    # Small helper predicates and transforms shared by the checks below.
def _collapseable_hex(s):
return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])
def _is_gray(s):
return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]
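    # Illustrative examples: _collapseable_hex('aabbcc') is True (every byte
    # repeats one digit) while _collapseable_hex('aabbc0') is False;
    # _is_gray('333') and _is_gray('4f4f4f') are True (r == g == b), and
    # _is_gray('fa8072') is False.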
def _remove_all(s):
return _remove_grit(_remove_ats(_remove_comments(s)))
def _remove_ats(s):
return re.sub(re.compile(r'@\w+.*?{(.*{.*?})+.*?}', re.DOTALL), '\\1', s)
def _remove_comments(s):
return re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', s)
def _remove_grit(s):
grit_reg = r'<if[^>]+>.*?<\s*/\s*if[^>]*>|<include[^>]+>'
return re.sub(re.compile(grit_reg, re.DOTALL), '', s)
def _rgb_from_hex(s):
if len(s) == 3:
r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
else:
r, g, b = s[0:2], s[2:4], s[4:6]
return int(r, base=16), int(g, base=16), int(b, base=16)
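    # e.g. _rgb_from_hex('fff') -> (255, 255, 255) by doubling each digit,
    # and _rgb_from_hex('4f4f4f') -> (79, 79, 79) from the two-digit pairs.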
def alphabetize_props(contents):
errors = []
for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
semis = map(lambda t: t.strip(), rule.group(1).split(';'))[:-1]
rules = filter(lambda r: ': ' in r, semis)
props = map(lambda r: r[0:r.find(':')], rules)
if props != sorted(props):
errors.append(' %s;\nExpected: %s' % (';\n '.join(rules), ','.join(list(sorted(props)))))
return errors
def braces_have_space_before_and_nothing_after(line):
return re.search(r'(?:^|\S){|{\s*\S+\s*$', line)
def classes_use_dashes(line):
# Intentionally dumbed down version of CSS 2.1 grammar for class without
# non-ASCII, escape chars, or whitespace.
m = re.search(r'\.(-?[_a-zA-Z0-9-]+).*[,{]\s*$', line)
return (m and (m.group(1).lower() != m.group(1) or
m.group(1).find('_') >= 0))
# Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
frame_reg = r'\s*\d+%\s*{\s*[_a-zA-Z0-9-]+:(\s*[_a-zA-Z0-9-]+)+\s*;\s*}\s*'
def close_brace_on_new_line(line):
return (line.find('}') >= 0 and re.search(r'[^ }]', line) and
not re.match(frame_reg, line))
def colons_have_space_after(line):
return re.search(r'(?<!data):(?!//)\S[^;]+;\s*', line)
def favor_single_quotes(line):
return line.find('"') >= 0
# Shared between hex_could_be_shorter and rgb_if_not_gray.
hex_reg = (r'#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})(?=[^_a-zA-Z0-9-]|$)'
r'(?!.*(?:{.*|,\s*)$)')
def hex_could_be_shorter(line):
m = re.search(hex_reg, line)
return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))
small_seconds = r'(?:^|[^_a-zA-Z0-9-])(0?\.[0-9]+)s(?!-?[_a-zA-Z0-9-])'
def milliseconds_for_small_times(line):
return re.search(small_seconds, line)
def no_data_uris_in_source_files(line):
return re.search(r'\(\s*\'?\s*data:', line)
def one_rule_per_line(line):
return re.search(r'[_a-zA-Z0-9-](?<!data):(?!//)[^;]+;\s*[^ }]\s*', line)
any_reg = re.compile(r':(?:-webkit-)?any\(.*?\)', re.DOTALL)
multi_sels = re.compile(r'(?:}[\n\s]*)?([^,]+,(?=[^{}]+?{).*[,{])\s*$',
re.MULTILINE)
def one_selector_per_line(contents):
errors = []
for b in re.finditer(multi_sels, re.sub(any_reg, '', contents)):
errors.append(' ' + b.group(1).strip().splitlines()[-1:][0])
return errors
def rgb_if_not_gray(line):
m = re.search(hex_reg, line)
return (m and not _is_gray(m.group(1)))
def suggest_ms_from_s(line):
ms = int(float(re.search(small_seconds, line).group(1)) * 1000)
return ' (replace with %dms)' % ms
def suggest_rgb_from_hex(line):
suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
for h in re.finditer(hex_reg, line)]
return ' (replace with %s)' % ', '.join(suggestions)
def suggest_short_hex(line):
h = re.search(hex_reg, line).group(1)
return ' (replace with #%s)' % (h[0] + h[2] + h[4])
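    # e.g. a line containing '#aabbcc' yields ' (replace with #abc)'.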
hsl = r'hsl\([^\)]*(?:[, ]|(?<=\())(?:0?\.?)?0%'
zeros = (r'^.*(?:^|\D)'
r'(?:\.0|0(?:\.0?|px|em|%|in|cm|mm|pc|pt|ex|deg|g?rad|m?s|k?hz))'
r'(?:\D|$)(?=[^{}]+?}).*$')
def zero_length_values(contents):
errors = []
for z in re.finditer(re.compile(zeros, re.MULTILINE), contents):
first_line = z.group(0).strip().splitlines()[0]
if not re.search(hsl, first_line):
errors.append(' ' + first_line)
return errors
added_or_modified_files_checks = [
{ 'desc': 'Alphabetize properties and list vendor specific (i.e. '
'-webkit) above standard.',
'test': alphabetize_props,
'multiline': True,
},
{ 'desc': 'Start braces ({) end a selector, have a space before them '
'and no rules after.',
'test': braces_have_space_before_and_nothing_after,
},
{ 'desc': 'Classes use .dash-form.',
'test': classes_use_dashes,
},
{ 'desc': 'Always put a rule closing brace (}) on a new line.',
'test': close_brace_on_new_line,
},
{ 'desc': 'Colons (:) should have a space after them.',
'test': colons_have_space_after,
},
{ 'desc': 'Use single quotes (\') instead of double quotes (") in '
'strings.',
'test': favor_single_quotes,
},
{ 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
'test': hex_could_be_shorter,
'after': suggest_short_hex,
},
{ 'desc': 'Use milliseconds for time measurements under 1 second.',
'test': milliseconds_for_small_times,
'after': suggest_ms_from_s,
},
{ 'desc': 'Don\'t use data URIs in source files. Use grit instead.',
'test': no_data_uris_in_source_files,
},
{ 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
'test': one_rule_per_line,
},
{ 'desc': 'One selector per line (what not to do: a, b {}).',
'test': one_selector_per_line,
'multiline': True,
},
{ 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
'test': rgb_if_not_gray,
'after': suggest_rgb_from_hex,
},
{ 'desc': 'Make all zero length terms (i.e. 0px) 0 unless inside of '
'hsl() or part of @keyframe.',
'test': zero_length_values,
'multiline': True,
},
]
results = []
affected_files = self.input_api.AffectedFiles(include_deletes=False,
file_filter=self.file_filter)
files = []
for f in affected_files:
# Remove all /*comments*/, @at-keywords, and grit <if|include> tags; we're
# not using a real parser. TODO(dbeam): Check alpha in <if> blocks.
file_contents = _remove_all('\n'.join(f.new_contents))
files.append((f.filename, file_contents))
# Only look at CSS files for now.
for f in filter(lambda f: f[0].endswith('.css'), files):
file_errors = []
for check in added_or_modified_files_checks:
        # If the check is multiline, it receives the whole file and gives us
# back a list of things wrong. If the check isn't multiline, we pass it
# each line and the check returns something truthy if there's an issue.
if ('multiline' in check and check['multiline']):
check_errors = check['test'](f[1])
if len(check_errors) > 0:
# There are currently no multiline checks with ['after'].
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors).rstrip()))
else:
check_errors = []
lines = f[1].splitlines()
for lnum in range(0, len(lines)):
line = lines[lnum]
if check['test'](line):
error = ' ' + line.strip()
if 'after' in check:
error += check['after'](line)
check_errors.append(error)
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors)))
if file_errors:
results.append(self.output_api.PresubmitPromptWarning(
'%s:\n%s' % (f[0], '\n\n'.join(file_errors))))
if results:
# Add your name if you're here often mucking around in the code.
authors = ['dbeam@chromium.org']
results.append(self.output_api.PresubmitNotifyResult(
'Was the CSS checker useful? Send feedback or hate mail to %s.' %
', '.join(authors)))
return results
|
|
'''
Virtual Network Simulator POX Application.
Author : Kausik Subramanian
'''
from pox.core import core
from collections import defaultdict
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import pox.openflow.discovery
import pox.openflow.spanning_tree
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.vlan import vlan
from pox.lib.packet.ipv4 import ipv4
from pox.lib.revent import *
from pox.lib.util import dpid_to_str
from pox.lib.util import dpidToStr
from pox.lib.addresses import IPAddr, EthAddr
from collections import namedtuple
import os
import sys
import time
from NetworkMapping import *
from VNSTopology import Topology
from MinSwitchMapper import *
log = core.getLogger()
class NetworkMapper (EventMixin):
def __init__(self):
self.listenTo(core.openflow)
core.openflow_discovery.addListeners(self)
log.debug("Enabling NetworkMapper Module")
# Adjacency map. [sw1][sw2] -> port from sw1 to sw2
self.adjacency = defaultdict(lambda:defaultdict(lambda:None))
self.switchMap = dict()
self.switchConnections = dict()
self.netDatabase = NetworkDatabase()
self.tenantDatabase = TenantDatabase()
# Initialize Physical Topology.
self.phyTopo = Topology("phy", self.netDatabase)
self.virtTopos = []
"""# Timing Code.
rt = Topology("tt0", self.netDatabase, self.tenantDatabase.getTenantID("tt0"))
st = time.time()
mapper = MinSwitchMapper(self.phyTopo, rt, self.netDatabase, self.tenantDatabase)
mapper.findHostMapping()
et = time.time()
print("Virtual Topology mapper Timing " + str(et - st))
st = time.time()
networkMaprt = NetworkMapping(phyTopo = self.phyTopo, virtTopo = rt, netDatabase = self.netDatabase)
networkMaprt.read()
et = time.time()
print("NetworkMapping Timing " + str(et - st))
"""
virtTopo1 = Topology("tenant1", self.netDatabase, self.tenantDatabase.getTenantID("tenant1"))
mapper = MinSwitchMapper(self.phyTopo, virtTopo1, self.netDatabase, self.tenantDatabase)
mapper.findHostMapping()
virtTopo2 = Topology("tenant2", self.netDatabase, self.tenantDatabase.getTenantID("tenant2"))
self.virtTopos.append(virtTopo2)
mapper2 = MinSwitchMapper(self.phyTopo, virtTopo2, self.netDatabase, self.tenantDatabase)
mapper2.findHostMapping()
self.virtTopos.append(virtTopo1)
networkMap1 = NetworkMapping(phyTopo = self.phyTopo, virtTopo = virtTopo1, netDatabase = self.netDatabase)
networkMap1.read()
networkMap2 = NetworkMapping(phyTopo = self.phyTopo, virtTopo = virtTopo2, netDatabase = self.netDatabase)
networkMap2.read()
self.networkRoutes = []
self.networkRoutes.extend(networkMap1.getNetworkRoutes())
self.networkRoutes.extend(networkMap2.getNetworkRoutes())
#Temp
self.routeAdded = False
# Write to the mininet configuration files.
self.phyTopo.writeToFile()
"""This event will be raised each time a switch will connect to the controller"""
def _handle_ConnectionUp(self, event):
# Use dpid to differentiate between switches (datapath-id)
# Each switch has its own flow table. As we'll see in this
# example we need to write different rules in different tables.
dpid = dpidToStr(event.dpid)
switchName = ""
for m in event.connection.features.ports:
name = m.name.split("-")
if switchName == "" :
switchName = name[0]
print switchName
if not switchName == name[0] :
log.debug("Some Error in mapping name from the OpenFlow Switch Up Message.")
self.switchMap[switchName] = dpid
self.switchConnections[switchName] = event.connection
def findSwitchName(self, dpid) :
for name in self.switchMap.iterkeys() :
if self.switchMap[name] == dpid :
return name
def getSwitchMacAddr(self, sw) :
if sw == None :
return None
else :
dpid = self.switchMap[sw]
mac = dpid.replace("-", ":")
return mac
def findOutputPort(self, curr, next, prev = None) :
#curr and next are not adjacent. Find the next switch.
sw = self.findNeighbour(src = curr, dst = next)
if sw == prev :
return of.OFPP_IN_PORT # send back on the input port.
elif not self.adjacency[self.switchMap[curr]][self.switchMap[sw]] == None :
return self.adjacency[self.switchMap[curr]][self.switchMap[sw]]
else :
print "[ERROR] No edge present."
return None
    def getSubnet(self, ip, subnetMask) :
        # Hard-coded demo mapping of known addresses to their subnets;
        # subnetMask is currently ignored.
        if ip == "10.0.0.5" or ip == "10.0.0.0":
return "10.0.0.0"
if ip == "10.1.0.5" or ip == "10.1.0.0":
return "10.1.0.0"
if ip == "10.2.0.5" or ip == "10.2.0.0":
return "10.2.0.0"
def findNeighbour(self, src, dst) :
return self.phyTopo.getNeighbour(src, dst)
def addForwardingRules(self, srcSubnet, dstSubnet, tenantID, route) :
"This function proactively adds the forwarding rules from srcSubnet to dstSubnet. "
"Subnet can be a host address as well."
currRouteTag = 2 # Start with 2.
# First switch
sw = route.getFirstSwitch()
sw_next = route.getNextRouteTagSwitch()
print "Adding VLAN Tag rule for Switch " + sw
self.installRouteTagRule(
connection = self.switchConnections[sw],
srcip = srcSubnet, dstip = dstSubnet,
srcSw = sw, dstSw = sw_next, prevSw=None,
vlanMatch = 0, vlanAction = tenantID,
routeTagMatch = 0, routeTagAction = currRouteTag)
sw = sw_next
sw_prev = route.getPrevSwitch()
while not route.isLastSwitch() :
sw_next = route.getNextRouteTagSwitch()
print "Adding rule for Switch " + sw + " " + sw_next
if route.getCurrentRouteTag() :
self.installRouteTagRule(
connection = self.switchConnections[sw],
srcip = srcSubnet, dstip = dstSubnet,
srcSw = sw, dstSw = sw_next, prevSw=sw_prev,
vlanMatch = tenantID, vlanAction = 0 ,
routeTagMatch = currRouteTag, routeTagAction = (currRouteTag + 1) )
currRouteTag += 1
sw = sw_next
sw_prev = route.getPrevSwitch()
# Last Switch. Strip VLAN
print "Adding rule for Switch " + sw
self.installRouteTagRule(
connection = self.switchConnections[sw],
srcip = srcSubnet, dstip = dstSubnet,
srcSw = sw, dstSw = None, prevSw = None,
vlanMatch = tenantID , vlanAction = -1,
routeTagMatch = currRouteTag, routeTagAction = 0)
# Add the required switch Tunnel rules.
for src in self.switchMap.iterkeys():
for dst in self.switchMap.iterkeys():
if not src == dst :
self.installSwitchTunnelRule(
connection = self.switchConnections[src],
srcSw = src, dstSw = dst)
def getVlanId(self, tenantID, routeTag) :
# A function of tenant ID and routeTag.
# 12 Bit VLAN ID: Most Significant 6 bits : tenantID, Least Significant 6 bits : routeTag
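        # Worked example: tenantID 3, routeTag 5 -> 3 * 64 + 5 = 197
        # (0b000011_000101); the fields cannot collide as long as
        # routeTag stays below 64.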
return (tenantID * 64 + routeTag)
def installRouteTagRule(self, connection, srcip, dstip, srcSw, dstSw, prevSw,
vlanMatch = 0, vlanAction = 0, routeTagMatch = 0, routeTagAction = 0):
msg = of.ofp_flow_mod()
#Match
msg.match = of.ofp_match()
msg.match.dl_type = ethernet.IP_TYPE
msg.match.set_nw_src(IPAddr(srcip, 32), 32)
msg.match.set_nw_dst(IPAddr(dstip, 32), 32)
if not vlanMatch == 0 and not routeTagMatch == 0 :
msg.match.dl_vlan = self.getVlanId(vlanMatch, routeTagMatch)
if vlanAction == -1 :
#Strip Vlan tag.
msg.actions.append(of.ofp_action_strip_vlan())
elif not vlanAction == 0 and not routeTagAction == 0:
# Need to set VLAN Tag for isolation of tenant traffic.
msg.actions.append(of.ofp_action_vlan_vid(vlan_vid = self.getVlanId(vlanAction, routeTagAction)))
elif vlanAction == 0 and not routeTagAction == 0:
msg.actions.append(of.ofp_action_vlan_vid(vlan_vid = self.getVlanId(vlanMatch, routeTagAction)))
if dstSw == None :
outport = of.OFPP_FLOOD
else :
msg.actions.append(of.ofp_action_dl_addr.set_src(EthAddr(self.getSwitchMacAddr(dstSw))))
outport = self.findOutputPort(curr=srcSw, next=dstSw, prev=prevSw)
msg.actions.append(of.ofp_action_output(port = outport))
connection.send(msg)
def installSwitchTunnelRule(self, connection, srcSw, dstSw) :
msg = of.ofp_flow_mod()
#Match
msg.match = of.ofp_match()
msg.match.dl_src = EthAddr(self.getSwitchMacAddr(dstSw))
outport = self.findOutputPort(curr=srcSw, next=dstSw)
if outport == None :
print "NONE is here. Why?"
msg.actions.append(of.ofp_action_output(port = outport))
connection.send(msg)
def reactiveInstallRule(self, event, srcip, dstip, outport, vlan=0):
msg = of.ofp_flow_mod()
#Match
msg.match = of.ofp_match()
msg.match.dl_type = ethernet.IP_TYPE
msg.match.set_nw_src(IPAddr(srcip, 32), 32)
msg.match.set_nw_dst(IPAddr(dstip, 32), 32)
"""
if not vlan == 0 :
# Need to set VLAN Tag for isolation of tenant traffic.
msg.actions.append(of.ofp_action_vlan_vid(vlan_vid = vlan)) """
msg.actions.append(of.ofp_action_output(port = outport))
msg.data = event.ofp
msg.in_port = event.port
event.connection.send(msg)
def _handle_LinkEvent (self, event):
l = event.link
sw1 = dpid_to_str(l.dpid1)
sw2 = dpid_to_str(l.dpid2)
log.debug ("link %s[%d] <-> %s[%d]",
sw1, l.port1,
sw2, l.port2)
self.adjacency[sw1][sw2] = int(l.port1)
self.adjacency[sw2][sw1] = int(l.port2)
def _handle_PacketIn (self, event):
"""
Handle packet in messages from the switch.
"""
packet = event.parsed
def install_fwdrule(event,srcip,dstip,outport,vlan=0):
msg = of.ofp_flow_mod()
#Match
msg.match = of.ofp_match()
msg.match.dl_type = ethernet.IP_TYPE
msg.match.set_nw_src(IPAddr(srcip, 32), 32)
msg.match.set_nw_dst(IPAddr(dstip, 32), 32)
if not vlan == 0 :
# Need to set VLAN Tag for isolation of tenant traffic.
msg.actions.append(of.ofp_action_vlan_vid(vlan_vid = vlan))
msg.actions.append(of.ofp_action_output(port = outport))
msg.data = event.ofp
msg.in_port = event.port
event.connection.send(msg)
def installFloodRule(event,packet,outport,vlan=0):
msg = of.ofp_flow_mod()
#Match
msg.match = of.ofp_match.from_packet(packet, event.port)
msg.actions.append(of.ofp_action_output(port = outport))
if not vlan == 0 :
# Need to set VLAN Tag for isolation of tenant traffic.
msg.actions.append(of.ofp_action_vlan_vid(vlan_vid = vlan))
msg.data = event.ofp
msg.in_port = event.port
event.connection.send(msg)
def handle_IP_packet (packet):
ip = packet.find('ipv4')
if ip is None:
# This packet isn't IP!
print "packet type has no transport ports, flooding"
installFloodRule(event,packet,of.OFPP_FLOOD)
return
else :
print "Source IP:", ip.srcip
vlanPacket = packet.find('ethernet')
routeTag = 0
if vlanPacket.type == ethernet.VLAN_TYPE :
print "Vlan header is there."
print packet.__str__()
if not self.routeAdded :
for route in self.networkRoutes :
self.addForwardingRules(route.getSrcSubnet(), route.getDstSubnet(), route.getTenantID(), route)
#switch is event.dpid
"""
sw = dpidToStr(event.dpid)
swName = self.findSwitchName(sw)
outport = self.findOutputPort(swName, ip.srcip, ip.dstip, 0, routeTag)
install_fwdrule(event, ip.srcip, ip.dstip, outport, 5)
"""
handle_IP_packet(packet)
# flood and install the flow table entry for the flood
def launch():
    # Run discovery so that links are learned; spanning_tree is imported
    # above to handle topologies with loops, but is not launched here.
    pox.openflow.discovery.launch()
    # Start the Topology Slicing module.
    core.registerNew(NetworkMapper)
|
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import inspect
import json
import netaddr
from neutron.db.sqlalchemyutils import paginate_query
from neutron.openstack.common import uuidutils
from oslo.config import cfg
from oslo.utils import timeutils
from oslo_log import log as logging
from sqlalchemy import event
from sqlalchemy import func as sql_func
from sqlalchemy import and_, asc, desc, orm, or_, not_
from sqlalchemy.orm import class_mapper
from quark.db import models
from quark.db import sqlalchemy_adapter as quark_sa
from quark import network_strategy
from quark import protocols
STRATEGY = network_strategy.STRATEGY
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
ONE = "one"
ALL = "all"
# NOTE(jkoelker) init event listener that will ensure id is filled in
# on object creation (prior to commit).
def _perhaps_generate_id(target, args, kwargs):
if hasattr(target, 'id') and target.id is None:
target.id = uuidutils.generate_uuid()
# NOTE(jkoelker) Register the event on all models that have ids
for _name, klass in inspect.getmembers(models, inspect.isclass):
if klass is models.HasId:
continue
if models.HasId in klass.mro():
event.listen(klass, "init", _perhaps_generate_id)
def _listify(filters):
for key in ["name", "network_id", "id", "device_id", "tenant_id",
"subnet_id", "mac_address", "shared", "version", "segment_id",
"device_owner", "ip_address", "used_by_tenant_id", "group_id"]:
if key in filters:
if not filters[key]:
continue
listified = filters[key]
if not isinstance(listified, list):
listified = [listified]
filters[key] = listified
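# e.g. _listify mutates {"name": "net1"} into {"name": ["net1"]} in place,
# while values that are already lists, such as {"id": ["a", "b"]}, pass
# through unchanged.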
def _model_attrs(model):
model_map = class_mapper(model)
model_attrs = [x.key for x in model_map.column_attrs]
if "_cidr" in model_attrs:
model_attrs.append("cidr")
if "_deallocated" in model_attrs:
model_attrs.append("deallocated")
return model_attrs
def _model_query(context, model, filters, fields=None):
filters = filters or {}
model_filters = []
eq_filters = ["address", "cidr", "deallocated", "ip_version",
"mac_address_range_id", "transaction_id"]
in_filters = ["device_id", "device_owner", "group_id", "id", "mac_address",
"name", "network_id", "segment_id", "subnet_id",
"used_by_tenant_id", "version"]
# Sanitize incoming filters to only attributes that exist in the model.
# NOTE: Filters for unusable attributes are silently dropped here.
# NOTE: When the filter key != attribute key, a conditional must be added
# here.
model_attrs = _model_attrs(model)
filters = {x: y for x, y in filters.items()
if x in model_attrs or
(x == "tenant_id" and model == models.IPAddress) or
(x == "ip_address" and model == models.IPAddress) or
(x == "reuse_after" and model in (models.IPAddress,
models.MacAddress))}
# Inject the tenant id if none is set. We don't need unqualified queries.
# This works even when a non-shared, other-tenant owned network is passed
# in because the authZ checks that happen in Neutron above us yank it back
# out of the result set.
if not filters.get("tenant_id") and not context.is_admin:
filters["tenant_id"] = [context.tenant_id]
if model == models.SecurityGroupRule:
sg_rule_attribs = ["direction", "port_range_max", "port_range_min"]
eq_filters.extend(sg_rule_attribs)
for key, value in filters.items():
# This is mostly for unittests, as they're configured to send in None
if value is None:
continue
if key in in_filters:
model_type = getattr(model, key)
model_filters.append(model_type.in_(value))
elif key in eq_filters:
model_type = getattr(model, key)
model_filters.append(model_type == value)
elif key == "_deallocated":
if value:
model_filters.append(model._deallocated == 1)
else:
model_filters.append(model._deallocated != 1)
elif key == "ethertype":
etypes = []
for etype in value:
etypes.append(protocols.translate_ethertype(etype))
model_filters.append(model.ethertype.in_(etypes))
elif key == "ip_address":
model_filters.append(model.address.in_(
[ip.ipv6().value for ip in value]))
elif key == 'protocol':
pnums = []
for version in (protocols.PROTOCOLS_V4, protocols.PROTOCOLS_V6):
pnums.extend([y for x, y in version.items() if x in value])
model_filters.append(model.protocol.in_(pnums))
elif key == "reuse_after":
reuse = (timeutils.utcnow() -
datetime.timedelta(seconds=value))
# NOTE(asadoughi): should this allow for deallocated_at = null?
model_filters.append(model.deallocated_at <= reuse)
elif key == "tenant_id":
if model == models.IPAddress:
model_filters.append(model.used_by_tenant_id.in_(value))
else:
model_filters.append(model.tenant_id.in_(value))
return model_filters
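# Illustrative translation (values hypothetical): a filters dict like
#   {"name": ["net1"], "cidr": "10.0.0.0/24"}
# becomes [Network.name.in_(["net1"]), Network.cidr == "10.0.0.0/24"],
# which callers apply as query.filter(*model_filters).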
def scoped(f):
def wrapped(*args, **kwargs):
scope = None
if "scope" in kwargs:
scope = kwargs.pop("scope")
if scope not in [None, ALL, ONE]:
raise Exception("Invalid scope")
_listify(kwargs)
res = f(*args, **kwargs)
if not res:
return
if "order_by" in kwargs:
res = res.order_by(kwargs["order_by"])
if scope == ALL:
if isinstance(res, list):
return res
return res.all()
elif scope == ONE:
if isinstance(res, list):
return res[0]
return res.first()
return res
return wrapped
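# Minimal usage sketch of @scoped (identifiers are illustrative): every
# finder below accepts an optional scope keyword, e.g.
#   port = port_find(context, id="some-port-id", scope=ONE)    # first match
#   ports = port_find(context, network_id="net-1", scope=ALL)  # full list
# Omitting scope returns the raw query for further chaining.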
@scoped
def port_find(context, limit=None, sorts=None, marker_obj=None, fields=None,
**filters):
query = context.session.query(models.Port).options(
orm.joinedload(models.Port.ip_addresses))
model_filters = _model_query(context, models.Port, filters)
if filters.get("ip_address_id"):
model_filters.append(models.Port.ip_addresses.any(
models.IPAddress.id.in_(filters["ip_address_id"])))
if filters.get("device_id"):
model_filters.append(models.Port.device_id.in_(filters["device_id"]))
if "join_security_groups" in filters:
query = query.options(orm.joinedload(models.Port.security_groups))
if fields and "port_subnets" in fields:
query = query.options(orm.joinedload("ip_addresses.subnet"))
query = query.options(
orm.joinedload("ip_addresses.subnet.dns_nameservers"))
query = query.options(
orm.joinedload("ip_addresses.subnet.routes"))
return paginate_query(query.filter(*model_filters), models.Port, limit,
sorts, marker_obj)
@scoped
def port_find_by_ip_address(context, **filters):
query = context.session.query(models.IPAddress).options(
orm.joinedload(models.IPAddress.ports))
model_filters = _model_query(context, models.IPAddress, filters)
return query.filter(*model_filters)
def port_count_all(context, **filters):
query = context.session.query(sql_func.count(models.Port.id))
model_filters = _model_query(context, models.Port, filters)
return query.filter(*model_filters).scalar()
def port_create(context, **port_dict):
port = models.Port()
port.update(port_dict)
port["tenant_id"] = context.tenant_id
if "addresses" in port_dict:
port["ip_addresses"].extend(port_dict["addresses"])
context.session.add(port)
return port
def port_disassociate_ip(context, ports, address):
assocs_to_remove = [assoc for assoc in address.associations
if assoc.port in ports]
for assoc in assocs_to_remove:
context.session.delete(assoc)
# NOTE(thomasem): Need to update in-session model for caller.
address.associations.remove(assoc)
context.session.add(address)
return address
def port_associate_ip(context, ports, address, enable_port=None):
for port in ports:
assoc = models.PortIpAssociation()
assoc.port_id = port.id
assoc.ip_address_id = address.id
assoc.enabled = port.id in enable_port if enable_port else False
address.associations.append(assoc)
context.session.add(address)
return address
def update_port_associations_for_ip(context, ports, address):
assoc_ports = set(address.ports)
new_ports = set(ports)
new_address = port_associate_ip(context, new_ports - assoc_ports,
address)
return port_disassociate_ip(context,
assoc_ports - new_ports, new_address)
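# Illustrative behavior: if address is currently associated with ports
# {p1, p2} and the caller passes ports=[p2, p3], then p3 is associated
# (new - assoc), p1 is disassociated (assoc - new), and p2 is untouched.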
def port_update(context, port, **kwargs):
if "addresses" in kwargs:
port["ip_addresses"] = kwargs.pop("addresses")
port.update(kwargs)
context.session.add(port)
return port
def port_delete(context, port):
context.session.delete(port)
def ip_address_update(context, address, **kwargs):
address.update(kwargs)
context.session.add(address)
return address
def ip_address_create(context, **address_dict):
ip_address = models.IPAddress()
address = address_dict.pop("address")
ip_address.update(address_dict)
ip_address["address"] = int(address.ipv6())
ip_address["address_readable"] = str(address)
ip_address["used_by_tenant_id"] = context.tenant_id
ip_address["_deallocated"] = 0
ip_address["allocated_at"] = timeutils.utcnow()
context.session.add(ip_address)
return ip_address
@scoped
def ip_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.IPAddress)
if lock_mode:
query = query.with_lockmode("update")
model_filters = _model_query(context, models.IPAddress, filters)
if filters.get("device_id"):
model_filters.append(models.IPAddress.ports.any(
models.Port.device_id.in_(filters["device_id"])))
if filters.get("port_id"):
model_filters.append(models.IPAddress.ports.any(
models.Port.id == filters['port_id']))
if filters.get("address_type"):
model_filters.append(
models.IPAddress.address_type == filters['address_type'])
return query.filter(*model_filters)
def ip_address_count_all(context, filters):
query = context.session.query(sql_func.count(models.IPAddress.id))
model_filters = _model_query(context, models.IPAddress, filters)
return query.filter(*model_filters).scalar()
@scoped
def ip_address_reallocate(context, update_kwargs, **filters):
LOG.debug("ip_address_reallocate %s", filters)
query = context.session.query(models.IPAddress)
model_filters = _model_query(context, models.IPAddress, filters)
query = query.filter(*model_filters)
row_count = quark_sa.update(query, update_kwargs,
update_args={"mysql_limit": 1})
return row_count == 1
def ip_address_reallocate_find(context, transaction_id):
address = ip_address_find(context, transaction_id=transaction_id,
scope=ONE)
if not address:
LOG.warn("Couldn't find IP address with transaction_id %s",
transaction_id)
return
LOG.info("Potentially reallocatable IP found: "
"{0}".format(address["address_readable"]))
subnet = address.get('subnet')
if not subnet:
LOG.debug("No subnet associated with address")
return
if subnet["do_not_use"]:
LOG.debug("Subnet marked as do_not_use")
return
addr = netaddr.IPAddress(int(address["address"]))
if address["subnet"]["ip_version"] == 4:
addr = addr.ipv4()
else:
addr = addr.ipv6()
# TODO(amir): performance test replacing this with SQL in
# ip_address_reallocate's UPDATE statement
policy = models.IPPolicy.get_ip_policy_cidrs(subnet)
if policy is not None and addr in policy:
LOG.info("Deleting Address {0} due to policy "
"violation".format(
address["address_readable"]))
context.session.delete(address)
return
# TODO(amir): performance test replacing this with SQL in
# ip_address_reallocate's UPDATE statement
cidr = netaddr.IPNetwork(address["subnet"]["cidr"])
if addr not in cidr:
LOG.info("Address {0} isn't in the subnet "
"it claims to be in".format(
address["address_readable"]))
context.session.delete(address)
return
return address
@scoped
def mac_address_find(context, lock_mode=False, **filters):
query = context.session.query(models.MacAddress)
if lock_mode:
query = query.with_lockmode("update")
model_filters = _model_query(context, models.MacAddress, filters)
return query.filter(*model_filters)
def mac_address_delete(context, mac_address):
context.session.delete(mac_address)
@scoped
def mac_address_reallocate(context, update_kwargs, **filters):
LOG.debug("mac_address_reallocate %s", filters)
query = context.session.query(models.MacAddress)
model_filters = _model_query(context, models.MacAddress, filters)
query = query.filter(*model_filters)
row_count = quark_sa.update(
query, update_kwargs,
update_args={"mysql_limit": 1})
return row_count == 1
def mac_address_reallocate_find(context, transaction_id):
mac = mac_address_find(context, transaction_id=transaction_id,
scope=ONE)
if not mac:
LOG.warn("Couldn't find MAC address with transaction_id %s",
transaction_id)
return
# NOTE(mdietz): This is a HACK. Please see RM11043 for details
if mac["mac_address_range"] and mac["mac_address_range"]["do_not_use"]:
mac_address_delete(context, mac)
LOG.debug("Found a deallocated MAC in a do_not_use"
" mac_address_range and deleted it. "
"Retrying...")
return
return mac
def mac_address_range_find_allocation_counts(context, address=None,
use_forbidden_mac_range=False):
count = sql_func.count(models.MacAddress.address)
query = context.session.query(models.MacAddressRange,
count.label("count")).with_lockmode("update")
query = query.outerjoin(models.MacAddress)
query = query.group_by(models.MacAddressRange.id)
query = query.order_by(desc(count))
if address:
query = query.filter(models.MacAddressRange.last_address >= address)
query = query.filter(models.MacAddressRange.first_address <= address)
query = query.filter(models.MacAddressRange.next_auto_assign_mac != -1)
if not use_forbidden_mac_range:
query = query.filter(models.MacAddressRange.do_not_use == '0') # noqa
query = query.limit(1)
return query.first()
@scoped
def mac_address_range_find(context, **filters):
query = context.session.query(models.MacAddressRange)
model_filters = _model_query(context, models.MacAddressRange, filters)
return query.filter(*model_filters)
def mac_address_range_create(context, **range_dict):
new_range = models.MacAddressRange()
new_range.update(range_dict)
context.session.add(new_range)
return new_range
def mac_address_range_delete(context, mac_address_range):
context.session.delete(mac_address_range)
def mac_address_range_update(context, mac_range, **kwargs):
mac_range.update(kwargs)
context.session.add(mac_range)
return mac_range
def mac_range_update_next_auto_assign_mac(context, mac_range):
query = context.session.query(models.MacAddressRange)
query = query.filter(models.MacAddressRange.id == mac_range["id"])
query = query.filter(models.MacAddressRange.next_auto_assign_mac != -1)
# For details on synchronize_session, see:
# http://docs.sqlalchemy.org/en/rel_0_8/orm/query.html
query = query.update(
{"next_auto_assign_mac":
models.MacAddressRange.next_auto_assign_mac + 1},
synchronize_session=False)
# Returns a count of the rows matched in the update
return query
def mac_range_update_set_full(context, mac_range):
query = context.session.query(models.MacAddressRange)
query = query.filter_by(id=mac_range["id"])
query = query.filter(models.MacAddressRange.next_auto_assign_mac != -1)
# For details on synchronize_session, see:
# http://docs.sqlalchemy.org/en/rel_0_8/orm/query.html
query = query.update(
{"next_auto_assign_mac": -1},
synchronize_session=False)
# Returns a count of the rows matched in the update
return query
def mac_address_update(context, mac, **kwargs):
mac.update(kwargs)
context.session.add(mac)
return mac
def mac_address_create(context, **mac_dict):
mac_address = models.MacAddress()
mac_address.update(mac_dict)
mac_address["tenant_id"] = context.tenant_id
mac_address["deallocated"] = False
mac_address["deallocated_at"] = None
context.session.add(mac_address)
return mac_address
INVERT_DEFAULTS = 'invert_defaults'
@scoped
def network_find(context, limit=None, sorts=None, marker=None,
page_reverse=False, fields=None, **filters):
ids = []
defaults = []
if "id" in filters:
ids, defaults = STRATEGY.split_network_ids(context, filters["id"])
if ids:
filters["id"] = ids
else:
filters.pop("id")
if "shared" in filters:
defaults = STRATEGY.get_assignable_networks(context)
if True in filters["shared"]:
if ids:
defaults = [net for net in ids if net in defaults]
filters.pop("id")
if not defaults:
return []
else:
defaults.insert(0, INVERT_DEFAULTS)
filters.pop("shared")
return _network_find(context, limit, sorts, marker, page_reverse, fields,
defaults=defaults, **filters)
def _network_find(context, limit, sorts, marker, page_reverse, fields,
defaults=None, **filters):
query = context.session.query(models.Network)
    model_filters = _model_query(context, models.Network, filters)
if defaults:
invert_defaults = False
if INVERT_DEFAULTS in defaults:
invert_defaults = True
defaults.pop(0)
if filters and invert_defaults:
query = query.filter(and_(not_(models.Network.id.in_(defaults)),
and_(*model_filters)))
elif filters and not invert_defaults:
query = query.filter(or_(models.Network.id.in_(defaults),
and_(*model_filters)))
elif not invert_defaults:
query = query.filter(models.Network.id.in_(defaults))
else:
query = query.filter(*model_filters)
if "join_subnets" in filters:
query = query.options(orm.joinedload(models.Network.subnets))
return paginate_query(query, models.Network, limit, sorts, marker)
def network_create(context, **network):
new_net = models.Network()
new_net.update(network)
context.session.add(new_net)
return new_net
def network_update(context, network, **kwargs):
network.update(kwargs)
context.session.add(network)
return network
def network_count_all(context):
query = context.session.query(sql_func.count(models.Network.id))
return query.filter(
models.Network.tenant_id == context.tenant_id).scalar()
def network_delete(context, network):
context.session.delete(network)
def subnet_find_ordered_by_most_full(context, net_id, lock_subnets=True,
**filters):
count = sql_func.count(models.IPAddress.address).label("count")
size = (models.Subnet.last_ip - models.Subnet.first_ip)
query = context.session.query(models.Subnet, count)
if lock_subnets:
query = query.with_lockmode("update")
query = query.filter_by(do_not_use=False)
query = query.outerjoin(models.Subnet.generated_ips)
query = query.group_by(models.Subnet.id)
query = query.order_by(
asc(models.Subnet.ip_version),
asc(size - count))
query = query.filter(models.Subnet.network_id == net_id)
if "ip_version" in filters:
query = query.filter(models.Subnet.ip_version == filters["ip_version"])
if "segment_id" in filters and filters["segment_id"]:
query = query.filter(models.Subnet.segment_id == filters["segment_id"])
query = query.filter(models.Subnet.next_auto_assign_ip != -1)
if "subnet_id" in filters and filters["subnet_id"]:
query = query.filter(models.Subnet.id.in_(filters["subnet_id"]))
return query
def subnet_update_next_auto_assign_ip(context, subnet):
query = context.session.query(models.Subnet)
query = query.filter(models.Subnet.id == subnet["id"])
query = query.filter(models.Subnet.next_auto_assign_ip != -1)
# For details on synchronize_session, see:
# http://docs.sqlalchemy.org/en/rel_0_8/orm/query.html
query = query.update(
{"next_auto_assign_ip":
models.Subnet.next_auto_assign_ip + 1},
synchronize_session=False)
# Returns a count of the rows matched in the update
return query
def subnet_update_set_full(context, subnet):
query = context.session.query(models.Subnet)
query = query.filter_by(id=subnet["id"])
query = query.filter(models.Subnet.next_auto_assign_ip != -1)
# For details on synchronize_session, see:
# http://docs.sqlalchemy.org/en/rel_0_8/orm/query.html
query = query.update(
{"next_auto_assign_ip": -1},
synchronize_session=False)
# Returns a count of the rows matched in the update
return query
def subnet_update_set_alloc_pool_cache(context, subnet, cache_data=None):
if cache_data is not None:
cache_data = json.dumps(cache_data)
subnet["_allocation_pool_cache"] = cache_data
subnet = subnet_update(context, subnet)
LOG.debug("Setting alloc pool cache to %s" % cache_data)
return subnet
@scoped
def subnet_find(context, limit=None, page_reverse=False, sorts=None,
marker_obj=None, **filters):
if "shared" in filters and True in filters["shared"]:
return []
query = context.session.query(models.Subnet)
model_filters = _model_query(context, models.Subnet, filters)
if "join_dns" in filters:
query = query.options(orm.joinedload(models.Subnet.dns_nameservers))
if "join_routes" in filters:
query = query.options(orm.joinedload(models.Subnet.routes))
return paginate_query(query.filter(*model_filters), models.Subnet, limit,
sorts, marker_obj)
def subnet_count_all(context, **filters):
query = context.session.query(sql_func.count(models.Subnet.id))
if filters.get("network_id"):
query = query.filter(
models.Subnet.network_id == filters["network_id"])
query.filter(models.Subnet.tenant_id == context.tenant_id)
return query.scalar()
def subnet_delete(context, subnet):
context.session.delete(subnet)
def subnet_create(context, **subnet_dict):
subnet = models.Subnet()
subnet.update(subnet_dict)
subnet["tenant_id"] = context.tenant_id
context.session.add(subnet)
return subnet
def subnet_update(context, subnet, **kwargs):
subnet.update(kwargs)
context.session.add(subnet)
return subnet
@scoped
def route_find(context, fields=None, **filters):
query = context.session.query(models.Route)
model_filters = _model_query(context, models.Route, filters)
return query.filter(*model_filters)
def route_create(context, **route_dict):
new_route = models.Route()
new_route.update(route_dict)
new_route["tenant_id"] = context.tenant_id
context.session.add(new_route)
return new_route
def route_update(context, route, **kwargs):
route.update(kwargs)
context.session.add(route)
return route
def route_delete(context, route):
context.session.delete(route)
def dns_create(context, **dns_dict):
dns_nameserver = models.DNSNameserver()
ip = dns_dict.pop("ip")
dns_nameserver.update(dns_dict)
dns_nameserver["ip"] = int(ip)
dns_nameserver["tenant_id"] = context.tenant_id
context.session.add(dns_nameserver)
return dns_nameserver
def dns_delete(context, dns):
context.session.delete(dns)
@scoped
def security_group_find(context, **filters):
query = context.session.query(models.SecurityGroup).options(
orm.joinedload(models.SecurityGroup.rules))
model_filters = _model_query(context, models.SecurityGroup, filters)
return query.filter(*model_filters)
@scoped
def security_group_count(context, **filters):
query = context.session.query(sql_func.count(models.SecurityGroup.id))
model_filters = _model_query(context, models.SecurityGroup, filters)
return query.filter(*model_filters).scalar()
@scoped
def ports_with_security_groups_find(context):
query = context.session.query(models.Port)
query = query.join(models.Port.security_groups)
query = query.options(orm.contains_eager(models.Port.security_groups))
return query
@scoped
def ports_with_security_groups_count(context):
query = context.session.query(
sql_func.count(models.port_group_association_table.c.port_id))
return query.scalar()
def security_group_create(context, **sec_group_dict):
new_group = models.SecurityGroup()
new_group.update(sec_group_dict)
new_group["tenant_id"] = context.tenant_id
context.session.add(new_group)
return new_group
def security_group_update(context, group, **kwargs):
group.update(kwargs)
context.session.add(group)
return group
def security_group_delete(context, group):
context.session.delete(group)
@scoped
def security_group_rule_find(context, **filters):
query = context.session.query(models.SecurityGroupRule)
model_filters = _model_query(context, models.SecurityGroupRule, filters)
return query.filter(*model_filters)
def security_group_rule_create(context, **rule_dict):
new_rule = models.SecurityGroupRule()
new_rule.update(rule_dict)
new_rule.group_id = rule_dict['security_group_id']
new_rule.tenant_id = rule_dict['tenant_id']
context.session.add(new_rule)
return new_rule
def security_group_rule_delete(context, rule):
context.session.delete(rule)
def ip_policy_create(context, **ip_policy_dict):
new_policy = models.IPPolicy()
exclude = ip_policy_dict.pop("exclude")
ip_set = netaddr.IPSet()
for excluded_cidr in exclude:
cidr_net = netaddr.IPNetwork(excluded_cidr).ipv6()
new_policy["exclude"].append(
models.IPPolicyCIDR(cidr=excluded_cidr,
first_ip=cidr_net.first,
last_ip=cidr_net.last))
ip_set.add(excluded_cidr)
ip_policy_dict["size"] = ip_set.size
new_policy.update(ip_policy_dict)
new_policy["tenant_id"] = context.tenant_id
context.session.add(new_policy)
return new_policy
@scoped
def ip_policy_find(context, **filters):
query = context.session.query(models.IPPolicy)
model_filters = _model_query(context, models.IPPolicy, filters)
return query.filter(*model_filters)
def ip_policy_update(context, ip_policy, **ip_policy_dict):
exclude = ip_policy_dict.pop("exclude", [])
if exclude:
ip_policy["exclude"] = []
ip_set = netaddr.IPSet()
for excluded_cidr in exclude:
cidr_net = netaddr.IPNetwork(excluded_cidr).ipv6()
ip_policy["exclude"].append(
models.IPPolicyCIDR(cidr=excluded_cidr,
first_ip=cidr_net.first,
last_ip=cidr_net.last))
ip_set.add(excluded_cidr)
ip_policy_dict["size"] = ip_set.size
ip_policy.update(ip_policy_dict)
context.session.add(ip_policy)
return ip_policy
def ip_policy_delete(context, ip_policy):
context.session.delete(ip_policy)
def transaction_create(context):
transaction = models.Transaction()
context.session.add(transaction)
return transaction
@scoped
def floating_ip_find(context, lock_mode=False, limit=None, sorts=None,
marker=None, page_reverse=False, fields=None, **filters):
query = context.session.query(models.IPAddress)
if lock_mode:
query = query.with_lockmode("update")
model_filters = _model_query(context, models.IPAddress, filters)
if filters.get("port_id"):
model_filters.append(models.IPAddress.ports.any(
models.Port.id == filters['port_id']))
if filters.get("address_type"):
model_filters.append(
models.IPAddress.address_type == filters['address_type'])
if filters.get("transaction_id"):
model_filters.append(
models.IPAddress.transaction_id == filters['transaction_id'])
return paginate_query(query.filter(*model_filters), models.IPAddress,
limit, sorts, marker)
def floating_ip_associate_fixed_ip(context, floating_ip, fixed_ip,
enable=True):
assoc = models.FloatingToFixedIPAssociation()
assoc.floating_ip_address_id = floating_ip.id
assoc.fixed_ip_address_id = fixed_ip.id
assoc.enabled = enable
context.session.add(assoc)
floating_ip.fixed_ip = fixed_ip
return floating_ip
|
|
# coding: utf-8
# In[1]:
###########################################################################################################################
# In[2]:
import tensorflow as tf
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from sklearn import metrics
from math import sqrt
import time
import csv
import random
# In[3]:
# assert helper
# o: object
# t_list: list of type
def assertHelper(o, t_list):
boolToBeAsserted = False
stringToBePrinted = ''
for t in t_list:
boolToBeAsserted = boolToBeAsserted or isinstance(o, t)
if (t == int):
stringToBePrinted += '{} is not an integer '.format(o)
elif (t == float):
stringToBePrinted += '{} is not a float '.format(o)
elif (t == str):
stringToBePrinted += '{} is not a string '.format(o)
elif (t == list):
stringToBePrinted += '{} is not a list '.format(o)
elif (t == bool):
stringToBePrinted += '{} is not a boolean '.format(o)
else:
print ('assertHelper for {} is not realized'.format(t))
exit(1)
assert boolToBeAsserted, stringToBePrinted
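# Example usage (illustrative): accept an int or a float, reject a string.
#   assertHelper(3, [int, float])      # passes silently
#   assertHelper('3', [int, float])    # raises AssertionError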
# In[4]:
# get tensorflow trainer
def getTrainer(trainer, arg_dict = {'learning_rate': 0.1,
'epsilon': 0.1,
'rho': 0,
'global_step': 0,
'initial_gradient_squared_accumulator_value': 0.1}):
    # constructors are wrapped in lambdas so that only the requested optimizer
    # is instantiated; building all of them eagerly on every call is wasteful
    # and fragile (e.g. AdagradDAOptimizer expects a real global-step tensor
    # rather than the integer 0 in the default dict)
    return {
        # TODO: specify more
        'gradientDescentOptimizer': lambda: tf.train.GradientDescentOptimizer(learning_rate = arg_dict['learning_rate']),
        'adadeltaOptimizer': lambda: tf.train.AdadeltaOptimizer(epsilon = arg_dict['epsilon'],
                                                                learning_rate = arg_dict['learning_rate'],
                                                                rho = arg_dict['rho']),
        'adagradOptimizer': lambda: tf.train.AdagradOptimizer(learning_rate = arg_dict['learning_rate']),
        'adagradDAOptimizer': lambda: tf.train.AdagradDAOptimizer(global_step = arg_dict['global_step'],
                                                                  learning_rate = arg_dict['learning_rate'],
                                                                  initial_gradient_squared_accumulator_value
                                                                  = arg_dict['initial_gradient_squared_accumulator_value']),
        'momentumOptimizer': lambda: tf.train.MomentumOptimizer,
        'adamOptimizer': lambda: tf.train.AdamOptimizer(learning_rate = arg_dict['learning_rate'],
                                                        epsilon = arg_dict['epsilon']),
        'ftrlOptimizer': lambda: tf.train.FtrlOptimizer,
        'proximalGradientDescentOptimizer': lambda: tf.train.ProximalGradientDescentOptimizer,
        'proximalAdagradOptimizer': lambda: tf.train.ProximalAdagradOptimizer,
        'rmsPropOptimizer': lambda: tf.train.RMSPropOptimizer}.get(trainer, lambda: None)()
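# A minimal usage sketch for getTrainer (hyperparameter values are
# illustrative, not tuned):
#
#     opt = getTrainer('adamOptimizer', {'learning_rate': 0.01,
#                                        'epsilon': 1e-8,
#                                        'rho': 0,
#                                        'global_step': 0,
#                                        'initial_gradient_squared_accumulator_value': 0.1})
#     # opt is a tf.train.AdamOptimizer instance; unknown names yield None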
# In[5]:
# helper function to get activation functions
def getActivationFunc(activation):
return {
'relu': tf.nn.relu,
'relu6': tf.nn.relu6,
'crelu': tf.nn.crelu,
'elu': tf.nn.elu,
'softplus': tf.nn.softplus,
'softsign': tf.nn.softsign,
'dropout': tf.nn.dropout,
'bias_add': tf.nn.bias_add,
'sigmoid': tf.nn.sigmoid,
'tanh': tf.nn.tanh
}.get(activation, None)
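# A minimal usage sketch for getActivationFunc:
#
#     act = getActivationFunc('relu')           # tf.nn.relu
#     assert getActivationFunc('nope') is None  # unknown names yield None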
# In[6]:
##########################################################################################################################
# In[7]:
# define RNN cell
class RNNCell(object):
# RNN cell needs ...
# cell_type: BasicRNN, RNN, BasicLSTM, LSTM, GRU, GRUBlock, etc.
# input_size: size of input vector
# state_size: size of hidden states
# output_size: size of output vector
    # factory function to define rnn cell
    # arg_dict should be compatible with cell type
    @staticmethod
    def makeRNNCell(cell_type,
state_size,
is_training,
state_is_tuple = True,
activation = 'tanh',
keep_in = 1,
keep_states = 1,
keep_out = 1):
arg_dict = {'num_units': state_size,
'state_is_tuple': state_is_tuple,
'activation': activation}
activation_func = getActivationFunc(arg_dict['activation'])
        if (activation_func is None):
print('{} activation is not realized'.format(arg_dict['activation']))
exit(1)
if (cell_type == 'BasicLSTM'):
cell = tf.contrib.rnn.BasicLSTMCell(num_units = arg_dict['num_units'],
activation = activation_func)
elif (cell_type == 'LSTM'):
cell = tf.contrib.rnn.LSTMCell(num_units = arg_dict['num_units'],
state_is_tuple = arg_dict['state_is_tuple'],
activation = activation_func,
reuse = tf.get_variable_scope().reuse)
elif (cell_type == 'GRU'):
cell = tf.contrib.rnn.GRUCell(num_units = arg_dict['num_units'],
activation = activation_func)
else:
print('{} cell type is not realized'.format(cell_type))
exit(1)
        # apply dropout only while training; without the parentheses the
        # original condition also wrapped the cell at evaluation time
        if (is_training and (keep_in < 1 or keep_states < 1 or keep_out < 1)):
cell = tf.contrib.rnn.DropoutWrapper(cell,
input_keep_prob = keep_in,
state_keep_prob = keep_states,
output_keep_prob = keep_out)
return cell
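# A minimal usage sketch for RNNCell.makeRNNCell (sizes and probabilities are
# illustrative):
#
#     cell = RNNCell.makeRNNCell('LSTM', state_size = 200, is_training = True,
#                                keep_out = 0.6)
#     # returns an LSTMCell wrapped in a DropoutWrapper because keep_out < 1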
# In[8]:
# define RNN class
class RNN(object):
# RNN needs ...
# name: name of this RNN graph
# num_features: size of RNN input vector; int
# num_classes: size of RNN output vector; int
# cell_type: RNN cell type; str or str list
# state_size: size of hidden states; int or int list
# num_steps: maximum of number of time steps
# batch_size: size of mini batch
# keep_in: keep inputs probability: float list of length same as state_size
# keep_states: keep states probability: float list of length same as state_size
# keep_out: keep outputs probability: float list of length same as state_size
# activation_str: activation function to be used; str or str list of length same as state_size
# trainer_str: optimizer (trainer) to be used; str
def __init__(self, num_features, num_classes, cell_type, state_size, num_steps, batch_size, is_training,
activation_str = 'tanh', keep_in = 1., keep_states = 1., keep_out = 1., trainer_str = 'adamOptimizer'):
assertHelper(num_features, [int])
assertHelper(num_classes, [int])
assertHelper(cell_type, [str, list])
assertHelper(state_size, [list])
assertHelper(num_steps, [int])
assertHelper(batch_size, [int])
assertHelper(is_training, [bool])
assertHelper(activation_str, [str, list])
assertHelper(keep_in, [float, list])
assertHelper(keep_states, [float, list])
assertHelper(keep_out, [float, list])
assertHelper(trainer_str, [str])
        # additionally assert lengths of list inputs
        for i in [cell_type, activation_str, keep_in, keep_states, keep_out]:
            if (isinstance(i, list)):
                assert len(i) == len(state_size), '{} does not have the same length as state_size'.format(i)
self.num_features = num_features
self.num_classes = num_classes
self.state_size = state_size
self.num_steps = num_steps
self.batch_size = batch_size
if (isinstance(activation_str, str)):
activation_str = [activation_str] * len(state_size)
if (isinstance(cell_type, str)):
cell_type = [cell_type] * len(state_size)
if (isinstance(keep_in, float)):
keep_in = [keep_in] * len(state_size)
if (isinstance(keep_states, float)):
keep_states = [keep_states] * len(state_size)
if (isinstance(keep_out, float)):
keep_out = [keep_out] * len(state_size)
self.trainer = trainer = getTrainer(trainer_str)
        if (trainer is None):
            print('{} trainer is not realized'.format(trainer_str))
exit(1)
# placeholder for inputs
# inputs: batch_size x num_steps x num_features
self.inputs = inputs = tf.placeholder(dtype = tf.float32, shape = [batch_size, num_steps, num_features])
# placeholder for different time steps for each sequence
self.seq_len = seq_len = tf.placeholder(dtype = tf.int32, shape = [batch_size])
# placeholder for target_id and target_correctness
# target_id: batch_size x num_steps
# target_correctness: batch_size
self.target_id = target_id = tf.placeholder(dtype = tf.int32, shape = [batch_size, num_steps])
self.target_correctness = target_correctness = tf.placeholder(dtype = tf.float32, shape = [batch_size, num_steps])
# construct rnn cells
cells = [RNNCell.makeRNNCell(cell_type[layer_index],
state_size[layer_index],
is_training,
activation = activation_str[layer_index],
keep_in = keep_in[layer_index],
keep_states = keep_states[layer_index],
keep_out = keep_out[layer_index]) for layer_index in range(len(state_size))]
self.cells = cells = tf.contrib.rnn.MultiRNNCell(cells = cells,
state_is_tuple = True)
# run dynamic rnn
self.outputs, self.states = outputs, states = tf.nn.dynamic_rnn(cell = cells,
inputs = inputs,
sequence_length = seq_len,
dtype = tf.float32)
# outputs: batch_size x num_steps x state_size[-1]
# split outputs into arrays
# outputs: num_steps x (batch_size x state_size[-1])
outputs = [tf.squeeze(output, axis = 1) for output in tf.split(value = outputs,
num_or_size_splits = num_steps,
axis = 1)]
# weight and bias
# weight: state_size[-1] x num_classes
# bias: num_classes
self.weight = tf.get_variable('weights', [self.state_size[-1], num_classes])
self.bias = tf.get_variable('bias', [num_classes])
# produce logit outputs
# logits: num_steps x (batch_size x num_classes)
print('num_steps: ', num_steps)
print('len(output): ', len(outputs))
print('weight: ', self.weight)
print('outputs[0]: ', outputs[0])
self.logits = [tf.matmul(outputs[i], self.weight) + self.bias for i in range(num_steps)]
# stack logits: num_steps x batch_size x num_classes
self.logits = tf.stack(self.logits)
print('logits: ', self.logits)
# reshape logits: batch_size x num_steps x num_classes
self.logits = tf.reshape(self.logits, [batch_size, num_steps, num_classes])
# reshape logits: (batch_size x num_steps x num_classes)
self.logits = tf.reshape(self.logits, [-1])
# need to do ...
# result = tf.gather(params = self.logits, indices = target_id)
# result[batch_i x num_steps + step_i]
# = logits[batch_i x num_steps x num_classes + step_i x num_classes + class_i]
# where ...
# target_id[batch_i x num_steps + step_i] = batch_i x num_steps x num_classes + step_i x num_classes + class_i
# now target_id[batch_i][step_i] = class_i
# need to transform target_id
# first reshape target_id by flattening it
target_id = tf.reshape(target_id, [-1])
# next ...
# target_id[batch_i x num_steps + step_i] += num_classes x (batch_i x num_steps + step_i)
# which is equivalent to ...
# target_id[current_idx] += num_classes x current_idx
# create a tensor where
# new_tensor[idx] = num_classes x idx
# for idx in range(batch_size x num_steps)
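        # worked example with hypothetical sizes: batch_size = 2, num_steps = 2,
        # num_classes = 3 gives a flattened logits vector of length 12; if
        # target_id = [[2, 0], [1, 2]], the flattened target_id is [2, 0, 1, 2],
        # tensor_to_add is [0, 3, 6, 9], and the gather indices become
        # [2, 3, 7, 11] -- exactly the positions of the targeted class logits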
tensor_to_add = tf.reshape([num_classes * idx for idx in range(batch_size * num_steps)], [-1])
# next add the tensor to target_id
target_id = tf.add(target_id, tensor_to_add)
# Then perform tf.gather and select the logits
# now logits is of shape [batch_size x num_steps]
self.logits = tf.gather(params = self.logits, indices = target_id)
print('logits set: ', self.logits)
# reshape logits to batch_size x num_steps
self.logits = tf.reshape(self.logits, [batch_size, num_steps])
print('logits reshaped')
print('logits: ', self.logits)
# transpose logits
self.logits = tf.transpose(self.logits)
print('logits transposed')
print('logits: ', self.logits)
# split logits
self.logits = [tf.squeeze(logit, axis = 0)
for logit in tf.split(self.logits, num_or_size_splits = num_steps, axis = 0)]
print('logits split')
print('len(logits): ', len(self.logits))
print('logits[0]: ', self.logits[0])
# produce prediction outputs
# pred: num_steps x (batch_size)
self.pred = [tf.nn.sigmoid(self.logits[i]) for i in range(num_steps)]
print('len(prediction): ', len(self.pred))
print('pred[0]: ', self.pred[0])
# stack
# pred: [num_steps, batch_size]
self.pred = tf.stack(self.pred)
# split into array again
# pred: batch_size x [num_steps]
self.pred = [tf.squeeze(pred, axis = 1)
for pred in tf.split(self.pred, num_or_size_splits = batch_size, axis = 1)]
# slice pred according to seq_len
# pred: batch_size x [1, seq_len[batch_i]]
self.pred = [tf.expand_dims(tf.slice(self.pred[i], begin = [0], size = [seq_len[i]]), 0)
for i in range(batch_size)]
print('self.pred[0] before concat and squeezing')
print('pred[0]: ', self.pred[0])
# concat
# pred: [sum_i(seq_len[i])]
self.pred = tf.squeeze(tf.concat(self.pred, axis = 1), axis = 0)
# split target_correctness
# target_correctness: num_steps x [batch_size]
target_correctness = [tf.squeeze(tc_i, axis = 1)
for tc_i in tf.split(target_correctness, num_or_size_splits = num_steps, axis = 1)]
print('target_correctness split')
print('len(target_correctness): ', len(target_correctness))
print('target_correctness[0]: ', target_correctness[0])
# produce loss functions
self.loss = [tf.nn.sigmoid_cross_entropy_with_logits(
logits = self.logits[i],
labels = target_correctness[i])
for i in range(num_steps)]
print('len(loss): ', len(self.loss))
print('loss[0]: ', self.loss[0])
self.cost = self.loss = tf.reduce_sum(self.loss, [0, 1])
print('cost: ', self.cost)
# optimizer function
self.optimizer = self.trainer.minimize(self.cost)
print('optimizer: ', self.optimizer)
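# A minimal instantiation sketch for RNN, mirroring the call made in
# AssistmentsRun.run() below (the sizes are illustrative):
#
#     model = RNN(num_features = 248, num_classes = 124, cell_type = 'LSTM',
#                 state_size = [200], num_steps = 50, batch_size = 32,
#                 is_training = True, keep_out = 0.6)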
# In[9]:
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
t = tf.concat(t1, 0)
a = [np.zeros(shape = (3, 4)), np.zeros(shape = (3, 4))]
t3 = tf.concat(a, 1)
with tf.Session() as sess:
print(sess.run([t]))
print('hi')
print(t.get_shape())
print(sess.run(t3))
print(t3.get_shape())
# In[10]:
##########################################################################################################################
# In[11]:
class AssistmentsRun(object):
def __init__(self,
version):
assertHelper(version, [str])
self.version = version
def setModel(self, model, is_training):
if (is_training):
self.model = model
else:
self.test_model = model
def setDataPath(self, data_path, is_test):
if (not(is_test)):
self.train_data_path = data_path
else:
self.test_data_path = data_path
def setBatchSize(self, batch_size):
self.batch_size = batch_size
def setResultFilePath(self):
self.result_file_path = 'run_logs_{}'.format(str(time.time()))
def setModelName(self, model_name):
self.model_name = model_name
def runEpoch(self, session, students, is_training, verbose = False):
if (is_training):
            if (getattr(self, 'model', None) is None):
print('model not yet set')
exit(1)
else:
model = self.model
else:
            if (getattr(self, 'test_model', None) is None):
print('test model not yet set')
exit(1)
else:
model = self.test_model
start_time = time.time()
tmpsum = 0
if (self.version == '2009'):
batch_start_i = 0
pred_labels = []
actual_labels = []
while (batch_start_i + model.batch_size < len(students)):
                print('batch_start_i: ', batch_start_i)
input_x = np.zeros((model.batch_size, model.num_steps, model.num_features))
target_id = np.zeros(dtype = np.int32, shape = [model.batch_size, model.num_steps])
target_correctness = np.zeros(dtype = np.int32, shape = [model.batch_size, model.num_steps])
seq_len = np.empty(dtype = np.int32, shape = [model.batch_size])
for student_i in range(model.batch_size):
student = students[batch_start_i + student_i]
seq_len[student_i] = problem_num = int(student[0][0]) - 1
skill_ids = student[1]
correctness = student[2]
for skill_i in range(problem_num):
skill_id = int(skill_ids[skill_i])
is_correct = int(correctness[skill_i])
target_id[student_i, skill_i] = int(skill_ids[skill_i + 1])
target_correctness[student_i, skill_i] = int(correctness[skill_i + 1])
if (is_correct):
input_x[student_i, skill_i, skill_id] = 1
else:
input_x[student_i, skill_i, skill_id + model.num_features // 2] = 1
actual_labels.append(int(correctness[skill_i + 1]))
batch_start_i = batch_start_i + model.batch_size
pred, _ = session.run([model.pred, model.optimizer], feed_dict = {
model.inputs: input_x, model.target_id: target_id,
model.target_correctness: target_correctness, model.seq_len: seq_len
})
# for debugging
tmpsum += np.sum(seq_len)
                # model.pred is already a flat array of length sum(seq_len)
                # (see the concat + squeeze at the end of RNN.__init__), so
                # the stack / transpose / reshape below are effectively
                # no-ops kept for shape safety
                pred = np.stack(pred, axis = 0)
                pred = np.reshape(np.transpose(pred), (-1))
for p in pred:
pred_labels.append(p)
# print pred labels
print('len(actual_labels): ', len(actual_labels))
            print('len(pred_labels): ', len(pred_labels))
print('sum: ', tmpsum)
rmse = sqrt(mean_squared_error(actual_labels, pred_labels))
fpr, tpr, thresholds = metrics.roc_curve(actual_labels, pred_labels, pos_label = 1)
auc = metrics.auc(fpr, tpr)
# calculate r2
r2 = r2_score(actual_labels, pred_labels)
return rmse, auc, r2
def read_data_from_csv_file(self, fileName):
inputs = []
targets = []
rows = []
max_skill_num = 0
max_num_problems = 0
with open(fileName, "r") as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
rows.append(row)
        index = 0
print ("the number of rows is " + str(len(rows)))
tuple_rows = []
#turn list to tuple
while(index < len(rows)-1):
problems_num = int(rows[index][0])
tmp_max_skill = max(map(int, rows[index+1]))
if(tmp_max_skill > max_skill_num):
max_skill_num = tmp_max_skill
if(problems_num <= 2):
index += 3
else:
if problems_num > max_num_problems:
max_num_problems = problems_num
tup = (rows[index], rows[index+1], rows[index+2])
tuple_rows.append(tup)
index += 3
#shuffle the tuple
random.shuffle(tuple_rows)
print ("The number of students is " + str(len(tuple_rows)))
print ("Finish reading data")
return tuple_rows, max_num_problems, max_skill_num+1
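    # Expected CSV layout, inferred from the parsing above: each student
    # occupies three consecutive rows --
    #   row 1: number of problems answered (first cell)
    #   row 2: comma-separated skill ids, one per problem
    #   row 3: comma-separated correctness flags (0/1), one per problem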
def run(self):
train_data_path = self.train_data_path
test_data_path = self.test_data_path
train_students, train_num_steps, train_num_problems = self.read_data_from_csv_file(train_data_path)
test_students, test_num_steps, test_num_problems = self.read_data_from_csv_file(test_data_path)
self.setBatchSize(32)
train_batch_size = test_batch_size = self.batch_size
cell_type = 'LSTM'
self.setModelName('DKT')
self.setResultFilePath()
result_file_path = self.result_file_path
init_scale = 0.05
num_epochs = 150
with tf.Graph().as_default():
session_conf = tf.ConfigProto(allow_soft_placement = True,
log_device_placement = False)
session_conf.gpu_options.allow_growth = True
with tf.Session(config = session_conf) as sess:
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
print('train model being set')
# training model
with tf.variable_scope('model', reuse = None, initializer = initializer):
self.setModel(RNN(train_num_problems * 2, train_num_problems, cell_type, [200], train_num_steps - 1,
train_batch_size, is_training = True, keep_out = 0.6), is_training = True)
m = self.model
print('test model being set')
# testing model
with tf.variable_scope('model', reuse = True, initializer = initializer):
self.setModel(RNN(test_num_problems * 2, test_num_problems, cell_type, [200], test_num_steps - 1,
test_batch_size, is_training = False, keep_out = 0.6), is_training = False)
mtest = self.test_model
print('initialize global variables')
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
for i in range(num_epochs):
                    print('epoch: {} of {}'.format(i + 1, num_epochs))
rmse, auc, r2 = self.runEpoch(sess, train_students, True, verbose = True)
print("Epoch: %d Train Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f \n" % (i + 1, rmse, auc, r2))
if ((i + 1) % 5 == 0):
print('Save variables to disk')
save_path = saver.save(sess, self.model_name)
print('*' * 10)
print('Start to test model ....')
rmse, auc, r2 = self.runEpoch(sess, test_students, False, verbose = False)
print("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % (i+1, rmse, auc, r2))
with open(result_file_path, "a+") as f:
f.write("Epoch: %d Test Metrics:\n rmse: %.3f \t auc: %.3f \t r2: %.3f" % ((i+1)/2, rmse, auc, r2))
f.write("\n")
print("*"*10)
# In[12]:
with tf.device('/gpu:0'):
a_run = AssistmentsRun('2009')
a_run.setDataPath('data/0910_b_train.csv', False)
a_run.setDataPath('data/0910_b_test.csv', True)
a_run.run()
|
|
"""Test Z-Wave climate devices."""
import pytest
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
HVAC_MODES,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.zwave import climate, const
from homeassistant.components.zwave.climate import (
AUX_HEAT_ZWAVE_MODE,
DEFAULT_HVAC_MODES,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
@pytest.fixture
def device(hass, mock_openzwave):
"""Fixture to provide a precreated climate device."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_zxt_120(hass, mock_openzwave):
"""Fixture to provide a precreated climate device."""
node = MockNode(manufacturer_id="5254", product_id="8377")
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
fan_action=MockValue(data=7, node=node),
zxt_120_swing_mode=MockValue(data="test3", data_items=[6, 7, 8], node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_mapping(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. Test state mapping."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data="Heat",
data_items=["Off", "Cool", "Heat", "Full Power", "Auto"],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="heating", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_unknown(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. Test state unknown."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data="Heat",
data_items=["Off", "Cool", "Heat", "heat_cool", "Abcdefg"],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_heat_cool(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. Test state heat only."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
"Heat Eco",
"Cool Eco",
],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_heat_cool_range(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. Target range mode."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT_COOL,
data_items=[
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
],
node=node,
),
setpoint_heating=MockValue(data=1, node=node),
setpoint_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_heat_cool_away(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. Target range mode."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT_COOL,
data_items=[
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
PRESET_AWAY,
],
node=node,
),
setpoint_heating=MockValue(data=2, node=node),
setpoint_cooling=MockValue(data=9, node=node),
setpoint_away_heating=MockValue(data=1, node=node),
setpoint_away_cooling=MockValue(data=10, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_heat_eco(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. heat/heat eco."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT, "heat econ"],
node=node,
),
setpoint_heating=MockValue(data=2, node=node),
setpoint_eco_heating=MockValue(data=1, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_aux_heat(hass, mock_openzwave):
"""Fixture to provide a precreated climate device. aux heat."""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT, "Aux Heat"],
node=node,
),
setpoint_heating=MockValue(data=2, node=node),
setpoint_eco_heating=MockValue(data=1, node=node),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data="test4", node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_single_setpoint(hass, mock_openzwave):
"""Fixture to provide a precreated climate device.
SETPOINT_THERMOSTAT device class.
"""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT, data=1, node=node
),
mode=None,
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
@pytest.fixture
def device_single_setpoint_with_mode(hass, mock_openzwave):
"""Fixture to provide a precreated climate device.
SETPOINT_THERMOSTAT device class with COMMAND_CLASS_THERMOSTAT_MODE command class
"""
node = MockNode()
values = MockEntityValues(
primary=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT, data=1, node=node
),
mode=MockValue(
command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
data=HVAC_MODE_HEAT,
data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT],
node=node,
),
temperature=MockValue(data=5, node=node, units=None),
fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
fan_action=MockValue(data=7, node=node),
)
device = climate.get_device(hass, node=node, values=values, node_config={})
yield device
def test_get_device_detects_none(hass, mock_openzwave):
"""Test get_device returns None."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = climate.get_device(hass, node=node, values=values, node_config={})
assert device is None
def test_get_device_detects_multiple_setpoint_device(device):
"""Test get_device returns a Z-Wave multiple setpoint device."""
assert isinstance(device, climate.ZWaveClimateMultipleSetpoint)
def test_get_device_detects_single_setpoint_device(device_single_setpoint):
"""Test get_device returns a Z-Wave single setpoint device."""
assert isinstance(device_single_setpoint, climate.ZWaveClimateSingleSetpoint)
def test_default_hvac_modes():
"""Test whether all hvac modes are included in default_hvac_modes."""
for hvac_mode in HVAC_MODES:
assert hvac_mode in DEFAULT_HVAC_MODES
def test_supported_features(device):
"""Test supported features flags."""
assert (
device.supported_features
== SUPPORT_FAN_MODE
+ SUPPORT_TARGET_TEMPERATURE
+ SUPPORT_TARGET_TEMPERATURE_RANGE
)
def test_supported_features_temp_range(device_heat_cool_range):
"""Test supported features flags with target temp range."""
device = device_heat_cool_range
assert (
device.supported_features
== SUPPORT_FAN_MODE
+ SUPPORT_TARGET_TEMPERATURE
+ SUPPORT_TARGET_TEMPERATURE_RANGE
)
def test_supported_features_preset_mode(device_mapping):
"""Test supported features flags with swing mode."""
device = device_mapping
assert (
device.supported_features
== SUPPORT_FAN_MODE
+ SUPPORT_TARGET_TEMPERATURE
+ SUPPORT_TARGET_TEMPERATURE_RANGE
+ SUPPORT_PRESET_MODE
)
def test_supported_features_preset_mode_away(device_heat_cool_away):
"""Test supported features flags with swing mode."""
device = device_heat_cool_away
assert (
device.supported_features
== SUPPORT_FAN_MODE
+ SUPPORT_TARGET_TEMPERATURE
+ SUPPORT_TARGET_TEMPERATURE_RANGE
+ SUPPORT_PRESET_MODE
)
def test_supported_features_swing_mode(device_zxt_120):
"""Test supported features flags with swing mode."""
device = device_zxt_120
assert (
device.supported_features
== SUPPORT_FAN_MODE
+ SUPPORT_TARGET_TEMPERATURE
+ SUPPORT_TARGET_TEMPERATURE_RANGE
+ SUPPORT_SWING_MODE
)
def test_supported_features_aux_heat(device_aux_heat):
"""Test supported features flags with aux heat."""
device = device_aux_heat
assert (
device.supported_features
== SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE + SUPPORT_AUX_HEAT
)
def test_supported_features_single_setpoint(device_single_setpoint):
"""Test supported features flags for SETPOINT_THERMOSTAT."""
device = device_single_setpoint
assert device.supported_features == SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE
def test_supported_features_single_setpoint_with_mode(device_single_setpoint_with_mode):
"""Test supported features flags for SETPOINT_THERMOSTAT."""
device = device_single_setpoint_with_mode
assert device.supported_features == SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE
def test_zxt_120_swing_mode(device_zxt_120):
"""Test operation of the zxt 120 swing mode."""
device = device_zxt_120
assert device.swing_modes == [6, 7, 8]
assert device._zxt_120 == 1
# Test set mode
assert device.values.zxt_120_swing_mode.data == "test3"
device.set_swing_mode("test_swing_set")
assert device.values.zxt_120_swing_mode.data == "test_swing_set"
# Test mode changed
value_changed(device.values.zxt_120_swing_mode)
assert device.swing_mode == "test_swing_set"
device.values.zxt_120_swing_mode.data = "test_swing_updated"
value_changed(device.values.zxt_120_swing_mode)
assert device.swing_mode == "test_swing_updated"
def test_temperature_unit(device):
"""Test temperature unit."""
assert device.temperature_unit == TEMP_CELSIUS
device.values.temperature.units = "F"
value_changed(device.values.temperature)
assert device.temperature_unit == TEMP_FAHRENHEIT
device.values.temperature.units = "C"
value_changed(device.values.temperature)
assert device.temperature_unit == TEMP_CELSIUS
def test_data_lists(device):
"""Test data lists from zwave value items."""
assert device.fan_modes == [3, 4, 5]
assert device.hvac_modes == [
HVAC_MODE_OFF,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT_COOL,
]
assert device.preset_modes == []
device.values.primary = None
assert device.preset_modes == []
def test_data_lists_single_setpoint(device_single_setpoint):
"""Test data lists from zwave value items."""
device = device_single_setpoint
assert device.fan_modes == [3, 4, 5]
assert device.hvac_modes == []
assert device.preset_modes == []
def test_data_lists_single_setpoint_with_mode(device_single_setpoint_with_mode):
"""Test data lists from zwave value items."""
device = device_single_setpoint_with_mode
assert device.fan_modes == [3, 4, 5]
assert device.hvac_modes == [HVAC_MODE_OFF, HVAC_MODE_HEAT]
assert device.preset_modes == []
def test_data_lists_mapping(device_mapping):
"""Test data lists from zwave value items."""
device = device_mapping
assert device.hvac_modes == ["off", "cool", "heat", "heat_cool"]
assert device.preset_modes == ["boost", "none"]
device.values.primary = None
assert device.preset_modes == []
def test_target_value_set(device):
"""Test values changed for climate device."""
assert device.values.setpoint_heating.data == 1
assert device.values.setpoint_cooling.data == 10
device.set_temperature()
assert device.values.setpoint_heating.data == 1
assert device.values.setpoint_cooling.data == 10
device.set_temperature(**{ATTR_TEMPERATURE: 2})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 10
device.set_hvac_mode(HVAC_MODE_COOL)
value_changed(device.values.primary)
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 10
device.set_temperature(**{ATTR_TEMPERATURE: 9})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 9
def test_target_value_set_range(device_heat_cool_range):
"""Test values changed for climate device."""
device = device_heat_cool_range
assert device.values.setpoint_heating.data == 1
assert device.values.setpoint_cooling.data == 10
device.set_temperature()
assert device.values.setpoint_heating.data == 1
assert device.values.setpoint_cooling.data == 10
device.set_temperature(**{ATTR_TARGET_TEMP_LOW: 2})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 10
device.set_temperature(**{ATTR_TARGET_TEMP_HIGH: 9})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 9
device.set_temperature(**{ATTR_TARGET_TEMP_LOW: 3, ATTR_TARGET_TEMP_HIGH: 8})
assert device.values.setpoint_heating.data == 3
assert device.values.setpoint_cooling.data == 8
def test_target_value_set_range_away(device_heat_cool_away):
"""Test values changed for climate device."""
device = device_heat_cool_away
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 9
assert device.values.setpoint_away_heating.data == 1
assert device.values.setpoint_away_cooling.data == 10
device.set_preset_mode(PRESET_AWAY)
device.set_temperature(**{ATTR_TARGET_TEMP_LOW: 0, ATTR_TARGET_TEMP_HIGH: 11})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_cooling.data == 9
assert device.values.setpoint_away_heating.data == 0
assert device.values.setpoint_away_cooling.data == 11
def test_target_value_set_eco(device_heat_eco):
"""Test values changed for climate device."""
device = device_heat_eco
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_eco_heating.data == 1
device.set_preset_mode("heat econ")
device.set_temperature(**{ATTR_TEMPERATURE: 0})
assert device.values.setpoint_heating.data == 2
assert device.values.setpoint_eco_heating.data == 0
def test_target_value_set_single_setpoint(device_single_setpoint):
"""Test values changed for climate device."""
device = device_single_setpoint
assert device.values.primary.data == 1
device.set_temperature(**{ATTR_TEMPERATURE: 2})
assert device.values.primary.data == 2
def test_operation_value_set(device):
"""Test values changed for climate device."""
assert device.values.primary.data == HVAC_MODE_HEAT
device.set_hvac_mode(HVAC_MODE_COOL)
assert device.values.primary.data == HVAC_MODE_COOL
device.set_preset_mode(PRESET_ECO)
assert device.values.primary.data == PRESET_ECO
device.set_preset_mode(PRESET_NONE)
assert device.values.primary.data == HVAC_MODE_HEAT_COOL
device.values.primary = None
    device.set_hvac_mode("test_set_fails")
    assert device.values.primary is None
    device.set_preset_mode("test_set_fails")
assert device.values.primary is None
def test_operation_value_set_mapping(device_mapping):
"""Test values changed for climate device. Mapping."""
device = device_mapping
assert device.values.primary.data == "Heat"
device.set_hvac_mode(HVAC_MODE_COOL)
assert device.values.primary.data == "Cool"
device.set_hvac_mode(HVAC_MODE_OFF)
assert device.values.primary.data == "Off"
device.set_preset_mode(PRESET_BOOST)
assert device.values.primary.data == "Full Power"
device.set_preset_mode(PRESET_ECO)
assert device.values.primary.data == "eco"
def test_operation_value_set_unknown(device_unknown):
"""Test values changed for climate device. Unknown."""
device = device_unknown
assert device.values.primary.data == "Heat"
device.set_preset_mode("Abcdefg")
assert device.values.primary.data == "Abcdefg"
device.set_preset_mode(PRESET_NONE)
assert device.values.primary.data == HVAC_MODE_HEAT_COOL
def test_operation_value_set_heat_cool(device_heat_cool):
"""Test values changed for climate device. Heat/Cool only."""
device = device_heat_cool
assert device.values.primary.data == HVAC_MODE_HEAT
device.set_preset_mode("Heat Eco")
assert device.values.primary.data == "Heat Eco"
device.set_preset_mode(PRESET_NONE)
assert device.values.primary.data == HVAC_MODE_HEAT
device.set_preset_mode("Cool Eco")
assert device.values.primary.data == "Cool Eco"
device.set_preset_mode(PRESET_NONE)
assert device.values.primary.data == HVAC_MODE_COOL
def test_fan_mode_value_set(device):
"""Test values changed for climate device."""
assert device.values.fan_mode.data == "test2"
device.set_fan_mode("test_fan_set")
assert device.values.fan_mode.data == "test_fan_set"
device.values.fan_mode = None
    device.set_fan_mode("test_fan_set_fails")
assert device.values.fan_mode is None
def test_target_value_changed(device):
"""Test values changed for climate device."""
assert device.target_temperature == 1
device.values.setpoint_heating.data = 2
value_changed(device.values.setpoint_heating)
assert device.target_temperature == 2
device.values.primary.data = HVAC_MODE_COOL
value_changed(device.values.primary)
assert device.target_temperature == 10
device.values.setpoint_cooling.data = 9
value_changed(device.values.setpoint_cooling)
assert device.target_temperature == 9
def test_target_range_changed(device_heat_cool_range):
"""Test values changed for climate device."""
device = device_heat_cool_range
assert device.target_temperature_low == 1
assert device.target_temperature_high == 10
device.values.setpoint_heating.data = 2
value_changed(device.values.setpoint_heating)
assert device.target_temperature_low == 2
assert device.target_temperature_high == 10
device.values.setpoint_cooling.data = 9
value_changed(device.values.setpoint_cooling)
assert device.target_temperature_low == 2
assert device.target_temperature_high == 9
def test_target_changed_preset_range(device_heat_cool_away):
"""Test values changed for climate device."""
device = device_heat_cool_away
assert device.target_temperature_low == 2
assert device.target_temperature_high == 9
device.values.primary.data = PRESET_AWAY
value_changed(device.values.primary)
assert device.target_temperature_low == 1
assert device.target_temperature_high == 10
device.values.setpoint_away_heating.data = 0
value_changed(device.values.setpoint_away_heating)
device.values.setpoint_away_cooling.data = 11
value_changed(device.values.setpoint_away_cooling)
assert device.target_temperature_low == 0
assert device.target_temperature_high == 11
device.values.primary.data = HVAC_MODE_HEAT_COOL
value_changed(device.values.primary)
assert device.target_temperature_low == 2
assert device.target_temperature_high == 9
def test_target_changed_eco(device_heat_eco):
"""Test values changed for climate device."""
device = device_heat_eco
assert device.target_temperature == 2
device.values.primary.data = "heat econ"
value_changed(device.values.primary)
assert device.target_temperature == 1
device.values.setpoint_eco_heating.data = 0
value_changed(device.values.setpoint_eco_heating)
assert device.target_temperature == 0
device.values.primary.data = HVAC_MODE_HEAT
value_changed(device.values.primary)
assert device.target_temperature == 2
def test_target_changed_with_mode(device):
"""Test values changed for climate device."""
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.target_temperature == 1
device.values.primary.data = HVAC_MODE_COOL
value_changed(device.values.primary)
assert device.target_temperature == 10
device.values.primary.data = HVAC_MODE_HEAT_COOL
value_changed(device.values.primary)
assert device.target_temperature_low == 1
assert device.target_temperature_high == 10
def test_target_value_changed_single_setpoint(device_single_setpoint):
"""Test values changed for climate device."""
device = device_single_setpoint
assert device.target_temperature == 1
device.values.primary.data = 2
value_changed(device.values.primary)
assert device.target_temperature == 2
def test_temperature_value_changed(device):
"""Test values changed for climate device."""
assert device.current_temperature == 5
device.values.temperature.data = 3
value_changed(device.values.temperature)
assert device.current_temperature == 3
def test_operation_value_changed(device):
"""Test values changed for climate device."""
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = HVAC_MODE_COOL
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_COOL
assert device.preset_mode == PRESET_NONE
device.values.primary.data = HVAC_MODE_OFF
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_OFF
assert device.preset_mode == PRESET_NONE
device.values.primary = None
assert device.hvac_mode == HVAC_MODE_HEAT_COOL
assert device.preset_mode == PRESET_NONE
def test_operation_value_changed_preset(device_mapping):
"""Test preset changed for climate device."""
device = device_mapping
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = PRESET_ECO
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_HEAT_COOL
assert device.preset_mode == PRESET_ECO
def test_operation_value_changed_mapping(device_mapping):
"""Test values changed for climate device. Mapping."""
device = device_mapping
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = "Off"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_OFF
assert device.preset_mode == PRESET_NONE
device.values.primary.data = "Cool"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_COOL
assert device.preset_mode == PRESET_NONE
def test_operation_value_changed_mapping_preset(device_mapping):
"""Test values changed for climate device. Mapping with presets."""
device = device_mapping
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = "Full Power"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_HEAT_COOL
assert device.preset_mode == PRESET_BOOST
device.values.primary = None
assert device.hvac_mode == HVAC_MODE_HEAT_COOL
assert device.preset_mode == PRESET_NONE
def test_operation_value_changed_unknown(device_unknown):
"""Test preset changed for climate device. Unknown."""
device = device_unknown
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = "Abcdefg"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_HEAT_COOL
assert device.preset_mode == "Abcdefg"
def test_operation_value_changed_heat_cool(device_heat_cool):
"""Test preset changed for climate device. Heat/Cool only."""
device = device_heat_cool
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == PRESET_NONE
device.values.primary.data = "Cool Eco"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_COOL
assert device.preset_mode == "Cool Eco"
device.values.primary.data = "Heat Eco"
value_changed(device.values.primary)
assert device.hvac_mode == HVAC_MODE_HEAT
assert device.preset_mode == "Heat Eco"
def test_fan_mode_value_changed(device):
"""Test values changed for climate device."""
assert device.fan_mode == "test2"
device.values.fan_mode.data = "test_updated_fan"
value_changed(device.values.fan_mode)
assert device.fan_mode == "test_updated_fan"
def test_hvac_action_value_changed(device):
"""Test values changed for climate device."""
assert device.hvac_action == CURRENT_HVAC_HEAT
device.values.operating_state.data = CURRENT_HVAC_COOL
value_changed(device.values.operating_state)
assert device.hvac_action == CURRENT_HVAC_COOL
def test_hvac_action_value_changed_mapping(device_mapping):
"""Test values changed for climate device."""
device = device_mapping
assert device.hvac_action == CURRENT_HVAC_HEAT
device.values.operating_state.data = "cooling"
value_changed(device.values.operating_state)
assert device.hvac_action == CURRENT_HVAC_COOL
def test_hvac_action_value_changed_unknown(device_unknown):
"""Test values changed for climate device."""
device = device_unknown
assert device.hvac_action == "test4"
device.values.operating_state.data = "another_hvac_action"
value_changed(device.values.operating_state)
assert device.hvac_action == "another_hvac_action"
def test_fan_action_value_changed(device):
"""Test values changed for climate device."""
assert device.device_state_attributes[climate.ATTR_FAN_ACTION] == 7
device.values.fan_action.data = 9
value_changed(device.values.fan_action)
assert device.device_state_attributes[climate.ATTR_FAN_ACTION] == 9
def test_aux_heat_unsupported_set(device):
"""Test aux heat for climate device."""
assert device.values.primary.data == HVAC_MODE_HEAT
device.turn_aux_heat_on()
assert device.values.primary.data == HVAC_MODE_HEAT
device.turn_aux_heat_off()
assert device.values.primary.data == HVAC_MODE_HEAT
def test_aux_heat_unsupported_value_changed(device):
"""Test aux heat for climate device."""
assert device.is_aux_heat is None
device.values.primary.data = HVAC_MODE_HEAT
value_changed(device.values.primary)
assert device.is_aux_heat is None
def test_aux_heat_set(device_aux_heat):
"""Test aux heat for climate device."""
device = device_aux_heat
assert device.values.primary.data == HVAC_MODE_HEAT
device.turn_aux_heat_on()
assert device.values.primary.data == AUX_HEAT_ZWAVE_MODE
device.turn_aux_heat_off()
assert device.values.primary.data == HVAC_MODE_HEAT
def test_aux_heat_value_changed(device_aux_heat):
"""Test aux heat for climate device."""
device = device_aux_heat
assert device.is_aux_heat is False
device.values.primary.data = AUX_HEAT_ZWAVE_MODE
value_changed(device.values.primary)
assert device.is_aux_heat is True
device.values.primary.data = HVAC_MODE_HEAT
value_changed(device.values.primary)
assert device.is_aux_heat is False
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Anatomical reference preprocessing workflows.
Originally coded by Craig Moodie. Refactored by the CRN Developers.
"""
import os.path as op
from nipype.interfaces import ants
from nipype.interfaces import fsl
from nipype.interfaces import io as nio
from nipype.interfaces import utility as niu
from nipype.pipeline import engine as pe
from niworkflows.interfaces.registration import RobustMNINormalizationRPT
from niworkflows.anat.skullstrip import afni_wf as skullstrip_wf
from niworkflows.data import get_mni_icbm152_nlin_asym_09c
from niworkflows.interfaces.masks import BrainExtractionRPT
from niworkflows.interfaces.segmentation import FASTRPT
from fmriprep.interfaces import (DerivativesDataSink, IntraModalMerge)
from fmriprep.interfaces.utils import reorient
from fmriprep.utils.misc import fix_multi_T1w_source_name
from fmriprep.viz import stripped_brain_overlay
# pylint: disable=R0914
def t1w_preprocessing(name='t1w_preprocessing', settings=None):
"""T1w images preprocessing pipeline"""
if settings is None:
raise RuntimeError('Workflow settings are missing')
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['t1w']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['t1_seg', 't1_tpms', 'bias_corrected_t1', 't1_brain', 't1_mask',
't1_2_mni', 't1_2_mni_forward_transform',
't1_2_mni_reverse_transform']), name='outputnode')
# 0. Align and merge if several T1w images are provided
t1wmrg = pe.Node(IntraModalMerge(), name='MergeT1s')
# 1. Reorient T1
arw = pe.Node(niu.Function(input_names=['in_file'],
output_names=['out_file'],
function=reorient),
name='Reorient')
# 2. T1 Bias Field Correction
inu_n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3),
name='CorrectINU')
# 3. Skull-stripping
    if settings.get('skull_strip_ants', False):
        asw = skullstrip_ants(settings=settings)
    else:
        asw = skullstrip_wf()
# 4. Segmentation
t1_seg = pe.Node(FASTRPT(generate_report=True, segments=True,
no_bias=True, probability_maps=True),
name='Segmentation')
# 5. Spatial normalization (T1w to MNI registration)
t1_2_mni = pe.Node(
RobustMNINormalizationRPT(
generate_report=True,
num_threads=settings['ants_nthreads'],
testing=settings.get('debug', False),
template='mni_icbm152_nlin_asym_09c'
),
name='T1_2_MNI_Registration'
)
    # should not be necessary but does not hurt - make sure the multiproc
    # scheduler knows the resource limits
t1_2_mni.interface.num_threads = settings['ants_nthreads']
# Resample the brain mask and the tissue probability maps into mni space
bmask_mni = pe.Node(
ants.ApplyTransforms(dimension=3, default_value=0,
interpolation='NearestNeighbor'),
name='brain_mni_warp'
)
bmask_mni.inputs.reference_image = op.join(get_mni_icbm152_nlin_asym_09c(),
'1mm_T1.nii.gz')
tpms_mni = pe.MapNode(
ants.ApplyTransforms(dimension=3, default_value=0,
interpolation='Linear'),
iterfield=['input_image'],
name='tpms_mni_warp'
)
tpms_mni.inputs.reference_image = op.join(get_mni_icbm152_nlin_asym_09c(),
'1mm_T1.nii.gz')
ds_t1_seg_report = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='t1_seg', out_path_base='reports'),
name='DS_T1_Seg_Report'
)
ds_t1_2_mni_report = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='t1_2_mni', out_path_base='reports'),
name='DS_T1_2_MNI_Report'
)
workflow.connect([
(inputnode, t1wmrg, [('t1w', 'in_files')]),
(t1wmrg, arw, [('out_avg', 'in_file')]),
(arw, inu_n4, [('out_file', 'input_image')]),
(inu_n4, asw, [('output_image', 'inputnode.in_file')]),
(asw, t1_seg, [('outputnode.out_file', 'in_files')]),
(inu_n4, t1_2_mni, [('output_image', 'moving_image')]),
(asw, t1_2_mni, [('outputnode.out_mask', 'moving_mask')]),
(t1_seg, outputnode, [('tissue_class_map', 't1_seg')]),
(inu_n4, outputnode, [('output_image', 'bias_corrected_t1')]),
(t1_seg, outputnode, [('probability_maps', 't1_tpms')]),
(t1_2_mni, outputnode, [
('warped_image', 't1_2_mni'),
('forward_transforms', 't1_2_mni_forward_transform'),
('reverse_transforms', 't1_2_mni_reverse_transform')
]),
(asw, bmask_mni, [('outputnode.out_mask', 'input_image')]),
(t1_2_mni, bmask_mni, [('forward_transforms', 'transforms'),
('forward_invert_flags',
'invert_transform_flags')]),
(t1_seg, tpms_mni, [('probability_maps', 'input_image')]),
(t1_2_mni, tpms_mni, [('forward_transforms', 'transforms'),
('forward_invert_flags', 'invert_transform_flags')]),
(asw, outputnode, [('outputnode.out_file', 't1_brain'),
('outputnode.out_mask', 't1_mask')]),
(inputnode, ds_t1_seg_report, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(t1_seg, ds_t1_seg_report, [('out_report', 'in_file')]),
(inputnode, ds_t1_2_mni_report, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(t1_2_mni, ds_t1_2_mni_report, [('out_report', 'in_file')])
])
if settings.get('skull_strip_ants', False):
ds_t1_skull_strip_report = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='t1_skull_strip', out_path_base='reports'),
name='DS_Report'
)
workflow.connect([
(inputnode, ds_t1_skull_strip_report, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(asw, ds_t1_skull_strip_report, [('outputnode.out_report', 'in_file')])
])
# Write corrected file in the designated output dir
ds_t1_bias = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='preproc'),
name='DerivT1_inu'
)
ds_t1_seg = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='dtissue'),
name='DerivT1_seg'
)
ds_mask = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='brainmask'),
name='DerivT1_mask'
)
ds_t1_mni = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='space-MNI152NLin2009cAsym_preproc'),
name='DerivT1w_MNI'
)
ds_t1_mni_aff = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='target-MNI152NLin2009cAsym_affine'),
name='DerivT1w_MNI_affine'
)
ds_bmask_mni = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='space-MNI152NLin2009cAsym_brainmask'),
name='DerivT1_Mask_MNI'
)
ds_tpms_mni = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='space-MNI152NLin2009cAsym_class-{extra_value}_probtissue'),
name='DerivT1_TPMs_MNI'
)
ds_tpms_mni.inputs.extra_values = ['CSF', 'GM', 'WM']
if settings.get('debug', False):
workflow.connect([
(t1_2_mni, ds_t1_mni_aff, [('forward_transforms', 'in_file')])
])
else:
ds_t1_mni_warp = pe.Node(
DerivativesDataSink(base_directory=settings['output_dir'],
suffix='target-MNI152NLin2009cAsym_warp'), name='mni_warp')
def _get_aff(inlist):
return inlist[:-1]
def _get_warp(inlist):
return inlist[-1]
workflow.connect([
(inputnode, ds_t1_mni_warp, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(t1_2_mni, ds_t1_mni_aff, [
(('forward_transforms', _get_aff), 'in_file')]),
(t1_2_mni, ds_t1_mni_warp, [
(('forward_transforms', _get_warp), 'in_file')])
])
workflow.connect([
(inputnode, ds_t1_bias, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_t1_seg, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_mask, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_t1_mni, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_t1_mni_aff, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_bmask_mni, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inputnode, ds_tpms_mni, [(('t1w', fix_multi_T1w_source_name), 'source_file')]),
(inu_n4, ds_t1_bias, [('output_image', 'in_file')]),
(t1_seg, ds_t1_seg, [('tissue_class_map', 'in_file')]),
(asw, ds_mask, [('outputnode.out_mask', 'in_file')]),
(t1_2_mni, ds_t1_mni, [('warped_image', 'in_file')]),
(bmask_mni, ds_bmask_mni, [('output_image', 'in_file')]),
(tpms_mni, ds_tpms_mni, [('output_image', 'in_file')])
])
return workflow
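# A minimal usage sketch for t1w_preprocessing; the paths and thread count are
# assumptions for illustration:
#
#     wf = t1w_preprocessing(settings={'output_dir': '/out',
#                                      'ants_nthreads': 4,
#                                      'debug': False,
#                                      'skull_strip_ants': True})
#     wf.base_dir = '/scratch'
#     wf.run()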
def skullstrip_ants(name='ANTsBrainExtraction', settings=None):
    """Build an ANTs-based brain-extraction workflow."""
    from niworkflows.data import get_ants_oasis_template_ras
    if settings is None:
        # the fallback also needs ants_nthreads, which is read unconditionally below
        settings = {'debug': False, 'ants_nthreads': 1}
workflow = pe.Workflow(name=name)
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'source_file']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(
fields=['out_file', 'out_mask', 'out_report']), name='outputnode')
t1_skull_strip = pe.Node(BrainExtractionRPT(
dimension=3, use_floatingpoint_precision=1,
debug=settings['debug'], generate_report=True,
num_threads=settings['ants_nthreads']),
name='Ants_T1_Brain_Extraction')
    # should not be necessary but does not hurt - make sure the multiproc
    # scheduler knows the resource limits
t1_skull_strip.interface.num_threads = settings['ants_nthreads']
t1_skull_strip.inputs.brain_template = op.join(
get_ants_oasis_template_ras(),
'T_template0.nii.gz'
)
t1_skull_strip.inputs.brain_probability_mask = op.join(
get_ants_oasis_template_ras(),
'T_template0_BrainCerebellumProbabilityMask.nii.gz'
)
t1_skull_strip.inputs.extraction_registration_mask = op.join(
get_ants_oasis_template_ras(),
'T_template0_BrainCerebellumRegistrationMask.nii.gz'
)
workflow.connect([
(inputnode, t1_skull_strip, [('in_file', 'anatomical_image')]),
(t1_skull_strip, outputnode, [('BrainExtractionMask', 'out_mask'),
('BrainExtractionBrain', 'out_file'),
('out_report', 'out_report')])
])
return workflow
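# A minimal usage sketch for skullstrip_ants (settings are illustrative); its
# inputnode/outputnode are meant to be wired into a parent workflow, as
# t1w_preprocessing does above:
#
#     ss_wf = skullstrip_ants(settings={'debug': False, 'ants_nthreads': 4})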
|
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from builtins import str
from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from django.utils import timezone
from etgen.html import E
from lino.api import dd, rt, gettext
from lino import mixins
from lino.utils import join_elems
from lino.modlib.printing.mixins import PrintableType, TypedPrintable
from lino.modlib.gfks.mixins import Controllable
from lino_xl.lib.skills.mixins import Feasible
from lino.modlib.users.mixins import My, UserAuthored
from lino.modlib.notify.mixins import ChangeNotifier
from lino.modlib.uploads.mixins import UploadController
from lino_xl.lib.outbox.mixins import MailableType, Mailable
from lino_xl.lib.contacts.mixins import ContactRelated
# from lino.modlib.office.roles import OfficeUser, OfficeStaff, OfficeOperator
from lino.modlib.office.roles import OfficeStaff
from .roles import NotesUser, NotesStaff
from lino.modlib.notify.choicelists import MessageTypes
MessageTypes.add_item('notes', dd.plugins.notes.verbose_name)
from .choicelists import SpecialTypes
class NoteType(mixins.BabelNamed, PrintableType, MailableType):
templates_group = 'notes/Note'
class Meta:
app_label = 'notes'
verbose_name = _("Note Type")
verbose_name_plural = _("Note Types")
important = models.BooleanField(
verbose_name=_("important"),
default=False)
remark = models.TextField(verbose_name=_("Remark"), blank=True)
special_type = SpecialTypes.field(blank=True)
class NoteTypes(dd.Table):
model = 'notes.NoteType'
required_roles = dd.login_required(OfficeStaff)
#~ label = _("Note types")
column_names = 'name build_method template special_type *'
order_by = ["name"]
insert_layout = """
name
build_method
"""
detail_layout = """
id name
build_method template special_type email_template attach_to_email
remark:60x5
notes.NotesByType
"""
class EventType(mixins.BabelNamed):
class Meta:
app_label = 'notes'
verbose_name = pgettext_lazy(u"notes", u"Event Type")
verbose_name_plural = _("Event Types")
remark = models.TextField(verbose_name=_("Remark"), blank=True)
body = dd.BabelTextField(_("Body"), blank=True, format='html')
class EventTypes(dd.Table):
model = 'notes.EventType'
required_roles = dd.login_required(OfficeStaff)
column_names = 'name *'
order_by = ["name"]
detail_layout = """
id name
remark:60x3
notes.NotesByEventType:60x6
"""
class Note(TypedPrintable,
UserAuthored,
Controllable,
Feasible,
ContactRelated,
mixins.ProjectRelated,
ChangeNotifier,
UploadController,
Mailable):
manager_roles_required = dd.login_required(OfficeStaff)
class Meta:
app_label = 'notes'
abstract = dd.is_abstract_model(__name__, 'Note')
verbose_name = _("Note")
verbose_name_plural = _("Notes")
date = models.DateField(
verbose_name=_('Date'), default=dd.today)
time = dd.TimeField(
blank=True, null=True,
verbose_name=_("Time"),
default=timezone.now)
type = dd.ForeignKey(
'notes.NoteType',
blank=True, null=True,
verbose_name=_('Note Type (Content)'))
event_type = dd.ForeignKey(
'notes.EventType',
blank=True, null=True,
verbose_name=_('Event Type (Form)'))
subject = models.CharField(_("Subject"), max_length=200, blank=True)
body = dd.RichTextField(_("Body"), blank=True, format='html')
language = dd.LanguageField()
def __str__(self):
if self.event_type_id:
return u'%s #%s' % (self.event_type, self.pk)
return u'%s #%s' % (self._meta.verbose_name, self.pk)
def summary_row(self, ar, **kw):
#~ s = super(Note,self).summary_row(ui,rr)
s = super(Note, self).summary_row(ar)
#~ s = contacts.ContactDocument.summary_row(self,ui,rr)
if self.subject:
s += [' ', self.subject]
return s
def get_mailable_type(self):
return self.type
def get_print_language(self):
return self.language
def get_change_owner(self):
return self.project
# def get_change_observers(self, ar=None):
# # in lino_welfare the project is pcsw.Client
# prj = self.project
# if isinstance(prj, ChangeNotifier):
# for u in prj.get_change_observers(ar):
# yield u
def get_change_info(self, ar, cw):
yield E.p(
gettext("Subject"), ': ', self.subject,
E.br(), gettext("Client"), ': ', ar.obj2memo(self.project))
if dd.is_installed('contacts'):
dd.update_field(
Note, 'company', verbose_name=_("Recipient (Organization)"))
dd.update_field(
Note, 'contact_person', verbose_name=_("Recipient (Person)"))
class NoteDetail(dd.DetailLayout):
main = """
date:10 time event_type:25 type:25
subject project
company contact_person contact_role
id user:10 language:8 build_time
body:40 #outbox.MailsByController:40
"""
class Notes(dd.Table):
#required_roles = dd.login_required((OfficeUser, OfficeOperator))
    required_roles = dd.login_required(NotesUser)
model = 'notes.Note'
detail_layout = 'notes.NoteDetail'
column_names = "date time id user event_type type project subject * body"
order_by = ["date", "time"]
class AllNotes(Notes):
required_roles = dd.login_required(NotesStaff)
class MyNotes(My, Notes):
column_names = "date time event_type type subject project body *"
order_by = ["date", "time"]
class NotesByType(Notes):
master_key = 'type'
column_names = "date time event_type subject user *"
order_by = ["date", "time"]
class NotesByEventType(Notes):
master_key = 'event_type'
column_names = "date time type subject user *"
order_by = ["date", "time"]
class NotesByX(Notes):
abstract = True
column_names = "date time event_type type subject user *"
order_by = ["-date", "-time"]
display_mode = 'summary'
insert_layout = """
event_type:25 type:25
subject
project #company
"""
@classmethod
def summary_row(cls, ar, obj, **kwargs):
# s = super(NotesByX, cls).summary_row(ar, obj)
s = [dd.fds(obj.date)]
if obj.time is not None:
s += [" ", obj.time.strftime(
settings.SITE.time_format_strftime)]
        s += [" ", obj.obj2href(ar)]
if obj.user != ar.get_user():
s += [' (', obj.user.initials or str(obj.user), ")"]
if obj.subject:
s += [' ', obj.subject]
return s
if settings.SITE.project_model is not None:
class NotesByProject(NotesByX):
master_key = 'project'
# stay_in_grid = True
class NotesByOwner(NotesByX):
master_key = 'owner'
class NotesByCompany(NotesByX):
master_key = 'company'
class NotesByPerson(NotesByX):
master_key = 'contact_person'
dd.inject_field(
'system.SiteConfig',
'system_note_type',
dd.ForeignKey(
'notes.EventType',
blank=True, null=True,
verbose_name=_("Default system note type"),
help_text=_("""\
Note Type used by system notes.
    If this is empty, then system notes won't create any entry in the Notes table.""")))
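# A hedged usage sketch (not part of the original module; assumes a configured
# Lino site and an existing user and project): create a Note programmatically.
def _example_create_note(user, project):
    Note = rt.models.notes.Note
    note = Note(user=user, project=project, subject="Follow-up")
    note.full_clean()
    note.save()
    return note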
|
|
#!/usr/bin/env python3
# Copyright 2020 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Library to extract scenario definitions from scenario_config.py.
#
# Contains functions to filter, analyze and dump scenario definitions.
#
# This library is used in loadtest_config.py to generate the "scenariosJSON"
# field in the format accepted by the OSS benchmarks framework.
# See https://github.com/grpc/test-infra/blob/master/config/samples/cxx_example_loadtest.yaml
#
# It can also be used to dump scenarios to files, to count scenarios by
# language, and to export scenario languages in a format that can be used for
# automation.
#
# Example usage:
#
# scenario_config.py --export_scenarios -l cxx -f cxx_scenario_ -r '.*' \
# --category=scalable
#
# scenario_config.py --count_scenarios
#
# scenario_config.py --count_scenarios --category=scalable
#
# For usage of the language config output, see loadtest_config.py.
import argparse
import collections
import json
import re
import sys
from typing import Any, Callable, Dict, Iterable, NamedTuple
import scenario_config
# Language parameters for load test config generation.
LanguageConfig = NamedTuple('LanguageConfig', [('category', str),
('language', str),
('client_language', str),
('server_language', str)])
def category_string(categories: Iterable[str], category: str) -> str:
"""Converts a list of categories into a single string for counting."""
if category != 'all':
return category if category in categories else ''
main_categories = ('scalable', 'smoketest')
s = set(categories)
c = [m for m in main_categories if m in s]
s.difference_update(main_categories)
c.extend(s)
return ' '.join(c)
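# A hedged illustration derived from the function above: with category='all'
# the main categories come first, then the remaining ones.
def _category_string_example() -> None:
    assert category_string(['sweep', 'scalable'], 'all') == 'scalable sweep'
    assert category_string(['sweep'], 'scalable') == ''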
def gen_scenario_languages(category: str) -> Iterable[LanguageConfig]:
"""Generates tuples containing the languages specified in each scenario."""
for language in scenario_config.LANGUAGES:
for scenario in scenario_config.LANGUAGES[language].scenarios():
client_language = scenario.get('CLIENT_LANGUAGE', '')
server_language = scenario.get('SERVER_LANGUAGE', '')
categories = scenario.get('CATEGORIES', [])
if category != 'all' and category not in categories:
continue
cat = category_string(categories, category)
yield LanguageConfig(category=cat,
language=language,
client_language=client_language,
server_language=server_language)
def scenario_filter(
scenario_name_regex: str = '.*',
category: str = 'all',
client_language: str = '',
server_language: str = '',
) -> Callable[[Dict[str, Any]], bool]:
"""Returns a function to filter scenarios to process."""
def filter_scenario(scenario: Dict[str, Any]) -> bool:
"""Filters scenarios that match specified criteria."""
if not re.search(scenario_name_regex, scenario["name"]):
return False
        # If the 'CATEGORIES' key is missing, treat the scenario as part of
        # both 'scalable' and 'smoketest'. This matches the behavior of
        # run_performance_tests.py.
scenario_categories = scenario.get('CATEGORIES',
['scalable', 'smoketest'])
if category not in scenario_categories and category != 'all':
return False
scenario_client_language = scenario.get('CLIENT_LANGUAGE', '')
if client_language != scenario_client_language:
return False
scenario_server_language = scenario.get('SERVER_LANGUAGE', '')
if server_language != scenario_server_language:
return False
return True
return filter_scenario
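# A hedged usage sketch with hand-written scenario dicts (hypothetical names;
# only the 'name' and 'CATEGORIES' keys matter for the filter).
def _scenario_filter_example() -> None:
    keep = scenario_filter(scenario_name_regex='^cpp_', category='scalable')
    # A missing 'CATEGORIES' key defaults to ['scalable', 'smoketest'].
    assert keep({'name': 'cpp_streaming_qps'})
    # The name does not match the regex, so the scenario is filtered out.
    assert not keep({'name': 'go_generic_sync', 'CATEGORIES': ['scalable']})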
def gen_scenarios(
language_name: str, scenario_filter_function: Callable[[Dict[str, Any]],
bool]
) -> Iterable[Dict[str, Any]]:
"""Generates scenarios that match a given filter function."""
return map(
scenario_config.remove_nonproto_fields,
filter(scenario_filter_function,
scenario_config.LANGUAGES[language_name].scenarios()))
def dump_to_json_files(scenarios: Iterable[Dict[str, Any]],
filename_prefix: str) -> None:
"""Dumps a list of scenarios to JSON files"""
count = 0
for scenario in scenarios:
filename = '{}{}.json'.format(filename_prefix, scenario['name'])
print('Writing file {}'.format(filename), file=sys.stderr)
with open(filename, 'w') as outfile:
            # The dump file should have {"scenarios": []} as the top-level
            # element so it can be embedded in a LoadTest configuration YAML file.
json.dump({'scenarios': [scenario]}, outfile, indent=2)
count += 1
print('Wrote {} scenarios'.format(count), file=sys.stderr)
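# A hedged illustration with a hypothetical one-key scenario: each output file
# wraps a single scenario in a top-level "scenarios" list.
def _dump_example() -> None:
    dump_to_json_files([{'name': 'demo'}], 'scenario_dump_')
    # Writes scenario_dump_demo.json containing {"scenarios": [{"name": "demo"}]}.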
def main() -> None:
language_choices = sorted(scenario_config.LANGUAGES.keys())
argp = argparse.ArgumentParser(description='Exports scenarios to files.')
argp.add_argument('--export_scenarios',
action='store_true',
help='Export scenarios to JSON files.')
argp.add_argument('--count_scenarios',
action='store_true',
help='Count scenarios for all test languages.')
argp.add_argument('-l',
'--language',
choices=language_choices,
help='Language to export.')
argp.add_argument('-f',
'--filename_prefix',
default='scenario_dump_',
type=str,
help='Prefix for exported JSON file names.')
argp.add_argument('-r',
'--regex',
default='.*',
type=str,
help='Regex to select scenarios to run.')
argp.add_argument(
'--category',
default='all',
choices=['all', 'inproc', 'scalable', 'smoketest', 'sweep'],
help='Select scenarios for a category of tests.')
argp.add_argument(
'--client_language',
default='',
choices=language_choices,
help='Select only scenarios with a specified client language.')
argp.add_argument(
'--server_language',
default='',
choices=language_choices,
help='Select only scenarios with a specified server language.')
args = argp.parse_args()
if args.export_scenarios and not args.language:
print('Dumping scenarios requires a specified language.',
file=sys.stderr)
argp.print_usage(file=sys.stderr)
return
if args.export_scenarios:
s_filter = scenario_filter(scenario_name_regex=args.regex,
category=args.category,
client_language=args.client_language,
server_language=args.server_language)
scenarios = gen_scenarios(args.language, s_filter)
dump_to_json_files(scenarios, args.filename_prefix)
if args.count_scenarios:
print('Scenario count for all languages (category: {}):'.format(
args.category))
print('{:>5} {:16} {:8} {:8} {}'.format('Count', 'Language', 'Client',
'Server', 'Categories'))
c = collections.Counter(gen_scenario_languages(args.category))
total = 0
for ((cat, l, cl, sl), count) in c.most_common():
print('{count:5} {l:16} {cl:8} {sl:8} {cat}'.format(l=l,
cl=cl,
sl=sl,
count=count,
cat=cat))
total += count
print('\n{:>5} total scenarios (category: {})'.format(
total, args.category))
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.blocktools import (
COINBASE_MATURITY,
)
from test_framework.address import (
script_to_p2sh,
script_to_p2wsh,
)
from test_framework.key import ECKey
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.messages import (
CTxInWitness,
tx_from_hex,
)
from test_framework.script import (
CScript,
OP_CHECKLOCKTIMEVERIFY,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
OP_TRUE,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2pkh_script,
script_to_p2sh_p2wsh_script,
script_to_p2wsh_script,
)
from test_framework.wallet_util import bytes_to_wif
from decimal import (
Decimal,
getcontext,
)
class SignRawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
self.log.info("Test valid raw transaction with one input")
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, inputs)
# 1) The transaction has a complete set of signatures
assert rawTxSigned['complete']
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def test_with_lock_outputs(self):
self.log.info("Test correct error reporting when trying to sign a locked output")
self.nodes[0].encryptwallet("password")
rawTx = '020000000156b958f78e3f24e0b2f4e4db1255426b0902027cb37e3ddadb52e37c3557dddb0000000000ffffffff01c0a6b929010000001600149a2ee8c77140a053f36018ac8124a6ececc1668a00000000'
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].signrawtransactionwithwallet, rawTx)
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
self.log.info("Test script verification errors")
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransactionwithkey(rawTx, privKeys, scripts)
# 3) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransactionwithwallet(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert not rawTxSigned['complete']
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def test_fully_signed_tx(self):
self.log.info("Test signing a fully signed transaction does nothing")
self.nodes[0].walletpassphrase("password", 9999)
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
rawtx = self.nodes[0].createrawtransaction([], [{self.nodes[0].getnewaddress(): 10}])
fundedtx = self.nodes[0].fundrawtransaction(rawtx)
signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx["hex"])
assert_equal(signedtx["complete"], True)
signedtx2 = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert_equal(signedtx2["complete"], True)
assert_equal(signedtx["hex"], signedtx2["hex"])
self.nodes[0].walletlock()
def witness_script_test(self):
self.log.info("Test signing transaction to P2SH-P2WSH addresses without wallet")
# Create a new P2SH-P2WSH 1-of-1 multisig address:
eckey = ECKey()
eckey.generate()
embedded_privkey = bytes_to_wif(eckey.get_bytes())
embedded_pubkey = eckey.get_pubkey().get_bytes().hex()
p2sh_p2wsh_address = self.nodes[1].createmultisig(1, [embedded_pubkey], "p2sh-segwit")
# send transaction to P2SH-P2WSH 1-of-1 multisig address
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
self.nodes[0].sendtoaddress(p2sh_p2wsh_address["address"], 49.999)
self.generate(self.nodes[0], 1)
# Get the UTXO info from scantxoutset
unspent_output = self.nodes[1].scantxoutset('start', [p2sh_p2wsh_address['descriptor']])['unspents'][0]
spk = script_to_p2sh_p2wsh_script(p2sh_p2wsh_address['redeemScript']).hex()
unspent_output['witnessScript'] = p2sh_p2wsh_address['redeemScript']
unspent_output['redeemScript'] = script_to_p2wsh_script(unspent_output['witnessScript']).hex()
assert_equal(spk, unspent_output['scriptPubKey'])
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([unspent_output], {self.nodes[1].get_wallet_rpc(self.default_wallet_name).getnewaddress(): Decimal("49.998")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [unspent_output])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
# Now test with P2PKH and P2PK scripts as the witnessScript
for tx_type in ['P2PKH', 'P2PK']: # these tests are order-independent
self.verify_txn_with_witness_script(tx_type)
def verify_txn_with_witness_script(self, tx_type):
self.log.info("Test with a {} script as the witnessScript".format(tx_type))
eckey = ECKey()
eckey.generate()
embedded_privkey = bytes_to_wif(eckey.get_bytes())
embedded_pubkey = eckey.get_pubkey().get_bytes().hex()
witness_script = {
'P2PKH': key_to_p2pkh_script(embedded_pubkey).hex(),
'P2PK': key_to_p2pk_script(embedded_pubkey).hex()
}.get(tx_type, "Invalid tx_type")
redeem_script = script_to_p2wsh_script(witness_script).hex()
addr = script_to_p2sh(redeem_script)
script_pub_key = self.nodes[1].validateaddress(addr)['scriptPubKey']
# Fund that address
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
self.generate(self.nodes[0], 1)
# Now create and sign a transaction spending that output on node[0], which doesn't know the scripts or keys
spending_tx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): Decimal("9.999")})
spending_tx_signed = self.nodes[0].signrawtransactionwithkey(spending_tx, [embedded_privkey], [{'txid': txid, 'vout': vout, 'scriptPubKey': script_pub_key, 'redeemScript': redeem_script, 'witnessScript': witness_script, 'amount': 10}])
# Check the signing completed successfully
assert 'complete' in spending_tx_signed
assert_equal(spending_tx_signed['complete'], True)
self.nodes[0].sendrawtransaction(spending_tx_signed['hex'])
def OP_1NEGATE_test(self):
self.log.info("Test OP_1NEGATE (0x4f) satisfies BIP62 minimal push standardness rule")
hex_str = (
"0200000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
"FFFFFFFF00000000044F024F9CFDFFFFFF01F0B9F5050000000023210277777777"
"77777777777777777777777777777777777777777777777777777777AC66030000"
)
prev_txs = [
{
"txid": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
"vout": 0,
"scriptPubKey": "A914AE44AB6E9AA0B71F1CD2B453B69340E9BFBAEF6087",
"redeemScript": "4F9C",
"amount": 1,
}
]
txn = self.nodes[0].signrawtransactionwithwallet(hex_str, prev_txs)
assert txn["complete"]
def test_signing_with_csv(self):
self.log.info("Test signing a transaction containing a fully signed CSV input")
self.nodes[0].walletpassphrase("password", 9999)
getcontext().prec = 8
# Make sure CSV is active
assert self.nodes[0].getdeploymentinfo()['deployments']['csv']['active']
# Create a P2WSH script with CSV
script = CScript([1, OP_CHECKSEQUENCEVERIFY, OP_DROP])
address = script_to_p2wsh(script)
# Fund that address and make the spend
txid = self.nodes[0].sendtoaddress(address, 1)
vout = find_vout_for_address(self.nodes[0], txid, address)
self.generate(self.nodes[0], 1)
utxo = self.nodes[0].listunspent()[0]
amt = Decimal(1) + utxo["amount"] - Decimal(0.00001)
tx = self.nodes[0].createrawtransaction(
[{"txid": txid, "vout": vout, "sequence": 1},{"txid": utxo["txid"], "vout": utxo["vout"]}],
[{self.nodes[0].getnewaddress(): amt}],
self.nodes[0].getblockcount()
)
# Set the witness script
ctx = tx_from_hex(tx)
ctx.wit.vtxinwit.append(CTxInWitness())
ctx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), script]
tx = ctx.serialize_with_witness().hex()
# Sign and send the transaction
signed = self.nodes[0].signrawtransactionwithwallet(tx)
assert_equal(signed["complete"], True)
self.nodes[0].sendrawtransaction(signed["hex"])
def test_signing_with_cltv(self):
self.log.info("Test signing a transaction containing a fully signed CLTV input")
self.nodes[0].walletpassphrase("password", 9999)
getcontext().prec = 8
# Make sure CLTV is active
assert self.nodes[0].getdeploymentinfo()['deployments']['bip65']['active']
# Create a P2WSH script with CLTV
script = CScript([100, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
address = script_to_p2wsh(script)
# Fund that address and make the spend
txid = self.nodes[0].sendtoaddress(address, 1)
vout = find_vout_for_address(self.nodes[0], txid, address)
self.generate(self.nodes[0], 1)
utxo = self.nodes[0].listunspent()[0]
amt = Decimal(1) + utxo["amount"] - Decimal(0.00001)
tx = self.nodes[0].createrawtransaction(
[{"txid": txid, "vout": vout},{"txid": utxo["txid"], "vout": utxo["vout"]}],
[{self.nodes[0].getnewaddress(): amt}],
self.nodes[0].getblockcount()
)
# Set the witness script
ctx = tx_from_hex(tx)
ctx.wit.vtxinwit.append(CTxInWitness())
ctx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), script]
tx = ctx.serialize_with_witness().hex()
# Sign and send the transaction
signed = self.nodes[0].signrawtransactionwithwallet(tx)
assert_equal(signed["complete"], True)
self.nodes[0].sendrawtransaction(signed["hex"])
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
self.witness_script_test()
self.OP_1NEGATE_test()
self.test_with_lock_outputs()
self.test_fully_signed_tx()
self.test_signing_with_csv()
self.test_signing_with_cltv()
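# A hedged sketch (not part of the test class): the shape of one prevtxs entry
# accepted by signrawtransactionwithkey when spending a P2SH-P2WSH output,
# mirroring the dict built inline in verify_txn_with_witness_script above.
def _example_prevtx_entry(txid, vout, script_pub_key, redeem_script,
                          witness_script, amount):
    return {
        'txid': txid,
        'vout': vout,
        'scriptPubKey': script_pub_key,
        'redeemScript': redeem_script,
        'witnessScript': witness_script,
        'amount': amount,
    }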
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations. This test case reads the configuration
file test_migrations.conf for database connection settings
to use in the tests. For each connection found in the config file,
the test case runs a series of test cases to ensure that migrations work
properly both upgrading and downgrading, and that no data loss occurs
if possible.
"""
import ConfigParser
import os
import subprocess
import uuid
from migrate.versioning import repository
import six.moves.urllib.parse as urlparse
import sqlalchemy
import testtools
import cinder.db.migration as migration
import cinder.db.sqlalchemy.migrate_repo
from cinder.db.sqlalchemy.migration import versioning_api as migration_api
from cinder import test
def _get_connect_string(backend,
user="openstack_citest",
passwd="openstack_citest",
database="openstack_citest"):
"""Return connect string.
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped.
"""
if backend == "postgres":
backend = "postgresql+psycopg2"
return ("%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s" %
{'backend': backend, 'user': user, 'passwd': passwd,
'database': database})
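# A hedged illustration of the default output for the 'postgres' backend:
def _connect_string_example():
    assert _get_connect_string("postgres") == (
        "postgresql+psycopg2://openstack_citest:openstack_citest"
        "@localhost/openstack_citest")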
def _is_mysql_avail(**kwargs):
return _is_backend_avail('mysql', **kwargs)
def _is_backend_avail(backend,
user="openstack_citest",
passwd="openstack_citest",
database="openstack_citest"):
try:
if backend == "mysql":
connect_uri = _get_connect_string("mysql", user=user,
passwd=passwd, database=database)
elif backend == "postgres":
connect_uri = _get_connect_string("postgres", user=user,
passwd=passwd, database=database)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def _have_mysql():
present = os.environ.get('NOVA_TEST_MYSQL_PRESENT')
if present is None:
return _is_backend_avail('mysql')
return present.lower() in ('', 'true')
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
return sqlalchemy.Table(name, metadata, autoload=True)
class TestMigrations(test.TestCase):
"""Test sqlalchemy-migrate migrations."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
# Test machines can set the CINDER_TEST_MIGRATIONS_CONF variable
# to override the location of the config file for migration testing
CONFIG_FILE_PATH = os.environ.get('CINDER_TEST_MIGRATIONS_CONF',
DEFAULT_CONFIG_FILE)
MIGRATE_FILE = cinder.db.sqlalchemy.migrate_repo.__file__
REPOSITORY = repository.Repository(
os.path.abspath(os.path.dirname(MIGRATE_FILE)))
def setUp(self):
super(TestMigrations, self).setUp()
self.snake_walk = False
self.test_databases = {}
# Load test databases from the config file. Only do this
# once. No need to re-run this on each test...
if not self.test_databases:
if os.path.exists(TestMigrations.CONFIG_FILE_PATH):
cp = ConfigParser.RawConfigParser()
try:
cp.read(TestMigrations.CONFIG_FILE_PATH)
defaults = cp.defaults()
for key, value in defaults.items():
self.test_databases[key] = value
self.snake_walk = cp.getboolean('walk_style', 'snake_walk')
except ConfigParser.ParsingError as e:
self.fail("Failed to read test_migrations.conf config "
"file. Got error: %s" % e)
else:
self.fail("Failed to find test_migrations.conf config "
"file.")
self.engines = {}
for key, value in self.test_databases.items():
self.engines[key] = sqlalchemy.create_engine(value)
# Set-up a dict of types for those column types that
# are not uniform for all databases.
self.bool_type = {}
self.time_type = {}
for (key, engine) in self.engines.items():
self.bool_type[engine.name] = sqlalchemy.types.BOOLEAN
self.time_type[engine.name] = sqlalchemy.types.DATETIME
if engine.name == 'mysql':
self.bool_type[engine.name] = sqlalchemy.dialects.mysql.TINYINT
if engine.name == 'postgresql':
self.time_type[engine.name] = sqlalchemy.types.TIMESTAMP
# We start each test case with a completely blank slate.
self._reset_databases()
# We destroy the test data store between each test case,
# and recreate it, which ensures that we have no side-effects
# from the tests
self.addCleanup(self._reset_databases)
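    # A hedged example (inferred from the parsing in setUp above) of the
    # expected test_migrations.conf layout:
    #
    #     [DEFAULT]
    #     sqlite = sqlite:///test_migrations.db
    #
    #     [walk_style]
    #     snake_walk = yes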
def _reset_databases(self):
def execute_cmd(cmd=None):
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, shell=True)
proc.communicate()[0]
self.assertEqual(0, proc.returncode)
for key, engine in self.engines.items():
conn_string = self.test_databases[key]
conn_pieces = urlparse.urlparse(conn_string)
engine.dispose()
if conn_string.startswith('sqlite'):
# We can just delete the SQLite database, which is
# the easiest and cleanest solution
db_path = conn_pieces.path.strip('/')
if os.path.exists(db_path):
os.unlink(db_path)
# No need to recreate the SQLite DB. SQLite will
# create it for us if it's not there...
elif conn_string.startswith('mysql'):
# We can execute the MySQL client to destroy and re-create
# the MYSQL database, which is easier and less error-prone
# than using SQLAlchemy to do this via MetaData...trust me.
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
if auth_pieces[1].strip():
password = "-p\"%s\"" % auth_pieces[1]
sql = ("drop database if exists %(database)s; create database "
"%(database)s;") % {'database': database}
cmd = ("mysql -u \"%(user)s\" %(password)s -h %(host)s "
"-e \"%(sql)s\"") % {'user': user, 'password': password,
'host': host, 'sql': sql}
execute_cmd(cmd)
elif conn_string.startswith('postgresql'):
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
            # note(krtaylor): file creation problems with tests in a
            # venv using .pgpass authentication; changed to the
            # PGPASSWORD environment variable, which is no longer
            # planned for deprecation
os.environ['PGPASSWORD'] = password
os.environ['PGUSER'] = user
# note(boris-42): We must create and drop database, we can't
# drop database which we have connected to, so for such
# operations there is a special database template1.
sqlcmd = ("psql -w -U %(user)s -h %(host)s -c"
" '%(sql)s' -d template1")
sql = ("drop database if exists %(database)s;") % {'database':
database}
droptable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
execute_cmd(droptable)
sql = ("create database %(database)s;") % {'database':
database}
createtable = sqlcmd % {'user': user, 'host': host, 'sql': sql}
execute_cmd(createtable)
os.unsetenv('PGPASSWORD')
os.unsetenv('PGUSER')
def test_walk_versions(self):
"""Test walk versions.
Walks all version scripts for each tested database, ensuring
that there are no errors in the version scripts for each engine
"""
for key, engine in self.engines.items():
self._walk_versions(engine, self.snake_walk)
def test_mysql_connect_fail(self):
"""Test for mysql connection failure.
Test that we can trigger a mysql connection failure and we fail
gracefully to ensure we don't break people without mysql
"""
if _is_mysql_avail(user="openstack_cifail"):
self.fail("Shouldn't have connected")
@testtools.skipUnless(_have_mysql(), "mysql not available")
def test_mysql_innodb(self):
"""Test that table creation on mysql only builds InnoDB tables."""
        # Add this to the global lists so that reset works with it; it's removed
        # automatically in tearDown, so there's no need to clean it up here.
connect_string = _get_connect_string('mysql')
engine = sqlalchemy.create_engine(connect_string)
self.engines["mysqlcitest"] = engine
self.test_databases["mysqlcitest"] = connect_string
# build a fully populated mysql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
uri = _get_connect_string('mysql', database="information_schema")
connection = sqlalchemy.create_engine(uri).connect()
# sanity check
total = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest'")
self.assertGreater(total.scalar(), 0,
msg="No tables found. Wrong schema?")
noninnodb = connection.execute("SELECT count(*) "
"from information_schema.TABLES "
"where TABLE_SCHEMA='openstack_citest' "
"and ENGINE!='InnoDB' "
"and TABLE_NAME!='migrate_version'")
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
def test_postgresql_connect_fail(self):
"""Test connection failure on PostgrSQL.
Test that we can trigger a postgres connection failure and we fail
gracefully to ensure we don't break people without postgres.
"""
if _is_backend_avail('postgres', user="openstack_cifail"):
self.fail("Shouldn't have connected")
@testtools.skipUnless(_is_backend_avail('postgres'),
"postgresql not available")
def test_postgresql_opportunistically(self):
        # Add this to the global lists so that reset works with it; it's removed
        # automatically in tearDown, so there's no need to clean it up here.
connect_string = _get_connect_string("postgres")
engine = sqlalchemy.create_engine(connect_string)
self.engines["postgresqlcitest"] = engine
self.test_databases["postgresqlcitest"] = connect_string
# build a fully populated postgresql database with all the tables
self._reset_databases()
self._walk_versions(engine, False, False)
def _walk_versions(self, engine=None, snake_walk=False, downgrade=True):
# Determine latest version script from the repo, then
# upgrade from 1 through to the latest, with no data
# in the databases. This just checks that the schema itself
# upgrades successfully.
# Place the database under version control
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
self.assertEqual(migration.db_initial_version(),
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
migration_api.upgrade(engine, TestMigrations.REPOSITORY,
migration.db_initial_version() + 1)
for version in xrange(migration.db_initial_version() + 2,
TestMigrations.REPOSITORY.latest + 1):
# upgrade -> downgrade -> upgrade
self._migrate_up(engine, version, with_data=True)
if snake_walk:
self._migrate_down(engine, version - 1)
self._migrate_up(engine, version)
if downgrade:
# Now walk it back down to 0 from the latest, testing
# the downgrade paths.
for version in reversed(
xrange(migration.db_initial_version() + 1,
TestMigrations.REPOSITORY.latest)):
# downgrade -> upgrade -> downgrade
self._migrate_down(engine, version)
if snake_walk:
self._migrate_up(engine, version + 1)
self._migrate_down(engine, version)
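    # A hedged trace of the snake walk for latest = B + 3 (B = initial
    # version), as driven by the loops above:
    #   up B+1; up B+2; down B+1; up B+2; up B+3; down B+2; up B+3;
    #   then back down: down B+2; up B+3; down B+2; down B+1; up B+2; down B+1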
def _migrate_down(self, engine, version):
migration_api.downgrade(engine,
TestMigrations.REPOSITORY,
version)
self.assertEqual(version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
def _migrate_up(self, engine, version, with_data=False):
"""Migrate up to a new version of the db.
We allow for data insertion and post checks at every
migration version with special _prerun_### and
_check_### functions in the main test.
"""
# NOTE(sdague): try block is here because it's impossible to debug
# where a failed data migration happens otherwise
try:
if with_data:
data = None
prerun = getattr(self, "_prerun_%3.3d" % version, None)
if prerun:
data = prerun(engine)
migration_api.upgrade(engine,
TestMigrations.REPOSITORY,
version)
self.assertEqual(
version,
migration_api.db_version(engine,
TestMigrations.REPOSITORY))
if with_data:
check = getattr(self, "_check_%3.3d" % version, None)
if check:
check(engine, data)
        except Exception:
            # Surface which version failed before re-raising (see NOTE above).
            print("Failed to migrate to version %s on engine %s" %
                  (version, engine))
            raise
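    # Data hooks are resolved by zero-padded version number; a hedged sketch
    # of a pair for a hypothetical version 42:
    #
    #     def _prerun_042(self, engine):
    #         ...insert fixture rows and return them as `data`...
    #
    #     def _check_042(self, engine, data):
    #         ...assert the fixtures survived the migration...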
# migration 004 - change volume types to UUID
def _prerun_004(self, engine):
data = {
'volumes': [{'id': str(uuid.uuid4()), 'host': 'test1',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test2',
'volume_type_id': 1},
{'id': str(uuid.uuid4()), 'host': 'test3',
'volume_type_id': 3},
],
'volume_types': [{'name': 'vtype1'},
{'name': 'vtype2'},
{'name': 'vtype3'},
],
'volume_type_extra_specs': [{'volume_type_id': 1,
'key': 'v1',
'value': 'hotep',
},
{'volume_type_id': 1,
'key': 'v2',
'value': 'bending rodrigez',
},
{'volume_type_id': 2,
'key': 'v3',
'value': 'bending rodrigez',
},
]}
volume_types = get_table(engine, 'volume_types')
for vtype in data['volume_types']:
r = volume_types.insert().values(vtype).execute()
vtype['id'] = r.inserted_primary_key[0]
volume_type_es = get_table(engine, 'volume_type_extra_specs')
for vtes in data['volume_type_extra_specs']:
r = volume_type_es.insert().values(vtes).execute()
vtes['id'] = r.inserted_primary_key[0]
volumes = get_table(engine, 'volumes')
for vol in data['volumes']:
r = volumes.insert().values(vol).execute()
vol['id'] = r.inserted_primary_key[0]
return data
def _check_004(self, engine, data):
volumes = get_table(engine, 'volumes')
v1 = volumes.select(volumes.c.id ==
data['volumes'][0]['id']
).execute().first()
v2 = volumes.select(volumes.c.id ==
data['volumes'][1]['id']
).execute().first()
v3 = volumes.select(volumes.c.id ==
data['volumes'][2]['id']
).execute().first()
volume_types = get_table(engine, 'volume_types')
vt1 = volume_types.select(volume_types.c.name ==
data['volume_types'][0]['name']
).execute().first()
vt2 = volume_types.select(volume_types.c.name ==
data['volume_types'][1]['name']
).execute().first()
vt3 = volume_types.select(volume_types.c.name ==
data['volume_types'][2]['name']
).execute().first()
vtes = get_table(engine, 'volume_type_extra_specs')
vtes1 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][0]['key']
).execute().first()
vtes2 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][1]['key']
).execute().first()
vtes3 = vtes.select(vtes.c.key ==
data['volume_type_extra_specs'][2]['key']
).execute().first()
self.assertEqual(v1['volume_type_id'], vt1['id'])
self.assertEqual(v2['volume_type_id'], vt1['id'])
self.assertEqual(v3['volume_type_id'], vt3['id'])
self.assertEqual(vtes1['volume_type_id'], vt1['id'])
self.assertEqual(vtes2['volume_type_id'], vt1['id'])
self.assertEqual(vtes3['volume_type_id'], vt2['id'])
def test_migration_005(self):
"""Test that adding source_volid column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 4)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 5)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c.source_volid.type,
sqlalchemy.types.VARCHAR)
def _metadatas(self, upgrade_to, downgrade_to=None):
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine,
TestMigrations.REPOSITORY,
upgrade_to)
if downgrade_to is not None:
migration_api.downgrade(
engine, TestMigrations.REPOSITORY, downgrade_to)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
yield metadata
def metadatas_upgraded_to(self, revision):
return self._metadatas(revision)
def metadatas_downgraded_from(self, revision):
return self._metadatas(revision, revision - 1)
def test_upgrade_006_adds_provider_location(self):
for metadata in self.metadatas_upgraded_to(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertIsInstance(snapshots.c.provider_location.type,
sqlalchemy.types.VARCHAR)
def test_downgrade_006_removes_provider_location(self):
for metadata in self.metadatas_downgraded_from(6):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertNotIn('provider_location', snapshots.c)
def test_upgrade_007_adds_fk(self):
for metadata in self.metadatas_upgraded_to(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
fkey, = snapshots.c.volume_id.foreign_keys
self.assertEqual(volumes.c.id, fkey.column)
def test_downgrade_007_removes_fk(self):
for metadata in self.metadatas_downgraded_from(7):
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertEqual(0, len(snapshots.c.volume_id.foreign_keys))
def test_migration_008(self):
"""Test that adding and removing the backups table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 7)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"backups"))
backups = sqlalchemy.Table('backups',
metadata,
autoload=True)
self.assertIsInstance(backups.c.created_at.type,
self.time_type[engine.name])
self.assertIsInstance(backups.c.updated_at.type,
self.time_type[engine.name])
self.assertIsInstance(backups.c.deleted_at.type,
self.time_type[engine.name])
self.assertIsInstance(backups.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(backups.c.id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.volume_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.user_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.project_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.host.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.availability_zone.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.display_name.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.display_description.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.container.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.status.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.fail_reason.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.service_metadata.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.service.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(backups.c.size.type,
sqlalchemy.types.INTEGER)
self.assertIsInstance(backups.c.object_count.type,
sqlalchemy.types.INTEGER)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 7)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"backups"))
def test_migration_009(self):
"""Test adding snapshot_metadata table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 8)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
snapshot_metadata = sqlalchemy.Table('snapshot_metadata',
metadata,
autoload=True)
self.assertIsInstance(snapshot_metadata.c.created_at.type,
self.time_type[engine.name])
self.assertIsInstance(snapshot_metadata.c.updated_at.type,
self.time_type[engine.name])
self.assertIsInstance(snapshot_metadata.c.deleted_at.type,
self.time_type[engine.name])
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(snapshot_metadata.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(snapshot_metadata.c.id.type,
sqlalchemy.types.INTEGER)
self.assertIsInstance(snapshot_metadata.c.snapshot_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(snapshot_metadata.c.key.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(snapshot_metadata.c.value.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 8)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"snapshot_metadata"))
def test_migration_010(self):
"""Test adding transfers table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 9)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"transfers"))
transfers = sqlalchemy.Table('transfers',
metadata,
autoload=True)
self.assertIsInstance(transfers.c.created_at.type,
self.time_type[engine.name])
self.assertIsInstance(transfers.c.updated_at.type,
self.time_type[engine.name])
self.assertIsInstance(transfers.c.deleted_at.type,
self.time_type[engine.name])
self.assertIsInstance(transfers.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(transfers.c.id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(transfers.c.volume_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(transfers.c.display_name.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(transfers.c.salt.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(transfers.c.crypt_hash.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(transfers.c.expires_at.type,
self.time_type[engine.name])
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 9)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"transfers"))
def test_migration_011(self):
"""Test adding transfers table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 10)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes_v10 = sqlalchemy.Table('volumes',
metadata,
autoload=True)
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volumes"))
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
# Make sure we didn't miss any columns in the upgrade
for column in volumes_v10.c:
self.assertTrue(volumes.c.__contains__(column.name))
self.assertIsInstance(volumes.c.bootable.type,
self.bool_type[engine.name])
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 10)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('bootable', volumes.c)
# Make sure we put all the columns back
for column in volumes_v10.c:
self.assertTrue(volumes.c.__contains__(column.name))
def test_migration_012(self):
"""Test that adding attached_host column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 11)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c.attached_host.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 11)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('attached_host', volumes.c)
def test_migration_013(self):
"""Test that adding provider_geometry column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 12)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 13)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c.provider_geometry.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 12)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('provider_geometry', volumes.c)
def test_migration_014(self):
"""Test that adding _name_id column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 13)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 14)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c._name_id.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 13)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('_name_id', volumes.c)
def test_migration_015(self):
"""Test removing migrations table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"migrations"))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 14)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"migrations"))
def test_migration_016(self):
"""Test that dropping xen storage manager tables works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 15)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16)
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertFalse(engine.dialect.has_table(engine.connect(),
'sm_volume'))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 15)
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_flavors'))
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_backend_config'))
self.assertTrue(engine.dialect.has_table(engine.connect(),
'sm_volume'))
def test_migration_017(self):
"""Test that added encryption information works correctly."""
# upgrade schema
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 16)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17)
# encryption key UUID
volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
self.assertIn('encryption_key_id', volumes.c)
self.assertIsInstance(volumes.c.encryption_key_id.type,
sqlalchemy.types.VARCHAR)
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertIn('encryption_key_id', snapshots.c)
self.assertIsInstance(snapshots.c.encryption_key_id.type,
sqlalchemy.types.VARCHAR)
self.assertIn('volume_type_id', snapshots.c)
self.assertIsInstance(snapshots.c.volume_type_id.type,
sqlalchemy.types.VARCHAR)
# encryption types table
encryption = sqlalchemy.Table('encryption',
metadata,
autoload=True)
self.assertIsInstance(encryption.c.volume_type_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(encryption.c.cipher.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(encryption.c.key_size.type,
sqlalchemy.types.INTEGER)
self.assertIsInstance(encryption.c.provider.type,
sqlalchemy.types.VARCHAR)
# downgrade schema
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 16)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes', metadata, autoload=True)
self.assertNotIn('encryption_key_id', volumes.c)
snapshots = sqlalchemy.Table('snapshots', metadata, autoload=True)
self.assertNotIn('encryption_key_id', snapshots.c)
self.assertFalse(engine.dialect.has_table(engine.connect(),
'encryption'))
def test_migration_018(self):
"""Test that added qos_specs table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 17)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18)
self.assertTrue(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
qos_specs = sqlalchemy.Table('quality_of_service_specs',
metadata,
autoload=True)
self.assertIsInstance(qos_specs.c.created_at.type,
self.time_type[engine.name])
self.assertIsInstance(qos_specs.c.updated_at.type,
self.time_type[engine.name])
self.assertIsInstance(qos_specs.c.deleted_at.type,
self.time_type[engine.name])
self.assertIsInstance(qos_specs.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(qos_specs.c.id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(qos_specs.c.specs_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(qos_specs.c.key.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(qos_specs.c.value.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 17)
self.assertFalse(engine.dialect.has_table(
engine.connect(), "quality_of_service_specs"))
def test_migration_019(self):
"""Test that adding migration_status column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 18)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c.migration_status.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 18)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('migration_status', volumes.c)
def test_migration_020(self):
"""Test adding volume_admin_metadata table works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 19)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)
self.assertTrue(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
volume_admin_metadata = sqlalchemy.Table('volume_admin_metadata',
metadata,
autoload=True)
self.assertIsInstance(volume_admin_metadata.c.created_at.type,
self.time_type[engine.name])
self.assertIsInstance(volume_admin_metadata.c.updated_at.type,
self.time_type[engine.name])
self.assertIsInstance(volume_admin_metadata.c.deleted_at.type,
self.time_type[engine.name])
self.assertIsInstance(volume_admin_metadata.c.deleted.type,
self.bool_type[engine.name])
self.assertIsInstance(volume_admin_metadata.c.id.type,
sqlalchemy.types.INTEGER)
self.assertIsInstance(volume_admin_metadata.c.volume_id.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volume_admin_metadata.c.key.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volume_admin_metadata.c.value.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 19)
self.assertFalse(engine.dialect.has_table(engine.connect(),
"volume_admin_metadata"))
def test_migration_021(self):
"""Test adding default data for quota classes works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 20)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 21)
quota_class_metadata = sqlalchemy.Table('quota_classes',
metadata,
autoload=True)
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 20)
# Defaults should not be deleted during downgrade
num_defaults = quota_class_metadata.count().\
where(quota_class_metadata.c.class_name == 'default').\
execute().scalar()
self.assertEqual(3, num_defaults)
def test_migration_022(self):
"""Test that adding disabled_reason column works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 21)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 22)
services = sqlalchemy.Table('services',
metadata,
autoload=True)
self.assertIsInstance(services.c.disabled_reason.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 21)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
services = sqlalchemy.Table('services',
metadata,
autoload=True)
self.assertNotIn('disabled_reason', services.c)
def test_migration_023(self):
"""Test that adding reservations index works correctly."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 22)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 23)
reservations = sqlalchemy.Table('reservations',
metadata,
autoload=True)
index_columns = []
for idx in reservations.indexes:
if idx.name == 'reservations_deleted_expire_idx':
index_columns = idx.columns.keys()
break
self.assertEqual(sorted(['deleted', 'expire']),
sorted(index_columns))
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 22)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
reservations = sqlalchemy.Table('reservations',
metadata,
autoload=True)
index_names = [idx.name for idx in reservations.indexes]
self.assertNotIn('reservations_deleted_expire_idx', index_names)
def test_migration_024(self):
"""Test adding replication columns to volume table."""
for (key, engine) in self.engines.items():
migration_api.version_control(engine,
TestMigrations.REPOSITORY,
migration.db_initial_version())
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 23)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
migration_api.upgrade(engine, TestMigrations.REPOSITORY, 24)
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertIsInstance(volumes.c.replication_status.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volumes.c.replication_extended_status.type,
sqlalchemy.types.VARCHAR)
self.assertIsInstance(volumes.c.replication_driver_data.type,
sqlalchemy.types.VARCHAR)
migration_api.downgrade(engine, TestMigrations.REPOSITORY, 23)
metadata = sqlalchemy.schema.MetaData()
metadata.bind = engine
volumes = sqlalchemy.Table('volumes',
metadata,
autoload=True)
self.assertNotIn('replication_status', volumes.c)
self.assertNotIn('replication_extended_status', volumes.c)
self.assertNotIn('replication_driver_data', volumes.c)
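        # Each migration test above follows the same pattern: put the schema
        # under version control, upgrade to revision N-1, then to revision N,
        # assert the new tables/columns/indexes exist with the expected types,
        # then downgrade back to N-1 and assert they are gone again.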
|
|
'''
sphinxEvaluator processes the Sphinx language in the context of an XBRL DTS and instance.
(c) Copyright 2013 Mark V Systems Limited, California US, All rights reserved.
Mark V copyright applies to this software, which is licensed according to the terms of Arelle(r).
Sphinx is a Rules Language for XBRL described by a Sphinx 2 Primer
(c) Copyright 2012 CoreFiling, Oxford UK.
Sphinx copyright applies to the Sphinx language, not to this software.
Mark V Systems conveys neither rights nor license for the Sphinx language.
'''
import operator
from .SphinxContext import HyperspaceBindings, HyperspaceBinding
from .SphinxParser import (astFunctionReference, astHyperspaceExpression, astNode,
astFormulaRule, astReportRule,
astVariableReference)
from .SphinxMethods import (methodImplementation, functionImplementation,
aggreateFunctionImplementation, aggreateFunctionAcceptsFactArgs,
moduleInit as SphinxMethodsModuleInit)
from arelle.ModelFormulaObject import Aspect
from arelle.ModelValue import QName
from arelle.ModelInstanceObject import ModelFact
from arelle.ModelXbrl import DEFAULT, NONDEFAULT, DEFAULTorNONDEFAULT
from arelle import XbrlConst, XmlUtil
class SphinxException(Exception):
def __init__(self, node, code, message, **kwargs ):
self.node = node
self.code = code
self.message = message
self.kwargs = kwargs
self.args = ( self.__repr__(), )
def __repr__(self):
return _('[{0}] exception: {1} at {2}').format(self.code, self.message % self.kwargs, self.node.sourceFileLine)
class SphinxSpecialValue:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
UNBOUND = SphinxSpecialValue("unbound")
NONE = SphinxSpecialValue("none")
def evaluateRuleBase(sphinxContext):
# clear any residual values
for constantNode in sphinxContext.constants.values():
constantNode.value = None
clearEvaluation(sphinxContext)
# check any rule-base preconditions
for preconditionNode in sphinxContext.ruleBasePreconditionNodes:
preconditionPasses = evaluate(preconditionNode, sphinxContext)
clearEvaluation(sphinxContext)
if not preconditionPasses:
return
# evaluate rules
for ruleProg in sphinxContext.rules:
evaluate(ruleProg, sphinxContext)
clearEvaluation(sphinxContext)
# dereference constants
for constantNode in sphinxContext.constants.values():
constantNode.value = None
def clearEvaluation(sphinxContext):
sphinxContext.tags.clear()
sphinxContext.localVariables.clear()
while sphinxContext.hyperspaceBindings:
sphinxContext.hyperspaceBindings.close() # resets sphinxContext.hyperspaceBindings to parent bindings
def evaluate(node, sphinxContext, value=False, fallback=None, hsBoundFact=False):
if isinstance(node, astNode):
if fallback is None:
result = evaluator[node.__class__.__name__](node, sphinxContext)
else:
try:
result = evaluator[node.__class__.__name__](node, sphinxContext)
except StopIteration:
if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s has unbound evaluation"),
sourceFileLine=node.sourceFileLine, node=str(node))
return fallback
if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s evaluation: %(value)s"),
sourceFileLine=node.sourceFileLine, node=str(node), value=result)
if result is not None:
if isinstance(result, HyperspaceBinding):
if hsBoundFact: # return fact, not the value of fact
return result.yieldedFact
elif value:
return result.value
# dereference nodes to their value
if (value or hsBoundFact) and isinstance(result, astNode):
return evaluate(result, sphinxContext, value, fallback, hsBoundFact)
return result
return result
elif isinstance(node, (tuple,list)):
return [evaluate(item, sphinxContext, value, fallback, hsBoundFact)
for item in node]
elif isinstance(node, set):
return set(evaluate(item, sphinxContext, value, fallback, hsBoundFact)
for item in node)
else:
return node
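# Dispatch sketch: evaluate(node, sphinxContext, value=True, fallback=UNBOUND)
# routes on the astNode subclass name via the evaluator table at module end,
# dereferences any HyperspaceBinding down to its fact value, and returns the
# fallback instead of propagating StopIteration when bindings are exhausted.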
def evaluateAnnotationDeclaration(node, sphinxContext):
return None
def evaluateBinaryOperation(node, sphinxContext):
leftValue = evaluate(node.leftExpr, sphinxContext, value=True, fallback=UNBOUND)
rightValue = evaluate(node.rightExpr, sphinxContext, value=True, fallback=UNBOUND)
op = node.op
if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("Binary op %(op)s v1: %(leftValue)s, v2: %(rightValue)s"),
sourceFileLine=node.sourceFileLine, op=op, leftValue=leftValue, rightValue=rightValue)
if op == ":=":
if sphinxContext.ruleNode.bind == "left":
if rightValue is UNBOUND: raise StopIteration
elif sphinxContext.ruleNode.bind == "right":
if leftValue is UNBOUND: raise StopIteration
elif sphinxContext.ruleNode.bind == "either":
if leftValue is UNBOUND and rightValue is UNBOUND: raise StopIteration
else: # both or default
if leftValue is UNBOUND or rightValue is UNBOUND: raise StopIteration
return (leftValue, rightValue)
elif op in {"|+|", "|+", "+|", "+", "|-|", "|-", "-|", "-"}:
if leftValue is UNBOUND:
if op[0] == '|':
raise StopIteration
else:
leftValue = 0
if rightValue is UNBOUND:
if op[-1] == '|':
raise StopIteration
else:
rightValue = 0
else:
if leftValue is UNBOUND:
return UNBOUND
if rightValue is UNBOUND:
if op == "or" and leftValue:
return True
return UNBOUND
if op == "/" and rightValue == 0: # prevent divide by zero
return UNBOUND
try:
result = {'+': operator.add, '-': operator.sub, '*': operator.mul, '/': operator.truediv,
'<': operator.lt, '>': operator.gt, '<=': operator.le, '>=': operator.ge,
'==': operator.eq, '!=': operator.ne,
'and': operator.and_, 'or': operator.or_,
}[op](leftValue, rightValue)
return result
except KeyError:
sphinxContext.modelXbrl.error("sphinx:error",
_("Operation \"%(op)s\" not implemented for %(node)s"),
sourceFileLine=node.sourceFileLine, op=op, node=str(node))
except (TypeError, ZeroDivisionError) as err:
sphinxContext.modelXbrl.error("sphinx:error",
_("Operation \"%(op)s\" raises exception %(error)s for %(node)s"),
sourceFileLine=node.sourceFileLine, op=op, node=str(node), error=str(err))
return None
def evaluateConstant(node, sphinxContext):
if node.value is None: # first time
        hsBindings = HyperspaceBindings(sphinxContext) # needs its own hsBindings, separate from the caller's
previousLocalVariables = sphinxContext.localVariables # save local variables
sphinxContext.localVariables = {}
node.value = evaluate(node.expr, sphinxContext)
if sphinxContext.formulaOptions.traceVariableSetExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("Constant %(name)s assigned value: %(value)s"),
sourceFileLine=node.sourceFileLine, name=node.constantName, value=node.value)
hsBindings.close()
sphinxContext.localVariables = previousLocalVariables
return node.value
def evaluateFor(node, sphinxContext):
# add a hyperspaceBinding to sphinxContext for this node
hsBindings = sphinxContext.hyperspaceBindings
forBinding = hsBindings.forBinding(node)
    # set the variable here: it is needed for the next() operation, but it is
    # cleared again once evaluation leaves the for-expression's context
sphinxContext.localVariables[node.name] = forBinding.yieldedValue
return evaluate(node.expr, sphinxContext)
def evaluateFunctionDeclaration(node, sphinxContext, args):
overriddenVariables = {}
if isinstance(args, dict):
# args may not all be used in the function declaration, just want used ones
argDict = dict((name, value)
for name, value in args.items()
if name in node.params)
else: # purely positional args
# positional parameters named according to function prototype
if len(args) != len(node.params):
sphinxContext.modelXbrl.log("ERROR", "sphinx.functionArgumentsMismatch",
_("Function %(name)s requires %(required)s parameters but %(provided)s are provided"),
sourceFileLine=node.sourceFileLine,
name=node.name, required=len(node.params), provided=len(args))
return None
argDict = dict((paramName, args[i])
for i, paramName in enumerate(node.params))
for name, value in argDict.items():
if name in sphinxContext.localVariables:
overriddenVariables[name] = sphinxContext.localVariables[name]
sphinxContext.localVariables[name] = value
def clearFunctionArgs():
for name in argDict.keys():
del sphinxContext.localVariables[name]
sphinxContext.localVariables.update(overriddenVariables)
overriddenVariables.clear()
try:
result = evaluate(node.expr, sphinxContext)
clearFunctionArgs()
return result
except StopIteration as ex:
clearFunctionArgs()
raise ex # reraise exception
def evaluateFunctionReference(node, sphinxContext):
name = node.name
if name in ("error", "warning", "info", "pass"):
sphinxContext.dynamicSeverity = node.name
elif name == "unbound":
return UNBOUND
if name in aggreateFunctionImplementation:
return evaluateAggregateFunction(node, sphinxContext, name)
if name in sphinxContext.functions: # user defined function
resolveValues = sphinxContext.functions[name].functionType == "function"
namedParametersAssignedTo = sphinxContext.localVariables
else:
resolveValues = True
if name in ("error", "warning", "info", "pass"):
namedParametersAssignedTo = sphinxContext.tags
else:
namedParametersAssignedTo = sphinxContext.localVariables
# evaluate local variables
for localVar in node.localVariables:
evaluate(localVar, sphinxContext)
# evaluate args
args = []
tagName = None
l = len(node.args)
for i in range(l):
arg = node.args[i]
if arg == "=":
if i > 0:
tagName = node.args[i-1]
elif i == l - 1 or node.args[i+1] != "=":
if resolveValues: # macros pass in the argument, not value
arg = evaluate(arg, sphinxContext, value=True)
elif (isinstance(arg, astVariableReference) and
getattr(sphinxContext.localVariables.get(arg.variableName),
"isMacroParameter", False)):
# pass original macro parameter, not a reference to it (otherwise causes looping)
arg = sphinxContext.localVariables[arg.variableName]
elif isinstance(arg, astNode):
arg.isMacroParameter = True
args.append(arg)
if tagName:
namedParametersAssignedTo[tagName] = arg
tagName = None
if name in ("error", "warning", "info", "pass"):
result = None
# call function here
elif name in sphinxContext.functions: # user defined function
result = evaluateFunctionDeclaration(sphinxContext.functions[name], sphinxContext, args)
# call built-in functions
elif name in functionImplementation:
result = functionImplementation[name](node, sphinxContext, args)
else:
raise SphinxException(node,
"sphinx:functionName",
_("unassigned function name %(name)s"),
name=name)
# remove local variables
for localVar in node.localVariables:
del sphinxContext.localVariables[localVar.name]
return result
def evaluateAggregateFunction(node, sphinxContext, name):
# determine if evaluating args found hyperspace (first time)
args = []
iterateAbove, bindingsLen = getattr(node, "aggregationHsBindings", (None, None))
firstTime = bindingsLen is None
hsBindings = sphinxContext.hyperspaceBindings
parentAggregationNode = hsBindings.aggregationNode
parentIsValuesIteration = hsBindings.isValuesIteration
hsBindings.aggregationNode = node # block removing nested aspect bindings
hsBindings.isValuesIteration = False
prevHsBindingsLen = len(hsBindings.hyperspaceBindings)
hsBoundFact = aggreateFunctionAcceptsFactArgs[name]
arg = node.args[0]
try:
while (True): # possibly multiple bindings
# evaluate local variables
for localVar in node.localVariables:
evaluate(localVar, sphinxContext)
value = evaluate(arg, sphinxContext, value=True, hsBoundFact=hsBoundFact)
            if isinstance(value, (list, set)):
                for listArg in value:
                    if listArg is not UNBOUND:  # skip unbound members, not the list itself
                        args.append(evaluate(listArg, sphinxContext, value=True))
elif value is not UNBOUND:
args.append(value)
if firstTime:
if len(hsBindings.hyperspaceBindings) == prevHsBindingsLen:
# no hs bindings, just scalar
break
else: # has hs bindings, evaluate rest of them
firstTime = False
iterateAbove = prevHsBindingsLen - 1
bindingsLen = len(hsBindings.hyperspaceBindings)
node.aggregationHsBindings = (iterateAbove, bindingsLen)
hsBindings.next(iterateAbove, bindingsLen)
except StopIteration:
pass # no more bindings
hsBindings.isValuesIteration = parentIsValuesIteration
hsBindings.aggregationNode = parentAggregationNode
# remove local variables
for localVar in node.localVariables:
        if localVar.name in sphinxContext.localVariables:
del sphinxContext.localVariables[localVar.name]
if sphinxContext.formulaOptions.traceVariableExpressionEvaluation:
sphinxContext.modelXbrl.info("sphinx:trace",
_("Aggregative function %(name)s arguments: %(args)s"),
sourceFileLine=node.sourceFileLine, name=name,
args=",".join(str(a) for a in args))
try:
return aggreateFunctionImplementation[name](node, sphinxContext, args)
except (TypeError, ZeroDivisionError) as err:
sphinxContext.modelXbrl.error("sphinx:error",
_("Function %(name)s raises exception %(error)s in %(node)s"),
sourceFileLine=node.sourceFileLine, name=name, node=str(node), error=str(err))
return None
def evaluateHyperspaceExpression(node, sphinxContext):
# add a hyperspaceBinding to sphinxContext for this node
hsBindings = sphinxContext.hyperspaceBindings
nodeBinding = hsBindings.nodeBinding(node)
return nodeBinding
def evaluateIf(node, sphinxContext):
condition = evaluate(node.condition, sphinxContext, value=True)
if condition:
expr = node.thenExpr
else:
expr = node.elseExpr
return evaluate(expr, sphinxContext)
def evaluateMessage(node, sphinxContext, resultTags, hsBindings):
def evaluateTagExpr(tagExpr, modifier):
if modifier == "value":
value = evaluate(tagExpr, sphinxContext, value=True)
elif modifier == "context":
value = contextView(sphinxContext, tagExpr)
else:
value = "{0} {1}".format(evaluate(tagExpr, sphinxContext, value=True),
contextView(sphinxContext))
return value
msgstr = evaluate(node.message, sphinxContext, value=True)
text = []
args = []
i = 0
while True:
j = msgstr.find("${", i)
if j >= 0:
text.append(msgstr[i:j]) # previous part of string
k = msgstr.find("}", j+2)
if k > j:
text.append("{" + str(len(args)) + "}")
tag, sep, modifier = msgstr[j+2:k].strip().partition(".")
if tag == "context":
                    value = contextView(sphinxContext)
elif tag in resultTags:
value = evaluateTagExpr(resultTags.tags[tag], modifier)
elif tag in sphinxContext.tags:
value = evaluateTagExpr(sphinxContext.tags[tag], modifier)
elif tag in sphinxContext.taggedConstants:
value = evaluateTagExpr(evaluateConstant(sphinxContext.taggedConstants[tag], sphinxContext), modifier)
elif tag in ("trace", "left", "right", "difference"):
value = 'Tag "{0}" is not yet supported'.format(tag)
else:
sphinxContext.modelXbrl.log("ERROR", "sphinx.unboundMessageTag",
_("Validation rule tag %(tag)s is not Bound"),
sourceFileLine=node.sourceFileLine,
tag=tag)
value = "${" + tag + "}"
args.append(value)
i = k + 1
else:
text.append(msgstr[i:])
break
messageStr = ''.join(text)
return messageStr.format(*args)
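# Substitution sketch (with a hypothetical tag "diff"): a message such as
#   "difference ${diff.value} for ${context}"
# is rewritten to "difference {0} for {1}" and then formatted with the
# evaluated tag value and contextView(sphinxContext).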
def evaluateMethodReference(node, sphinxContext):
args = []
for i, nodeArg in enumerate(node.args):
arg = evaluate(nodeArg,
sphinxContext,
value=True,
hsBoundFact=(i == 0)) # don't deref arg 0
args.append(arg)
return methodImplementation.get(node.name, # requested method
methodImplementation["unknown"] # default if missing method
)(node, sphinxContext, args)
def evaluateNoOp(node, sphinxContext):
return None
def evaluateNumericLiteral(node, sphinxContext):
return node.value
def evaluatePreconditionDeclaration(node, sphinxContext):
hsBindings = HyperspaceBindings(sphinxContext)
result = evaluate(node.expr, sphinxContext, value=True)
hsBindings.close()
return result
def evaluatePreconditionReference(node, sphinxContext):
preconditionPasses = True
for name in node.names:
if name in sphinxContext.preconditionNodes:
if not evaluate(sphinxContext.preconditionNodes[name], sphinxContext, value=True):
preconditionPasses = False
clearEvaluation(sphinxContext)
if not preconditionPasses:
break
return preconditionPasses
def evaluateQnameLiteral(node, sphinxContext):
return node.value
def evaluateReportRule(node, sphinxContext):
return None
def evaluateRuleBasePrecondition(node, sphinxContext):
if node.precondition:
return evaluate(node.precondition, sphinxContext, value=True)
return True
def evaluateStringLiteral(node, sphinxContext):
return node.text
def evaluateTagAssignment(node, sphinxContext):
result = evaluate(node.expr, sphinxContext, value=True)
sphinxContext.tags[node.tagName] = result
return result
def evaluateTagReference(node, sphinxContext):
try:
return sphinxContext.tags[node.name]
except KeyError:
raise SphinxException(node,
"sphinx:tagName",
_("unassigned tag name %(name)s"),
name=node.name )
def evaluateRule(node, sphinxContext):
isFormulaRule = isinstance(node, astFormulaRule)
isReportRule = isinstance(node, astReportRule)
name = (node.name or ("sphinx.report" if isReportRule else "sphinx.raise"))
nodeId = node.nodeTypeName + ' ' + name
if node.precondition:
result = evaluate(node.precondition, sphinxContext, value=True)
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s precondition evaluation: %(value)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, value=result)
if not result:
return None
# nest hyperspace binding
sphinxContext.ruleNode = node
hsBindings = None
ruleIteration = 0
try:
hsBindings = HyperspaceBindings(sphinxContext)
while True:
ruleIteration += 1
sphinxContext.dynamicSeverity = None
sphinxContext.tags.clear()
sphinxContext.localVariables.clear()
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s starting iteration %(iteration)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, iteration=ruleIteration)
for varAssignNode in node.variableAssignments:
evaluateVariableAssignment(varAssignNode, sphinxContext)
result = evaluate(node.expr, sphinxContext, value=True)
if result is UNBOUND:
result = None # nothing to do for this pass
elif isFormulaRule:
left, right = result
if left is UNBOUND:
difference = UNBOUND
elif right is UNBOUND:
difference = UNBOUND
else:
difference = abs(left - right)
result = difference != 0
resultTags = {"left": left, "right": right, "difference": difference}
sphinxContext.dynamicSeverity = None
if node.severity in sphinxContext.functions:
evaluateFunctionDeclaration(sphinxContext.functions[node.severity],
sphinxContext,
{"difference": difference, "left": left, "right": right})
if sphinxContext.dynamicSeverity is None or sphinxContext.dynamicSeverity == "pass": # don't process pass
sphinxContext.dynamicSeverity = None
result = False
else:
if isReportRule:
resultTags = {"value": result}
else:
resultTags = {}
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s result %(result)s %(severity)s iteration %(iteration)s"),
sourceFileLine=node.sourceFileLine, node=nodeId, iteration=ruleIteration,
result=result,
severity=(sphinxContext.dynamicSeverity or node.severity or
("info" if isReportRule else "error")))
if ((result or isReportRule) or
(sphinxContext.dynamicSeverity and sphinxContext.dynamicSeverity != "pass")):
severity = (sphinxContext.dynamicSeverity or node.severity or
("info" if isReportRule else "error"))
if isinstance(severity, astFunctionReference):
severity = severity.name
logSeverity = {"error" : "ERROR", "warning": "WARNING", "info": "INFO"}[severity]
if node.message:
sphinxContext.modelXbrl.log(logSeverity, name,
evaluateMessage(node.message, sphinxContext, resultTags, hsBindings),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity)
elif isFormulaRule:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Formula %(severity)s difference %(value)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
value=difference,
aspects=contextView(sphinxContext))
elif isReportRule:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Report %(severity)s %(value)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
value=result,
aspects=contextView(sphinxContext))
else:
sphinxContext.modelXbrl.log(logSeverity,
name,
_("Validation rule %(severity)s for %(aspects)s"),
sourceFileLine=[node.sourceFileLine] +
[(fact.modelDocument.uri, fact.sourceline) for fact in hsBindings.boundFacts],
severity=severity,
aspects=contextView(sphinxContext))
hsBindings.next() # raises StopIteration when done
except StopIteration:
if sphinxContext.formulaOptions.traceVariableSetExpressionResult:
sphinxContext.modelXbrl.info("sphinx:trace",
_("%(node)s StopIteration"),
sourceFileLine=node.sourceFileLine, node=nodeId)
except SphinxException as ex:
sphinxContext.modelXbrl.log("ERROR",
ex.code,
_("Exception in %(node)s: %(exception)s"),
node=nodeId,
ruleName=name,
exception=ex.message % ex.kwargs,
sourceFileLine=[node.sourceFileLine] + ([ex.node.sourceFileLine] if ex.node is not node else []),
**ex.kwargs)
if hsBindings is not None:
hsBindings.close()
return None
def noop(arg):
return arg
def evaluateUnaryOperation(node, sphinxContext):
if node.op == "brackets": # parentheses around an expression
return node.expr
value = evaluate(node.expr, sphinxContext, value=True, fallback=UNBOUND)
if value is UNBOUND:
return UNBOUND
try:
result = {'+': operator.pos, '-': operator.neg, 'not': operator.not_,
'values': noop,
}[node.op](value)
return result
except KeyError:
sphinxContext.modelXbrl.error("sphinx:error",
_("%(node)s operation %(op)s not implemented"),
modelObject=node, op=node.op)
return None
def evaluateValuesIteration(node, sphinxContext):
hsBindings = sphinxContext.hyperspaceBindings
if hsBindings.aggregationNode is None:
sphinxContext.modelXbrl.error("sphinx:warning",
_("Values iteration expected to be nested in an aggregating function"),
modelObject=node)
else:
hsBindings.isValuesIteration = True
return evaluate(node.expr, sphinxContext)
def evaluateVariableAssignment(node, sphinxContext):
result = evaluate(node.expr, sphinxContext)
sphinxContext.localVariables[node.variableName] = result
if node.tagName:
sphinxContext.tags[node.tagName] = result
return result
def evaluateVariableReference(node, sphinxContext):
try:
return sphinxContext.localVariables[node.variableName]
except KeyError:
if node.variableName in sphinxContext.constants:
return evaluateConstant(sphinxContext.constants[node.variableName], sphinxContext)
raise SphinxException(node,
"sphinx:variableName",
_("unassigned variable name %(name)s"),
name=node.variableName)
def evaluateWith(node, sphinxContext):
# covered clauses of withExpr match uncovered aspects of expr
hsBindings = sphinxContext.hyperspaceBindings
withRestrictionBinding = hsBindings.nodeBinding(node.restrictionExpr, isWithRestrictionNode=True)
hsBindings.withRestrictionBindings.append(withRestrictionBinding)
try:
for varAssignNode in node.variableAssignments:
evaluateVariableAssignment(varAssignNode, sphinxContext)
result = evaluate(node.bodyExpr, sphinxContext)
except Exception as ex:
del hsBindings.withRestrictionBindings[-1]
raise ex # re-throw the exception after removing withstack entry
del hsBindings.withRestrictionBindings[-1]
return result
def contextView(sphinxContext, fact=None):
if isinstance(fact, ModelFact):
return "{0}[{1}]".format(fact.qname,
", ".join("{2}={1}".format(aspectName(aspect),
factAspectValue(fact, aspect, view=True))
for aspect, fact in sphinxContext.hyperspaceBindings.aspectBoundFacts.items()
if factAspectValue(fact, aspect) and aspect != Aspect.CONCEPT))
else:
return "[{0}]".format(", ".join("{0}={1}".format(aspectName(aspect),
factAspectValue(fact, aspect, view=True))
for aspect, fact in sphinxContext.hyperspaceBindings.aspectBoundFacts.items()
if factAspectValue(fact, aspect)))
def aspectName(aspect):
    if isinstance(aspect, QName):
        return aspect
    name = {Aspect.LOCATION: "tuple",
            Aspect.CONCEPT: "primary",
            Aspect.ENTITY_IDENTIFIER: "entity",
            Aspect.PERIOD: "period",
            Aspect.UNIT: "unit",
            Aspect.NON_XDT_SEGMENT: "segment",
            Aspect.NON_XDT_SCENARIO: "scenario",
            }.get(aspect)
    if name is not None:
        return name
    if aspect in Aspect.label:  # was unreachable behind an early return
        return Aspect.label[aspect]
    else:
        return str(aspect)
def factAspectValue(fact, aspect, view=False):
if fact is DEFAULT:
return 'none'
elif fact is NONDEFAULT:
return '*'
elif fact is DEFAULTorNONDEFAULT:
return '**'
elif aspect == Aspect.LOCATION:
parentQname = fact.getparent().qname
if parentQname == XbrlConst.qnXbrliXbrl: # not tuple
return NONE
return parentQname # tuple
elif aspect == Aspect.CONCEPT:
return fact.qname
elif fact.isTuple or fact.context is None:
return NONE #subsequent aspects don't exist for tuples
elif aspect == Aspect.UNIT:
if fact.unit is None:
return NONE
measures = fact.unit.measures
if measures[1]:
return "{0} / {1}".format(' '.join(str(m) for m in measures[0]),
' '.join(str(m) for m in measures[1]))
else:
return ' '.join(str(m) for m in measures[0])
else:
context = fact.context
if aspect == Aspect.PERIOD:
return ("forever" if context.isForeverPeriod else
XmlUtil.dateunionValue(context.instantDatetime, subtractOneDay=True) if context.isInstantPeriod else
XmlUtil.dateunionValue(context.startDatetime) + "-" + XmlUtil.dateunionValue(context.endDatetime, subtractOneDay=True))
elif aspect == Aspect.ENTITY_IDENTIFIER:
if view:
return context.entityIdentifier[1]
else:
return context.entityIdentifier # (scheme, identifier)
elif aspect in (Aspect.COMPLETE_SEGMENT, Aspect.COMPLETE_SCENARIO,
Aspect.NON_XDT_SEGMENT, Aspect.NON_XDT_SCENARIO):
return ''.join(XmlUtil.xmlstring(elt, stripXmlns=True, prettyPrint=True)
for elt in context.nonDimValues(aspect))
elif aspect == Aspect.DIMENSIONS:
return context.dimAspects(fact.xpCtx.defaultDimensionAspects)
elif isinstance(aspect, QName):
dimValue = context.dimValue(aspect)
if dimValue is None:
return NONE
else:
if isinstance(dimValue, QName): #default dim
return dimValue
elif dimValue.isExplicit:
return dimValue.memberQname
            else: # typed dimension
return dimValue.typedMember.xValue # typed element value
evaluator = {
"astAnnotationDeclaration": evaluateAnnotationDeclaration,
"astBinaryOperation": evaluateBinaryOperation,
"astComment": evaluateNoOp,
"astFor": evaluateFor,
"astFormulaRule": evaluateRule,
"astFunctionDeclaration": evaluateFunctionDeclaration,
"astFunctionReference": evaluateFunctionReference,
"astHyperspaceExpression": evaluateHyperspaceExpression,
"astIf": evaluateIf,
"astMessage": evaluateMessage,
"astMethodReference": evaluateMethodReference,
"astNamespaceDeclaration": evaluateNoOp,
"astNode": evaluateNoOp,
"astNoOp": evaluateNoOp,
"astNumericLiteral": evaluateNumericLiteral,
"astPreconditionDeclaration": evaluatePreconditionDeclaration,
"astQnameLiteral": evaluateQnameLiteral,
"astReportRule": evaluateRule,
"astSourceFile": evaluateNoOp,
"astRuleBasePrecondition": evaluateRuleBasePrecondition,
"astPreconditionReference": evaluatePreconditionReference,
"astStringLiteral": evaluateStringLiteral,
"astTagAssignment": evaluateTagAssignment,
"astTagReference": evaluateTagReference,
"astValidationRule": evaluateRule,
"astValuesIteration": evaluateValuesIteration,
"astVariableAssignment": evaluateVariableAssignment,
"astVariableReference": evaluateVariableReference,
"astUnaryOperation": evaluateUnaryOperation,
"astWith": evaluateWith,
}
SphinxMethodsModuleInit()
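# Typical entry point (sketch; assumes sphinxContext was prepared by the
# Sphinx loader with parsed rules, constants and preconditions):
#   evaluateRuleBase(sphinxContext)
# which checks rule-base preconditions, then runs every rule through
# evaluate() and clears tags, local variables and bindings between rules.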
|
|
import datetime
import ujson
from django.http import HttpResponse
from mock import patch
from typing import Any, Dict, List, Text, Union, Mapping
from zerver.lib.actions import (
do_change_is_admin,
do_set_realm_property,
do_deactivate_realm,
do_deactivate_stream,
)
from zerver.lib.send_email import send_future_email
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import tornado_redirected_to_list
from zerver.lib.test_runner import slow
from zerver.models import get_realm, Realm, UserProfile, ScheduledEmail, get_stream
class RealmTest(ZulipTestCase):
def assert_user_profile_cache_gets_new_name(self, user_profile, new_realm_name):
# type: (UserProfile, Text) -> None
self.assertEqual(user_profile.realm.name, new_realm_name)
def test_do_set_realm_name_caching(self):
# type: () -> None
"""The main complicated thing about setting realm names is fighting the
cache, and we start by populating the cache for Hamlet, and we end
by checking the cache to ensure that the new value is there."""
self.example_user('hamlet')
realm = get_realm('zulip')
new_name = u'Zed You Elle Eye Pea'
do_set_realm_property(realm, 'name', new_name)
self.assertEqual(get_realm(realm.string_id).name, new_name)
self.assert_user_profile_cache_gets_new_name(self.example_user('hamlet'), new_name)
def test_update_realm_name_events(self):
# type: () -> None
realm = get_realm('zulip')
new_name = u'Puliz'
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'name', new_name)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='name',
value=new_name,
))
def test_update_realm_description_events(self):
# type: () -> None
realm = get_realm('zulip')
new_description = u'zulip dev group'
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
do_set_realm_property(realm, 'description', new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_update_realm_description(self):
# type: () -> None
email = self.example_email("iago")
self.login(email)
realm = get_realm('zulip')
new_description = u'zulip dev group'
data = dict(description=ujson.dumps(new_description))
events = [] # type: List[Mapping[str, Any]]
with tornado_redirected_to_list(events):
result = self.client_patch('/json/realm', data)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.description, new_description)
event = events[0]['event']
self.assertEqual(event, dict(
type='realm',
op='update',
property='description',
value=new_description,
))
def test_realm_description_length(self):
# type: () -> None
new_description = u'A' * 1001
data = dict(description=ujson.dumps(new_description))
# create an admin user
email = self.example_email("iago")
self.login(email)
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Realm description is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.description, new_description)
def test_realm_name_length(self):
# type: () -> None
new_name = u'A' * (Realm.MAX_REALM_NAME_LENGTH + 1)
data = dict(name=ujson.dumps(new_name))
# create an admin user
email = self.example_email("iago")
self.login(email)
result = self.client_patch('/json/realm', data)
self.assert_json_error(result, 'Realm name is too long.')
realm = get_realm('zulip')
self.assertNotEqual(realm.name, new_name)
def test_admin_restrictions_for_changing_realm_name(self):
# type: () -> None
new_name = 'Mice will play while the cat is away'
user_profile = self.example_user('othello')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, False)
req = dict(name=ujson.dumps(new_name))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Must be a realm administrator')
def test_unauthorized_name_change(self):
# type: () -> None
data = {'full_name': 'Sir Hamlet'}
user_profile = self.example_user('hamlet')
email = user_profile.email
self.login(email)
do_set_realm_property(user_profile.realm, 'name_changes_disabled', True)
url = '/json/settings'
result = self.client_patch(url, data)
self.assertEqual(result.status_code, 200)
# Since the setting fails silently, no message is returned
self.assert_in_response("", result)
def test_do_deactivate_realm_clears_user_realm_cache(self):
# type: () -> None
"""The main complicated thing about deactivating realm names is
updating the cache, and we start by populating the cache for
Hamlet, and we end by checking the cache to ensure that his
realm appears to be deactivated. You can make this test fail
by disabling cache.flush_realm()."""
self.example_user('hamlet')
realm = get_realm('zulip')
do_deactivate_realm(realm)
user = self.example_user('hamlet')
self.assertTrue(user.realm.deactivated)
def test_do_deactivate_realm_clears_scheduled_jobs(self):
# type: () -> None
user = self.example_user('hamlet')
send_future_email('zerver/emails/followup_day1', to_user_id=user.id, delay=datetime.timedelta(hours=1))
self.assertEqual(ScheduledEmail.objects.count(), 1)
do_deactivate_realm(user.realm)
self.assertEqual(ScheduledEmail.objects.count(), 0)
    def test_do_deactivate_realm_on_deactivated_realm(self):
# type: () -> None
"""Ensure early exit is working in realm deactivation"""
realm = get_realm('zulip')
self.assertFalse(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
do_deactivate_realm(realm)
self.assertTrue(realm.deactivated)
def test_change_notifications_stream(self):
# type: () -> None
# We need an admin user.
email = 'iago@zulip.com'
self.login(email)
disabled_notif_stream_id = -1
req = dict(notifications_stream_id = ujson.dumps(disabled_notif_stream_id))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.notifications_stream, None)
new_notif_stream_id = 4
req = dict(notifications_stream_id = ujson.dumps(new_notif_stream_id))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.notifications_stream.id, new_notif_stream_id)
invalid_notif_stream_id = 1234
req = dict(notifications_stream_id = ujson.dumps(invalid_notif_stream_id))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, 'Invalid stream id')
realm = get_realm('zulip')
self.assertNotEqual(realm.notifications_stream.id, invalid_notif_stream_id)
def test_get_default_notifications_stream(self):
# type: () -> None
realm = get_realm("zulip")
verona = get_stream("verona", realm)
realm.notifications_stream_id = verona.id
realm.save()
notifications_stream = realm.get_notifications_stream()
self.assertEqual(notifications_stream.id, verona.id)
do_deactivate_stream(notifications_stream)
self.assertIsNone(realm.get_notifications_stream())
def test_change_realm_default_language(self):
# type: () -> None
new_lang = "de"
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, new_lang)
# we need an admin user.
email = self.example_email("iago")
self.login(email)
req = dict(default_language=ujson.dumps(new_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_success(result)
realm = get_realm('zulip')
self.assertEqual(realm.default_language, new_lang)
# Test to make sure that when invalid languages are passed
# as the default realm language, correct validation error is
# raised and the invalid language is not saved in db
invalid_lang = "invalid_lang"
req = dict(default_language=ujson.dumps(invalid_lang))
result = self.client_patch('/json/realm', req)
self.assert_json_error(result, "Invalid language '%s'" % (invalid_lang,))
realm = get_realm('zulip')
self.assertNotEqual(realm.default_language, invalid_lang)
class RealmAPITest(ZulipTestCase):
def setUp(self):
# type: () -> None
user_profile = self.example_user('cordelia')
email = user_profile.email
self.login(email)
do_change_is_admin(user_profile, True)
def set_up_db(self, attr, value):
# type: (str, Any) -> None
realm = get_realm('zulip')
setattr(realm, attr, value)
realm.save()
def update_with_api(self, name, value):
# type: (str, Union[Text, int, bool]) -> Realm
result = self.client_patch('/json/realm', {name: ujson.dumps(value)})
self.assert_json_success(result)
return get_realm('zulip') # refresh data
def do_test_realm_update_api(self, name):
# type: (str) -> None
"""Test updating realm properties.
If new realm properties have been added to the Realm model but the
test_values dict below has not been updated, this will raise an
assertion error.
"""
bool_tests = [False, True] # type: List[bool]
test_values = dict(
default_language=[u'de', u'en'],
description=[u'Realm description', u'New description'],
message_retention_days=[10, 20],
name=[u'Zulip', u'New Name'],
waiting_period_threshold=[10, 20],
) # type: Dict[str, Any]
vals = test_values.get(name)
if Realm.property_types[name] is bool:
vals = bool_tests
if vals is None:
            raise AssertionError('No test created for %s' % (name,))
self.set_up_db(name, vals[0])
realm = self.update_with_api(name, vals[1])
self.assertEqual(getattr(realm, name), vals[1])
realm = self.update_with_api(name, vals[0])
self.assertEqual(getattr(realm, name), vals[0])
@slow("Tests a dozen properties in a loop")
def test_update_realm_properties(self):
# type: () -> None
for prop in Realm.property_types:
self.do_test_realm_update_api(prop)
def test_update_realm_allow_message_editing(self):
# type: () -> None
"""Tests updating the realm property 'allow_message_editing'."""
self.set_up_db('allow_message_editing', False)
self.set_up_db('message_content_edit_limit_seconds', 0)
realm = self.update_with_api('allow_message_editing', True)
realm = self.update_with_api('message_content_edit_limit_seconds', 100)
self.assertEqual(realm.allow_message_editing, True)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('allow_message_editing', False)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 100)
realm = self.update_with_api('message_content_edit_limit_seconds', 200)
self.assertEqual(realm.allow_message_editing, False)
self.assertEqual(realm.message_content_edit_limit_seconds, 200)
|
|
#!/usr/bin/env python
"""Usage: ghdecoy.py [ARGS] CMD
ARGS:
-h|--help : display this help message
-k : do not delete generated repository and upload script
-n : just create the decoy repo but don't push it to github
-s : push over ssh instead of https
-v|--version : print version information and exit
-w : only create commits on working days (Mo-Fr)
-d DIR : directory to craft the fake repository in (default: /tmp)
-l LANG : make decoy repo look like language LANG
-m COUNT : only fill gaps of at least COUNT days (default: 5)
-r REPO : use the repository REPO (default: decoy)
-p NUM : sets the darkest shade of contribution 'pixels' to be
created to NUM. Valid values are 1-4 (default: 4).
-u USER : use the username USER instead of the current unix user
CMD : one of the following:
fill : fill all occurrences of 5 or more consecutive
days without commits with random noise
append : same as fill, but only fills the blank space
after the last existing commit
DATE[-DATE][,...] : fill only the given date(s). Overrides
'-m'. See man page for examples.
"""
import getopt
import sys
import os
import urllib2
import re
import random
import subprocess
import shutil
from datetime import datetime, timedelta
__version__ = '0.5.0'
content_templates = {
'raw': {
'ext': '',
'data': 'echo {1} > decoy',
},
'c': {
'ext': '.c',
'data': 'echo \'#include <stdio.h>\' > decoy.c\n' + \
'echo \'#include <stdlib.h>\' >> decoy.c\n' + \
'echo \'\' >> decoy.c\n' + \
'echo \'int main(void)\' >> decoy.c\n' + \
'echo \'{{\' >> decoy.c\n' + \
'echo \' puts("Hello World{1}!");\' >> decoy.c\n' + \
'echo \' return EXIT_SUCCESS;\' >> decoy.c\n' + \
'echo \'}}\' >> decoy.c\n',
},
'cpp': {
'ext': '.cpp',
'data': 'echo \'#include <iostream.h>\' > decoy.cpp\n' + \
'echo \'\' >> decoy.cpp\n' + \
'echo \'main()\' >> decoy.cpp\n' + \
'echo \'{{\' >> decoy.cpp\n' + \
'echo \' cout << "Hello World{1}!" << endl;\' >> decoy.cpp\n' + \
'echo \' return 0;\' >> decoy.cpp\n' + \
'echo \'}}\' >> decoy.cpp\n',
},
'csharp': {
'ext': '.cs',
'data': 'echo \'using System;\' > decoy.cs\n' + \
'echo \'\' >> decoy.cs\n' + \
'echo \'class Program\' >> decoy.cs\n' + \
'echo \'{{\' >> decoy.cs\n' + \
'echo \' static void Main()\' >> decoy.cs\n' + \
'echo \' {{\' >> decoy.cs\n' + \
'echo \' Console.WriteLine("Hello, world{1}!");\' >> decoy.cs\n' + \
'echo \' }}\' >> decoy.cs\n' + \
'echo \'}}\' >> decoy.cs\n',
},
'css': {
'ext': '.css',
'data': 'echo \'body:before {{\' > decoy.css\n' + \
'echo \' content: "Hello World{1}";\' >> decoy.css\n' + \
'echo \'}}\' >> decoy.css\n',
},
'go': {
'ext': '.go',
'data': 'echo \'package main\' > decoy.go\n' + \
'echo \'import "fmt"\' >> decoy.go\n' + \
'echo \'func main() {{\' >> decoy.go\n' + \
'echo \' fmt.Printf("Hello World{1}")\' >> decoy.go\n' + \
'echo \'}}\' >> decoy.go\n',
},
'html': {
'ext': '.html',
'data': 'echo \'<HTML>\' > decoy.html\n' + \
'echo \'<!-- Hello World in HTML -->\' >> decoy.html\n' + \
'echo \'<HEAD>\' >> decoy.html\n' + \
'echo \'<TITLE>Hello World{1}!</TITLE>\' >> decoy.html\n' + \
'echo \'</HEAD>\' >> decoy.html\n' + \
'echo \'<BODY>\' >> decoy.html\n' + \
'echo \'Hello World!\' >> decoy.html\n' + \
'echo \'</BODY>\' >> decoy.html\n' + \
'echo \'</HTML>\' >> decoy.html\n',
},
'java': {
'ext': '.java',
'data': 'echo \'public class HelloWorld {{\' > decoy.java\n' + \
'echo \'\' >> decoy.java\n' + \
'echo \' public static void main(String[] args) {{\' >> decoy.java\n' + \
'echo \' // Prints "Hello, World" to the terminal window.\' >> decoy.java\n' + \
'echo \' System.out.println("Hello, World{1}");\' >> decoy.java\n' + \
'echo \' }}\' >> decoy.java\n' + \
'echo \'\' >> decoy.java\n' + \
'echo \'}}\' >> decoy.java\n',
},
'jscript': {
'ext': '.js',
'data': 'echo \'function factorial{1}(n) {{\' > decoy.js\n' + \
'echo \' if (n == 0) {{\' >> decoy.js\n' + \
'echo \' return 1;\' >> decoy.js\n' + \
'echo \' }}\' >> decoy.js\n' + \
'echo \' return n * factorial(n - 1);\' >> decoy.js\n' + \
'echo \'}}\' >> decoy.js\n',
},
'nasm': {
'ext': '.asm',
'data': 'echo \' SECTION .data\' > decoy.asm\n' + \
'echo \'\' >> decoy.asm\n' + \
'echo \' msg db "Hello, world{1}!",0xa ; \' >> decoy.asm\n' + \
'echo \' len equ $ - msg\' >> decoy.asm\n' + \
'echo \'\' >> decoy.asm\n' + \
'echo \' SECTION .text\' >> decoy.asm\n' + \
'echo \' global main\' >> decoy.asm\n' + \
'echo \'\' >> decoy.asm\n' + \
'echo \'main:\' >> decoy.asm\n' + \
'echo \' mov eax,4\' >> decoy.asm\n' + \
'echo \' mov ebx,1\' >> decoy.asm\n' + \
'echo \' mov ecx,msg\' >> decoy.asm\n' + \
'echo \' mov edx,len\' >> decoy.asm\n' + \
'echo \' int 0x80\' >> decoy.asm\n' + \
'echo \'\' >> decoy.asm\n' + \
'echo \' mov eax,1\' >> decoy.asm\n' + \
'echo \' mov ebx,0\' >> decoy.asm\n' + \
'echo \' int 0x80\' >> decoy.asm\n',
},
'perl': {
'ext': '.pl',
'data': 'echo \'#!/usr/bin/env perl\' > decoy.pl\n' + \
'echo \'\' >> decoy.pl\n' + \
'echo \'use warnings;\' >> decoy.pl\n' + \
'echo \'use strict;\' >> decoy.pl\n' + \
'echo \'\' >> decoy.pl\n' + \
'echo \'print "Hello World{1}!";\' >> decoy.pl\n',
},
'php': {
'ext': '.php',
'data': 'echo \'<?php\' > decoy.php\n' + \
'echo \'echo "Hello World{1}!";\' >> decoy.php\n' + \
'echo \'?> \' >> decoy.php\n',
},
'python': {
'ext': '.py',
        'data': 'echo \'#!/usr/bin/env python\' > decoy.py\n' + \
'echo \'print "Hello World {1}"\' >> decoy.py',
},
'ruby': {
'ext': '.rb',
'data': 'echo \'class HelloWorld\' > decoy.rb\n' + \
'echo \' def initialize(name)\' >> decoy.rb\n' + \
'echo \' @name = name.capitalize\' >> decoy.rb\n' + \
'echo \' end\' >> decoy.rb\n' + \
'echo \' def speak\' >> decoy.rb\n' + \
'echo \' puts "Hello #{{@name}}{1}!"\' >> decoy.rb\n' + \
'echo \' end\' >> decoy.rb\n' + \
'echo \'end\' >> decoy.rb\n' + \
'echo \'\' >> decoy.rb\n' + \
'echo \'hello = HelloWorld.new("World")\' >> decoy.rb\n' + \
'echo \'hello.speak\' >> decoy.rb\n',
}
}
known_languages = content_templates.keys()
def usage():
"""Prints the usage message."""
print __doc__
def version():
"""Prints version information."""
print "ghdecoy.py {}".format(__version__)
def get_calendar(user):
"""Retrieves the given user's contribution data from Github."""
url = 'https://github.com/users/' + user + '/contributions'
try:
page = urllib2.urlopen(url)
except (urllib2.HTTPError, urllib2.URLError) as err:
print "There was a problem fetching data from {0}".format(url)
print err
return None
return page.readlines()
def calendar_valid(cal):
    """Quick sanity check to see if the fetched calendar looks valid."""
    svg = 'js-calendar-graph-svg'
    if len(cal) < 495:
        return False
    return any(svg in c for c in cal)
def get_factor(data):
"""Calculates the factor by which the calender data has to be scaled."""
max_val = 0
for entry in data:
if entry['count'] < 0:
sys.stderr.write(
"Warning: Found invalid value ({}) at {}.\n".format(
entry['count'], entry['date']
)
)
entry['count'] = 0
if entry['count'] > max_val:
max_val = entry['count']
factor = max_val / 4.0
if factor == 0:
return 1
factor = int(factor)
return factor
def cal_scale(scale_factor, data_out):
"""Scales the calendar data by a given factor."""
for entry in data_out:
entry['count'] *= scale_factor
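# Worked example: with a maximum existing count of 10, get_factor() returns
# int(10 / 4.0) == 2, so cal_scale() maps the generated counts 0..4 to 0..8,
# keeping the fake shades roughly in line with the real contribution levels.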
def lang_valid(lang):
    """Checks whether LANG has a known content template."""
    return lang in known_languages
def parse_timeframe_arg(frame, conf):
    """Parses the DATE[-DATE][,...] argument into conf['timeframe']."""
intervals = []
singledates = []
dates = frame.split(',')
for d in dates:
interval = d.split('-')
if interval[0] != d:
try:
intervals.append(
[datetime.strptime(interval[0],"%Y%m%d") +
timedelta(hours=12),
datetime.strptime(interval[1],"%Y%m%d") +
timedelta(hours=12)])
except ValueError:
print "Invalid value: {}".format(d)
return False
else:
try:
singledates.append(datetime.strptime(d,"%Y%m%d") +
timedelta(hours=12))
except ValueError:
print "Invalid value: {}".format(d)
return False
conf['timeframe'] = {
'intervals': intervals,
'singledates': singledates,
}
return True
def parse_args(argv):
"""Parses the script's arguments via getopt."""
try:
opts, args = getopt.getopt(
argv[1:], "fhknsvwd:l:m:p:r:u:", ["help", "version"])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit(1)
conf = {
'dryrun': False,
'force_data': False,
'keep': False,
'lang': 'python',
'max_shade': 4,
'min_days': 5,
'repo': 'decoy',
'ssh': False,
'timeframe': {},
'user': os.getenv("USER"),
'wdir': '/tmp',
'workday': False,
}
for opt, arg in opts:
if opt == "-d":
conf['wdir'] = arg
elif opt in ("-h", "--help"):
usage()
sys.exit(0)
elif opt == "-f":
conf['force_data'] = True
elif opt == "-k":
conf['keep'] = True
elif opt == "-l":
conf['lang'] = arg
elif opt == "-m":
conf['min_days'] = int(arg)
elif opt == "-n":
conf['dryrun'] = True
elif opt == "-p":
val = int(arg)
if 0 < val < 5:
conf['max_shade'] = val
elif opt == "-r":
conf['repo'] = arg
elif opt == "-s":
conf['ssh'] = True
elif opt == "-u":
conf['user'] = arg
elif opt == "-w":
conf['workday'] = True
elif opt in ("-v", "--version"):
version()
sys.exit(0)
if len(args) != 1:
usage()
sys.exit(1)
if not lang_valid(conf['lang']):
print "Invalid language: {}".format(conf['lang'])
sys.exit(1)
if args[0] in ("append", "fill"):
conf['action'] = args[0]
    elif parse_timeframe_arg(args[0], conf):
conf['action'] = "timeframe"
else:
print "Invalid command: {}".format(args[0])
sys.exit(1)
if not conf['user']:
print "Could not determine username; please use -u"
sys.exit(1)
return conf
def parse_calendar(cal):
"""Parse the raw svg data into a dictionary."""
ret = []
for line in cal:
match = re.search(r'data-count="(\d+)".*data-date="(\d+-\d+-\d+)"',
line)
if not match:
continue
ret.append({'date': match.group(2) + "T12:00:00",
'count': int(match.group(1))})
return ret
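# Example svg fragment the regex above targets (GitHub's markup at the time
# of writing; subject to change):
#   <rect class="day" ... data-count="4" data-date="2015-03-01"/>
# which parses to {'date': '2015-03-01T12:00:00', 'count': 4}.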
def create_dataset(data_in, action, min_days, max_shade, force, timeframe, workday):
"""Creates a data set representing the desired commits."""
ret = []
idx_start = -1
idx_cur = 0
idx_max = len(data_in) - 1
if idx_max == -1:
sys.stderr.write("Warning: Empty input; not creating dataset\n")
return ret
random.seed()
if force:
for i in range(0, idx_max):
ret.append({'date': data_in[i]['date'],
'count': random.randint(0, max_shade)})
elif action == 'timeframe':
for in_date in timeframe['singledates']:
in_iso = format(in_date.isoformat())
for cal_date in data_in:
if cal_date['date'] == in_iso:
ret.append({'date': cal_date['date'],
'count': random.randint(0, max_shade)})
for in_interval in timeframe['intervals']:
diff_days = (in_interval[1] - in_interval[0]).days
while diff_days >= 0:
in_iso = (in_interval[0] + timedelta(days=diff_days)).isoformat()
for cal_date in data_in:
if cal_date['date'] == in_iso:
                        if workday and datetime.strptime(
                                in_iso, "%Y-%m-%dT%H:%M:%S").isoweekday() in (6, 7):
continue
ret.append({'date': cal_date['date'],
'count': random.randint(0, max_shade)})
diff_days -= 1
else:
if action == 'append':
idx_cur = idx_max
for entry in reversed(data_in):
if entry['count']:
break
idx_cur -= 1
        # NOTE: This won't fill the last day if it is not preceded by at least
        # one other empty day. That doesn't matter in practice, as we only fill
        # gaps of at least min_days consecutive empty days anyway.
for entry in data_in[idx_cur:]:
if entry['count'] or idx_cur == idx_max:
if idx_start > -1:
idx_range = range(idx_start,
idx_cur if entry['count'] else idx_cur + 1)
idx_start = -1
if len(idx_range) < min_days:
idx_cur += 1
continue
for i in idx_range:
                        # skip weekends if -w was given
                        if workday and datetime.strptime(
                                data_in[i]['date'],
                                "%Y-%m-%dT%H:%M:%S").isoweekday() in (6, 7):
continue
ret.append({'date': data_in[i]['date'],
'count': random.randint(0, max_shade)})
elif idx_start == -1:
idx_start = idx_cur
idx_cur += 1
cal_scale(get_factor(data_in), ret)
return ret
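# Shape sketch (hypothetical values):
#   data_in  = [{'date': '2015-03-01T12:00:00', 'count': 0}, ...]
#   data_out = create_dataset(data_in, 'fill', 5, 4, False, {}, False)
# yields entries like {'date': ..., 'count': <0..max_shade>}, scaled by
# get_factor() to match the shading of the existing contribution data.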
def get_content_template(lang):
git_cmd = '\nGIT_AUTHOR_DATE={0} GIT_COMMITTER_DATE={0} git commit -a -m "ghdecoy" > /dev/null\n'
return content_templates[lang]['data'] + git_cmd
def create_script(conf, data_out, template):
"""Creates a bash script that executes the actual git operations.
The bash script created by this function creates a git repository, fills
    it with commits as specified via its arguments and pushes it to GitHub.
"""
content_template = get_content_template(conf['lang'])
fake_commits = []
j = 0
for entry in data_out:
for i in range(entry['count']):
fake_commits.append(
content_template.format(entry['date'], j))
j += 1
script_name = ''.join([conf['wdir'], '/ghdecoy.sh'])
script_fo = open(script_name, "w")
script_fo.write(
        template.format(conf['repo'], content_templates[conf['lang']]['ext'],
                        ''.join(fake_commits), conf['user']))
script_fo.close()
def create_template(conf):
""" Creates a template format string for the repo creation script."""
template = (
'#!/bin/bash\n'
'set -e\n'
'REPO={0}\n'
'git init $REPO\n'
'cd $REPO\n'
'touch decoy{1}\n'
'git add decoy{1}\n'
'{2}\n'
)
if conf['ssh']:
template = ''.join([template,
'git remote add origin git@github.com:{3}/$REPO.git\n'])
else:
template = ''.join([template,
'git remote add origin https://github.com/{3}/$REPO.git\n'])
template = ''.join([template, 'set +e\ngit pull\nset -e\n'])
if not conf['dryrun']:
template = ''.join([template, 'git push -f -u origin master\n'])
return template
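# Rendered sketch (hypothetical conf: repo 'decoy', lang 'raw', user 'alice',
# https remote, no dry run):
#   #!/bin/bash
#   set -e
#   REPO=decoy
#   git init $REPO
#   cd $REPO
#   touch decoy
#   git add decoy
#   <fake commit lines generated by create_script>
#   git remote add origin https://github.com/alice/$REPO.git
#   set +e
#   git pull
#   set -e
#   git push -f -u origin master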
def main():
"""The scripts main function."""
conf = parse_args(sys.argv)
ret = 0
cal = get_calendar(conf['user'])
if not cal:
sys.stderr.write("Error: Unable to fetch calendar.\n")
sys.exit(1)
if not calendar_valid(cal):
sys.stderr.write("Error: That doesn't look like contribution data.\n"
"Check user name and try again.\n")
sys.exit(1)
data_out = create_dataset(parse_calendar(cal), conf['action'],
conf['min_days'], conf['max_shade'],
conf['force_data'], conf['timeframe'],
conf['workday'])
if not data_out:
print "No commits to be pushed."
sys.exit(ret)
create_script(conf, data_out, create_template(conf))
os.chdir(conf['wdir'])
try:
subprocess.check_call(['sh', './ghdecoy.sh'])
except subprocess.CalledProcessError as err:
print err
ret = 1
if not conf['keep']:
shutil.rmtree(conf['repo'], True)
os.remove('ghdecoy.sh')
sys.exit(ret)
if __name__ == '__main__':
main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrapper optimizer for Elastic Average SGD """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import optimizer
from tensorflow.python.training import saver
from tensorflow.python.training import session_run_hook
from tensorflow.python.training.saving import saveable_object_util
LOCAL_VARIABLE_NAME = 'local_center_variable'
GLOBAL_VARIABLE_NAME = 'global_center_variable'
GLOBAL_STEP = 'global_step'
class ElasticAverageCustomGetter(object):
"""Custom_getter class is used to do:
1. Change trainable variables to local collection and place them at worker
device
2. Generate global variables(global center variables)
3. Generate local variables(local center variables) which record the global
variables and place them at worker device
Notice that the class should be used with tf.replica_device_setter,
so that the global center variables and global step variable can be placed
  at ps device. Besides, use 'tf.compat.v1.get_variable' instead of
  'tf.Variable' to use this custom getter.
For example,
ea_custom_getter = ElasticAverageCustomGetter(worker_device)
with tf.device(
tf.compat.v1.train.replica_device_setter(
worker_device=worker_device,
ps_device="/job:ps",
cluster=cluster)),
tf.compat.v1.variable_scope('',custom_getter=ea_custom_getter):
...
create your model here
...
with tf.device(worker_device):
opt = tf.compat.v1.train.MomentumOptimizer(...)
optimizer = ElasticAverageOptimizer(
opt,
num_worker=2,
moving_rate=0.01, # or use default value
communication_period=20,
ea_custom_getter=ea_custom_getter)
...
train_op = optimizer.apply_gradients(
grads_vars,
global_step=global_step)
...
hooks = [optimizer.make_session_run_hook(is_chief, task_index)]
...
with tf.compat.v1.train.MonitoredTrainingSession(master=server.target,
is_chief=is_chief,
checkpoint_dir=("...),
save_checkpoint_secs=600,
hooks=hooks) as mon_sess:
"""
def __init__(self, worker_device):
"""Create a new `ElasticAverageCustomGetter`.
Args:
worker_device: String. Name of the `worker` job.
"""
self._worker_device = worker_device
self._local_map = {}
self._global_map = {}
def __call__(self, getter, name, trainable, collections, *args, **kwargs):
if trainable:
with ops.device(self._worker_device):
local_var = getter(
name,
trainable=True,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
      if kwargs['reuse'] is True:
return local_var
global_center_variable = getter(
name='%s/%s' % (GLOBAL_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES],
*args,
**kwargs)
with ops.device(self._worker_device):
local_center_variable = getter(
name='%s/%s' % (LOCAL_VARIABLE_NAME, name),
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
*args,
**kwargs)
if kwargs['partitioner'] is None:
self._local_map[local_var] = local_center_variable
self._global_map[local_var] = global_center_variable
else:
v_list = list(local_var)
for i in range(len(v_list)):
self._local_map[v_list[i]] \
= list(local_center_variable)[i]
self._global_map[v_list[i]] \
= list(global_center_variable)[i]
return local_var
else:
kwargs['trainable'] = trainable
kwargs['collections'] = collections
if ops.GraphKeys.LOCAL_VARIABLES in collections:
with ops.device(self._worker_device):
return getter(name, *args, **kwargs)
else:
return getter(name, *args, **kwargs)
class ElasticAverageOptimizer(optimizer.Optimizer):
"""Wrapper optimizer that implements the Elastic Average SGD algorithm.
  This is an async optimizer. During training, each worker updates the
  local variables and maintains its own local_step, which starts from 0
  and is incremented by 1 after each update of the local variables. Whenever
  the communication period divides the local step, the worker requests
  the current global center variables, computes the elastic difference
  between the global center variables and the local variables, and uses that
  difference to update both the local variables and the global variables.
"""
# Default value as paper described
BETA = 0.9
def __init__(self,
opt,
num_worker,
ea_custom_getter,
communication_period=10,
moving_rate=None,
rho=None,
use_locking=True,
synchronous=False,
name='ElasticAverageOptimizer'):
"""Construct a new gradient descent optimizer.
Args:
opt: The actual optimizer that will be used to update local variables.
Must be one of the Optimizer classes.
num_worker: The number of workers
ea_custom_getter: The ElasticAverageCustomGetter
      communication_period: An int value that controls the frequency of
        communication between each worker and the ps.
      moving_rate: A floating point value to control the elastic difference.
      rho: The amount of exploration allowed in the model. Defaults to
        moving_rate/learning_rate; rho=0.0 is suggested in async mode.
      use_locking: If True use locks for update operations.
      synchronous: Add_sync_queues_and_barrier or not.
        True: all workers will wait for each other before starting training.
        False: a worker can start training as soon as its initialization is
          done; there is no need to wait for everyone to be ready, so a
          restarted worker can rejoin and continue training without being
          blocked.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "ElasticAverageOptimizer".
"""
super(ElasticAverageOptimizer, self).__init__(use_locking, name)
self._opt = opt
self._num_worker = num_worker
self._period = communication_period
self._local_map = ea_custom_getter._local_map
self._global_map = ea_custom_getter._global_map
self._synchronous = synchronous
if moving_rate is None:
self._moving_rate = self.BETA / communication_period / num_worker
else:
self._moving_rate = moving_rate
if rho is None:
self._rho = self._moving_rate / self._opt._learning_rate
else:
self._rho = rho
self._local_step = variable_scope.get_variable(
initializer=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
name='local_step')
self._opt._prepare()
def compute_gradients(self,
loss,
var_list=None,
gate_gradients=optimizer.Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
grad_loss=None):
"""Compute gradients of `loss` for the variables in `var_list`.
Add rho*elastic_difference to loss to control the exploration
This is the first part of `minimize()`. It returns a list
of (gradient, variable) pairs where "gradient" is the gradient
for "variable". Note that "gradient" can be a `Tensor`, an
`IndexedSlices`, or `None` if there is no gradient for the
given variable.
Args:
loss: A Tensor containing the value to minimize.
var_list: Optional list or tuple of `tf.Variable` to update to minimize
`loss`. Defaults to the list of variables collected in the graph under
the key `GraphKey.TRAINABLE_VARIABLES`.
gate_gradients: How to gate the computation of gradients. Can be
`GATE_NONE`, `GATE_OP`, or `GATE_GRAPH`.
aggregation_method: Specifies the method used to combine gradient terms.
Valid values are defined in the class `AggregationMethod`.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
Returns:
A list of (gradient, variable) pairs. Variable is always present, but
gradient can be `None`.
Raises:
TypeError: If `var_list` contains anything else than `Variable` objects.
ValueError: If some arguments are invalid.
"""
if not var_list:
var_list = variables.trainable_variables()
elastic_difference = [
math_ops.subtract(v, lv)
for v, lv in zip(variables.trainable_variables(),
[self._local_map[var] for var in var_list])
]
distance_loss = self._rho * math_ops.add_n(
[gen_nn_ops.l2_loss(ed) for ed in elastic_difference])
total_loss = loss + distance_loss
return self._opt.compute_gradients(total_loss, var_list, gate_gradients,
aggregation_method,
colocate_gradients_with_ops, grad_loss)
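  # In symbols, since gen_nn_ops.l2_loss(x) computes sum(x**2) / 2, the loss
  # minimized above is
  #   total_loss = loss + rho * sum_i ||v_i - center_i||**2 / 2
  # so a larger rho keeps each worker closer to its center variables.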
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to global variables.
This is the second part of `minimize()`. It returns an `Operation` that
applies gradients.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
`compute_gradients()`.
global_step: Optional `Variable` to increment by one after the variables
have been updated.
name: Optional name for the returned operation. Default to the name
passed to the `Optimizer` constructor.
Returns:
An `Operation` that applies the specified gradients. If `global_step`
was not None, that operation also increments `global_step`.
Raises:
TypeError: If `grads_and_vars` is malformed.
ValueError: If none of the variables have gradients.
"""
global_old = set(n.op.name for n in variables.global_variables())
apply_updates = self._opt.apply_gradients(grads_and_vars)
global_new = set(n.op.name for n in variables.global_variables())
with ops.control_dependencies([apply_updates]):
local_update = state_ops.assign_add(
self._local_step, 1, name='local_step_update').op
    # Move variables created by the wrapped optimizer into the local
    # collection; e.g., AdamOptimizer creates its beta accumulators as
    # global variables.
def _adjust_optimizer_variable_collection(opt_vars):
g = ops.get_default_graph()
idx = 0
for _ in range(len(g._collections[ops.GraphKeys.GLOBAL_VARIABLES])):
var = g.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)[idx]
name = var.op.name
if name in opt_vars:
ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, var)
del g.get_collection_ref(ops.GraphKeys.GLOBAL_VARIABLES)[idx]
else:
idx += 1
_adjust_optimizer_variable_collection(global_new - global_old)
# update global variables.
def _Update_global_variables():
local_vars = [v for g, v in grads_and_vars if g is not None]
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
local_center_vars_update = []
for lvar, var in zip(local_center_vars, global_center_vars):
local_center_vars_update.append(lvar.assign(var))
update_ops = []
differences = []
with ops.control_dependencies(local_center_vars_update):
for v, lv in zip(local_vars, local_center_vars):
with ops.device(v.device):
differences.append(math_ops.subtract(v, lv))
for lvar, diff in zip(local_vars, differences):
with ops.device(lvar.device):
update_ops.append(
state_ops.assign_sub(lvar,
math_ops.multiply(self._moving_rate,
diff)))
for var, diff in zip(global_center_vars, differences):
with ops.device(var.device):
update_ops.append(
state_ops.assign_add(var,
math_ops.multiply(self._moving_rate,
diff)))
if global_step:
with ops.colocate_with(global_step):
update_ops.append(state_ops.assign_add(global_step, 1))
variable_update = control_flow_ops.group(*(update_ops))
return variable_update
with ops.control_dependencies([local_update]):
condition = math_ops.equal(
math_ops.mod(self._local_step, self._period), 0)
conditional_update = control_flow_ops.cond(condition,
_Update_global_variables,
control_flow_ops.no_op)
return conditional_update
def get_init_op(self, task_index):
"""Returns the op to let all the local variables and local center
variables equal to the global center variables before the training begins
"""
def _Add_sync_queues_and_barrier(enqueue_after_list):
"""Adds ops to enqueue on all worker queues"""
sync_queues = [
data_flow_ops.FIFOQueue(
self._num_worker, [dtypes.bool],
shapes=[[]],
shared_name='%s%s' % ('variable_init_sync_queue', i))
for i in range(self._num_worker)
]
queue_ops = []
# For each other worker, add an entry in a queue
token = constant_op.constant(False)
with ops.control_dependencies(enqueue_after_list):
for i, q in enumerate(sync_queues):
if i == task_index:
queue_ops.append(control_flow_ops.no_op())
else:
queue_ops.append(q.enqueue(token))
queue_ops.append(
sync_queues[task_index].dequeue_many(len(sync_queues) - 1))
return control_flow_ops.group(*queue_ops)
init_ops = []
local_vars = variables.trainable_variables()
global_center_vars = [self._global_map[var] for var in local_vars]
local_center_vars = [self._local_map[var] for var in local_vars]
if not (local_vars and global_center_vars and local_center_vars):
      raise ValueError('The lists of local_variables, global_center_variables, '
                       'and local_center_variables should not be empty.')
for lvar, gc_var, lc_var in zip(local_vars, global_center_vars,
local_center_vars):
init_ops.append(state_ops.assign(lvar, gc_var))
init_ops.append(state_ops.assign(lc_var, gc_var))
init_op = control_flow_ops.group(*(init_ops))
    if not self._synchronous:
return init_op
sync_queue_op = _Add_sync_queues_and_barrier([init_op])
return sync_queue_op
def make_session_run_hook(self, is_chief, task_index):
"""Creates a hook to handle ElasticAverageOptimizerHook ops such as initialization."""
return _ElasticAverageOptimizerHook(self, is_chief, task_index)
def swapping_saver(self, var_list=None, name='swapping_saver', **kwargs):
"""Create a saver copy global_center_variable to trainable variables
Please call this function after all your variables created with
ElasticAverageCustomGetter. For evaluations or inference, use this saver
during training. It will save the global_center_variable of the trained
parameters under the original parameter names.
Args:
var_list: List of variables to save, as per `Saver()`. If set to None,
save all the trainable_variables that have been created before this
call.
name: The name of the saver.
**kwargs: Keyword arguments of `Saver()`.
Returns:
A `tf.compat.v1.train.Saver` object.
Raises:
      RuntimeError: global_center_variable is empty; please make sure
        this is called after the model is created and
        ElasticAverageCustomGetter is used when declaring your model.
"""
if not self._global_map:
      raise RuntimeError('global_center_variable is empty, please make sure '
                         'this is called after the model is created and '
                         'ElasticAverageCustomGetter is used when declaring '
                         'your model')
if var_list is None:
var_list = variables.trainable_variables()
if not isinstance(var_list, dict):
var_list = saveable_object_util.op_list_to_dict(var_list)
swapped_var_list = {}
for key, var in var_list.items():
tensor = var
if not isinstance(var, list):
for tvar in variables.trainable_variables():
if tvar.op.name == var.op.name:
tensor = self._global_map.get(tvar, var)
break
else: #partitioned variable
tensor = [self._global_map.get(lvar, lvar) for lvar in var]
swapped_var_list[key] = tensor
return saver.Saver(swapped_var_list, name=name, **kwargs)
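# Sketch of evaluation-time use of swapping_saver (paths and names below are
# hypothetical):
#   saver = ea_optimizer.swapping_saver()
#   saver.save(sess, '/tmp/easgd/model.ckpt')
# The checkpoint then holds the global center values under the original
# trainable-variable names, so an ordinary tf.compat.v1.train.Saver can
# restore them for inference.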
class _ElasticAverageOptimizerHook(session_run_hook.SessionRunHook):
def __init__(self, ea_optimizer, is_chief, task_index):
"""Creates hook to handle ElasticAverageOptimizer initialization ops.
Args:
ea_optimizer: `ElasticAverageOptimizer` which this hook will initialize.
      is_chief: `Bool`, whether this is a chief replica or not.
      task_index: `Int`, the index of this worker task.
"""
self._ea_optimizer = ea_optimizer
self._is_chief = is_chief
self._task_index = task_index
def begin(self):
self._local_init_op = variables.local_variables_initializer()
self._global_init_op = None
if self._is_chief:
self._global_init_op = variables.global_variables_initializer()
self._variable_init_op = self._ea_optimizer.get_init_op(self._task_index)
def after_create_session(self, session, coord):
"""Run initialization ops"""
session.run(self._variable_init_op)
|
|
from __future__ import absolute_import
import ast
import os
import pytest
from ..api_crawler import api_crawler, differ
sample_class = """
class SampleClass:
def __init__(self):
pass
"""
sample_function = """
def sample_function(self):
pass
"""
sample_code = """
class SampleClass:
def __init__(self):
pass
def sample_function(self):
pass
"""
@pytest.fixture
def test_crawler():
path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, os.pardir))
return api_crawler(path)
def test_get_crawl_dict(test_crawler):
crawl_dict = test_crawler.get_crawl_dict()
assert crawl_dict
def test_name_is_public(test_crawler):
filename = "_apple"
assert not test_crawler.is_public(filename)
filename = "__init__"
assert test_crawler.is_public(filename)
filename = "__getattribute__"
assert test_crawler.is_public(filename)
filename = "apple"
assert test_crawler.is_public(filename)
def test_filename_is_public(test_crawler):
# Should not crawl __init__.py files
filename = "__init__.py"
assert not test_crawler.is_public(filename)
filename = "_apple.py"
assert not test_crawler.is_public(filename)
filename = "apple.py"
assert test_crawler.is_public(filename)
def test_is_toplevel_function(test_crawler):
parsed_function = ast.parse(sample_function).body[0]
assert test_crawler.is_toplevel_function(parsed_function)
def test_is_class(test_crawler):
parsed_class = ast.parse(sample_class).body[0]
assert test_crawler.is_class(parsed_class)
def test_get_functions(test_crawler):
functions = test_crawler.get_functions(sample_code)
assert "sample_function" in functions
def test_get_classes(test_crawler):
classes = test_crawler.get_classes(sample_code)
assert "SampleClass" in classes
old_version = {
"models": {},
"bands": {
"classes": {
"Radiohead": {
"methods": ["thom", "jonny", "colin", "ed", "phil"]
},
"Beatles": {
"methods": ["Here Comes the Sun"]
}
},
"functions": ["john", "paul", "ringo"]
}
}
new_version = {
"bands": {
"classes": {
"Radiohead": {
"methods": ["thom", "colin", "ed", "phil"]
},
"Pixies": {"methods": ["debaser"]}
},
"functions": ["john", "paul", "george"]
}
}
expected_diff = {
"models": {},
"bands": {
"classes": {
"Radiohead": {
"methods": ["jonny"]
},
"Beatles": {}
},
"functions": ["ringo"]
}
}
expected_additions = {
'bands': {
'classes': {
'Pixies': {}
},
'functions': ['george']
}
}
expected_parsed_diff = [
'DELETED models',
'DELETED bands.Radiohead.jonny',
'DELETED bands.Apple',
'DELETED bands.ringo',
'DELETED bands.Beatles'
]
expected_parsed_additions = [
'ADDED bands.george',
'ADDED bands.Pixies'
]
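# Note: the parsing tests below assert subset membership, i.e. every entry the
# differ produces must appear in the expected list, so sentinel entries such as
# 'DELETED bands.Apple' that the differ may never emit are tolerated.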
single_class_old = {
"bands": {"functions": [], "classes": {"Radiohead": {"methods": ["thom", "jonny", "colin", "ed", "phil"]}}}
}
single_class_new = {
"bands": {"functions": [], "classes": {"Radiohead": {"methods": ["thom", "colin", "ed", "phil"]}}}
}
expected_single_class = {
"bands": {"classes": {"Radiohead": {"methods": ["jonny"]}}}
}
@pytest.fixture
def test_differ():
return differ(old_version, new_version)
def test_accurate_diff(test_differ):
test_differ.additions = False
raw_diff = test_differ.diff_modules()
assert raw_diff == expected_diff
def test_catch_key_error(test_differ):
test_differ.additions = False
test_differ.former = single_class_old
test_differ.latter = single_class_new
raw_diff = test_differ.diff_modules()
assert raw_diff == expected_single_class
test_differ.former = old_version
test_differ.latter = new_version
def test_get_diff(test_differ):
diff = test_differ.get_diff()
expected_diff = expected_parsed_diff + expected_parsed_additions
for x in diff:
assert x in expected_diff
def test_diff_additions(test_differ):
test_differ.additions = True
raw_diff = test_differ.diff_modules()
assert raw_diff == expected_additions
def test_removed_parsing(test_differ):
test_differ.additions = False
raw_diff = test_differ.diff_modules()
raw_diff = test_differ.pretty_diff(raw_diff)
for x in raw_diff:
assert x in expected_parsed_diff
def test_additions_parsing(test_differ):
test_differ.additions = True
raw_diff = test_differ.diff_modules()
raw_diff = test_differ.pretty_diff(raw_diff)
for x in raw_diff:
assert x in expected_parsed_additions
def test_operators(test_differ):
a = {"one", "two", "three", "four"}
b = {"one", "two", "three", "five"}
assert test_differ.diff_operation(a, b) == ["four"]
assert test_differ.combinaton_diff_operation(a, b) == ["five"]
def test_diff_files(test_differ):
test_differ.additions = False
intersection, diff = test_differ.diff_files()
assert list(diff.keys()) == ["models"]
for x in intersection.keys():
assert x in ["models", "bands"]
test_differ.additions = True
intersection, diff = test_differ.diff_files()
assert list(diff.keys()) == []
for x in intersection.keys():
assert x in ["models", "bands"]
def test_diff_classes_functions(test_differ):
test_differ.additions = False
intersection, diff = test_differ.diff_files()
diff = test_differ.diff_functions_classes(diff, intersection)
assert diff["bands"]["functions"] == ["ringo"]
for x in diff["bands"]["classes"].keys():
assert x in expected_diff["bands"]["classes"].keys()
test_differ.additions = True
intersection, diff = test_differ.diff_files()
diff = test_differ.diff_functions_classes(diff, intersection)
assert diff["bands"]["functions"] == ["george"]
assert list(diff["bands"]["classes"].keys()) == ["Pixies"]
def test_diff_methods(test_differ):
test_differ.additions = False
intersection, diff = test_differ.diff_files()
diff = test_differ.diff_functions_classes(diff, intersection)
diff = test_differ.diff_methods(diff, intersection)
assert diff["bands"]["classes"]["Radiohead"]["methods"] == ["jonny"]
|
|
"""
Support for VirtualBox using the VBoxManage command
.. versionadded:: 2016.3.0
If the ``vboxdrv`` kernel module is not loaded, this module can automatically
load it by configuring ``autoload_vboxdrv`` in ``/etc/salt/minion``:
.. code-block:: yaml
autoload_vboxdrv: True
The default for this setting is ``False``.
:depends: virtualbox
"""
import logging
import os.path
import re
# pylint: disable=import-error,no-name-in-module
import salt.modules.cmdmod
import salt.utils.files
import salt.utils.path
from salt.exceptions import CommandExecutionError
# pylint: enable=import-error,no-name-in-module
LOG = logging.getLogger(__name__)
UUID_RE = re.compile("[^{}]".format("a-zA-Z0-9._-"))
NAME_RE = re.compile("[^{}]".format("a-zA-Z0-9._-"))
def __virtual__():
"""
Only load the module if VBoxManage is installed
"""
if vboxcmd():
if __opts__.get("autoload_vboxdrv", False) is True:
if not __salt__["kmod.is_loaded"]("vboxdrv"):
__salt__["kmod.load"]("vboxdrv")
return True
return (
False,
"The vboxmanaged execution module failed to load: VBoxManage is not installed.",
)
def vboxcmd():
"""
Return the location of the VBoxManage command
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.vboxcmd
"""
return salt.utils.path.which("VBoxManage")
def list_ostypes():
"""
List the available OS Types
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_ostypes
"""
return list_items("ostypes", True, "ID")
def list_nodes_min():
"""
Return a list of registered VMs, with minimal information
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_nodes_min
"""
ret = {}
cmd = "{} list vms".format(vboxcmd())
for line in salt.modules.cmdmod.run(cmd).splitlines():
if not line.strip():
continue
comps = line.split()
name = comps[0].replace('"', "")
ret[name] = True
return ret
def list_nodes_full():
"""
Return a list of registered VMs, with detailed information
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_nodes_full
"""
return list_items("vms", True, "Name")
def list_nodes():
"""
Return a list of registered VMs
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.list_nodes
"""
ret = {}
nodes = list_nodes_full()
for node in nodes:
ret[node] = {
"id": nodes[node]["UUID"],
"image": nodes[node]["Guest OS"],
"name": nodes[node]["Name"],
"state": None,
"private_ips": [],
"public_ips": [],
}
ret[node]["size"] = "{} RAM, {} CPU".format(
nodes[node]["Memory size"],
nodes[node]["Number of CPUs"],
)
return ret
def start(name):
"""
Start a VM
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.start my_vm
"""
cmd = "{} startvm {}".format(vboxcmd(), name)
ret = salt.modules.cmdmod.run(cmd).splitlines()
return ret
def stop(name):
"""
Stop a VM
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.stop my_vm
"""
cmd = "{} controlvm {} poweroff".format(vboxcmd(), name)
ret = salt.modules.cmdmod.run(cmd).splitlines()
return ret
def register(filename):
"""
Register a VM
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.register my_vm_filename
"""
if not os.path.isfile(filename):
raise CommandExecutionError(
"The specified filename ({}) does not exist.".format(filename)
)
cmd = "{} registervm {}".format(vboxcmd(), filename)
ret = salt.modules.cmdmod.run_all(cmd)
if ret["retcode"] == 0:
return True
return ret["stderr"]
def unregister(name, delete=False):
"""
Unregister a VM
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.unregister my_vm_filename
"""
nodes = list_nodes_min()
if name not in nodes:
raise CommandExecutionError(
"The specified VM ({}) is not registered.".format(name)
)
cmd = "{} unregistervm {}".format(vboxcmd(), name)
if delete is True:
cmd += " --delete"
ret = salt.modules.cmdmod.run_all(cmd)
if ret["retcode"] == 0:
return True
return ret["stderr"]
def destroy(name):
"""
Unregister and destroy a VM
CLI Example:
.. code-block:: bash
salt '*' vboxmanage.destroy my_vm
"""
return unregister(name, True)
def create(
name,
groups=None,
ostype=None,
register=True,
basefolder=None,
new_uuid=None,
**kwargs
):
"""
Create a new VM
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.create <name>
"""
nodes = list_nodes_min()
if name in nodes:
raise CommandExecutionError(
"The specified VM ({}) is already registered.".format(name)
)
params = ""
if name:
if NAME_RE.search(name):
raise CommandExecutionError("New VM name contains invalid characters")
params += " --name {}".format(name)
if groups:
if isinstance(groups, str):
groups = [groups]
if isinstance(groups, list):
params += " --groups {}".format(",".join(groups))
else:
raise CommandExecutionError(
"groups must be either a string or a list of strings"
)
    if ostype:
        if ostype not in list_ostypes():
            raise CommandExecutionError(
                "The specified OS type ({}) is not available.".format(ostype)
            )
        params += " --ostype " + ostype
if register is True:
params += " --register"
if basefolder:
if not os.path.exists(basefolder):
raise CommandExecutionError(
"basefolder {} was not found".format(basefolder)
)
params += " --basefolder {}".format(basefolder)
if new_uuid:
if NAME_RE.search(new_uuid):
raise CommandExecutionError("New UUID contains invalid characters")
params += " --uuid {}".format(new_uuid)
cmd = "{} create {}".format(vboxcmd(), params)
ret = salt.modules.cmdmod.run_all(cmd)
if ret["retcode"] == 0:
return True
return ret["stderr"]
def clonevm(
name=None,
uuid=None,
new_name=None,
snapshot_uuid=None,
snapshot_name=None,
mode="machine",
options=None,
basefolder=None,
new_uuid=None,
register=False,
groups=None,
**kwargs
):
"""
Clone a new VM from an existing VM
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.clonevm <name> <new_name>
"""
if (name and uuid) or (not name and not uuid):
raise CommandExecutionError(
"Either a name or a uuid must be specified, but not both."
)
params = ""
nodes_names = list_nodes_min()
nodes_uuids = list_items("vms", True, "UUID").keys()
if name:
if name not in nodes_names:
raise CommandExecutionError(
"The specified VM ({}) is not registered.".format(name)
)
params += " " + name
elif uuid:
if uuid not in nodes_uuids:
            raise CommandExecutionError(
                "The specified VM ({}) is not registered.".format(uuid)
            )
params += " " + uuid
if snapshot_name and snapshot_uuid:
raise CommandExecutionError(
"Either a snapshot_name or a snapshot_uuid may be specified, but not both"
)
if snapshot_name:
if NAME_RE.search(snapshot_name):
raise CommandExecutionError("Snapshot name contains invalid characters")
params += " --snapshot {}".format(snapshot_name)
elif snapshot_uuid:
if UUID_RE.search(snapshot_uuid):
raise CommandExecutionError("Snapshot name contains invalid characters")
params += " --snapshot {}".format(snapshot_uuid)
valid_modes = ("machine", "machineandchildren", "all")
    if mode not in valid_modes:
        raise CommandExecutionError(
            'Mode must be one of: {} (default "machine")'.format(", ".join(valid_modes))
        )
    params += " --mode " + mode
valid_options = ("link", "keepallmacs", "keepnatmacs", "keepdisknames")
    if options and options not in valid_options:
        raise CommandExecutionError(
            "If specified, options must be one of: {}".format(", ".join(valid_options))
        )
    if options:
        params += " --options " + options
if new_name:
if NAME_RE.search(new_name):
raise CommandExecutionError("New name contains invalid characters")
params += " --name {}".format(new_name)
if groups:
if isinstance(groups, str):
groups = [groups]
if isinstance(groups, list):
params += " --groups {}".format(",".join(groups))
else:
raise CommandExecutionError(
"groups must be either a string or a list of strings"
)
if basefolder:
if not os.path.exists(basefolder):
raise CommandExecutionError(
"basefolder {} was not found".format(basefolder)
)
params += " --basefolder {}".format(basefolder)
if new_uuid:
if NAME_RE.search(new_uuid):
raise CommandExecutionError("New UUID contains invalid characters")
params += " --uuid {}".format(new_uuid)
if register is True:
params += " --register"
cmd = "{} clonevm {}".format(vboxcmd(), name)
ret = salt.modules.cmdmod.run_all(cmd)
if ret["retcode"] == 0:
return True
return ret["stderr"]
def clonemedium(
medium,
uuid_in=None,
file_in=None,
uuid_out=None,
file_out=None,
mformat=None,
variant=None,
existing=False,
**kwargs
):
"""
    Clone a medium (disk, dvd, or floppy image) from an existing one
CLI Example:
.. code-block:: bash
        salt 'hypervisor' vboxmanage.clonemedium disk uuid_in=<uuid> file_out=<new_file>
"""
params = ""
valid_mediums = ("disk", "dvd", "floppy")
if medium in valid_mediums:
params += medium
else:
raise CommandExecutionError(
"Medium must be one of: {}.".format(", ".join(valid_mediums))
)
if (uuid_in and file_in) or (not uuid_in and not file_in):
raise CommandExecutionError(
"Either uuid_in or file_in must be used, but not both."
)
if uuid_in:
if medium == "disk":
item = "hdds"
elif medium == "dvd":
item = "dvds"
elif medium == "floppy":
item = "floppies"
items = list_items(item)
if uuid_in not in items:
raise CommandExecutionError("UUID {} was not found".format(uuid_in))
params += " " + uuid_in
elif file_in:
if not os.path.exists(file_in):
raise CommandExecutionError("File {} was not found".format(file_in))
params += " " + file_in
if (uuid_out and file_out) or (not uuid_out and not file_out):
raise CommandExecutionError(
"Either uuid_out or file_out must be used, but not both."
)
if uuid_out:
params += " " + uuid_out
elif file_out:
try:
# pylint: disable=resource-leakage
salt.utils.files.fopen(file_out, "w").close()
# pylint: enable=resource-leakage
os.unlink(file_out)
params += " " + file_out
except OSError:
raise CommandExecutionError("{} is not a valid filename".format(file_out))
if mformat:
valid_mformat = ("VDI", "VMDK", "VHD", "RAW")
if mformat not in valid_mformat:
raise CommandExecutionError(
"If specified, mformat must be one of: {}".format(
", ".join(valid_mformat)
)
)
else:
params += " --format " + mformat
valid_variant = ("Standard", "Fixed", "Split2G", "Stream", "ESX")
    if variant:
        if variant not in valid_variant:
            raise CommandExecutionError(
                "If specified, variant must be one of: {}".format(
                    ", ".join(valid_variant)
                )
            )
        params += " --variant " + variant
if existing:
params += " --existing"
cmd = "{} clonemedium {}".format(vboxcmd(), params)
ret = salt.modules.cmdmod.run_all(cmd)
if ret["retcode"] == 0:
return True
return ret["stderr"]
def list_items(item, details=False, group_by="UUID"):
"""
Return a list of a specific type of item. The following items are available:
vms
runningvms
ostypes
hostdvds
hostfloppies
intnets
bridgedifs
hostonlyifs
natnets
dhcpservers
hostinfo
hostcpuids
hddbackends
hdds
dvds
floppies
usbhost
usbfilters
systemproperties
extpacks
groups
webcams
screenshotformats
CLI Example:
.. code-block:: bash
salt 'hypervisor' vboxmanage.items <item>
salt 'hypervisor' vboxmanage.items <item> details=True
salt 'hypervisor' vboxmanage.items <item> details=True group_by=Name
Some items do not display well, or at all, unless ``details`` is set to
``True``. By default, items are grouped by the ``UUID`` field, but not all
items contain that field. In those cases, another field must be specified.
"""
types = (
"vms",
"runningvms",
"ostypes",
"hostdvds",
"hostfloppies",
"intnets",
"bridgedifs",
"hostonlyifs",
"natnets",
"dhcpservers",
"hostinfo",
"hostcpuids",
"hddbackends",
"hdds",
"dvds",
"floppies",
"usbhost",
"usbfilters",
"systemproperties",
"extpacks",
"groups",
"webcams",
"screenshotformats",
)
if item not in types:
raise CommandExecutionError("Item must be one of: {}.".format(", ".join(types)))
flag = ""
if details is True:
flag = " -l"
ret = {}
tmp_id = None
tmp_dict = {}
cmd = "{} list{} {}".format(vboxcmd(), flag, item)
for line in salt.modules.cmdmod.run(cmd).splitlines():
if not line.strip():
continue
comps = line.split(":")
if len(comps) < 1:
continue
if tmp_id is not None:
ret[tmp_id] = tmp_dict
line_val = ":".join(comps[1:]).strip()
if comps[0] == group_by:
tmp_id = line_val
tmp_dict = {}
tmp_dict[comps[0]] = line_val
    if tmp_id is not None:
        ret[tmp_id] = tmp_dict
    return ret
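# Illustration of the grouping loop above (output abridged, values
# hypothetical). Two lines of "VBoxManage list -l vms" output such as:
#   Name:  testvm
#   UUID:  0c9d1b2a-0000-0000-0000-000000000000
# grouped with group_by="UUID" yield:
#   {'0c9d1b2a-0000-0000-0000-000000000000': {'Name': 'testvm',
#    'UUID': '0c9d1b2a-0000-0000-0000-000000000000'}}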
|
|
"""
Tools for dealing with ROMS model output
See Octant project as well
Created on Fri Mar 08 15:09:46 2013
@author: mrayson
"""
import numpy as np
from netCDF4 import Dataset, MFDataset, num2date
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from scipy import interpolate
# Private modules
from interpXYZ import interpXYZ
import othertime
from timeseries import timeseries
import operator
from maptools import ll2lcc
from mygeometry import MyLine
import pdb
try:
from octant.slice import isoslice
except:
print 'Warning - could not import octant package.'
class roms_grid(object):
"""
Class for ROMS grid
"""
def __init__(self,ncfile):
self.grdfile = ncfile
self.readGrid()
def readGrid(self):
"""
Read in the main grid variables from the grid netcdf file
"""
try:
            nc = MFDataset(self.grdfile)
except:
nc = Dataset(self.grdfile, 'r')
varnames = ['angle','lon_rho','lat_rho','lon_psi','lat_psi','lon_u','lat_u',\
'lon_v','lat_v','h','f','mask_rho','mask_psi','mask_u','mask_v','pm','pn']
for vv in varnames:
try:
setattr(self,vv,nc.variables[vv][:])
except:
print 'Cannot find variable: %s'%vv
nc.close()
def Writefile(self,outfile,verbose=True):
"""
Writes subsetted grid and coordinate variables to a netcdf file
Code modified from roms.py in the Octant package
"""
self.outfile = outfile
Mp, Lp = self.lon_rho.shape
M, L = self.lon_psi.shape
N = self.s_rho.shape[0] # vertical layers
xl = self.lon_rho[self.mask_rho==1.0].ptp()
el = self.lat_rho[self.mask_rho==1.0].ptp()
# Write ROMS grid to file
nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')
nc.Description = 'ROMS subsetted history file'
nc.Author = ''
nc.Created = datetime.now().isoformat()
nc.type = 'ROMS HIS file'
nc.createDimension('xi_rho', Lp)
nc.createDimension('xi_u', L)
nc.createDimension('xi_v', Lp)
nc.createDimension('xi_psi', L)
nc.createDimension('eta_rho', Mp)
nc.createDimension('eta_u', Mp)
nc.createDimension('eta_v', M)
nc.createDimension('eta_psi', M)
nc.createDimension('s_rho', N)
nc.createDimension('s_w', N+1)
nc.createDimension('ocean_time', None)
        nc.createVariable('xl', 'f8', ())
        nc.variables['xl'].units = 'meters'
        nc.variables['xl'].assignValue(xl)
        nc.createVariable('el', 'f8', ())
        nc.variables['el'].units = 'meters'
        nc.variables['el'].assignValue(el)
        nc.createVariable('spherical', 'S1', ())
        nc.variables['spherical'].assignValue('F')
def write_nc_var(var, name, dimensions, units=None):
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
nc.variables[name][:] = var
if verbose:
print ' ... wrote ', name
# Grid variables
write_nc_var(self.angle, 'angle', ('eta_rho', 'xi_rho'))
write_nc_var(self.h, 'h', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.f, 'f', ('eta_rho', 'xi_rho'), 'seconds-1')
write_nc_var(self.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'))
write_nc_var(self.mask_u, 'mask_u', ('eta_u', 'xi_u'))
write_nc_var(self.mask_v, 'mask_v', ('eta_v', 'xi_v'))
write_nc_var(self.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'))
write_nc_var(self.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'degrees')
write_nc_var(self.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'degrees')
write_nc_var(self.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'degrees')
write_nc_var(self.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'degrees')
write_nc_var(self.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'degrees')
write_nc_var(self.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'degrees')
write_nc_var(self.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'degrees')
write_nc_var(self.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'degrees')
        write_nc_var(self.pm, 'pm', ('eta_rho', 'xi_rho'), 'meter-1')
        write_nc_var(self.pn, 'pn', ('eta_rho', 'xi_rho'), 'meter-1')
# Vertical coordinate variables
write_nc_var(self.s_rho, 's_rho', ('s_rho',))
write_nc_var(self.s_w, 's_w', ('s_w',))
write_nc_var(self.Cs_r, 'Cs_r', ('s_rho',))
write_nc_var(self.Cs_w, 'Cs_w', ('s_w',))
write_nc_var(self.hc, 'hc', ())
write_nc_var(self.Vstretching, 'Vstretching', ())
write_nc_var(self.Vtransform, 'Vtransform', ())
nc.sync()
def nc_add_dimension(self,outfile,name,length):
"""
Add a dimension to an existing netcdf file
"""
nc = Dataset(outfile, 'a')
nc.createDimension(name, length)
nc.close()
def nc_add_var(self,outfile,data,name,dimensions,units=None,long_name=None,coordinates=None):
"""
Add a new variable and write the data
"""
nc = Dataset(outfile, 'a')
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
if coordinates is not None:
nc.variables[name].coordinates = coordinates
if long_name is not None:
nc.variables[name].long_name = long_name
nc.variables[name][:] = data.copy()
nc.sync()
nc.close()
def nc_add_varnodata(self,outfile,name,dimensions,units=None,long_name=None,coordinates=None):
"""
        Add a new variable without writing the data
"""
nc = Dataset(outfile, 'a')
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
if coordinates is not None:
nc.variables[name].coordinates = coordinates
if long_name is not None:
nc.variables[name].long_name = long_name
nc.close()
def findNearset(self,x,y,grid='rho'):
"""
        Return the J,I indices of the nearest grid cell to x,y
"""
if grid == 'rho':
lon = self.lon_rho
lat = self.lat_rho
elif grid == 'u':
lon = self.lon_u
lat = self.lat_u
elif grid =='v':
lon = self.lon_v
lat = self.lat_v
elif grid =='psi':
lon = self.lon_psi
lat = self.lat_psi
dist = np.sqrt( (lon - x)**2 + (lat - y)**2)
return np.argwhere(dist==dist.min())
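    # Note: the distance above is Euclidean in lon/lat space, which is
    # adequate for picking the nearest cell on a reasonably fine grid but is
    # not a true great-circle distance.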
def utmconversion(self,lon,lat,utmzone,isnorth):
"""
Convert the ROMS grid to utm coordinates
"""
from maptools import ll2utm
M,N = lon.shape
xy = ll2utm(np.hstack((np.reshape(lon,(M*N,1)),np.reshape(lat,(M*N,1)))),utmzone,north=isnorth)
return np.reshape(xy[:,0],(M,N)), np.reshape(xy[:,1],(M,N))
class ROMS(roms_grid):
"""
General class for reading and plotting ROMS model output
"""
varname = 'zeta'
JRANGE = None
IRANGE = None
zlayer = False # True load z layer, False load sigma layer
K = [0] # Layer to extract, 0 bed, -1 surface, -99 all
tstep = [0] # - 1 last step, -99 all time steps
clim = None # Plot limits
def __init__(self,romsfile,**kwargs):
self.__dict__.update(kwargs)
self.romsfile = romsfile
# Load the grid
roms_grid.__init__(self,self.romsfile)
# Open the netcdf object
self._openNC()
# Load the time information
try:
self._loadTime()
except:
print 'No time variable.'
# Check the spatial indices of the variable
self._loadVarCoords()
self.listCoordVars()
self._checkCoords(self.varname)
# Check the vertical coordinates
self._readVertCoords()
self._checkVertCoords(self.varname)
def listCoordVars(self):
"""
List all of the variables that have the 'coordinate' attribute
"""
self.coordvars=[]
for vv in self.nc.variables.keys():
if hasattr(self.nc.variables[vv],'coordinates'):
#print '%s - %s'%(vv,self.nc.variables[vv].long_name)
self.coordvars.append(vv)
return self.coordvars
def loadData(self,varname=None,tstep=None):
"""
Loads model data from the netcdf file
"""
        if varname is None:
            varname = self.varname
        self._checkCoords(varname)
if self.ndim == 4:
self._checkVertCoords(varname)
        if tstep is None:
tstep = self.tstep
if self.ndim==1:
data = self.nc.variables[varname][tstep]
elif self.ndim == 2:
data = self.nc.variables[varname][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
elif self.ndim == 3:
data = self.nc.variables[varname][tstep,self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
elif self.ndim == 4:
data = self.nc.variables[varname][tstep,self.K,self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
if self.ndim == 4 and self.zlayer==True:
# Slice along z layers
print 'Extracting data along z-coordinates...'
dataz = np.zeros((len(tstep),)+self.Z.shape+self.X.shape)
for ii,tt in enumerate(tstep):
#Z = self.calcDepth(zeta=self.loadData(varname='zeta',tstep=[tt]))
Z = self.calcDepth()[:,self.JRANGE[0]:self.JRANGE[1],\
self.IRANGE[0]:self.IRANGE[1]].squeeze()
if len(Z.shape) > 1:
dataz[ii,:,:] = isoslice(data[ii,:,:,:].squeeze(),Z,self.Z)
else:
# Isoslice won't work on 1-D arrays
F = interpolate.interp1d(Z,data[ii,:,:,:].squeeze(),bounds_error=False)
dataz[ii,:,:] = F(self.Z)[:,np.newaxis,np.newaxis]
data = dataz
#self._checkCoords(self.varname)
# Reduce rank
self.data = data.squeeze()
return self.data
def loadTimeSeries(self,x,y,z=None,varname=None,trange=None):
"""
Load a time series at point x,y
Set z=None to load all layers, else load depth
"""
        if varname is not None:
            self.varname = varname
self._checkCoords(self.varname)
if self.ndim == 4:
self._checkVertCoords(self.varname)
        if z is None:
self.zlayer=False
self.K = [-99]
else:
self.zlayer=True
self.K = [z]
        if trange is None:
            tstep = np.arange(0, self.Nt)
        else:
            # assume trange is a sequence of time-step indices
            tstep = trange
# Set the index range to grab
JI = self.findNearset(x,y,grid=self.gridtype)
self.JRANGE = [JI[0][0], JI[0][0]+1]
self.IRANGE = [JI[0][1], JI[0][1]+1]
if self.zlayer:
Zout = z
else:
# Return the depths at each time step
h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
zeta=self.loadData(varname='zeta',tstep=tstep)
h = h*np.ones(zeta.shape)
Zout = get_depth(self.S,self.C,self.hc,h,zeta=zeta, Vtransform=self.Vtransform).squeeze()
return self.loadData(varname=varname,tstep=tstep), Zout
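    # Sketch of typical use (file name and coordinates are hypothetical):
    #   roms = ROMS('ocean_his.nc', varname='temp')
    #   temp, z = roms.loadTimeSeries(-94.5, 28.9, z=None)
    # which returns the full-depth temperature time series at the nearest
    # grid cell together with the time-varying layer depths.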
def calcDepth(self,zeta=None):
"""
Calculates the depth array for the current variable
"""
#h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
if self.gridtype == 'rho':
h = self.h
elif self.gridtype == 'psi':
h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
elif self.gridtype == 'u':
h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
elif self.gridtype == 'v':
h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
return get_depth(self.S,self.C,self.hc,h,zeta=zeta, Vtransform=self.Vtransform).squeeze()
def depthInt(self,var,grid='rho',cumulative=False):
"""
Depth-integrate data in variable, var (array [Nz, Ny, Nx])
Set cumulative = True for cumulative integration i.e. for pressure calc.
"""
sz = var.shape
if not sz[0] == self.Nz:
raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
if not len(sz)==3:
raise Exception, 'only 3-D arrays are supported.'
if grid == 'rho':
h = self.h
elif grid == 'psi':
h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
elif grid == 'u':
h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
elif grid == 'v':
h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
z_w = get_depth(self.s_w,self.Cs_w,self.hc,h,Vtransform=self.Vtransform).squeeze()
dz = np.diff(z_w,axis=0)
if cumulative:
return np.cumsum(dz*var,axis=0)
else:
return np.sum(dz*var,axis=0)
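    # Example (hypothetical fields): a depth-integrated quantity and the
    # cumulative integral used for pressure-type calculations:
    #   KE = self.depthInt(0.5*(u**2 + v**2), grid='rho')
    #   P = self.depthInt(rho*g, grid='rho', cumulative=True)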
def depthAvg(self,var,grid='rho'):
"""
Depth-average data in variable, var (array [Nz, Ny, Nx])
"""
sz = var.shape
if not sz[0] == self.Nz:
raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
if not len(sz)==3:
raise Exception, 'only 3-D arrays are supported.'
if grid == 'rho':
h = self.h
        elif grid == 'psi':
            h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
        elif grid == 'u':
            h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
        elif grid == 'v':
            h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
z_w = get_depth(self.s_w,self.Cs_w,self.hc,h,Vtransform=self.Vtransform).squeeze()
dz = np.diff(z_w,axis=0)
return np.sum(dz*var,axis=0) / h
def areaInt(self,var,grid='rho'):
"""
Calculate the area integral of var
"""
if grid == 'rho':
dx = 1.0/self.pm
dy = 1.0/self.pn
elif grid == 'psi':
dx = 1.0/(0.5*(self.pm[1:,1:] + self.pm[0:-1,0:-1]))
dy = 1.0/(0.5*(self.pn[1:,1:] + self.pn[0:-1,0:-1]))
elif grid == 'u':
dx = 1.0/(0.5 * (self.pm[:,1:] + self.pm[:,0:-1]))
dy = 1.0/(0.5 * (self.pn[:,1:] + self.pn[:,0:-1]))
elif grid == 'v':
            dx = 1.0/(0.5 * (self.pm[1:,:] + self.pm[0:-1,:]))
            dy = 1.0/(0.5 * (self.pn[1:,:] + self.pn[0:-1,:]))
A = dx*dy
return np.sum(var*A)
def gradZ(self,var,grid='rho',cumulative=False):
"""
Depth-gradient of data in variable, var (array [Nz, Ny, Nx])
"""
sz = var.shape
#print sz
if not sz[0] == self.Nz:
raise Exception, 'length of dimension 0 must equal %d (currently %d)'%(self.Nz,sz[0])
h = self.h[self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]].squeeze()
z_r = get_depth(self.s_rho,self.Cs_r,self.hc,h,Vtransform=self.Vtransform).squeeze()
dz = np.diff(z_r,axis=0)
dz_mid = 0.5 * (dz[1:,...] + dz[0:-1,...]) # N-2
var_mid = 0.5 * (var[1:,...] + var[0:-1,...])
dv_dz = np.zeros(sz)
# 2-nd order mid-points
dv_dz[1:-1,...] = (var_mid[1:,...] - var_mid[0:-1,...]) / dz_mid
# 1st order end points
dv_dz[0,...] = (var[1,...] - var[0,...]) / dz[0,...]
dv_dz[-1,...] = (var[-1,...] - var[-2,...]) / dz[-1,...]
return dv_dz
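    # The interior stencil above is a second-order centred difference on a
    # non-uniform grid: values and spacings are first averaged to the layer
    # mid-points and then differenced, i.e.
    #   dv_dz[k] = (var_mid[k] - var_mid[k-1]) / dz_mid[k-1]
    # with first-order one-sided differences at the bed and surface.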
def MLD(self,tstep,thresh=-0.006,z_max=-20.0):
"""
Mixed layer depth calculation
        thresh is the density-gradient threshold
        z_max is the shallowest depth considered, i.e. the minimum magnitude
        of the mixed layer depth
"""
# Load the density data
self.K=[-99]
drho_dz=self.gradZ(self.loadData(varname='rho',tstep=tstep))
# Mask drho_dz where z >= z_max
z = self.calcDepth()
mask = z >= z_max
drho_dz[mask] = 0.0
#
mld_ind = np.where(drho_dz <= thresh)
zout = -99999.0*np.ones(z.shape)
zout[mld_ind[0],mld_ind[1],mld_ind[2]] = z[mld_ind[0],mld_ind[1],mld_ind[2]]
mld = np.max(zout,axis=0)
# Isoslice averages when there is more than one value
#mld = isoslice(z,drho_dz,thresh)
mld = np.max([mld,-self.h],axis=0)
return mld
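    # Example call (threshold and depth cap are hypothetical choices):
    #   mld = self.MLD(tstep=[-1], thresh=-0.01, z_max=-10.0)
    # which returns a 2D array of mixed layer depths bounded below by -h.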
def MLDmask(self,mld,grid='rho'):
"""
Compute a 3D mask for variables beneath the mixed layer
"""
if grid == 'rho':
h = self.h
elif grid == 'psi':
h = 0.5 * (self.h[1:,1:] + self.h[0:-1,0:-1])
            mld = 0.5 * (mld[1:,1:] + mld[0:-1,0:-1])
elif grid == 'u':
h = 0.5 * (self.h[:,1:] + self.h[:,0:-1])
mld = 0.5 * (mld[:,1:] + mld[:,0:-1])
elif grid == 'v':
h = 0.5 * (self.h[1:,:] + self.h[0:-1,:])
mld = 0.5 * (mld[1:,:] + mld[0:-1,:])
z = get_depth(self.s_rho,self.Cs_r,self.hc,h,Vtransform=self.Vtransform).squeeze()
mask = np.zeros(z.shape)
for jj in range(mld.shape[0]):
for ii in range(mld.shape[1]):
ind = z[:,jj,ii] >= mld[jj,ii]
if np.size(ind)>0:
mask[ind,jj,ii]=1.0
return mask
def pcolor(self,data=None,titlestr=None,colorbar=True,ax=None,fig=None,**kwargs):
"""
Pcolor plot of the data in variable
"""
        if data is None:
            data = self.loadData()
        if self.clim is None:
            clim = [data.min(), data.max()]
        else:
            clim = self.clim
        if fig is None:
            fig = plt.gcf()
        if ax is None:
            ax = fig.gca()
p1 = ax.pcolormesh(self.X,self.Y,data,vmin=clim[0],vmax=clim[1],**kwargs)
ax.set_aspect('equal')
if colorbar:
plt.colorbar(p1)
        if titlestr is None:
plt.title(self._genTitle(self.tstep[0]))
else:
plt.title(titlestr)
return p1
def contourf(self, data=None, clevs=20, titlestr=None,colorbar=True,**kwargs):
"""
contour plot of the data in variable
"""
        if data is None:
            data = self.loadData()
        if self.clim is None:
            clim = [data.min(), data.max()]
        else:
            clim = self.clim
fig = plt.gcf()
ax = fig.gca()
p1 = plt.contourf(self.X,self.Y,data,clevs,vmin=clim[0],vmax=clim[1],**kwargs)
ax.set_aspect('equal')
if colorbar:
plt.colorbar(p1)
        if titlestr is None:
plt.title(self._genTitle(self.tstep[0]))
else:
plt.title(titlestr)
return p1
def contourbathy(self,clevs=np.arange(0,3000,100),**kwargs):
p1 = plt.contour(self.lon_rho,self.lat_rho,self.h,clevs,**kwargs)
return p1
def getTstep(self,tstart,tend,timeformat='%Y%m%d.%H%M'):
"""
Returns a vector of the time indices between tstart and tend
tstart and tend can be string with format=timeformat ['%Y%m%d.%H%M' - default]
Else tstart and tend can be datetime objects
"""
try:
t0 = datetime.strptime(tstart,timeformat)
t1 = datetime.strptime(tend,timeformat)
except:
# Assume the time is already in datetime format
t0 = tstart
t1 = tend
n1 = othertime.findNearest(t0,self.time)
n2 = othertime.findNearest(t1,self.time)
if n1==n2:
return [n1,n2]
else:
return range(n1,n2)
def _genTitle(self,tstep):
"""
Generates a title for plots
"""
if self.zlayer:
titlestr = '%s [%s]\nz: %6.1f m, %s'%(self.long_name,self.units,self.Z,datetime.strftime(self.time[tstep],'%d-%b-%Y %H:%M:%S'))
else:
titlestr = '%s [%s]\nsigma[%d], %s'%(self.long_name,self.units,self.K[0],datetime.strftime(self.time[tstep],'%d-%b-%Y %H:%M:%S'))
return titlestr
def _checkCoords(self,varname):
"""
Load the x and y coordinates of the present variable, self.varname
"""
#print 'updating coordinate info...'
        # check that the variable is in the file to begin with
if varname not in self.coordvars:
print 'Warning - variable %s not in file'%varname
varname=self.coordvars[0]
self.varname=varname
C = self.varcoords[varname].split()
self.ndim = len(C)
if self.ndim==1:
return
self.xcoord = C[0]
self.ycoord = C[1]
        if self.JRANGE is None:
            self.JRANGE = [0, self[self.xcoord].shape[0]+1]
        if self.IRANGE is None:
            self.IRANGE = [0, self[self.xcoord].shape[1]+1]
        # Check the dimension size
        if self.JRANGE[1] > self[self.xcoord].shape[0]+1:
            print 'Warning JRANGE outside of size range. Setting equal size.'
            self.JRANGE[1] = self[self.xcoord].shape[0]+1
        if self.IRANGE[1] > self[self.xcoord].shape[1]+1:
            print 'Warning IRANGE outside of size range. Setting equal size.'
            self.IRANGE[1] = self[self.xcoord].shape[1]+1
self.X = self[self.xcoord][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
self.Y = self[self.ycoord][self.JRANGE[0]:self.JRANGE[1],self.IRANGE[0]:self.IRANGE[1]]
self.xlims = [self.X.min(),self.X.max()]
self.ylims = [self.Y.min(),self.Y.max()]
# Load the long_name and units from the variable
try:
self.long_name = self.nc.variables[varname].long_name
except:
self.long_name = varname
try:
self.units = self.nc.variables[varname].units
except:
self.units = ' '
# Set the grid type
if self.xcoord[-3:]=='rho':
self.gridtype='rho'
self.mask=self.mask_rho
elif self.xcoord[-3:]=='n_u':
self.gridtype='u'
self.mask=self.mask_u
elif self.xcoord[-3:]=='n_v':
self.gridtype='v'
self.mask=self.mask_v
def _checkVertCoords(self,varname):
"""
Load the vertical coordinate info
"""
        # First make sure K is a list
        try:
            self.K[0]
        except TypeError:
            # not a list
            self.K = [self.K]
C = self.varcoords[varname].split()
ndim = len(C)
if ndim == 4:
self.zcoord = C[2]
self.Nz = len(self[self.zcoord])
if self.K[0] == -99:
self.K = range(0,self.Nz)
if self.zlayer==True: # Load all layers when zlayer is true
self.Z = np.array(self.K)
self.K = range(0,self.Nz)
if self.zcoord == 's_rho':
self.S = self.s_rho[self.K]
self.C = self.Cs_r[self.K]
elif self.zcoord == 's_w':
self.S = self.s_w[self.K]
self.C = self.Cs_w[self.K]
def _readVertCoords(self):
"""
Read the vertical coordinate information
"""
nc = self.nc
self.Cs_r = nc.variables['Cs_r'][:]
self.Cs_w = nc.variables['Cs_w'][:]
self.s_rho = nc.variables['s_rho'][:]
self.s_w = nc.variables['s_w'][:]
self.hc = nc.variables['hc'][:]
self.Vstretching = nc.variables['Vstretching'][:]
self.Vtransform = nc.variables['Vtransform'][:]
def _loadVarCoords(self):
"""
Load the variable coordinates into a dictionary
"""
self.varcoords={}
for vv in self.nc.variables.keys():
if hasattr(self.nc.variables[vv],'coordinates'):
self.varcoords.update({vv:self.nc.variables[vv].coordinates})
def _openNC(self):
"""
Load the netcdf object
"""
try:
self.nc = MFDataset(self.romsfile)
except:
self.nc = Dataset(self.romsfile, 'r')
def _loadTime(self):
"""
Load the netcdf time as a vector datetime objects
"""
#nc = Dataset(self.ncfile, 'r', format='NETCDF4')
nc = self.nc
t = nc.variables['ocean_time']
self.time = num2date(t[:],t.units)
self.Nt = np.size(self.time)
def __getitem__(self,y):
x = self.__dict__.__getitem__(y)
return x
def __setitem__(self,key,value):
if key == 'varname':
self.varname=value
self._checkCoords(value)
else:
self.__dict__[key]=value
class ROMSLagSlice(ROMS):
"""ROMS Lagrangian slice class"""
def __init__(self,x,y,time,width,nwidth,romsfile,**kwargs):
# Load the ROMS file
ROMS.__init__(self,romsfile,**kwargs)
# Clip points outside of the time and domain limits
self._clip_points(x,y,time)
# Create an array with the slice coordinates
self._create_slice_coords(width,nwidth)
# Reproject coordinates into distance along- and across-track
self._project_coords()
def __call__(self,varname):
"""
Load the variable name and interpolate onto all time steps
"""
# Load the data
self.loadData(varname=varname,tstep=range(self.Nt))
# Interpolate onto the time step
self.slicedata=np.zeros((self.Nt,self.ntrack,self.nwidth))
print 'Interpolating slice data...'
for tt in range(self.Nt):
#print 'Interpolating step %d of %d...'%(tt,self.Nt)
self.slicedata[tt,...]=\
self.interp(self.data[tt,...].squeeze())
def interp(self,phi):
"""
Interpolate onto the lagrangian grid
"""
        if self.xcoord == 'lon_rho':
            xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
            if not self.__dict__.has_key('Frho'):
                xy = np.array([self.lon_rho.ravel(),self.lat_rho.ravel()]).T
                self.Frho = interpXYZ(xy, xyout)
            F = self.Frho
        elif self.xcoord=='lon_psi':
            xyout = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
            if not self.__dict__.has_key('Fpsi'):
                xy = np.array([self.lon_psi.ravel(),self.lat_psi.ravel()]).T
                self.Fpsi = interpXYZ(xy, xyout)
            F = self.Fpsi
data = F(phi.ravel())
return data.reshape((self.ntrack,self.nwidth))
def tinterp(self,dt):
"""
Interpolate from the lagrangian grid to the timestep along the track
at t = t0 + dt
"""
# Find the high and low indices
tlow = np.zeros((self.ntrack,),np.int16)
thigh = np.zeros((self.ntrack,),np.int16)
for ii in range(self.ntrack):
ind = np.argwhere(self.track_tsec[ii]+dt>=self.tsec)
if ind.size>0:
tlow[ii]=ind[-1]
else:
tlow[ii]=0
            thigh[ii] = min(tlow[ii]+1, self.Nt-1)
# Calculate the interpolation weights
w1 =\
(self.track_tsec+dt-self.tsec[tlow])/(self.tsec[thigh]-self.tsec[tlow])
w1 = np.repeat(w1[...,np.newaxis],self.nwidth,axis=-1)
return (1.-w1)*self.slicedata[tlow,range(self.ntrack),:] +\
w1*self.slicedata[thigh,range(self.ntrack),:]
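    # tinterp is plain linear interpolation in time: for each track point the
    # weight w1 in [0, 1] measures where t0 + dt falls between the bracketing
    # model steps, and the result is (1 - w1)*value_low + w1*value_high.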
def project(self,lon,lat):
"""
Projects the coordinates in lon/lat into lagrangian coordinates
"""
xyin = np.array([self.lonslice.ravel(),self.latslice.ravel()]).T
xy = np.array([lon,lat]).T
if len(xy.shape)==1:
xy = xy[np.newaxis,...]
F = interpXYZ(xyin, xy)
return F(self.Xalong.ravel()), F(self.Ycross.ravel())
def pcolor(self,z,**kwargs):
scale=0.001
X = self.Xalong*scale
Y = self.Ycross*scale
ax=plt.gca()
h=plt.pcolormesh(X,Y,z,**kwargs)
ax.set_xlim([X.min(),X.max()])
ax.set_ylim([Y.min(),Y.max()])
return h
def contour(self,z,VV,filled=True,**kwargs):
scale=0.001
X = self.Xalong*scale
Y = self.Ycross*scale
ax=plt.gca()
if filled:
h=plt.contourf(X,Y,z,VV,**kwargs)
else:
h=plt.contour(X,Y,z,VV,**kwargs)
ax.set_xlim([X.min(),X.max()])
ax.set_ylim([Y.min(),Y.max()])
return h
def _clip_points(self,x,y,time):
time = np.array(time)
# Convert both times and check it is inside of the time domain
self.tsec = othertime.SecondsSince(self.time,basetime=self.time[0])
ttrack = othertime.SecondsSince(time,basetime=self.time[0])
indtime = operator.and_(ttrack>=0,ttrack<=self.tsec[-1])
# Check for points inside of the spatial domain
indx = operator.and_(x>=self.X.min(),x<=self.X.max())
indy = operator.and_(y>=self.Y.min(),y<=self.Y.max())
indxy = operator.and_(indx,indy)
ind = operator.and_(indtime,indxy)
self.track_time=time[ind]
self.track_tsec = othertime.SecondsSince(self.track_time,basetime=self.time[0])
self.track_x = x[ind]
self.track_y = y[ind]
self.ntrack = self.track_x.shape[0]
def _create_slice_coords(self,width,nwidth):
"""
Create the lagrangian coordinates
These are for interpolation
"""
self.centreline= MyLine([[self.track_x[ii],self.track_y[ii]]\
for ii in range(self.ntrack)])
# Compute the normalized distance along the line
normdist = (self.track_tsec-self.track_tsec[0])\
/(self.track_tsec[-1]-self.track_tsec[0])
#P = line.perpendicular(0.4,1.)
perplines = [self.centreline.perpline(normdist[ii],width) \
for ii in range(self.ntrack)]
self.nwidth=nwidth
# Initialize the output coordinates
self.lonslice = np.zeros((self.ntrack,self.nwidth))
self.latslice = np.zeros((self.ntrack,self.nwidth))
for ii,ll in enumerate(perplines):
points = ll.multipoint(self.nwidth)
for jj,pp in enumerate(points):
self.lonslice[ii,jj]=pp.x
self.latslice[ii,jj]=pp.y
def _project_coords(self):
"""
Project the slice into along and across track coordinates
These coordinates are for plotting only
"""
def dist(x,x0,y,y0):
return np.sqrt( (x-x0)**2. + (y-y0)**2. )
# Convert the slice to lambert conformal
LL = np.array([self.lonslice.ravel(),self.latslice.ravel()])
XY = ll2lcc(LL.T)
xslice = XY[:,0].reshape((self.ntrack,self.nwidth))
yslice = XY[:,1].reshape((self.ntrack,self.nwidth))
# Get the mid-point of the line and calculate the along-track distance
xmid = xslice[:,self.nwidth//2]
ymid = yslice[:,self.nwidth//2]
along_dist = np.zeros((self.ntrack,))
along_dist[1:] = np.cumsum(dist(xmid[1:],xmid[:-1],ymid[1:],ymid[:-1]))
# Get the across track distance
xend = xslice[0,:]
yend = yslice[0,:]
acrossdist = np.zeros((self.nwidth,))
acrossdist[1:] = np.cumsum(dist(xend[1:],xend[:-1],yend[1:],yend[:-1]))
acrossdist -= acrossdist.mean()
self.Ycross,self.Xalong =np.meshgrid(acrossdist,along_dist)
class ROMSslice(ROMS):
"""
Class for slicing ROMS data
"""
def __init__(self,ncfile,lon,lat,**kwargs):
"""
"""
ROMS.__init__(self,ncfile,**kwargs)
self.xyout = np.array([lon,lat]).T
self.nslice = self.xyout.shape[0]
def __call__(self,varname):
# Load the data
dataslice = self.loadData(varname=varname)
ndim = dataslice.ndim
# Create the interpolation object
self.xy = np.array([self.X.ravel(),self.Y.ravel()]).T
self.F = interpXYZ(self.xy,self.xyout)
Nt = len(self.tstep)
Nk = len(self.K)
# Interpolate onto the output data
data = np.zeros((Nt,Nk,self.nslice))
if ndim == 2:
return self.F(dataslice.ravel())
elif Nt>1 and Nk==1:
for tt in range(Nt):
data[tt,:,:] = self.F(dataslice[tt,:,:].ravel())
elif Nk>1 and Nt==1:
for kk in range(Nk):
data[:,kk,:] = self.F(dataslice[:,kk,:].ravel())
else: # 4D array
for kk in range(Nk):
for tt in range(Nt):
data[tt,kk,:] = self.F(dataslice[tt,kk,:,:].ravel())
data[data>1e36]=0.
return data.squeeze()
def lagrangian(self,varname,time):
"""
Lagrangian slice
Returns all of the data at each point along the slice with the
starting point for each slice beginning at time.
"""
self.tstep = range(self.Nt)
data = self.__call__(varname)
# Find the start time index
t0 = [self.getTstep(tt,tt)[0] for tt in time]
nt = self.Nt - min(t0)
sz = (nt,)+data.shape[1::]
dataout = np.zeros(sz)
for ii in range(self.nslice):
t1 = self.Nt-t0[ii]
dataout[0:t1,...,ii] = data[t0[ii]::,...,ii]
return dataout
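# A minimal usage sketch for ROMSslice (illustrative only; 'his.nc' and the
# way-points below are hypothetical, not taken from this module):
#
#   lon = np.array([-95.2, -95.0, -94.8])
#   lat = np.array([28.8, 29.0, 29.2])
#   sl = ROMSslice('his.nc', lon, lat)
#   temp = sl('temp')   # data interpolated onto the three way-points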
class roms_timeseries(ROMS, timeseries):
"""
Class for loading a timeseries object from ROMS model output
"""
IJ = False
varname = 'u'
zlayer=False
def __init__(self,ncfile,XY,z=None,**kwargs):
"""
Loads a time series from point X,Y. Set z = None (default) to load all layers
if self.IJ = True, loads index X=I, Y=J directly
"""
self.__dict__.update(kwargs)
self.XY = XY
self.z = z
# Initialise the class
ROMS.__init__(self,ncfile,varname=self.varname,K=[-99])
self.tstep = range(0,self.Nt) # Load all time steps
self.update()
def update(self):
"""
Updates the class
"""
#
self._checkCoords(self.varname)
# Load I and J indices from the coordinates
self.setIJ(self.XY)
# Load the vertical coordinates
        if self.z is not None:
            self.zlayer = True
        if not self.zlayer:
if self.ndim==4:
self.Z = self.calcDepth()[:,self.JRANGE[0]:self.JRANGE[1],\
self.IRANGE[0]:self.IRANGE[1]].squeeze()
else:
self.Z = self.z
# Load the data into a time series object
timeseries.__init__(self,self.time[self.tstep],self.loadData())
def contourf(self,clevs=20,**kwargs):
"""
z-t contour plot of the time series
"""
h1 = plt.contourf(self.time[self.tstep],self.Z,self.y.T,clevs,**kwargs)
#plt.colorbar()
plt.xticks(rotation=17)
return h1
def setIJ(self,xy):
if self.IJ:
I0 = xy[0]
J0 = xy[1]
else:
ind = self.findNearset(xy[0],xy[1],grid=self.gridtype)
J0=ind[0][0]
I0=ind[0][1]
self.JRANGE = [J0,J0+1]
self.IRANGE = [I0,I0+1]
def __setitem__(self,key,value):
if key == 'varname':
self.varname=value
self.update()
elif key == 'XY':
self.XY = value
self.update()
else:
self.__dict__[key]=value
class roms_subset(roms_grid):
"""
Class for subsetting ROMS output
"""
gridfile = None
def __init__(self,ncfiles,bbox,timelims,**kwargs):
self.__dict__.update(kwargs)
        if self.gridfile is None:
self.gridfile=ncfiles[0]
self.ncfiles = ncfiles
self.x0 = bbox[0]
self.x1 = bbox[1]
self.y0 = bbox[2]
self.y1 = bbox[3]
# Step 1) Find the time steps
self.t0 = datetime.strptime(timelims[0],'%Y%m%d%H%M%S')
self.t1 = datetime.strptime(timelims[1],'%Y%m%d%H%M%S')
# Multifile object
ftime = MFncdap(ncfiles,timevar='ocean_time')
ind0 = othertime.findNearest(self.t0,ftime.time)
ind1 = othertime.findNearest(self.t1,ftime.time)
self.time = ftime.time[ind0:ind1]
self.tind,self.fname = ftime(self.time) # list of time indices and corresponding files
self.Nt = len(self.tind)
# Step 2) Subset the grid variables
roms_grid.__init__(self,self.gridfile)
self.SubsetGrid()
# Step 3) Read the vertical coordinate variables
self.ReadVertCoords()
def SubsetGrid(self):
"""
Subset the grid variables
"""
#Find the grid indices
ind = self.findNearset(self.x0,self.y0)
self.J0=ind[0][0]
self.I0=ind[0][1]
ind = self.findNearset(self.x1,self.y1)
self.J1=ind[0][0]
self.I1=ind[0][1]
# Define the dimensions
M = self.J1-self.J0
N = self.I1-self.I0
self.eta_rho = M
self.xi_rho = N
self.eta_psi = M-1
self.xi_psi = N-1
self.eta_u = M-1
self.xi_u = N
self.eta_v = M
self.xi_v = N-1
# Subset the horizontal coordinates
self.lon_rho = self.lon_rho[self.J0:self.J1,self.I0:self.I1]
self.lat_rho = self.lat_rho[self.J0:self.J1,self.I0:self.I1]
self.mask_rho = self.mask_rho[self.J0:self.J1,self.I0:self.I1]
self.lon_psi = self.lon_psi[self.J0:self.J1-1,self.I0:self.I1-1]
self.lat_psi = self.lat_psi[self.J0:self.J1-1,self.I0:self.I1-1]
self.mask_psi = self.mask_psi[self.J0:self.J1-1,self.I0:self.I1-1]
self.lon_u = self.lon_u[self.J0:self.J1-1,self.I0:self.I1]
self.lat_u = self.lat_u[self.J0:self.J1-1,self.I0:self.I1]
self.mask_u = self.mask_u[self.J0:self.J1-1,self.I0:self.I1]
self.lon_v = self.lon_v[self.J0:self.J1,self.I0:self.I1-1]
self.lat_v = self.lat_v[self.J0:self.J1,self.I0:self.I1-1]
self.mask_v = self.mask_v[self.J0:self.J1,self.I0:self.I1-1]
self.h = self.h[self.J0:self.J1,self.I0:self.I1]
self.angle = self.angle[self.J0:self.J1,self.I0:self.I1]
def ReadVertCoords(self):
"""
"""
nc = Dataset(self.fname[0])
self.Cs_r = nc.variables['Cs_r'][:]
#self.Cs_w = nc.variables['Cs_w'][:]
self.s_rho = nc.variables['s_rho'][:]
#self.s_w = nc.variables['s_w'][:]
self.hc = nc.variables['hc'][:]
self.Vstretching = nc.variables['Vstretching'][:]
self.Vtransform = nc.variables['Vtransform'][:]
nc.close()
def ReadData(self,tstep):
"""
Reads the data from the file for the present time step
"""
fname = self.fname[tstep]
t0 = self.tind[tstep]
        print('Reading data at time: %s...'%datetime.strftime(self.time[tstep],'%Y-%m-%d %H:%M:%S'))
nc = Dataset(fname)
self.ocean_time = nc.variables['ocean_time'][t0]
self.zeta = nc.variables['zeta'][t0,self.J0:self.J1,self.I0:self.I1]
self.temp = nc.variables['temp'][t0,:,self.J0:self.J1,self.I0:self.I1]
self.salt = nc.variables['salt'][t0,:,self.J0:self.J1,self.I0:self.I1]
self.u = nc.variables['u'][t0,:,self.J0:self.J1-1,self.I0:self.I1]
self.v = nc.variables['v'][t0,:,self.J0:self.J1,self.I0:self.I1-1]
nc.close()
def Writefile(self,outfile,verbose=True):
"""
Writes subsetted grid and coordinate variables to a netcdf file
Code modified from roms.py in the Octant package
"""
self.outfile = outfile
Mp, Lp = self.lon_rho.shape
M, L = self.lon_psi.shape
N = self.s_rho.shape[0] # vertical layers
xl = self.lon_rho[self.mask_rho==1.0].ptp()
el = self.lat_rho[self.mask_rho==1.0].ptp()
# Write ROMS grid to file
nc = Dataset(outfile, 'w', format='NETCDF3_CLASSIC')
nc.Description = 'ROMS subsetted history file'
nc.Author = ''
nc.Created = datetime.now().isoformat()
nc.type = 'ROMS HIS file'
nc.createDimension('xi_rho', Lp)
nc.createDimension('xi_u', Lp)
nc.createDimension('xi_v', L)
nc.createDimension('xi_psi', L)
nc.createDimension('eta_rho', Mp)
nc.createDimension('eta_u', M)
nc.createDimension('eta_v', Mp)
nc.createDimension('eta_psi', M)
nc.createDimension('s_rho', N)
nc.createDimension('ocean_time', None)
nc.createVariable('xl', 'f8', ())
nc.variables['xl'].units = 'meters'
        nc.variables['xl'].assignValue(xl) # plain assignment would rebind the dict entry, not write the value
nc.createVariable('el', 'f8', ())
nc.variables['el'].units = 'meters'
        nc.variables['el'].assignValue(el)
nc.createVariable('spherical', 'S1', ())
        nc.variables['spherical'].assignValue('F')
def write_nc_var(var, name, dimensions, units=None):
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
nc.variables[name][:] = var
if verbose:
                print(' ... wrote %s' % name)
def create_nc_var(name, dimensions, units=None):
nc.createVariable(name, 'f8', dimensions)
if units is not None:
nc.variables[name].units = units
if verbose:
                print(' ... created %s' % name)
# Grid variables
write_nc_var(self.angle, 'angle', ('eta_rho', 'xi_rho'))
write_nc_var(self.h, 'h', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.mask_rho, 'mask_rho', ('eta_rho', 'xi_rho'))
write_nc_var(self.mask_u, 'mask_u', ('eta_u', 'xi_u'))
write_nc_var(self.mask_v, 'mask_v', ('eta_v', 'xi_v'))
write_nc_var(self.mask_psi, 'mask_psi', ('eta_psi', 'xi_psi'))
write_nc_var(self.lon_rho, 'lon_rho', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.lat_rho, 'lat_rho', ('eta_rho', 'xi_rho'), 'meters')
write_nc_var(self.lon_u, 'lon_u', ('eta_u', 'xi_u'), 'meters')
write_nc_var(self.lat_u, 'lat_u', ('eta_u', 'xi_u'), 'meters')
write_nc_var(self.lon_v, 'lon_v', ('eta_v', 'xi_v'), 'meters')
write_nc_var(self.lat_v, 'lat_v', ('eta_v', 'xi_v'), 'meters')
write_nc_var(self.lon_psi, 'lon_psi', ('eta_psi', 'xi_psi'), 'meters')
write_nc_var(self.lat_psi, 'lat_psi', ('eta_psi', 'xi_psi'), 'meters')
# Vertical coordinate variables
        write_nc_var(self.s_rho, 's_rho', ('s_rho',))
        write_nc_var(self.Cs_r, 'Cs_r', ('s_rho',))
write_nc_var(self.hc, 'hc', ())
write_nc_var(self.Vstretching, 'Vstretching', ())
write_nc_var(self.Vtransform, 'Vtransform', ())
# Create the data variables
        create_nc_var('ocean_time',('ocean_time',),'seconds since 1970-01-01 00:00:00')
create_nc_var('zeta',('ocean_time','eta_rho','xi_rho'),'meter')
create_nc_var('salt',('ocean_time','s_rho','eta_rho','xi_rho'),'psu')
create_nc_var('temp',('ocean_time','s_rho','eta_rho','xi_rho'),'degrees C')
create_nc_var('u',('ocean_time','s_rho','eta_u','xi_u'),'meter second-1')
create_nc_var('v',('ocean_time','s_rho','eta_v','xi_v'),'meter second-1')
nc.close()
def Writedata(self, tstep):
nc = Dataset(self.outfile, 'a')
nc.variables['ocean_time'][tstep]=self.ocean_time
nc.variables['zeta'][tstep,:,:]=self.zeta
nc.variables['salt'][tstep,:,:,:]=self.salt
nc.variables['temp'][tstep,:,:,:]=self.temp
nc.variables['u'][tstep,:,:,:]=self.u
nc.variables['v'][tstep,:,:,:]=self.v
nc.close()
def Go(self):
"""
Downloads and append each time step to a file
"""
for ii in range(0,self.Nt):
self.ReadData(ii)
self.Writedata(ii)
        print('##################\nDone!\n##################')
class roms_interp(roms_grid):
"""
    Class for interpolating ROMS output in space and time
"""
utmzone = 15
isnorth = True
# Interpolation options
interpmethod='idw' # 'nn', 'idw', 'kriging', 'griddata'
NNear=3
p = 1.0 # power for inverse distance weighting
# kriging options
varmodel = 'spherical'
nugget = 0.1
sill = 0.8
vrange = 250.0
def __init__(self,romsfile, xi, yi, zi, timei, **kwargs):
self.__dict__.update(kwargs)
self.romsfile = romsfile
self.xi = xi
self.yi = yi
self.zi = zi
self.timei = timei
# Step 1) Find the time steps
self.t0 = timei[0]
self.t1 = timei[-1]
# Multifile object
ftime = MFncdap(self.romsfile,timevar='ocean_time')
ind0 = othertime.findNearest(self.t0,ftime.time)
ind1 = othertime.findNearest(self.t1,ftime.time)
self.time = ftime.time[ind0:ind1+1]
self.tind,self.fname = ftime(self.time) # list of time indices and corresponding files
# Step 2) Prepare the grid variables for the interpolation class
roms_grid.__init__(self,self.romsfile[0])
# rho points
x,y = self.utmconversion(self.lon_rho,self.lat_rho,self.utmzone,self.isnorth)
self.xy_rho = np.vstack((x[self.mask_rho==1],y[self.mask_rho==1])).T
# uv point (averaged onto interior rho points)
self.mask_uv = self.mask_rho[0:-1,0:-1]
x = x[0:-1,0:-1]
y = y[0:-1,0:-1]
self.xy_uv = np.vstack((x[self.mask_uv==1],y[self.mask_uv==1])).T
# Step 3) Build the interpolants for rho and uv points
self.xy_out = np.vstack((xi.ravel(),yi.ravel())).T
self.Frho = interpXYZ(self.xy_rho,self.xy_out,method=self.interpmethod,NNear=self.NNear,\
p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange)
self.Fuv = interpXYZ(self.xy_uv,self.xy_out,method=self.interpmethod,NNear=self.NNear,\
p=self.p,varmodel=self.varmodel,nugget=self.nugget,sill=self.sill,vrange=self.vrange)
# Read the vertical coordinate
self.ReadVertCoords()
        # Dimension sizes
self.Nx = self.xy_out.shape[0]
self.Nz = self.zi.shape[0]
self.Nt = len(self.timei)
self.Nz_roms = self.s_rho.shape[0]
self.Nt_roms = self.time.shape[0]
def interp(self,zinterp='linear',tinterp='linear',setUV=True,seth=True):
"""
Performs the interpolation in this order:
1) Interpolate onto the horizontal coordinates
2) Interpolate onto the vertical coordinates
3) Interpolate onto the time coordinates
"""
# Initialise the output arrays @ roms time step
zetaroms, temproms, saltroms, uroms, vroms = self.initArrays(self.Nt_roms,self.Nx,self.Nz)
tempold = np.zeros((self.Nz_roms,self.Nx))
saltold = np.zeros((self.Nz_roms,self.Nx))
uold = np.zeros((self.Nz_roms,self.Nx))
vold = np.zeros((self.Nz_roms,self.Nx))
# Interpolate h
h = self.Frho(self.h[self.mask_rho==1])
# Loop through each time step
for tstep in range(0,self.Nt_roms):
# Read all variables
self.ReadData(tstep)
# Interpolate zeta
if seth:
zetaroms[tstep,:] = self.Frho(self.zeta[self.mask_rho==1])
# Interpolate other 3D variables
for k in range(0,self.Nz_roms):
tmp = self.temp[k,:,:]
tempold[k,:] = self.Frho(tmp[self.mask_rho==1])
tmp = self.salt[k,:,:]
saltold[k,:] = self.Frho(tmp[self.mask_rho==1])
if setUV:
tmp = self.u[k,:,:]
uold[k,:] = self.Fuv(tmp[self.mask_uv==1])
tmp = self.v[k,:,:]
vold[k,:] = self.Fuv(tmp[self.mask_uv==1])
            #### added by dongyu: zero out unrealistically large velocities ####
            # (a threshold clip rather than a true low-pass filter)
            vft = 5  # velocity threshold (m/s)
            uold[abs(uold)>vft] = 0.0
            vold[abs(vold)>vft] = 0.0
# Calculate depths (zeta dependent)
            zroms = get_depth(self.s_rho,self.Cs_r,self.hc, h, zeta=zetaroms[tstep,:], Vtransform=self.Vtransform)
# Interpolate vertically
for ii in range(0,self.Nx):
y = tempold[:,ii]
Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
temproms[tstep,:,ii] = Fz(self.zi)
y = saltold[:,ii]
Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
saltroms[tstep,:,ii] = Fz(self.zi)
if setUV:
y = uold[:,ii]
Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
uroms[tstep,:,ii] = Fz(self.zi)
y = vold[:,ii]
Fz = interpolate.interp1d(zroms[:,ii],y,kind=zinterp,bounds_error=False,fill_value=y[0])
vroms[tstep,:,ii] = Fz(self.zi)
# End time loop
# Initialise the output arrays @ output time step
# Interpolate temporally
if self.Nt_roms > 1:
            print('Temporally interpolating ROMS variables...')
troms = othertime.SecondsSince(self.time)
tout = othertime.SecondsSince(self.timei)
if seth:
                print('\tzeta...')
Ft = interpolate.interp1d(troms,zetaroms,axis=0,kind=tinterp,bounds_error=False)
zetaout = Ft(tout)
else:
zetaout=-1
            print('\ttemp...')
Ft = interpolate.interp1d(troms,temproms,axis=0,kind=tinterp,bounds_error=False)
tempout = Ft(tout)
            print('\tsalt...')
Ft = interpolate.interp1d(troms,saltroms,axis=0,kind=tinterp,bounds_error=False)
saltout = Ft(tout)
if setUV:
                print('\tu...')
Ft = interpolate.interp1d(troms,uroms,axis=0,kind=tinterp,bounds_error=False)
uout = Ft(tout)
                print('\tv...')
Ft = interpolate.interp1d(troms,vroms,axis=0,kind=tinterp,bounds_error=False)
vout = Ft(tout)
else:
uout = vout = -1
else:
zetaout = zetaroms
tempout = temproms
saltout = saltroms
uout = uroms
vout = vroms
return zetaout, tempout, saltout, uout, vout
def initArrays(self,Nt,Nx,Nz):
zetaout = np.zeros((Nt,Nx))
tempout = np.zeros((Nt,Nz,Nx))
saltout = np.zeros((Nt,Nz,Nx))
uout = np.zeros((Nt,Nz,Nx))
vout = np.zeros((Nt,Nz,Nx))
return zetaout, tempout, saltout, uout, vout
def ReadData(self,tstep):
"""
Reads the data from the file for the present time step
"""
fname = self.fname[tstep]
t0 = self.tind[tstep]
        print('Interpolating data at time: %s of %s...'%(datetime.strftime(self.time[tstep],'%Y-%m-%d %H:%M:%S'),\
            datetime.strftime(self.time[-1],'%Y-%m-%d %H:%M:%S')))
nc = Dataset(fname)
self.ocean_time = nc.variables['ocean_time'][t0]
self.zeta = nc.variables['zeta'][t0,:,:]
self.temp = nc.variables['temp'][t0,:,:,:]
self.salt = nc.variables['salt'][t0,:,:,:]
u = nc.variables['u'][t0,:,:,:]
v = nc.variables['v'][t0,:,:,:]
nc.close()
# Rotate the vectors
self.u,self.v = rotateUV( (u[...,:,0:-1]+u[...,:,1::])*0.5,(v[...,0:-1,:]+v[...,1::,:])*0.5,self.angle[0:-1,0:-1])
def ReadVertCoords(self):
"""
"""
nc = Dataset(self.romsfile[0])
self.Cs_r = nc.variables['Cs_r'][:]
#self.Cs_w = nc.variables['Cs_w'][:]
self.s_rho = nc.variables['s_rho'][:]
#self.s_w = nc.variables['s_w'][:]
self.hc = nc.variables['hc'][:]
self.Vstretching = nc.variables['Vstretching'][:]
self.Vtransform = nc.variables['Vtransform'][:]
nc.close()
class MFncdap(object):
"""
Multi-file class for opendap netcdf files
MFDataset module is not compatible with opendap data
"""
timevar = 'time'
def __init__(self,ncfilelist,**kwargs):
self.__dict__.update(kwargs)
self.timelookup = {}
self.time = np.zeros((0,))
for f in ncfilelist:
            print(f)
nc = Dataset(f)
t = nc.variables[self.timevar]
time = num2date(t[:],t.units)
nc.close()
self.timelookup.update({f:time})
self.time = np.hstack((self.time,np.asarray(time)))
self.time = np.asarray(self.time)
def __call__(self,time):
"""
Return the filenames and time index of the closest time
"""
fname = []
tind =[]
for t in time:
flag=1
for f in self.timelookup.keys():
if t >= self.timelookup[f][0] and t<=self.timelookup[f][-1]:
# print 'Found tstep %s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')
tind.append(othertime.findNearest(t,self.timelookup[f][:]))
fname.append(f)
flag=0
# if flag:
# print 'Warning - could not find matching file for time:%s'%datetime.strptime(t,'%Y-%m-%d %H:%M:%S')
# tind.append(-1)
# fname.append(-1)
return tind, fname
def get_depth(S,C,hc,h,zeta=None, Vtransform=1):
"""
Calculates the sigma coordinate depth
"""
    if zeta is None:
zeta = 0.0*h
N = len(S)
#Nj,Ni = np.size(h)
shp = (N,)+h.shape
z = np.zeros(shp)
if Vtransform == 1:
for k in range(0,N):
z0 = (S[k]-C[k])*hc + C[k]*h
z[k,...] = z0 + (zeta *(1.0 + z0/h))
elif Vtransform == 2:
for k in range(0,N):
z0 = (hc*S[k]+C[k]*h)/(hc+h)
z[k,...] = zeta + (zeta+h)*z0
return z
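# A quick check of get_depth() (the values below are assumptions for
# illustration, not model output):
#
#   S = np.array([-0.75, -0.25])   # s_rho
#   C = np.array([-0.60, -0.20])   # Cs_r
#   z = get_depth(S, C, 10.0, np.array([100.0]))
#   # With zeta defaulting to zero and Vtransform=1,
#   # z[k] = (S[k]-C[k])*hc + C[k]*h, e.g. z[0] = -1.5 - 60.0 = -61.5 m.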
def rotateUV(uroms,vroms,ang):
"""
Rotates ROMS output vectors to cartesian u,v
"""
u = uroms*np.cos(ang) - vroms*np.sin(ang)
v = uroms*np.sin(ang) + vroms*np.cos(ang)
return u,v
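# Sanity check for rotateUV() (illustrative): rotating the unit vector
# (1, 0) by ang = pi/2 gives (0, 1):
#
#   u, v = rotateUV(np.array([1.0]), np.array([0.0]), np.pi/2)
#   # u ~ 0.0, v ~ 1.0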
###############
## Testing
##grdfile = 'http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6_grid/txla_grd_v4_new.nc'
#grdfile = 'C:\\Projects\\GOMGalveston\\MODELLING\\ROMS\\txla_grd_v4_new.nc'
##grd = roms_grid(grdfile)
#
##ncfiles = ['http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6/ocean_his_%04d.nc'%i for i in range(1,3)]
##MF = MFncdap(ncfiles,timevar='ocean_time')
##
##tsteps = [datetime(2003,2,16)+timedelta(hours=i*4) for i in range(0,24)]
##tind,fname = MF(tsteps)
#
#ncfiles = ['http://barataria.tamu.edu:8080/thredds/dodsC/txla_nesting6/ocean_his_%04d.nc'%i for i in range(100,196)]
#timelims = ('20090501000000','20090701000000')
##timelims = ('20090501000000','20090502000000')
#bbox = [-95.53,-94.25,28.3,30.0]
#
#roms = roms_subset(ncfiles,bbox,timelims,gridfile=grdfile)
#outfile = 'C:\\Projects\\GOMGalveston\\MODELLING\\ROMS\\txla_subset_HIS_MayJun2009.nc'
#roms.Writefile(outfile)
#roms.Go()
#
##roms2 = roms_subset([outfile],bbox,timelims)
|
|
"""Main structure class for MathML formatting."""
import sys, contextmakers, measurers, generators
from xml import sax
from nodelocator import NodeLocator
def isHighSurrogate(ch):
"""Tests whether a Unicode character is from the high surrogates range"""
code = ord(ch)
return (0xD800 <= code and code <= 0xDBFF)
def isLowSurrogate(ch):
"""Tests whether a Unicode character is from the low surrogates range"""
code = ord(ch)
    return (0xDC00 <= code and code <= 0xDFFF)
def decodeSurrogatePair(hi, lo):
"""Returns a scalar value that corresponds to a surrogate pair"""
return ((ord(hi) - 0xD800) * 0x400) + (ord(lo) - 0xDC00) + 0x10000
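# Example: the pair U+D83D, U+DE00 decodes to U+1F600 - a quick check of
# the formula above:
#
#   assert decodeSurrogatePair(u'\ud83d', u'\ude00') == 0x1F600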
globalDefaults = {
# Font and color properties
u"mathvariant": u"normal",
u"mathsize": u"12pt",
u"mathcolor": u"black",
u"mathbackground": u"transparent",
u"displaystyle": u"false",
u"scriptlevel": u"0",
# Script size factor and minimum value
u"scriptsizemultiplier": u"0.71",
u"scriptminsize": u"8pt",
# Spaces
u"veryverythinmathspace": u"0.0555556em",
u"verythinmathspace": u"0.111111em",
u"thinmathspace": u"0.166667em",
u"mediummathspace": u"0.222222em",
u"thickmathspace": u"0.277778em",
u"verythickmathspace": u"0.333333em",
u"veryverythickmathspace": u"0.388889em",
# Line thickness and slope for mfrac
u"linethickness": "1",
u"bevelled": u"false",
u"enumalign": u"center",
u"denomalign": u"center",
# String quotes for ms
u"lquote": u"\"",
u"rquote": u"\"",
# Properties for mspace
u"height": u"0ex",
u"depth": u"0ex",
u"width": u"0em",
# Properties for mfenced
u"open": u"(",
u"close": u")",
u"separators": u",",
# Property for menclose
u"notation": u"longdiv",
# Properties for mtable
u"align": u"axis",
u"rowalign": u"baseline",
u"columnalign": u"center",
u"columnwidth": u"auto",
u"equalrows": u"false",
u"equalcolumns": u"false",
u"rowspacing": u"1.0ex",
u"columnspacing": u"0.8em",
u"framespacing": u"0.4em 0.5ex",
u"rowlines": u"none",
u"columnlines": u"none",
u"frame": u"none"
}
specialChars = { u'\u2145': u'D',
u'\u2146': u'd',
u'\u2147': u'e',
u'\u2148': u'i',
u'\u00A0': u' ' }
class FontMetricRecord:
"""Structure to track usage of a single font family"""
def __init__(self, family, metric):
self.family = family
self.metric = metric
self.used = False
class MathNode:
"""MathML node descriptor.
This class defines properties and methods that permit to building blocks
to combine with each other, creating a complex mathematical expression.
It uses dynamic binding to find methods to process specific MathML
elements: these methods are contained in three other modules -
contextmakers, measurers, and generators.
"""
def __init__(self, elementName, attributes, locator, config, parent):
self.elementName = elementName
self.config = config
if locator is not None:
self.locator = locator
elif parent is not None: # handy when we add nodes in preprocessing
self.locator = parent.locator
else:
self.locator = NodeLocator(None)
self.text = u''
self.children = []
self.attributes = attributes
self.parent = parent
self.metriclist = None
self.nominalMetric = None
if parent is not None:
self.nodeIndex = len(parent.children)
self.defaults = parent.defaults
parent.children.append(self)
else:
self.defaults = globalDefaults.copy()
self.defaults.update(config.defaults)
self.nodeIndex = 0
def makeContext (self):
contextmakers.__dict__.get(u"context_"+self.elementName,
contextmakers.default_context)(self)
def makeChildContext (self, child):
contextmakers.__dict__.get(u"child_context_"+self.elementName,
contextmakers.default_child_context)(self, child)
def measure(self):
# Create the context for the node
self.makeContext()
# Measure all children
for ch in self.children: ch.measure()
# Perform node-specific measurement
self.measureNode()
def measureNode(self):
measureMethod = measurers.__dict__.get(u"measure_"+self.elementName,
measurers.default_measure)
if self.config.verbose and measureMethod is measurers.default_measure:
self.warning("MathML element '%s' is unsupported" % self.elementName)
measureMethod(self)
def draw (self, output):
generators.__dict__.get(u"draw_"+self.elementName,
generators.default_draw)(self, output)
def makeImage(self, output):
if self.elementName != 'math':
self.warning("Root element in MathML document must be 'math'")
self.measure()
generators.drawImage(self, output)
def warning(self, msg):
self.locator.message(msg, "WARNING")
def error(self, msg):
self.locator.message(msg, "ERROR")
def info(self, msg):
if self.config.verbose: self.locator.message(msg, "INFO")
def debug(self, event, msg):
if event.strip() in self.config.debug: self.locator.message(msg, "DEBUG")
def parseInt (self, x):
        try: return int(x, 10)
        except (TypeError, ValueError):
            # int('abc', 10) raises ValueError, which the original
            # TypeError-only handler missed
            self.error("Cannot parse string '%s' as an integer" % str(x))
            return 0
def parseFloat (self, x):
try: value = float(x)
except ValueError:
self.error("Cannot parse string '%s' as a float" % str(x))
return 0.0
text = str(value).lower()
if text.find("nan") >= 0 or text.find("inf") >= 0:
self.error("Cannot parse string '%s' as a float" % str(x))
return 0.0
return value
def parseLength(self, lenattr, unitlessScale = 0.75):
lenattr = lenattr.strip()
if lenattr.endswith("pt"):
return self.parseFloat(lenattr[:-2])
elif lenattr.endswith("mm"):
return self.parseFloat(lenattr[:-2]) * 72.0 / 25.4
elif lenattr.endswith("cm"):
return self.parseFloat(lenattr[:-2]) * 72.0 / 2.54
elif lenattr.endswith("in"):
return self.parseFloat(lenattr[:-2]) * 72.0
elif lenattr.endswith("pc"):
return self.parseFloat(lenattr[:-2]) * 12.0
elif lenattr.endswith("px"):
# pixels are calculated for 96 dpi
return self.parseFloat(lenattr[:-2]) * 72.0 / 96.0
elif lenattr.endswith("em"):
return self.parseFloat(lenattr[:-2]) * self.fontSize
elif lenattr.endswith("ex"):
return self.parseFloat(lenattr[:-2]) * self.fontSize * self.metric().xheight
else:
# unitless lengths are treated as if expressed in pixels
return self.parseFloat(lenattr) * unitlessScale
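    # Worked examples for parseLength(), assuming fontSize == 12.0 and
    # results in printer's points (illustrative values only):
    #   "12pt"   -> 12.0
    #   "25.4mm" -> 25.4 * 72.0 / 25.4 = 72.0
    #   "96px"   -> 96.0 * 72.0 / 96.0 = 72.0
    #   "2em"    -> 2.0 * 12.0 = 24.0
    #   "10"     -> 10.0 * 0.75 = 7.5 (unitless, treated as pixels)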
def parseSpace(self, spaceattr, unitlessScale = 0.75):
sign = 1.0
spaceattr = spaceattr.strip()
if spaceattr.endswith(u"mathspace"):
if spaceattr.startswith(u"negative"):
sign = -1.0
spaceattr = spaceattr[8:]
            realspaceattr = self.defaults.get(spaceattr)
if realspaceattr is None:
self.error("Bad space token: '%s'" % spaceattr)
realspaceattr = "0em"
return self.parseLength(realspaceattr, unitlessScale)
else:
return self.parseLength(spaceattr, unitlessScale)
def parsePercent(self, lenattr, percentBase):
value = self.parseFloat(lenattr[:-1])
if value is not None: return percentBase * value / 100
else: return 0
def parseLengthOrPercent(self, lenattr, percentBase, unitlessScale = 0.75):
if lenattr.endswith(u"%"): return self.parsePercent(lenattr, percentBase)
else: return self.parseLength(lenattr, unitlessScale)
def parseSpaceOrPercent(self, lenattr, percentBase, unitlessScale = 0.75):
if lenattr.endswith(u"%"): return self.parsePercent(lenattr, percentBase)
else: return self.parseSpace(lenattr, unitlessScale)
def getProperty(self, key, defvalue = None):
return self.attributes.get(key, self.defaults.get(key, defvalue))
def getListProperty(self, attr, value = None):
if value is None: value = self.getProperty(attr)
splitvalue = value.split()
if len(splitvalue) > 0: return splitvalue
self.error("Bad value for '%s' attribute: empty list" % attr)
return self.defaults[attr].split()
def getUCSText(self):
codes = []
hisurr = None
for ch in self.text:
chcode = ord(ch)
# Processing surrogate pairs
if isLowSurrogate(ch):
if hisurr is None:
self.error("Invalid Unicode sequence - low surrogate character (U+%X) not preceded by a high surrogate" % ord(ch))
else:
chcode = decodeSurrogatePair(hisurr, ch)
hisurr = None
if hisurr is not None:
self.error("Invalid Unicode sequence - high surrogate character (U+%X) not followed by a low surrogate" % ord(hisurr))
hisurr = None
if isHighSurrogate(ch):
hisurr = ch; continue
codes.append(chcode)
if hisurr is not None:
self.error("Invalid Unicode sequence - high surrogate character (U+%X) not followed by a low surrogate" % ord(hisurr))
return codes
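    # Example: for self.text == u'A\ud835\udc00', getUCSText() returns
    # [0x41, 0x1D400] - the surrogate pair collapses to a single scalar
    # value (illustrative).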
def fontpool(self):
if self.metriclist is None:
def fillMetricList(familylist):
metriclist = []
for family in familylist:
metric = self.config.findfont(self.fontweight, self.fontstyle, family)
if metric is not None:
metriclist.append(FontMetricRecord(family, metric))
if len(metriclist) == 0:
self.warning("Cannot find any font metric for family "+(", ".join(familylist)))
return None
else: return metriclist
self.metriclist = fillMetricList(self.fontfamilies)
if self.metriclist is None:
self.fontfamilies = self.config.fallbackFamilies
self.metriclist = fillMetricList(self.fontfamilies)
if self.metriclist is None:
self.error("Fatal error: cannot find any font metric for the node; fallback font families misconfiguration")
raise sax.SAXException("Fatal error: cannot find any font metric for the node")
return self.metriclist
def metric(self):
if self.nominalMetric is None:
self.nominalMetric = self.fontpool()[0].metric
for fd in self.metriclist:
if fd.used:
self.nominalMetric = fd.metric; break
return self.nominalMetric
def axis(self):
return self.metric().axisposition * self.fontSize
def nominalLineWidth(self):
return self.metric().rulewidth * self.fontSize
def nominalThinStrokeWidth(self):
return 0.04 * self.originalFontSize
def nominalMediumStrokeWidth(self):
return 0.06 * self.originalFontSize
def nominalThickStrokeWidth(self):
return 0.08 * self.originalFontSize
def nominalLineGap(self):
return self.metric().vgap * self.fontSize
def nominalAscender(self):
return self.metric().ascender * self.fontSize
def nominalDescender(self):
return (- self.metric().descender * self.fontSize)
def hasGlyph(self, ch):
for fdesc in self.fontpool():
if fdesc.metric.chardata.get(ch) is not None:
return True
return False
def findChar(self, ch):
for fd in self.fontpool():
cm = fd.metric.chardata.get(ch)
if cm: return (cm, fd)
else:
            if 0 < ch < 0xFFFF and unichr(ch) in specialChars:
return self.findChar(ord(specialChars[unichr(ch)]))
self.warning("Glyph U+%X not found" % ch)
return None
def measureText(self):
"""Measures text contents of a node"""
if len(self.text) == 0:
self.isSpace = True; return
        cm0 = cm1 = None
ucstext = self.getUCSText()
for chcode in ucstext:
chardesc = self.findChar(chcode)
if chardesc is None:
self.width += self.metric().missingGlyph.width
else:
(cm, fd) = chardesc
fd.used = True
if chcode == ucstext[0]: cm0 = cm
if chcode == ucstext[-1]: cm1 = cm
self.width += cm.width
if self.height + self.depth == 0:
self.height = cm.bbox[3]
self.depth = - cm.bbox[1]
elif cm.bbox[3] != cm.bbox[1]: # exclude space
self.height = max (self.height, cm.bbox[3])
self.depth = max (self.depth, - cm.bbox[1])
# Normalize to the font size
self.width *= self.fontSize
self.depth *= self.fontSize
self.height *= self.fontSize
# Add ascender/descender values
self.ascender = self.nominalAscender()
self.descender = self.nominalDescender()
# Shape correction
if cm0 is not None: self.leftBearing = max(0, - cm0.bbox[0]) * self.fontSize
        if cm1 is not None: self.rightBearing = max(0, cm1.bbox[2] - cm1.width) * self.fontSize
self.width += self.leftBearing + self.rightBearing
# Reset nominal metric
self.nominalMetric = None
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import os
import platform
import subprocess
import sys
from distutils.command.build import build
import pkg_resources
from setuptools import find_packages, setup
from setuptools.command.install import install
from setuptools.command.test import test
base_dir = os.path.dirname(__file__)
about = {}
with open(os.path.join(base_dir, "cryptography", "__about__.py")) as f:
exec(f.read(), about)
SETUPTOOLS_DEPENDENCY = "setuptools"
CFFI_DEPENDENCY = "cffi>=0.8"
SIX_DEPENDENCY = "six>=1.4.1"
VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
requirements = [
CFFI_DEPENDENCY,
SIX_DEPENDENCY,
SETUPTOOLS_DEPENDENCY
]
# If you add a new dep here you probably need to add it in the tox.ini as well
test_requirements = [
"pytest",
"pyasn1",
"pretend",
"iso8601",
]
# If there's no vectors locally that probably means we are in a tarball and
# need to go and get the matching vectors package from PyPI
if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
test_requirements.append(VECTORS_DEPENDENCY)
def cc_is_available():
return sys.platform == "darwin" and list(map(
int, platform.mac_ver()[0].split("."))) >= [10, 8, 0]
backends = [
"openssl = cryptography.hazmat.backends.openssl:backend"
]
if cc_is_available():
backends.append(
"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend",
)
def get_ext_modules():
from cryptography.hazmat.bindings.commoncrypto.binding import (
Binding as CommonCryptoBinding
)
from cryptography.hazmat.bindings.openssl.binding import (
Binding as OpenSSLBinding
)
from cryptography.hazmat.primitives import constant_time, padding
ext_modules = [
OpenSSLBinding().ffi.verifier.get_extension(),
constant_time._ffi.verifier.get_extension(),
padding._ffi.verifier.get_extension()
]
if cc_is_available():
ext_modules.append(CommonCryptoBinding().ffi.verifier.get_extension())
return ext_modules
class CFFIBuild(build):
"""
This class exists, instead of just providing ``ext_modules=[...]`` directly
in ``setup()`` because importing cryptography requires we have several
packages installed first.
By doing the imports here we ensure that packages listed in
``setup_requires`` are already installed.
"""
def finalize_options(self):
self.distribution.ext_modules = get_ext_modules()
build.finalize_options(self)
class CFFIInstall(install):
"""
    As a consequence of CFFIBuild and its late addition of ext_modules, we
    need the equivalent for the ``install`` command to install into the
    platlib install-dir rather than purelib.
"""
def finalize_options(self):
self.distribution.ext_modules = get_ext_modules()
install.finalize_options(self)
class PyTest(test):
def finalize_options(self):
test.finalize_options(self)
self.test_args = []
self.test_suite = True
# This means there's a vectors/ folder with the package in here.
# cd into it, install the vectors package and then refresh sys.path
if VECTORS_DEPENDENCY not in test_requirements:
subprocess.check_call(
[sys.executable, "setup.py", "install"], cwd="vectors"
)
pkg_resources.get_distribution("cryptography_vectors").activate()
def run_tests(self):
# Import here because in module scope the eggs are not loaded.
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
def keywords_with_side_effects(argv):
"""
Get a dictionary with setup keywords that (can) have side effects.
:param argv: A list of strings with command line arguments.
:returns: A dictionary with keyword arguments for the ``setup()`` function.
This setup.py script uses the setuptools 'setup_requires' feature because
this is required by the cffi package to compile extension modules. The
purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
build process as a result of setup.py invocations that don't need the cffi
module to be built (setup.py serves the dual purpose of exposing package
metadata).
All of the options listed by ``python setup.py --help`` that print
information should be recognized here. The commands ``clean``,
``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
Any combination of these options and commands is also supported.
This function was originally based on the `setup.py script`_ of SciPy (see
also the discussion in `pip issue #25`_).
.. _pip issue #25: https://github.com/pypa/pip/issues/25
.. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
"""
no_setup_requires_arguments = (
'-h', '--help',
'-n', '--dry-run',
'-q', '--quiet',
'-v', '--verbose',
'-V', '--version',
'--author',
'--author-email',
'--classifiers',
'--contact',
'--contact-email',
'--description',
'--egg-base',
'--fullname',
'--help-commands',
'--keywords',
'--licence',
'--license',
'--long-description',
'--maintainer',
'--maintainer-email',
'--name',
'--no-user-cfg',
'--obsoletes',
'--platforms',
'--provides',
'--requires',
'--url',
'clean',
'egg_info',
'register',
'sdist',
'upload',
)
def is_short_option(argument):
"""Check whether a command line argument is a short option."""
return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
def expand_short_options(argument):
"""Expand combined short options into canonical short options."""
return ('-' + char for char in argument[1:])
def argument_without_setup_requirements(argv, i):
"""Check whether a command line argument needs setup requirements."""
if argv[i] in no_setup_requires_arguments:
# Simple case: An argument which is either an option or a command
# which doesn't need setup requirements.
return True
elif (is_short_option(argv[i]) and
all(option in no_setup_requires_arguments
for option in expand_short_options(argv[i]))):
# Not so simple case: Combined short options none of which need
# setup requirements.
return True
elif argv[i - 1:i] == ['--egg-base']:
            # Tricky case: --egg-base takes an argument which should not make
            # us use setup_requires (defeating the purpose of this code).
return True
else:
return False
if all(argument_without_setup_requirements(argv, i)
for i in range(1, len(argv))):
return {
"cmdclass": {
"build": DummyCFFIBuild,
"install": DummyCFFIInstall,
"test": DummyPyTest,
}
}
else:
return {
"setup_requires": requirements,
"cmdclass": {
"build": CFFIBuild,
"install": CFFIInstall,
"test": PyTest,
}
}
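# For example (hypothetical invocations): keywords_with_side_effects(
# ['setup.py', '--help']) returns only the Dummy* cmdclass dict, so no cffi
# build is triggered, while keywords_with_side_effects(['setup.py', 'build'])
# returns {'setup_requires': requirements, 'cmdclass': {...}} with the real
# CFFIBuild/CFFIInstall/PyTest classes.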
setup_requires_error = ("Requested setup command that needs 'setup_requires' "
"while command line arguments implied a side effect "
"free command or option.")
class DummyCFFIBuild(build):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py build`` as
one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
class DummyCFFIInstall(install):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py install``
as one of the 'side effect free' commands or options.
"""
def run(self):
raise RuntimeError(setup_requires_error)
class DummyPyTest(test):
"""
This class makes it very obvious when ``keywords_with_side_effects()`` has
incorrectly interpreted the command line arguments to ``setup.py test`` as
one of the 'side effect free' commands or options.
"""
def run_tests(self):
raise RuntimeError(setup_requires_error)
with open(os.path.join(base_dir, "README.rst")) as f:
long_description = f.read()
setup(
name=about["__title__"],
version=about["__version__"],
description=about["__summary__"],
long_description=long_description,
license=about["__license__"],
url=about["__uri__"],
author=about["__author__"],
author_email=about["__email__"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: BSD",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
],
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
install_requires=requirements,
tests_require=test_requirements,
# for cffi
zip_safe=False,
ext_package="cryptography",
entry_points={
"cryptography.backends": backends,
},
**keywords_with_side_effects(sys.argv)
)
|
|
"""
Support for Cover devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover/
"""
import asyncio
from datetime import timedelta
import functools as ft
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.components import group
from homeassistant.const import (
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER, SERVICE_OPEN_COVER_TILT, SERVICE_CLOSE_COVER_TILT,
SERVICE_STOP_COVER_TILT, SERVICE_SET_COVER_TILT_POSITION, STATE_OPEN,
STATE_CLOSED, STATE_UNKNOWN, ATTR_ENTITY_ID)
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'cover'
DEPENDENCIES = ['group']
SCAN_INTERVAL = timedelta(seconds=15)
GROUP_NAME_ALL_COVERS = 'all covers'
ENTITY_ID_ALL_COVERS = group.ENTITY_ID_FORMAT.format('all_covers')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
DEVICE_CLASSES = [
'window', # Window control
'garage', # Garage door control
]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
SUPPORT_OPEN = 1
SUPPORT_CLOSE = 2
SUPPORT_SET_POSITION = 4
SUPPORT_STOP = 8
SUPPORT_OPEN_TILT = 16
SUPPORT_CLOSE_TILT = 32
SUPPORT_STOP_TILT = 64
SUPPORT_SET_TILT_POSITION = 128
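# The SUPPORT_* constants form a bitmask. A minimal sketch of how a caller
# might test a capability (assuming `features` came from supported_features):
#
#   features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
#   if features & SUPPORT_SET_POSITION:
#       pass  # the cover accepts set_cover_position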
ATTR_CURRENT_POSITION = 'current_position'
ATTR_CURRENT_TILT_POSITION = 'current_tilt_position'
ATTR_POSITION = 'position'
ATTR_TILT_POSITION = 'tilt_position'
COVER_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
COVER_SET_COVER_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
COVER_SET_COVER_TILT_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_TILT_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
SERVICE_TO_METHOD = {
SERVICE_OPEN_COVER: {'method': 'async_open_cover'},
SERVICE_CLOSE_COVER: {'method': 'async_close_cover'},
SERVICE_SET_COVER_POSITION: {
'method': 'async_set_cover_position',
'schema': COVER_SET_COVER_POSITION_SCHEMA},
SERVICE_STOP_COVER: {'method': 'async_stop_cover'},
SERVICE_OPEN_COVER_TILT: {'method': 'async_open_cover_tilt'},
SERVICE_CLOSE_COVER_TILT: {'method': 'async_close_cover_tilt'},
SERVICE_STOP_COVER_TILT: {'method': 'async_stop_cover_tilt'},
SERVICE_SET_COVER_TILT_POSITION: {
'method': 'async_set_cover_tilt_position',
'schema': COVER_SET_COVER_TILT_POSITION_SCHEMA},
}
def is_closed(hass, entity_id=None):
"""Return if the cover is closed based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_COVERS
return hass.states.is_state(entity_id, STATE_CLOSED)
def open_cover(hass, entity_id=None):
"""Open all or specified cover."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_OPEN_COVER, data)
def close_cover(hass, entity_id=None):
"""Close all or specified cover."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_CLOSE_COVER, data)
def set_cover_position(hass, position, entity_id=None):
"""Move to specific position all or specified cover."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_POSITION] = position
hass.services.call(DOMAIN, SERVICE_SET_COVER_POSITION, data)
def stop_cover(hass, entity_id=None):
"""Stop all or specified cover."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_STOP_COVER, data)
def open_cover_tilt(hass, entity_id=None):
"""Open all or specified cover tilt."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_OPEN_COVER_TILT, data)
def close_cover_tilt(hass, entity_id=None):
"""Close all or specified cover tilt."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_CLOSE_COVER_TILT, data)
def set_cover_tilt_position(hass, tilt_position, entity_id=None):
"""Move to specific tilt position all or specified cover."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_TILT_POSITION] = tilt_position
hass.services.call(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, data)
def stop_cover_tilt(hass, entity_id=None):
"""Stop all or specified cover tilt."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_STOP_COVER_TILT, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for covers."""
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_COVERS)
yield from component.async_setup(config)
@asyncio.coroutine
def async_handle_cover_service(service):
"""Handle calls to the cover services."""
covers = component.async_extract_from_service(service)
method = SERVICE_TO_METHOD.get(service.service)
params = service.data.copy()
params.pop(ATTR_ENTITY_ID, None)
# call method
for cover in covers:
yield from getattr(cover, method['method'])(**params)
update_tasks = []
for cover in covers:
if not cover.should_poll:
continue
update_coro = hass.async_add_job(
cover.async_update_ha_state(True))
if hasattr(cover, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
descriptions = yield from hass.async_add_job(
load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
for service_name in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service_name].get(
'schema', COVER_SERVICE_SCHEMA)
hass.services.async_register(
DOMAIN, service_name, async_handle_cover_service,
descriptions.get(service_name), schema=schema)
return True
class CoverDevice(Entity):
"""Representation a cover."""
# pylint: disable=no-self-use
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
pass
@property
def current_cover_tilt_position(self):
"""Return current position of cover tilt.
None is unknown, 0 is closed, 100 is fully open.
"""
pass
@property
def state(self):
"""Return the state of the cover."""
closed = self.is_closed
if closed is None:
return STATE_UNKNOWN
return STATE_CLOSED if closed else STATE_OPEN
@property
def state_attributes(self):
"""Return the state attributes."""
        data = {}
        current = self.current_cover_position
        if current is not None:
            data[ATTR_CURRENT_POSITION] = current
        current_tilt = self.current_cover_tilt_position
        if current_tilt is not None:
            data[ATTR_CURRENT_TILT_POSITION] = current_tilt
        return data
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.current_cover_position is not None:
supported_features |= SUPPORT_SET_POSITION
if self.current_cover_tilt_position is not None:
supported_features |= (
SUPPORT_OPEN_TILT | SUPPORT_CLOSE_TILT | SUPPORT_STOP_TILT |
SUPPORT_SET_TILT_POSITION)
return supported_features
@property
def is_closed(self):
"""Return if the cover is closed or not."""
raise NotImplementedError()
def open_cover(self, **kwargs):
"""Open the cover."""
raise NotImplementedError()
def async_open_cover(self, **kwargs):
"""Open the cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.open_cover, **kwargs))
def close_cover(self, **kwargs):
"""Close cover."""
raise NotImplementedError()
def async_close_cover(self, **kwargs):
"""Close cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.close_cover, **kwargs))
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
pass
def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.set_cover_position, **kwargs))
def stop_cover(self, **kwargs):
"""Stop the cover."""
pass
def async_stop_cover(self, **kwargs):
"""Stop the cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(ft.partial(self.stop_cover, **kwargs))
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
pass
def async_open_cover_tilt(self, **kwargs):
"""Open the cover tilt.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.open_cover_tilt, **kwargs))
def close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
pass
def async_close_cover_tilt(self, **kwargs):
"""Close the cover tilt.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.close_cover_tilt, **kwargs))
def set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position."""
pass
def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover tilt to a specific position.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.set_cover_tilt_position, **kwargs))
def stop_cover_tilt(self, **kwargs):
"""Stop the cover."""
pass
def async_stop_cover_tilt(self, **kwargs):
"""Stop the cover.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(
ft.partial(self.stop_cover_tilt, **kwargs))
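# A minimal sketch of a concrete cover (illustrative only; DemoCover is a
# hypothetical platform entity, not part of this module):
#
#   class DemoCover(CoverDevice):
#       def __init__(self):
#           self._closed = True
#
#       @property
#       def is_closed(self):
#           return self._closed
#
#       def open_cover(self, **kwargs):
#           self._closed = False
#
#       def close_cover(self, **kwargs):
#           self._closed = True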
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.polling.base_polling import LROBasePolling
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
kql_script_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_get_by_name_request(
kql_script_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_by_name_request_initial(
kql_script_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_rename_request_initial(
kql_script_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/kqlScripts/{kqlScriptName}/rename')
path_format_arguments = {
"kqlScriptName": _SERIALIZER.url("kql_script_name", kql_script_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
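# A minimal sketch of how the request builders above are used (illustrative;
# note the returned HttpRequest still carries a relative URL that the
# operations below format against the client endpoint):
#
#   request = build_get_by_name_request('myScript')
#   # request.method == 'GET'; the URL path is '/kqlScripts/myScript' with
#   # an 'api-version=2021-11-01-preview' query parameter.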
class KqlScriptOperations(object):
"""KqlScriptOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.synapse.artifacts.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
kql_script_name: str,
kql_script: "_models.KqlScriptResource",
**kwargs: Any
) -> Optional["_models.KqlScriptResource"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.KqlScriptResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(kql_script, 'KqlScriptResource')
request = build_create_or_update_request_initial(
kql_script_name=kql_script_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('KqlScriptResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/kqlScripts/{kqlScriptName}'} # type: ignore
@distributed_trace
def begin_create_or_update(
self,
kql_script_name: str,
kql_script: "_models.KqlScriptResource",
**kwargs: Any
) -> LROPoller["_models.KqlScriptResource"]:
"""Creates or updates a KQL Script.
:param kql_script_name: KQL script name.
:type kql_script_name: str
:param kql_script: KQL script.
:type kql_script: ~azure.synapse.artifacts.models.KqlScriptResource
:keyword api_version: Api Version. The default value is "2021-11-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either KqlScriptResource or the result of
cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.KqlScriptResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.KqlScriptResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
kql_script_name=kql_script_name,
kql_script=kql_script,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('KqlScriptResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/kqlScripts/{kqlScriptName}'} # type: ignore
@distributed_trace
def get_by_name(
self,
kql_script_name: str,
**kwargs: Any
) -> "_models.KqlScriptResource":
"""Get KQL script by name.
:param kql_script_name: KQL script name.
:type kql_script_name: str
:keyword api_version: Api Version. The default value is "2021-11-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KqlScriptResource, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.KqlScriptResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KqlScriptResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
request = build_get_by_name_request(
kql_script_name=kql_script_name,
api_version=api_version,
template_url=self.get_by_name.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KqlScriptResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_name.metadata = {'url': '/kqlScripts/{kqlScriptName}'} # type: ignore
def _delete_by_name_initial(
self,
kql_script_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
request = build_delete_by_name_request_initial(
kql_script_name=kql_script_name,
api_version=api_version,
template_url=self._delete_by_name_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_delete_by_name_initial.metadata = {'url': '/kqlScripts/{kqlScriptName}'} # type: ignore
@distributed_trace
def begin_delete_by_name(
self,
kql_script_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Delete KQL script by name.
:param kql_script_name: KQL script name.
:type kql_script_name: str
:keyword api_version: Api Version. The default value is "2021-11-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_by_name_initial(
kql_script_name=kql_script_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_by_name.metadata = {'url': '/kqlScripts/{kqlScriptName}'} # type: ignore
def _rename_initial(
self,
kql_script_name: str,
new_name: Optional[str] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_rename_request = _models.ArtifactRenameRequest(new_name=new_name)
_json = self._serialize.body(_rename_request, 'ArtifactRenameRequest')
request = build_rename_request_initial(
kql_script_name=kql_script_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._rename_initial.metadata['url'],
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
_rename_initial.metadata = {'url': '/kqlScripts/{kqlScriptName}/rename'} # type: ignore
@distributed_trace
def begin_rename(
self,
kql_script_name: str,
new_name: Optional[str] = None,
**kwargs: Any
) -> LROPoller[None]:
"""Rename KQL script.
:param kql_script_name: KQL script name.
:type kql_script_name: str
:param new_name: New name of the artifact.
:type new_name: str
:keyword api_version: Api Version. The default value is "2021-11-01-preview". Note that
overriding this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-11-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._rename_initial(
kql_script_name=kql_script_name,
new_name=new_name,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_rename.metadata = {'url': '/kqlScripts/{kqlScriptName}/rename'} # type: ignore
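# A minimal usage sketch of this operation group (illustrative only: it
# assumes azure-identity is installed, that ArtifactsClient exposes the
# group as `.kql_script`, and that `endpoint` names a real workspace).
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.synapse.artifacts import ArtifactsClient

    client = ArtifactsClient(
        credential=DefaultAzureCredential(),
        endpoint="https://<workspace>.dev.azuresynapse.net",  # placeholder
    )
    # begin_* methods return an LROPoller; result() blocks until the
    # long-running operation finishes (returning None for deletes).
    poller = client.kql_script.begin_delete_by_name("myScript")
    poller.result()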
"""
Unit tests for the parser.
"""
import unittest
class TestParser(unittest.TestCase):
def setUp(self):
self._parser_module = None
def getParserModule(self):
if self._parser_module is None:
from bigrig import parser
self._parser_module = parser
return self._parser_module
def makeStringParser(self, string):
parse_mod = self.getParserModule()
return parse_mod.make_string_parser(string)
def parseString(self, string):
parse_mod = self.getParserModule()
return parse_mod.parse_string(string)
def assertIsNode(self, expected, result, msg=None):
classname = result.__class__.__name__
self.assertEqual(expected, classname, msg=msg)
#
# Literals
#
def testParseObjectLiteralIdentifierKey(self):
string = "{foo:1}"
parser = self.makeStringParser(string)
result = parser.parse_object_literal()
self.assertIsNode('ObjectLiteral', result)
        self.assertEqual(1, len(result.properties))
        prop = result.properties[0]
        self.assertIsNode('ObjectProperty', prop)
        self.assertIsNode('PropertyName', prop.name)
        self.assertIsNode('NumberLiteral', prop.value)
def testParseObjectLiteralStringLiteralKey(self):
string = "{'foo':1}"
parser = self.makeStringParser(string)
result = parser.parse_object_literal()
self.assertIsNode('ObjectLiteral', result)
        self.assertEqual(1, len(result.properties))
        prop = result.properties[0]
        self.assertIsNode('ObjectProperty', prop)
        self.assertIsNode('StringLiteral', prop.name)
        self.assertIsNode('NumberLiteral', prop.value)
def testParseObjectLiteralNumberLiteralKey(self):
string = "{0:1}"
parser = self.makeStringParser(string)
result = parser.parse_object_literal()
self.assertIsNode('ObjectLiteral', result)
        self.assertEqual(1, len(result.properties))
        prop = result.properties[0]
        self.assertIsNode('ObjectProperty', prop)
        self.assertIsNode('NumberLiteral', prop.name)
        self.assertIsNode('NumberLiteral', prop.value)
def testParseObjectLiteralInvalidKey(self):
ParseException = self.getParserModule().ParseException
string = "{function key(){}: value}"
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_object_literal)
def testParseObjectLiteralMultipleProperties(self):
string = '{"foo": "bar", baz: quux, 0: 1}'
parser = self.makeStringParser(string)
result = parser.parse_object_literal()
self.assertIsNode('ObjectLiteral', result)
self.assertEqual(3, len(result.properties))
for i in range(3):
self.assertIsNode('ObjectProperty', result.properties[i])
def testParseArrayLiteral(self):
string = "[1, 2, 3]"
parser = self.makeStringParser(string)
result = parser.parse_array_literal()
self.assertIsNode('ArrayLiteral', result)
        self.assertEqual(3, len(result.elements))
        for i in range(3):
            self.assertIsNode('NumberLiteral', result.elements[i])
def testParseArrayLiteralElision(self):
string = "[1,,2]"
parser = self.makeStringParser(string)
result = parser.parse_array_literal()
self.assertIsNode('ArrayLiteral', result)
        self.assertEqual(3, len(result.elements))
self.assertIsNode('NumberLiteral', result.elements[0])
self.assertIsNode('Elision', result.elements[1])
self.assertIsNode('NumberLiteral', result.elements[2])
def testParseRegExpLiteral(self):
string = "/a*/gi"
parser = self.makeStringParser(string)
result = parser.parse_regexp_literal()
self.assertIsNode('RegExpLiteral', result)
self.assertEquals("/a*/", result.pattern)
self.assertEquals("gi", result.flags)
string = "/"
parser = self.makeStringParser(string)
ParseException = self.getParserModule().ParseException
self.assertRaises(ParseException, parser.parse_regexp_literal)
string = '/*./gi'
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_regexp_literal)
#
# Expressions
#
def testParsePrimaryExpressions(self):
testCases = {
'parse-this': ('this', 'ThisNode'),
'parse-null': ('null', 'NullNode'),
'parse-true': ('true', 'TrueNode'),
'parse-false': ('false', 'FalseNode'),
'parse-ident': ('someIdentifier', 'Name'),
'parse-array': ('[1,2,3]', 'ArrayLiteral'),
'parse-obj': ('{foo:bar}', 'ObjectLiteral'),
'parse-regex': ('/^foo$/g', 'RegExpLiteral'),
}
for test_name, (string, expected) in testCases.iteritems():
parser = self.makeStringParser(string)
result = parser.parse_primary_expression()
fmt_args = (test_name, expected, result.__class__.__name__)
msg = '%s did not parse an %s; got %s' % fmt_args
self.assertIsNode(expected, result, msg=msg)
def testParseMemberExpressionDotProperty(self):
string = "foo.bar"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('DotProperty', result)
def testParseMemberExpressionBracketPropertyStringLiteralKey(self):
string = "foo['bar']"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('BracketProperty', result)
def testParseMemberExpressionBracketPropertyIdentifierKey(self):
string = "foo[bar]"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('BracketProperty', result)
def testParseMemberExpressionComplexProperties(self):
string = "foo.baz['bar']"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('BracketProperty', result)
self.assertIsNode('DotProperty', result.object)
def testParseMemberExpressionCall(self):
string = "foo.bar('bar')"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('CallExpression', result)
self.assertIsNode('DotProperty', result.expression)
def testParseMemberExpressionComplexCall(self):
string = "foo.bar[baz]('bar')"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('CallExpression', result)
self.assertIsNode('BracketProperty', result.expression)
self.assertIsNode('DotProperty', result.expression.object)
def testParseMemberExpressionCallMultipleArguments(self):
string = 'foo.bar(baz, quux, false)'
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('CallExpression', result)
self.assertEqual(3, len(result.arguments))
def testParseNewExpressionNoArguments(self):
string = "new foo"
parser = self.makeStringParser(string)
result = parser.parse_new_expression()
self.assertIsNode('NewExpression', result)
self.assertIsNode('Name', result.expression)
self.assertEqual(None, result.arguments)
def testParseNewExpressionArguments(self):
string = "new foo(bar)"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('NewExpression', result)
self.assertEqual(1, len(result.arguments))
def testParseNewExpressionComplicated1(self):
string = "new Something(argument1, argument2).method()[property]"
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('BracketProperty', result)
self.assertIsNode('CallExpression', result.object)
self.assertIsNode('DotProperty', result.object.expression)
self.assertIsNode('NewExpression', result.object.expression.object)
def testParseNewExpressionComplicated2(self):
string = 'new Something.Or.Other(argument)'
parser = self.makeStringParser(string)
result = parser.parse_new_expression()
self.assertIsNode('NewExpression', result)
self.assertIsNode('DotProperty', result.expression)
def testParseNewExpressionComplicated3(self):
string = 'new new Something()'
parser = self.makeStringParser(string)
result = parser.parse_new_expression()
self.assertIsNode('NewExpression', result)
self.assertEqual(None, result.arguments)
self.assertIsNode('NewExpression', result.expression)
def testParseMemberExpressionNewComplicated4(self):
string = 'new new Something().Or'
parser = self.makeStringParser(string)
result = parser.parse_new_expression()
self.assertIsNode('NewExpression', result)
self.assertEqual(None, result.arguments)
self.assertIsNode('DotProperty', result.expression)
self.assertIsNode('NewExpression', result.expression.object)
self.assertEqual([], result.expression.object.arguments)
def testParseMemberExpressionFunctionExpression(self):
string = 'function(){;}'
parser = self.makeStringParser(string)
result = parser.parse_member_expression()
self.assertIsNode('FunctionExpression', result)
def testParseUnaryExpression(self):
testCases = {
'parse-not': ('!a', 'UnaryOperation'),
'parse-negate': ('-a', 'UnaryOperation'),
'parse-positive': ('+a', 'UnaryOperation'),
'parse-bitnot': ('~a', 'UnaryOperation'),
'parse-delete': ('delete a', 'DeleteOperation'),
'parse-void': ('void a', 'VoidOperation'),
'parse-typeof': ('typeof a', 'TypeofOperation'),
'parse-prefix-count-plus': ('++a', 'PrefixCountOperation'),
'parse-prefix-count-minus': ('--a', 'PrefixCountOperation'),
}
for test_name, (string, expected) in testCases.iteritems():
parser = self.makeStringParser(string)
result = parser.parse_unary_expression()
fmt_args = (test_name, expected, result.__class__.__name__)
msg = '%s did not parse an %s; got %s' % fmt_args
self.assertIsNode(expected, result, msg=msg)
def testParsePostfixExpression(self):
testCases = [
'(!a)++',
'(a+b)++',
'd--',
'1--',
]
for testCase in testCases:
parser = self.makeStringParser(testCase)
result = parser.parse_unary_expression()
self.assertIsNode('PostfixCountOperation', result)
def testParseBinaryOperatorExpressionPrecedence(self):
string = "3 + 4 * 5"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
self.assertIsNode('BinaryOperation', result.right)
self.assertIsNode('NumberLiteral', result.left)
def testParseBinaryOperatorParenthesesPrecedence1(self):
string = "(3 + 4) * 5"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
self.assertIsNode('BinaryOperation', result.left)
self.assertIsNode('NumberLiteral', result.right)
def testParseBinaryOperatorParenthesesPrecedence2(self):
string = "a + (b - c)"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
self.assertEqual(u'+', result.op)
self.assertIsNode('BinaryOperation', result.right)
self.assertEqual(u'-', result.right.op)
def testParseBinaryOperatorEqualPrecedence(self):
string = "a + b - c"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
self.assertEqual(u'-', result.op)
self.assertIsNode('BinaryOperation', result.left)
self.assertEqual(u'+', result.left.op)
def testParseBinaryOperatorMultiplePrecedence(self):
string = "3 + 4 * 5 + 6"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
self.assertIsNode('BinaryOperation', result.left)
self.assertIsNode('NumberLiteral', result.right)
        self.assertEqual('+', result.left.op)
        self.assertIsNode('BinaryOperation', result.left.right)
        self.assertEqual('*', result.left.right.op)
def testParseBinaryOperatorComparison(self):
string = "3 + 4 && 5 + 6"
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
        self.assertEqual('&&', result.op)
self.assertIsNode('BinaryOperation', result.left)
self.assertIsNode('BinaryOperation', result.right)
def testParseAssignmentExpression(self):
testCases = [
'a = b',
'a |= b',
'a ^= b',
'a &= b',
'a <<= b',
'a >>= b',
            'a >>>= b',
'a += b',
'a -= b',
'a *= b',
'a /= b',
'a %= b',
'a = b = c',
'a = b += c',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_assignment_expression()
self.assertIsNode('Assignment', result)
def testParseAssignmentExpressionInvalid(self):
testCases = [
'1 = 2',
'"a" ^= c',
'null = 2',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_assignment_expression)
def testParseConditionalExpression(self):
string = 'a ? b:c'
parser = self.makeStringParser(string)
result = parser.parse_conditional_expression()
self.assertIsNode('Conditional', result)
def testParseConditionalInvalid(self):
testCases = [
'a ? b',
'a ?:b',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_conditional_expression)
def testParseCommaExpression(self):
testCases = [
'a, b, c',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_expression()
self.assertIsNode('BinaryOperation', result)
def testParseExpression(self):
testCases = {
'call-functionexpression': ('(function() { return {};})();', 'CallExpression'),
}
for test_name, (string, expected) in testCases.iteritems():
parser = self.makeStringParser(string)
result = parser.parse_expression()
fmt_args = (test_name, expected, result.__class__.__name__)
msg = '%s did not parse an %s; got %s' % fmt_args
self.assertIsNode(expected, result, msg=msg)
#
# Statements
#
def testParseVariableStatement(self):
string = "var foo = bar, baz;"
parser = self.makeStringParser(string)
result = parser.parse_variable_statement()
self.assertIsNode('VariableStatement', result)
        self.assertEqual(2, len(result.declarations))
def testParseIfStatement(self):
testCases = [
'if (x);',
'if (x); else ;',
'if (x) return; else if (y) print(y);',
'if (x) { print(x); return; }',
'if (x) {;} else {;}',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_if_statement()
self.assertIsNode('IfStatement', result)
    def testParseIfStatementInvalid(self):
testCases = [
'if (x) else y;',
'if x return;',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_if_statement)
def testParseWhileStatement(self):
testCases = [
'while (true);',
'while (x < y) print(x);',
'while (x < y) { print(x); print(y); }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_while_statement()
self.assertIsNode('WhileStatement', result)
    def testParseWhileStatementInvalid(self):
testCases = [
'while x {;}',
'while (true)',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_while_statement)
def testParseDoWhileStatement(self):
testCases = [
'do print(x); while(true);',
'do { print(x); } while(x);',
'do { print(x); x += 1; } while(x < y);',
'do;while(true);',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_do_while_statement()
self.assertIsNode('DoWhileStatement', result)
    def testParseDoWhileStatementInvalid(self):
testCases = [
'do;while true',
'do;while(true)',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_do_while_statement)
def testParseWithStatement(self):
testCases = [
'with (x) { a = true; }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_with_statement()
self.assertIsNode('WithStatement', result)
def testParseWithStatementInvalid(self):
testCases = [
'with x { a = true; }',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_with_statement)
def testParseForStatement(self):
testCases = [
'for (;;);',
'for (var i=0,j=0;;) {}',
'for ((x in b); c; u) {}',
            'for (;x in b;) {}',
            'for (;x in b;) { for (var a in b) print(a); }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_for_statement()
self.assertIsNode('ForStatement', result)
def testParseForInStatement(self):
testCases = [
'for (var x in b);',
'for(x in b) { print(x); }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_for_statement()
self.assertIsNode('ForInStatement', result)
def testParseReturnStatement(self):
testCases = [
'return /* comment */;',
'return label;',
'return\nlabel;',
'return a + b;',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_return_statement()
self.assertIsNode('ReturnStatement', result)
def testParseContinueStatement(self):
testCases = [
'continue /* comment */;',
'continue label;',
'continue\nlabel;',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_continue_statement()
self.assertIsNode('ContinueStatement', result)
def testParseBreakStatement(self):
testCases = [
'break /* comment */;',
'break label;',
'break\nlabel;',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_break_statement()
self.assertIsNode('BreakStatement', result)
def testParseLabelledStatement(self):
string = 'foo: bar;'
parser = self.makeStringParser(string)
result = parser.parse_expression_statement()
self.assertIsNode('LabelledStatement', result)
def testParseCaseClause(self):
testCases = [
'case foo:;',
'case bar: baz;',
'case 0:;',
'default: return;',
'case "b": doSomething(); break;',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_case_clause()
self.assertIsNode('CaseClause', result)
def testParseCaseClauseInvalid(self):
testCases = [
'case:;',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_case_clause)
def testParseSwitchStatement(self):
testCases = [
'switch(n) { case 1: break; case 2: break; default: print(n)}',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_switch_statement()
self.assertIsNode('SwitchStatement', result)
def testParseThrowStatement(self):
testCases = [
'throw a;',
'throw a + 5',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_throw_statement()
self.assertIsNode('Throw', result)
def testParseThrowStatementInvalid(self):
testCases = [
'throw;',
'throw\na;',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_throw_statement)
def testParseTryStatement(self):
testCases = [
'try { ; } catch(e) { ; }',
'try { ; } catch(e) { ; } finally { ; }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_try_statement()
self.assertIsNode('TryStatement', result)
def testParseTryStatementInvalid(self):
testCases = [
'try {;}',
'try { ; } catch {}',
]
ParseException = self.getParserModule().ParseException
for string in testCases:
parser = self.makeStringParser(string)
self.assertRaises(ParseException, parser.parse_try_statement)
def testParseFunctionDeclaration(self):
testCases = [
            'function foo() { return 5; }',
            'function bar(a) { ; }',
'function baz(a,b,c) {;}',
'function baz(a,b,c) { function foo() {;} }',
]
for string in testCases:
parser = self.makeStringParser(string)
result = parser.parse_function_declaration()
self.assertIsNode('FunctionDeclaration', result)
#
# General parsing
#
def testValidPrograms(self):
testPrograms = [
'do s; while(e);',
'for (x in b);',
'for (;;);',
'for (var i=0,j=0;;) {}',
'for ((x in b); c; u) {}',
'for (;x in b;) {}',
'continue /* comment */;',
'continue label;',
'try { ; } catch(e) { ; }',
'try { ; } catch(e) { ; } finally { ; }',
'(function(){ console.log("Valid!"); })()',
]
ParseException = self.getParserModule().ParseException
for program in testPrograms:
try:
                self.parseString(program)
except ParseException:
self.fail('Valid program "%s" failed to parse' % program)
def testInvalidPrograms(self):
testPrograms = [
'for (x in b; c; u) {}',
'throw\n',
'throw\n;',
'throw;',
'if (a > b)\nelse c = d',
'function(){ console.log("Invalid!"); }',
]
ParseException = self.getParserModule().ParseException
for program in testPrograms:
try:
self.parseString(program)
self.fail('Invalid program "%s" parsed without error' % program)
except ParseException:
continue
def testSemicolonInsertion(self):
testPrograms = {
'test-continue-noexpr': ('continue\n', 'continue;'),
'test-multiline-primaries': ('x\ny\nz', 'x;y;z;'),
'test-multiline-assignment': ('x=1\ny=2\nz=3', 'x=1;y=2;z=3'),
'test-return-newline-expr1': ('return\n1;', 'return;1;'),
'test-return-newline-expr2': ('return\na+b', 'return;a+b;'),
'test-multiline-prefix': ('a = b\n++c', 'a=b;++c;'),
'test-multiline-call': ('a = b + c\n(d + e)', 'a = b + c(d + e);'),
}
for test_name, (string, expected) in testPrograms.iteritems():
parser = self.makeStringParser(string)
result = parser.parse_program()
parser = self.makeStringParser(expected)
expected = parser.parse_program()
self.assertEqual(len(expected.statements), len(result.statements))
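# Standard unittest entry point so the suite can be run directly with
# `python <this file>.py` (filename illustrative).
if __name__ == '__main__':
    unittest.main()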
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
The GPO Reference Aggregate Manager v2, showing how to implement
the GENI AM API version 2. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
# Note: This AM uses SFA authorization to check that the caller
# has appropriate credentials to make the call. If this AM is used in
# conjunction with the policy-based authorization capability (in gcf.geni.auth)
# then this code needs to only extract expiration times from the credentials
# which can be done using the gcf.sfa.credential module
from __future__ import absolute_import
import base64
import datetime
import dateutil.parser
import dateutil.tz
import logging
import os
import string
import uuid
import xml.dom.minidom as minidom
import xmlrpclib
import zlib
from .resource import Resource
from .aggregate import Aggregate
from .fakevm import FakeVM
from ... import geni
from ..util.urn_util import publicid_to_urn, URN
from ..util.tz_util import tzd
from ..SecureXMLRPCServer import SecureXMLRPCServer
from ..auth.base_authorizer import *
from .am_method_context import AMMethodContext
# gid.GID is used below to extract the caller URN from the client cert;
# this import path is assumed from the package layout.
from ...sfa.trust import gid
from ...gcf_version import GCF_VERSION
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = 'renewsliver'
CREATESLIVERPRIV = 'createsliver'
DELETESLIVERPRIV = 'deleteslice'
SLIVERSTATUSPRIV = 'getsliceresources'
SHUTDOWNSLIVERPRIV = 'shutdown'
# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = 'geni//gpo//gcf'
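# e.g. a fake VM with id 1 would be advertised roughly as
# geni//gpo//gcf:resource:fakevm_1 (illustrative; the exact form comes
# from publicid_to_urn and the resource's urn() method)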
REFAM_MAXLEASE_DAYS = 365
class Slice(object):
"""A slice has a URN, a list of resources, and an expiration time in UTC."""
def __init__(self, urn, expiration):
self.id = str(uuid.uuid4())
self.urn = urn
self.expiration = expiration
self.resources = dict()
    def getURN(self): return self.urn
def status(self, resources):
"""Determine the status of the sliver by examining the status
of each resource in the sliver.
"""
# If any resource is 'shutdown', the sliver is 'shutdown'
# Else if any resource is 'failed', the sliver is 'failed'
# Else if any resource is 'configuring', the sliver is 'configuring'
# Else if all resources are 'ready', the sliver is 'ready'
# Else the sliver is 'unknown'
rstat = [res.status for res in resources]
if Resource.STATUS_SHUTDOWN in rstat:
return Resource.STATUS_SHUTDOWN
elif Resource.STATUS_FAILED in rstat:
return Resource.STATUS_FAILED
elif Resource.STATUS_CONFIGURING in rstat:
return Resource.STATUS_CONFIGURING
        elif all(s == Resource.STATUS_READY for s in rstat):
            # All resources passed in report a status of ready
            return Resource.STATUS_READY
else:
return Resource.STATUS_UNKNOWN
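# A minimal sketch of the precedence above, using stand-in objects that
# only carry a .status attribute (not real Resource instances):
#
#   class _Stub(object):
#       def __init__(self, status): self.status = status
#   s = Slice('urn:publicid:IDN+geni//gpo//gcf+slice+ex', None)
#   s.status([_Stub(Resource.STATUS_READY), _Stub(Resource.STATUS_FAILED)])
#   # -> Resource.STATUS_FAILED, since 'failed' outranks 'ready'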
# Simple class to hold a sliver urn to be compatible with V3 calls
class Sliver(object):
def __init__(self, urn):
self._urn = urn
def urn(self): return self._urn
class ReferenceAggregateManager(object):
'''A reference Aggregate Manager that manages fake resources.'''
# root_cert is a single cert or dir of multiple certs
# that are trusted to sign credentials
def __init__(self, root_cert, urn_authority, url, **kwargs):
self._url = url
self._api_version = 2
self._am_type = "gcf"
self._slices = dict()
self._agg = Aggregate()
self._agg.add_resources([FakeVM(self._agg) for _ in range(3)])
self._cred_verifier = geni.CredentialVerifier(root_cert)
self._urn_authority = urn_authority
self._my_urn = publicid_to_urn("IDN %s %s %s" % (self._urn_authority, 'authority', 'am'))
self.max_lease = datetime.timedelta(days=REFAM_MAXLEASE_DAYS)
self.logger = logging.getLogger('gcf.am2')
self.logger.info("Running %s AM v%d code version %s", self._am_type, self._api_version, GCF_VERSION)
def GetVersion(self, options):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
self.logger.info("Called GetVersion")
reqver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/request.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
adver = [dict(type="geni",
version="3",
schema="http://www.geni.net/resources/rspec/3/ad.xsd",
namespace="http://www.geni.net/resources/rspec/3",
extensions=[])]
api_versions = dict()
api_versions[str(self._api_version)] = self._url
versions = dict(geni_api=2,
geni_api_versions=api_versions,
geni_request_rspec_versions=reqver,
geni_ad_rspec_versions=adver)
return dict(geni_api=versions['geni_api'],
code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=versions,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
self.logger.info('ListResources(%r)' % (options))
slice_urn = None
if options and 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
# could require list or listnodes?
privileges = ()
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if 'geni_rspec_version' not in options:
# This is a required option, so error out with bad arguments.
self.logger.error('No geni_rspec_version supplied to ListResources.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version was not supplied.')
if 'type' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a type field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a type field.')
if 'version' not in options['geni_rspec_version']:
self.logger.error('ListResources: geni_rspec_version does not contain a version field.')
return self.errorResult(1, 'Bad Arguments: option geni_rspec_version does not have a version field.')
# Look to see what RSpec version the client requested
# Error-check that the input value is supported.
rspec_type = options['geni_rspec_version']['type']
if isinstance(rspec_type, str):
rspec_type = rspec_type.lower().strip()
rspec_version = options['geni_rspec_version']['version']
if rspec_type != 'geni':
self.logger.error('ListResources: Unknown RSpec type %s requested', rspec_type)
return self.errorResult(4, 'Bad Version: requested RSpec type %s is not a valid option.' % (rspec_type))
if rspec_version != '3':
self.logger.error('ListResources: Unknown RSpec version %s requested', rspec_version)
            return self.errorResult(4, 'Bad Version: requested RSpec version %s is not a valid option.' % (rspec_version))
self.logger.info("ListResources requested RSpec %s (%s)", rspec_type, rspec_version)
if 'geni_slice_urn' in options:
slice_urn = options['geni_slice_urn']
if slice_urn in self._slices:
result = self.manifest_rspec(slice_urn)
else:
# return an empty rspec
return self._no_such_slice(slice_urn)
else:
all_resources = self._agg.catalog(None)
available = 'geni_available' in options and options['geni_available']
resource_xml = ""
for r in all_resources:
if available and not r.available:
continue
resource_xml = resource_xml + self.advert_resource(r)
result = self.advert_header() + resource_xml + self.advert_footer()
self.logger.debug("Result is now \"%s\"", result)
# Optionally compress the result
if 'geni_compressed' in options and options['geni_compressed']:
try:
result = base64.b64encode(zlib.compress(result))
except Exception, exc:
import traceback
self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
raise Exception("Server error compressing resource list", exc)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
self.logger.info('CreateSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (CREATESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
creds = self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# Grab the user_urn
user_urn = gid.GID(string=options['geni_true_caller_cert']).get_urn()
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
self.logger.error('Slice %s already exists.', slice_urn)
return self.errorResult(17, 'Slice %s already exists' % (slice_urn))
rspec_dom = None
try:
rspec_dom = minidom.parseString(rspec)
except Exception, exc:
self.logger.error("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
return self.errorResult(1, 'Bad Args: RSpec is unparseable')
# Look at the version of the input request RSpec
# Make sure it is supported
# Then make sure that you return an RSpec in the same format
# EG if both V1 and V2 are supported, and the user gives V2 request,
# then you must return a V2 request and not V1
allresources = self._agg.catalog()
allrdict = dict()
for r in allresources:
if r.available:
allrdict[r.id] = r
# Note: This only handles unbound nodes. Any attempt by the client
# to specify a node is ignored.
resources = dict()
unbound = list()
for elem in rspec_dom.documentElement.getElementsByTagName('node'):
unbound.append(elem)
for elem in unbound:
client_id = elem.getAttribute('client_id')
keys = allrdict.keys()
if keys:
rid = keys[0]
resources[client_id] = allrdict[rid]
del allrdict[rid]
else:
return self.errorResult(6, 'Too Big: insufficient resources to fulfill request')
# determine max expiration time from credentials
# do not create a sliver that will outlive the slice!
expiration = datetime.datetime.utcnow() + self.max_lease
for cred in creds:
credexp = self._naiveUTC(cred.expiration)
if credexp < expiration:
expiration = credexp
newslice = Slice(slice_urn, expiration)
self._agg.allocate(slice_urn, resources.values())
self._agg.allocate(user_urn, resources.values())
for cid, r in resources.items():
newslice.resources[cid] = r.id
r.status = Resource.STATUS_READY
r.available = False
self._slices[slice_urn] = newslice
self.logger.info("Created new slice %s" % slice_urn)
result = self.manifest_rspec(slice_urn)
self.logger.debug('Result = %s', result)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
# The list of credentials are options - some single cred
# must give the caller required permissions.
# The semantics of the API are unclear on this point, so
# this is just the current implementation
def DeleteSliver(self, slice_urn, credentials, options):
'''Stop and completely delete the named sliver, and return True.'''
self.logger.info('DeleteSliver(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (DELETESLIVERPRIV,)
# Note that verify throws an exception on failure.
# Use the client PEM format cert as retrieved
# from the https connection by the SecureXMLRPCServer
# to identify the caller.
try:
self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# Grab the user_urn
user_urn = gid.GID(string=options['geni_true_caller_cert']).get_urn()
# If we get here, the credentials give the caller
# all needed privileges to act on the given target.
if slice_urn in self._slices:
sliver = self._slices[slice_urn]
resources = self._agg.catalog(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not deleted because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
for r in resources:
r.reset()
self._agg.deallocate(slice_urn, None)
self._agg.deallocate(user_urn, None)
del self._slices[slice_urn]
self.logger.info("Sliver %r deleted" % slice_urn)
return self.successResult(True)
else:
return self._no_such_slice(slice_urn)
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.
Return a dict of sliver urn, status, and a list of dicts resource
statuses.'''
# Loop over the resources in a sliver gathering status.
self.logger.info('SliverStatus(%r)' % (slice_urn))
# Note this list of privileges is really the name of an operation
# from the privilege_table in sfa/trust/rights.py
# Credentials will specify a list of privileges, each of which
# confers the right to perform a list of operations.
# EG the 'info' privilege in a credential allows the operations
# listslices, listnodes, policy
privileges = (SLIVERSTATUSPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slices:
theSlice = self._slices[slice_urn]
# Now calculate the status of the sliver
res_status = list()
resources = list()
expiration = theSlice.expiration
# Add UTC TZ, to have an RFC3339 compliant datetime, per the AM API
exp_with_tz = expiration.replace(tzinfo=dateutil.tz.tzutc())
exp_string = exp_with_tz.isoformat()
sliceurn = URN(urn=slice_urn)
sliceauth = sliceurn.getAuthority()
slicename = sliceurn.getName()
slivername = sliceauth + slicename # FIXME: really
# this should have a timestamp of when reserved to be unique over time
        # Translate any illegal punctuation in the sliver name
other = '-.:/'
table = string.maketrans(other, '-' * len(other))
slivername = slivername.translate(table)
for cid, sliver_uuid in theSlice.resources.items():
resource = None
sliver_urn = None
for res in self._agg.resources:
if res.id == sliver_uuid:
self.logger.debug('Resource = %s', str(res))
resources.append(res)
sliver_urn = res.sliver_urn(self._urn_authority, slivername)
# Gather the status of all the resources
# in the sliver. This could be actually
# communicating with the resources, or simply
# reporting the state of initialized, started, stopped, ...
res_status.append(dict(geni_urn=sliver_urn,
geni_status=res.status,
geni_error=''))
self.logger.info("Calculated and returning slice %s status", slice_urn)
result = dict(geni_urn=slice_urn,
geni_status=theSlice.status(resources),
geni_resources=res_status,
geni_expires=exp_string)
return dict(code=dict(geni_code=0,
am_type="gcf2",
am_code=0),
value=result,
output="")
else:
return self._no_such_slice(slice_urn)
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
'''Renew the local sliver that is part of the named Slice
until the given expiration time (in UTC with a TZ per RFC3339).
Requires at least one credential that is valid until then.
Return False on any error, True on success.'''
self.logger.info('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
privileges = (RENEWSLIVERPRIV,)
try:
creds = self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
# All the credentials we just got are valid
if slice_urn in self._slices:
# If any credential will still be valid at the newly
# requested time, then we can do this.
resources = self._agg.catalog(slice_urn)
sliver = self._slices.get(slice_urn)
if sliver.status(resources) == Resource.STATUS_SHUTDOWN:
self.logger.info("Sliver %s not renewed because it is shutdown",
slice_urn)
return self.errorResult(11, "Unavailable: Slice %s is unavailable." % (slice_urn))
requested = dateutil.parser.parse(str(expiration_time), tzinfos=tzd)
# Per the AM API, the input time should be TZ-aware
# But since the slice cred may not (per ISO8601), convert
# it to naiveUTC for comparison
requested = self._naiveUTC(requested)
# Find the minimum allowable expiration based on credential expiration and policy
min_expiration = self.min_expire(creds, self.max_lease)
# if requested > min_expiration,
# If alap, set to min of requested and min_expiration
# Otherwise error
if requested > min_expiration:
if 'geni_extend_alap' in options and options['geni_extend_alap']:
self.logger.info("Got geni_extend_alap: revising slice %s renew request from %s to %s", slice_urn, requested, min_expiration)
requested = min_expiration
else:
self.logger.info("Cannot renew %r: %s past maxlease %s", slice_urn, expiration_time, self.max_lease)
return self.errorResult(19, "Out of range: Expiration %s is out of range (AM policy limits renewals to %s)." % (expiration_time, self.max_lease))
sliver.expiration = requested
return self.successResult(True, requested)
else:
return self._no_such_slice(slice_urn)
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
self.logger.info('Shutdown(%r)' % (slice_urn))
privileges = (SHUTDOWNSLIVERPRIV,)
try:
self._cred_verifier.verify_from_strings(self._server.get_pem_cert(),
credentials,
slice_urn,
privileges,
options)
except Exception, e:
raise xmlrpclib.Fault('Insufficient privileges', str(e))
if slice_urn in self._slices:
resources = self._agg.catalog(slice_urn)
for resource in resources:
resource.status = Resource.STATUS_SHUTDOWN
self.logger.info("Sliver %r shut down" % slice_urn)
return self.successResult(True)
else:
self.logger.info("Shutdown: No such slice: %s.", slice_urn)
return self._no_such_slice(slice_urn)
# Return a slice and list slivers
def decode_urns(self, urns):
slice_urn = urns[0]
if slice_urn not in self._slices:
raise ApiErrorException(AM_API.SEARCH_FAILED,
                                   'Unknown slice "%s"' % (slice_urn))
slice_obj = self._slices[slice_urn]
slivers = [Sliver(sliver_urn) \
for sliver_urn in slice_obj.resources.values()]
return slice_obj, slivers
def successResult(self, value, output=""):
code_dict = dict(geni_code=0,
am_type="gcf2",
am_code=0)
return dict(code=code_dict,
value=value,
output=output)
def _no_such_slice(self, slice_urn):
return self.errorResult(12, 'Search Failed: no slice "%s" found' % (slice_urn))
def errorResult(self, code, output, am_code=None):
code_dict = dict(geni_code=code, am_type="gcf2")
if am_code is not None:
code_dict['am_code'] = am_code
return dict(code=code_dict,
value="",
output=output)
def _naiveUTC(self, dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
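        # Illustrative sketch (values hypothetical): _naiveUTC normalizes a
        # TZ-aware datetime so expirations can be compared directly, e.g.
        # 2017-01-01T12:00:00+05:00 becomes the naive UTC datetime
        # 2017-01-01 07:00:00.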
def advert_resource(self, resource):
tmpl = '''<node component_manager_id="%s"
component_name="%s"
component_id="%s"
exclusive="%s">
<available now="%s"/>
</node>
'''
resource_id = str(resource.id)
resource_exclusive = str(False).lower()
resource_available = str(resource.available).lower()
resource_urn = resource.urn(self._urn_authority)
return tmpl % (self._my_urn,
resource_id,
resource_urn,
resource_exclusive,
resource_available)
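    # Illustrative output of advert_resource (URNs and id below are hypothetical):
    # <node component_manager_id="urn:publicid:IDN+geni:gpo:gcf+authority+am"
    #       component_name="42"
    #       component_id="urn:publicid:IDN+geni:gpo:gcf+node+42"
    #       exclusive="false">
    #   <available now="true"/>
    # </node>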
def advert_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/ad.xsd"
type="advertisement">\n'''
return header
def advert_footer(self):
return '</rspec>\n'
def manifest_header(self):
header = '''<?xml version="1.0" encoding="UTF-8"?>
<rspec xmlns="http://www.geni.net/resources/rspec/3"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.geni.net/resources/rspec/3 http://www.geni.net/resources/rspec/3/manifest.xsd"
type="manifest">\n'''
return header
def manifest_slice(self, slice_urn):
sliceurn = URN(urn=slice_urn)
sliceauth = sliceurn.getAuthority()
slicename = sliceurn.getName()
slivername = sliceauth + slicename # FIXME: really
# this should have a timestamp of when reserved to be unique over time
        # Translate any illegal punctuation in the sliver name
other = '-.:/'
table = string.maketrans(other, '-' * len(other))
slivername = slivername.translate(table)
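        # e.g. (hypothetical) sliceauth "geni:gpo:gcf" + slicename "myslice"
        # gives "geni:gpo:gcfmyslice", which the translation table rewrites to
        # "geni-gpo-gcfmyslice".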
tmpl = ''' <node client_id="%s"
component_id="%s"
component_manager_id="%s"
sliver_id="%s"/>\n'''
result = ""
for cid, res_uuid in self._slices[slice_urn].resources.items():
resource = None
sliver_urn = None
for res in self._agg.resources:
if res.id == res_uuid:
sliver_urn = res.sliver_urn(self._urn_authority, slivername)
resource_urn = res.urn(self._urn_authority)
result = result + tmpl % (cid, resource_urn, self._my_urn, sliver_urn)
return result
def manifest_footer(self):
return '</rspec>\n'
def manifest_rspec(self, slice_urn):
return self.manifest_header() + \
self.manifest_slice(slice_urn) + \
self.manifest_footer()
def min_expire(self, creds, max_duration=None, requested=None):
"""Compute the expiration time from the supplied credentials,
a max duration, and an optional requested duration. The shortest
time amongst all of these is the resulting expiration.
"""
now = datetime.datetime.utcnow()
expires = [self._naiveUTC(c.expiration) for c in creds]
if max_duration:
expires.append(now + max_duration)
if requested:
requested = self._naiveUTC(dateutil.parser.parse(str(requested), tzinfos=tzd))
# Ignore requested time in the past.
if requested > now:
expires.append(self._naiveUTC(requested))
return min(expires)
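    # Minimal usage sketch (credentials and durations hypothetical): with creds
    # expiring in 5 and 30 days and max_duration=timedelta(days=7),
    # expires == [now+5d, now+30d, now+7d], so min_expire returns now+5d:
    # renewal is bounded by the earliest credential expiration or policy,
    # whichever comes sooner.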
class AggregateManager(object):
"""The public API for a GENI Aggregate Manager. This class provides the
XMLRPC interface and invokes a delegate for all the operations.
"""
def __init__(self, trust_roots_dir, delegate, authorizer=None,
resource_manager=None):
self._trust_roots_dir = trust_roots_dir
self._delegate = delegate
self.logger = logging.getLogger('gcf.am2')
self.authorizer = authorizer
self.resource_manager = resource_manager
def _exception_result(self, exception):
output = str(exception)
self.logger.warning(output)
# XXX Code for no slice here?
return dict(code=dict(geni_code=102,
am_type="gcf2",
am_code=0),
value="",
output=output)
def GetVersion(self, options=dict()):
'''Specify version information about this AM. That could
include API version information, RSpec format and version
information, etc. Return a dict.'''
try:
return self._delegate.GetVersion(options)
except Exception as e:
self.logger.exception("Error in GetVersion:")
return self._exception_result(e)
def ListResources(self, credentials, options):
'''Return an RSpec of resources managed at this AM.
If a geni_slice_urn
is given in the options, then only return resources assigned
to that slice. If geni_available is specified in the options,
then only report available resources. And if geni_compressed
option is specified, then compress the result.'''
args = {}
method = AM_Methods.LIST_RESOURCES_V2
if 'geni_slice_urn' in options:
method = AM_Methods.LIST_RESOURCES_FOR_SLICE_V2
args['slice_urn'] = options['geni_slice_urn']
with AMMethodContext(self, method,
self.logger, self.authorizer,
self.resource_manager,
credentials,
args, options) as amc:
if not amc._error:
amc._result = \
self._delegate.ListResources(credentials, amc._options)
return amc._result
def CreateSliver(self, slice_urn, credentials, rspec, users, options):
"""Create a sliver with the given URN from the resources in
the given RSpec.
Return an RSpec of the actually allocated resources.
users argument provides extra information on configuring the resources
for runtime access.
"""
args = {'slice_urn' : slice_urn, 'rspec' : rspec, 'users' : users}
with AMMethodContext(self, AM_Methods.CREATE_SLIVER_V2,
self.logger, self.authorizer,
self.resource_manager,
credentials,
args, options, resource_bindings=True) as amc:
if not amc._error:
slice_urn = amc._args['slice_urn']
rspec = amc._args['rspec']
users = amc._args['users']
amc._result = self._delegate.CreateSliver(slice_urn,
credentials,
rspec, users,
amc._options)
return amc._result
def DeleteSliver(self, slice_urn, credentials, options):
"""Delete the given sliver. Return true on success."""
args = {'slice_urn' : slice_urn}
with AMMethodContext(self, AM_Methods.DELETE_SLIVER_V2,
self.logger, self.authorizer,
self.resource_manager,
credentials,
args, options) as amc:
if not amc._error:
slice_urn = amc._args['slice_urn']
amc._result = \
self._delegate.DeleteSliver(slice_urn, credentials,
amc._options)
return amc._result
def SliverStatus(self, slice_urn, credentials, options):
'''Report as much as is known about the status of the resources
in the sliver. The AM may not know.'''
args = {'slice_urn' : slice_urn}
with AMMethodContext(self, AM_Methods.SLIVER_STATUS_V2,
self.logger, self.authorizer,
self.resource_manager,
credentials,
args, options) as amc:
if not amc._error:
slice_urn = amc._args['slice_urn']
amc._result = \
self._delegate.SliverStatus(slice_urn, credentials,
amc._options)
return amc._result
def RenewSliver(self, slice_urn, credentials, expiration_time, options):
"""Extend the life of the given sliver until the given
expiration time. Return False on error."""
args = {'slice_urn' : slice_urn, 'expiration_time' : expiration_time}
with AMMethodContext(self, AM_Methods.RENEW_SLIVER_V2,
self.logger, self.authorizer,
self.resource_manager, credentials,
args, options, resource_bindings=True) as amc:
if not amc._error:
slice_urn = amc._args['slice_urn']
expiration_time = amc._args['expiration_time']
amc._result = \
self._delegate.RenewSliver(slice_urn, credentials,
expiration_time, amc._options)
return amc._result
def Shutdown(self, slice_urn, credentials, options):
'''For Management Authority / operator use: shut down a badly
behaving sliver, without deleting it to allow for forensics.'''
args = {'slice_urn' : slice_urn}
with AMMethodContext(self, AM_Methods.SHUTDOWN_V2,
self.logger, self.authorizer,
self.resource_manager,
credentials,
args, options) as amc:
if not amc._error:
slice_urn = amc._args['slice_urn']
amc._result = \
self._delegate.Shutdown(slice_urn, credentials,
amc._options)
return amc._result
class AggregateManagerServer(object):
"""An XMLRPC Aggregate Manager Server. Delegates calls to given delegate,
or the default printing AM."""
def __init__(self, addr, keyfile=None, certfile=None,
trust_roots_dir=None,
ca_certs=None, base_name=None,
authorizer=None, resource_manager=None,
delegate=None):
# ca_certs arg here must be a file of concatenated certs
if ca_certs is None:
raise Exception('Missing CA Certs')
elif not os.path.isfile(os.path.expanduser(ca_certs)):
raise Exception('CA Certs must be an existing file of accepted root certs: %s' % ca_certs)
# Decode the addr into a URL. Is there a pythonic way to do this?
server_url = "https://%s:%d/" % addr
if delegate is None:
delegate = ReferenceAggregateManager(trust_roots_dir, base_name,
server_url)
# FIXME: set logRequests=true if --debug
self._server = SecureXMLRPCServer(addr, keyfile=keyfile,
certfile=certfile, ca_certs=ca_certs)
aggregate_manager = AggregateManager(trust_roots_dir, delegate,
authorizer, resource_manager)
self._server.register_instance(aggregate_manager)
# Set the server on the delegate so it can access the
# client certificate.
delegate._server = self._server
        if base_name is not None:
global RESOURCE_NAMESPACE
RESOURCE_NAMESPACE = base_name
def serve_forever(self):
self._server.serve_forever()
def register_instance(self, instance):
# Pass the AM instance to the generic XMLRPC server,
# which lets it know what XMLRPC methods to expose
self._server.register_instance(instance)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.Module`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import itertools
from absl.testing import parameterized
import six
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import values as distributed_values
from tensorflow.python.eager import def_function
from tensorflow.python.framework import ops
from tensorflow.python.keras import layers
from tensorflow.python.keras import models
from tensorflow.python.module import module
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class TestModuleNaming(test.TestCase):
def test_single_name(self):
mod = module.Module(name="simple")
self.assertEqual(mod.name, "simple")
self.assertEqual(mod.name_scope.name, "simple/")
def test_construct_in_scope(self):
with ops.name_scope("foo"):
mod = module.Module(name="bar")
self.assertEqual(mod.name, "bar")
self.assertEqual(mod.name_scope.name, "foo/bar/")
def test_enters_name_scope_in_call(self):
mod = ReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod(), mod.name_scope.name)
def test_enters_name_scope_in_other_method(self):
mod = ReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
def test_subclassed_module(self):
mod = SubclassedReturnsNameScopeModule()
for _ in range(3):
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
self.assertEqual(mod.alternative_alternative_forward(),
mod.name_scope.name)
def test_submodule_created_late(self):
m = TreeModule()
self.assertEqual(m.name, "tree_module")
self.assertEqual(m.name_scope.name, "tree_module/")
leaf1 = m.new_leaf()
self.assertEqual(leaf1.name, "tree_module")
self.assertEqual(leaf1.name_scope.name, "tree_module/tree_module/")
def test_does_not_evaluate_property_methods(self):
mod = PropertyThrowsWhenCalledModule()
with self.assertRaises(AssertionError):
mod.raise_assertion_error # pylint: disable=pointless-statement
def test_overridden_name_scope(self):
mod = ModuleOverridingNameScope()
self.assertEqual(mod(), mod.name_scope.name)
self.assertEqual(mod.alternative_forward(), mod.name_scope.name)
def test_patched_callable(self):
with ops.name_scope("foo"):
mod = module.Module(name="bar")
mod.foo = get_name_scope
# `foo` is not a method so we do not re-enter the name scope.
self.assertEqual(mod.foo(), "")
def test_property(self):
mod = PropertyModule()
mod.some_property = None, None # None, None for the linter.
getter_scope_name, setter_scope_name = mod.some_property
self.assertEqual(getter_scope_name, "property_module/")
self.assertEqual(setter_scope_name, "property_module/")
def test_property_no_name_scope(self):
mod = PropertyModule()
mod.no_name_scope_property = None, None # None, None for the linter.
getter_scope_name, setter_scope_name = mod.no_name_scope_property
self.assertEqual(getter_scope_name, "")
self.assertEqual(setter_scope_name, "")
def test_invalid_name(self):
msg = ".* is not a valid module name"
with self.assertRaisesRegexp(ValueError, msg):
module.Module(name="$Foo")
def test_modules_not_numbered_in_eager(self):
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
def test_module_numbering_in_graph(self):
with ops.Graph().as_default():
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger/")
self.assertEqual(mod.child.name_scope.name, "badger/badger/")
mod = RecursiveModule(2)
self.assertEqual(mod.name_scope.name, "badger_1/")
self.assertEqual(mod.child.name_scope.name, "badger_1/badger/")
def test_ctor_error_closes_name_scope(self):
with self.assertRaises(ErrorModuleError):
      # If the super constructor is called, a name scope is opened and then an
      # error is thrown. The metaclass should handle this and close the name
      # scope before re-throwing the exception.
ErrorModule(call_super=True)
self.assertEqual("", get_name_scope())
def test_ctor_error_handles_ctor_not_opening_name_scope(self):
with self.assertRaises(ErrorModuleError):
      # If the super ctor is not called then the name scope isn't opened. We
      # need to ensure that this doesn't trigger an exception (e.g. the
      # metaclass trying to __exit__ a non-existent name scope).
ErrorModule(call_super=False)
self.assertEqual("", get_name_scope())
def test_forward_method_closes_name_scope(self):
mod = ErrorModule(call_super=True, raise_in_constructor=False)
with self.assertRaises(ErrorModuleError):
mod()
self.assertEqual("", get_name_scope())
def test_get_attr_doesnt_enter_name_scope(self):
scope_names = []
class GetAttrModule(module.Module):
def __getattr__(self, name):
scope_names.append((name, get_name_scope()))
return super(GetAttrModule, self).__getattr__(name)
mod = GetAttrModule()
with self.assertRaises(AttributeError):
mod.does_not_exist # pylint: disable=pointless-statement
self.assertIn(("does_not_exist", ""), scope_names)
def test_get_attribute_doesnt_enter_name_scope(self):
scope_names = []
class GetAttributeModule(module.Module):
def __getattribute__(self, name):
scope_names.append((name, get_name_scope()))
return super(GetAttributeModule, self).__getattribute__(name)
mod = GetAttributeModule()
with self.assertRaises(AttributeError):
mod.does_not_exist # pylint: disable=pointless-statement
self.assertIn(("does_not_exist", ""), scope_names)
class VariableNamingTest(test.TestCase):
def test_variable_names(self):
mod = RecursiveModule(3)
self.assertEqual(mod.w.name, "badger/mushroom:0")
self.assertEqual(mod.child.w.name, "badger/badger/mushroom:0")
self.assertEqual(mod.child.child.w.name, "badger/badger/badger/mushroom:0")
class VariableTrackingTest(test.TestCase):
def test_variables(self):
m = RecursiveModule(3)
self.assertEqual(m.variables, (m.w, m.child.w, m.child.child.w))
self.assertEqual(m.child.variables, (m.child.w, m.child.child.w))
self.assertEqual(m.child.child.variables, (m.child.child.w,))
def test_trainable_variables(self):
m = RecursiveModule(3)
self.assertEqual(m.trainable_variables,
(m.w, m.child.w, m.child.child.w))
self.assertEqual(m.child.trainable_variables,
(m.child.w, m.child.child.w))
self.assertEqual(m.child.child.trainable_variables, (m.child.child.w,))
def test_trainable_variables_ignores_non_trainable(self):
m = RecursiveModule(3, trainable=False)
self.assertEqual(len(m.trainable_variables), 0)
self.assertEqual(len(m.child.trainable_variables), 0)
self.assertEqual(len(m.child.child.trainable_variables), 0)
def test_supports_distributed_variables(self):
device_map = distributed_values.SingleDeviceMap("/CPU:0")
mirrored = distributed_values.MirroredVariable(
None, device_map, [variables.Variable(1.)],
variables.VariableAggregation.SUM)
tpu = distributed_values.TPUMirroredVariable(
strategy=None,
device_map=device_map,
values=[variables.Variable(42.)],
aggregation=None)
aggregating = distributed_values.AggregatingVariable(
strategy=None, v=variables.Variable(1.), aggregation=None)
m = module.Module()
m.a = mirrored
m.b = tpu
m.c = aggregating
self.assertEqual(m.variables, (mirrored, tpu, aggregating))
class ModuleTrackingTest(test.TestCase):
def test_submodules(self):
m = RecursiveModule(3)
self.assertEqual(list(m.submodules), [m.child, m.child.child])
self.assertEqual(list(m.child.submodules), [m.child.child])
self.assertEqual(list(m.child.child.submodules), [])
def test_non_ctor_submodule(self):
m = TreeModule()
leaf1 = m.new_leaf()
self.assertEqual(set(m.submodules), {leaf1})
leaf2 = m.new_leaf()
self.assertEqual(set(m.submodules), {leaf1, leaf2})
class ForwardMethodsTest(test.TestCase):
def testFunctionType(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertTrue(isinstance(mod.forward, def_function.Function))
self.assertTrue(isinstance(mod.forward_ag, def_function.Function))
def testEntersNameScope_call(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertEqual(mod.forward().numpy(),
b"module_with_function_annotated_call/")
self.assertEqual(mod.forward_ag().numpy(),
b"module_with_function_annotated_call/")
def testEntersNameScope_concreteFunction(self):
mod = ModuleWithFunctionAnnotatedCall()
self.assertEqual(mod.forward.get_concrete_function()().numpy(),
b"module_with_function_annotated_call/")
self.assertEqual(mod.forward_ag.get_concrete_function()().numpy(),
b"module_with_function_annotated_call/")
class AbcTest(test.TestCase):
def testAbstract(self):
msg = "Can't instantiate .* abstract methods"
with self.assertRaisesRegexp(TypeError, msg):
AbstractModule() # pylint: disable=abstract-class-instantiated
def testConcrete(self):
mod = ConcreteModule()
x, scope_name = mod(2.)
self.assertEqual(x, 4.)
self.assertEqual(scope_name, "concrete_module/")
self.assertEqual(get_name_scope(), "")
def get_name_scope():
with ops.name_scope("x") as ns:
return ns[:-2]
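# get_name_scope reports the currently open name scope: entering a fresh scope
# "x" inside "foo/" yields ns == "foo/x/", so ns[:-2] strips the trailing "x/"
# and returns "foo/" (or "" at the top level).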
class ErrorModuleError(Exception):
pass
class ErrorModule(module.Module):
def __init__(self, call_super, raise_in_constructor=True):
if call_super:
super(ErrorModule, self).__init__()
if raise_in_constructor:
raise ErrorModuleError("Deliberate error!")
def __call__(self):
raise ErrorModuleError("Deliberate error!")
class RecursiveModule(module.Module):
def __init__(self, depth, trainable=True):
super(RecursiveModule, self).__init__(name="badger")
with self.name_scope:
self.child = None
if depth > 1:
self.child = RecursiveModule(depth - 1, trainable=trainable)
self.w = variables.Variable(1.0, trainable=trainable, name="mushroom")
@six.add_metaclass(abc.ABCMeta)
class AbstractModule(module.Module):
@abc.abstractmethod
def __call__(self, x):
pass
class ConcreteModule(AbstractModule):
@module.Module.with_name_scope
def __call__(self, x):
return x ** 2, get_name_scope()
class TreeModule(module.Module):
def __init__(self, name=None):
super(TreeModule, self).__init__(name=name)
self._leaves = []
@module.Module.with_name_scope
def new_leaf(self, name=None):
leaf = TreeModule(name=name)
self._leaves.append(leaf)
return leaf
class ReturnsNameScopeModule(module.Module):
@module.Module.with_name_scope
def alternative_forward(self):
return get_name_scope()
@module.Module.with_name_scope
def __call__(self):
return get_name_scope()
class SubclassedReturnsNameScopeModule(ReturnsNameScopeModule):
@module.Module.with_name_scope
def alternative_alternative_forward(self):
return get_name_scope()
class PropertyThrowsWhenCalledModule(module.Module):
@property
def raise_assertion_error(self):
raise AssertionError
class ModuleOverridingNameScope(ReturnsNameScopeModule):
@property
def name_scope(self):
return ops.name_scope("yolo/")
class ModuleWithFunctionAnnotatedCall(module.Module):
@def_function.function(autograph=False)
@module.Module.with_name_scope
def forward(self):
return get_name_scope()
@def_function.function(autograph=True)
@module.Module.with_name_scope
def forward_ag(self):
return get_name_scope()
class PropertyModule(module.Module):
def __init__(self):
super(PropertyModule, self).__init__()
self._setter_scope_name = None
@property
@module.Module.with_name_scope
def some_property(self):
getter_scope_name = get_name_scope()
return getter_scope_name, self._setter_scope_name
@some_property.setter
@module.Module.with_name_scope
def some_property(self, my_property):
self._setter_scope_name = get_name_scope()
@property
def no_name_scope_property(self):
getter_scope_name = get_name_scope()
return getter_scope_name, self._setter_scope_name
@no_name_scope_property.setter
def no_name_scope_property(self, my_property):
self._setter_scope_name = get_name_scope()
NamedPair = collections.namedtuple("NamedPair", ("first", "second"))
mk_index_dict = lambda v: dict(enumerate(v))
class FlattenTest(parameterized.TestCase, test.TestCase):
@parameterized.parameters(lambda v: NamedPair(*v), list, tuple, mk_index_dict)
def test_flatten(self, container_type):
parent = SimpleModule(container_type=container_type)
child = parent.c
self.assertEqual(
list(parent._flatten(recursive=False, predicate=is_member)),
[parent.a[0], parent.a[1], parent.z])
self.assertEqual(
list(parent._flatten(predicate=is_member)),
[parent.a[0], parent.a[1], parent.z, child.a[0], child.a[1], child.z])
def test_attribute_traversal_key(self):
mod = LayerModule()
self.assertEqual(
mod.variables,
mod._trainable_variables + mod._non_trainable_variables + [mod._bonus])
def test_attributes_to_ignore(self):
class DangerousModule(module.Module):
_TF_MODULE_IGNORED_PROPERTIES = frozenset(itertools.chain(
("dangerous_submodule", "dangerous_variable"),
module.Module._TF_MODULE_IGNORED_PROPERTIES
))
mod = DangerousModule()
mod.dangerous_submodule = module.Module()
mod.dangerous_variable = variables.Variable(1.)
mod.normal_variable = variables.Variable(2.)
self.assertEmpty(mod.submodules)
self.assertLen(mod.variables, 1)
self.assertEqual(mod.variables[0], mod.normal_variable)
def test_with_path(self):
mod = module.Module()
mod.w = variables.Variable(1.)
mod.encoder = module.Module()
mod.encoder.w = [({"k": mod.w}, {"k": mod.w})]
mod.decoder = mod.encoder
state_dict = dict(
mod._flatten(with_path=True, predicate=module._is_variable))
self.assertEqual(state_dict,
{("w",): mod.w,
("encoder", "w", 0, 0, "k"): mod.encoder.w[0][0]["k"],
("encoder", "w", 0, 1, "k"): mod.encoder.w[0][1]["k"],
("decoder", "w", 0, 0, "k"): mod.decoder.w[0][0]["k"],
("decoder", "w", 0, 1, "k"): mod.decoder.w[0][1]["k"]},)
def test_module_discover_layer_variable(self):
m = module.Module()
m.a = layers.Dense(1)
m.b = layers.Dense(2)
    # The weights of the layers have not been created yet.
self.assertEmpty(m.variables)
self.assertLen(m.submodules, 2)
inputs = layers.Input((1,))
m.a(inputs)
m.b(inputs)
variable_list = m.variables
self.assertLen(variable_list, 4)
self.assertEqual(variable_list[0], m.a.kernel)
self.assertEqual(variable_list[1], m.a.bias)
self.assertEqual(variable_list[2], m.b.kernel)
self.assertEqual(variable_list[3], m.b.bias)
def test_model_discover_submodule(self):
m = models.Sequential(layers=[layers.Dense(1),
layers.Dense(2)])
self.assertEqual(m.submodules, (m.layers[0], m.layers[1]))
m(layers.Input((1,)))
self.assertLen(m.variables, 4)
class LayerModule(module.Module):
def __init__(self):
super(LayerModule, self).__init__()
self._trainable_variables = [
variables.Variable(1., name="a"),
variables.Variable(2., name="b"),
]
self._non_trainable_variables = [
variables.Variable(3., name="c"),
variables.Variable(4., name="d"),
]
self._bonus = variables.Variable(5., name="e")
@property
def variables(self):
def key_function(name):
indexes = {"_trainable_variables": 0, "_non_trainable_variables": 1}
return indexes.get(name, 2), name
return list(
self._flatten(
predicate=module._is_variable,
attribute_traversal_key=key_function))
class MemberType(object):
"""A simple type to search for."""
pass
class SimpleModule(module.Module):
def __init__(self, create_child=True, container_type=list):
super(SimpleModule, self).__init__()
self.z = MemberType()
self.a = container_type([MemberType(), MemberType()])
if create_child:
self.c = SimpleModule(create_child=False)
is_member = lambda v: isinstance(v, MemberType)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
test.main()
|
|
# This file is part of the ISIS IBEX application.
# Copyright (C) 2012-2016 Science & Technology Facilities Council.
# All rights reserved.
#
# This program is distributed in the hope that it will be useful.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution.
# EXCEPT AS EXPRESSLY SET FORTH IN THE ECLIPSE PUBLIC LICENSE V1.0, THE PROGRAM
# AND ACCOMPANYING MATERIALS ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND. See the Eclipse Public License v1.0 for more details.
#
# You should have received a copy of the Eclipse Public License v1.0
# along with this program; if not, you can obtain a copy from
# https://www.eclipse.org/org/documents/epl-v10.php or
# http://opensource.org/licenses/eclipse-1.0.php
import unittest
from datetime import datetime, timedelta
from hamcrest import *
from mock import Mock
from ArchiverAccess.archive_time_period import ArchiveTimePeriod
from ArchiverAccess.periodic_data_generator import PeriodicDataGenerator
from ArchiverAccess.test_modules.stubs import ArchiverDataStub
class TestPeriodicDataGenerator(unittest.TestCase):
def test_GIVEN_single_initial_values_WHEN_write_values_THEN_first_data_line_is_at_start_time(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
data_generator = self._create_data_generator(expected_start_time, [1], 10)
result = next(data_generator)
assert_that(result[0], is_(expected_start_time))
def test_GIVEN_single_initial_values_WHEN_write_values_THEN_first_value_is_given_value(self):
initial_value_pv1 = 1.23
data_generator = self._create_data_generator(datetime(2017, 1, 1, 1, 2, 3, 0), [initial_value_pv1], 10)
result = next(data_generator)
assert_that(result.values[0], is_(initial_value_pv1))
def test_GIVEN_multiple_initial_values_WHEN_write_values_THEN_values_are_given_values(self):
initial_value_pvs = [1.23, 3.45, 5.67]
data_generator = self._create_data_generator(datetime(2017, 1, 1, 1, 2, 3, 0), initial_value_pvs, 10)
result = next(data_generator)
assert_that(result.values, is_(initial_value_pvs))
def test_GIVEN_initial_values_only_WHEN_write_values_THEN_time_values_are_separated_by_delta_values_are_constant(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 10
data_generator = self._create_data_generator(expected_start_time, [1.23], log_count)
results = []
for value in data_generator:
results.append(value)
assert_that([x[0] for x in results], is_([expected_start_time + timedelta(seconds=delta) for delta in range(log_count)]))
def test_GIVEN_single_change_in_single_values_WHEN_write_values_THEN_value_changes_after_specified_time(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 11
initial_value = 1.23
final_value = -12.24
values = [[expected_start_time + timedelta(seconds=3.5), "pv0", final_value]]
expected_result = [initial_value] * 4 + [final_value] * 7
data_generator = self._create_data_generator(expected_start_time, [initial_value], log_count, values=values)
results = []
for value in data_generator:
results.append(value.values)
assert_that([x[0] for x in results], is_(expected_result))
def test_GIVEN_multiple_changes_in_single_value_some_more_often_than_log_frequency_some_longer_WHEN_write_values_THEN_value_changes_after_specified_time(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 11
initial_value = 1.23
val1 = 6
val2 = 23.2
val3 = -145
val4 = 78.5
val5 = 34.6
values = [
[expected_start_time + timedelta(seconds=3.5), "pv0", val1],
[expected_start_time + timedelta(seconds=3.6), "pv0", val2],
[expected_start_time + timedelta(seconds=4.1), "pv0", val3],
[expected_start_time + timedelta(seconds=6.1), "pv0", val4],
[expected_start_time + timedelta(seconds=7), "pv0", val5]
]
expected_result = [initial_value,
initial_value,
initial_value,
initial_value,
val2,
val3,
val3,
val5,
val5,
val5,
val5]
data_generator = self._create_data_generator(expected_start_time, [initial_value], log_count, values=values)
results = []
for value in data_generator:
results.append(value.values)
assert_that([x[0] for x in results], is_(expected_result))
def test_GIVEN_multiple_changes_in_multiple_values_some_more_often_than_log_frequency_some_longer_WHEN_write_values_THEN_value_changes_after_specified_time(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 11
val0 = 4.3
initial_values = [1.23, 8.4, val0]
val1 = 6
val2 = 23.2
val3 = -145
val4 = 78.5
val5 = 34.6
val6 = 13.6
val7 = 147.6
val8 = 1516.6
values = [
[expected_start_time + timedelta(seconds=3.5), "pv0", val1],
[expected_start_time + timedelta(seconds=3.4), "pv1", val6],
[expected_start_time + timedelta(seconds=3.6), "pv0", val2],
[expected_start_time + timedelta(seconds=4.1), "pv0", val3],
[expected_start_time + timedelta(seconds=6.1), "pv0", val4],
[expected_start_time + timedelta(seconds=7), "pv0", val5],
[expected_start_time + timedelta(seconds=7), "pv1", val7],
[expected_start_time + timedelta(seconds=7.4), "pv1", val8]
]
expected_result = [initial_values,
initial_values,
initial_values,
initial_values,
[val2, val6, val0],
[val3, val6, val0],
[val3, val6, val0],
[val5, val7, val0],
[val5, val8, val0],
[val5, val8, val0],
[val5, val8, val0]]
data_generator = self._create_data_generator(expected_start_time, initial_values, log_count, values=values)
results = []
for value in data_generator:
results.append(value.values)
assert_that(results, is_(expected_result))
def test_GIVEN_string_value_WHEN_write_values_THEN_string_value_used(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 11
initial_value = 1.23
final_value = "Disconnected"
values = [[expected_start_time + timedelta(seconds=3.5), "pv0", final_value]]
expected_result = [initial_value] * 4 + [final_value] * 7
data_generator = self._create_data_generator(expected_start_time, [initial_value], log_count, values=values)
results = []
for value in data_generator:
results.append(value.values)
assert_that([x[0] for x in results], is_(expected_result))
    def test_GIVEN_time_periods_are_commensurate_WHEN_get_generator_twice_THEN_values_from_second_call_do_not_revert_back_to_initial_values(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 11
initial_value = 1.23
final_value = -12.24
values = [
[
[expected_start_time + timedelta(seconds=3.5), "pv0", final_value]
],
[]
]
data_generator, pv_names = self._set_up_data_generator([initial_value], values)
initial_results = []
first_time_period_request = ArchiveTimePeriod(expected_start_time, timedelta(seconds=1), log_count)
for value in data_generator.get_generator(pv_names, first_time_period_request):
initial_results.append(value.values)
results = []
for value in data_generator.get_generator(pv_names, ArchiveTimePeriod(first_time_period_request.end_time, timedelta(seconds=1), 1)):
results.append(value.values)
assert_that(results, is_([[final_value]]))
    def test_GIVEN_time_periods_are_not_commensurate_WHEN_get_generator_twice_THEN_values_from_second_call_revert_to_initial_values(self):
expected_start_time = datetime(2017, 1, 1, 1, 2, 3, 0)
log_count = 6
initial_value = 1.23
final_value = -12.24
values = [
[
[expected_start_time + timedelta(seconds=3.5), "pv0", final_value]
],
[]
]
data_generator, pv_names = self._set_up_data_generator([initial_value], values)
initial_results = []
first_time_period_request = ArchiveTimePeriod(expected_start_time, timedelta(seconds=1), log_count)
for value in data_generator.get_generator(pv_names, first_time_period_request):
initial_results.append(value.values)
results = []
for value in data_generator.get_generator(pv_names, ArchiveTimePeriod(first_time_period_request.end_time + timedelta(seconds=10), timedelta(seconds=1), 1)):
results.append(value.values)
assert_that(results, is_([[initial_value]]))
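    # Helper: wires a PeriodicDataGenerator to stubbed archiver data and
    # returns its generator for a period starting at expected_start_time,
    # sampled once per second for log_count points.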
def _create_data_generator(self, expected_start_time, initial_pv_values, log_count, values=None, archiver_throw_exception_on_initial_values=False):
data_generator, pv_names = self._set_up_data_generator(initial_pv_values, values, archiver_throw_exception_on_initial_values)
return data_generator.get_generator(
pv_names,
ArchiveTimePeriod(expected_start_time, timedelta(seconds=1), log_count))
def _set_up_data_generator(self, initial_pv_values, values=None, archiver_throw_exception_on_initial_values=False):
pv_names = ["pv{0}".format(i) for i in range(len(initial_pv_values))]
initial_pv_values_dict = {}
for name, val in zip(pv_names, initial_pv_values):
initial_pv_values_dict[name] = val
archiver_data = ArchiverDataStub(initial_pv_values_dict, values)
if archiver_throw_exception_on_initial_values:
archiver_data.initial_values = Mock(side_effect=ValueError())
data_generator = PeriodicDataGenerator(archiver_data)
return data_generator, pv_names
|
|
import hashlib
import binascii
import random
import time
from Crypto.Cipher import AES
from Crypto.Util import Counter
rng = random.SystemRandom()
def init_key_generation(keylengthbits):
if keylengthbits < 8:
keylengthbits = 8
elif keylengthbits % 8 != 0:
keylengthbits += ( 8 - keylengthbits % 8)
key = ""
iters = keylengthbits / 8
while iters > 0:
key += format(rng.randint(0,255), '02x')
iters -= 1
return key
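# Example (output is random; the value shown is hypothetical):
#   init_key_generation(128) -> "3fa81c09d2e4b7665a0913cc7d18ef02"
#   (32 hex chars = 128 bits). Requests under 8 bits are raised to 8, and
#   sizes that are not a multiple of 8 are rounded up to the next whole byte.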
def do_xor_on_hex(hexstring1, hexstring2):
v1 = 0
v2 = 0
index = 0
hexstr1nums = []
hexstr2nums = []
finalnums = []
xorlen = len(hexstring1)
finalxor = ""
if xorlen != len(hexstring2) or xorlen % 2 != 0:
print "ERROR!"
return None
while v1 <= (xorlen - 2):
hexstr1nums.append(int(hexstring1[(v1):(v1+2)],16))
v1 += 2
while v2 <= (xorlen - 2):
hexstr2nums.append(int(hexstring2[(v2):(v2+2)],16))
v2 += 2
while index < (xorlen / 2):
finalnums.append(hexstr1nums[index] ^ hexstr2nums[index])
index += 1
for i in finalnums:
finalxor += format(i, '02x')
return finalxor
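# Example: do_xor_on_hex("ff00", "0f0f") == "f00f" (ff^0f = f0, 00^0f = 0f).
# Both inputs must be hex strings of the same, even length.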
def generate_header_file(masterpassword, flen, fname, hver):
filelength = str(flen)
headername = str(fname) + ".header"
headerversion = format(hver, '02x')
if len(headerversion) != 2:
print "BAD HVER, ABORT"
return None
headercontents = ""
salt_to_use = init_key_generation(128)
#print "Salt used: " + salt_to_use
master_key = init_key_generation(512)
#print "Master key: " + master_key
encrypted_key = do_xor_on_hex(master_key, binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterpassword, salt_to_use, 100000)))
#print "Encrypted key: " + encrypted_key
headerfile = open(headername, "wb")
headercontents = headerversion + salt_to_use + encrypted_key + filelength
headerfile.write(headercontents)
headerfile.close()
return master_key, salt_to_use
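# Header layout written above and parsed back by read_header_file:
#   [0:2]    header version (2 hex chars)
#   [2:34]   salt (32 hex chars = 128 bits)
#   [34:162] master key XORed with PBKDF2-HMAC-SHA512(password, salt)
#            (128 hex chars = 512 bits)
#   [162:]   plaintext length in hex characters, as a decimal string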
def read_header_file(masterpassword, fname):
headername = str(fname) + ".header"
headerfile = open(headername, "rb")
totalheader = headerfile.read()
header_version = int(totalheader[0:2],16)
header_salt = totalheader[2:34]
header_encrypted_key = totalheader[34:162]
header_master_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterpassword, header_salt, 100000)), header_encrypted_key)
header_length = totalheader[162:]
print "Salt used: " + header_salt
print "Master key: " + header_master_key
print "Encrypted key: " + header_encrypted_key
print "File length: " + header_length
headerfile.close()
return header_master_key, header_length, header_version, header_salt
def edit_header_file(oldpassword, newpassword, fname):
headername = str(fname) + ".header"
headerfile = open(headername, "rb")
totalheader = headerfile.read()
headerfile.close()
newheadercontents = ""
header_version = totalheader[0:2]
header_salt = totalheader[2:34]
header_encrypted_key = totalheader[34:162]
header_master_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', oldpassword, header_salt, 100000)), header_encrypted_key)
header_new_encrypted_key = do_xor_on_hex(binascii.hexlify(hashlib.pbkdf2_hmac('sha512', newpassword, header_salt, 100000)), header_master_key)
header_length = totalheader[162:]
newheadercontents = header_version + header_salt + header_new_encrypted_key + header_length
headerfile = open(headername, "wb")
headerfile.write(newheadercontents)
headerfile.close()
return "Done"
def hex_transpose(hexstr):
v1 = 0
newhex1 = ""
newhex2 = ""
hexlen = len(hexstr)
while v1 < (hexlen):
newhex1 += hexstr[v1+1] + hexstr[v1]
v1 += 2
newhex2 = newhex1[(hexlen/2):] + newhex1[0:(hexlen/2)]
return newhex2
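# Example: hex_transpose("abcd") swaps each pair ("ab","cd" -> "ba","dc" =>
# "badc"), then exchanges the two halves: "badc" -> "dc" + "ba" == "dcba".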
def advance_cipher(inithash):
new_hash = hashlib.sha512(inithash).hexdigest()
transposed_hash = hex_transpose(new_hash)
hash_of_hash = hashlib.sha512(transposed_hash).hexdigest()
return new_hash, hash_of_hash
def advance_cipher_2(inithash, ptextfb):
new_hash = hashlib.sha512(inithash).hexdigest()
transposed_hash = hex_transpose(new_hash) + hex_transpose(ptextfb)
hash_of_hash = hashlib.sha512(transposed_hash).hexdigest()
return new_hash, hash_of_hash
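# These two functions generate the keystream: new_hash becomes the next chain
# state, and hash_of_hash is the 512-bit block XORed against the data.
# advance_cipher_2 additionally folds a transposed copy of the previous
# plaintext chunk's hash into the state, i.e. a plaintext-feedback variant.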
def encrypt_file_1(filename, masterpassword):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
current_key_to_xor = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, uss = generate_header_file(masterpassword, file_length, filename, 1)
file_padding = 128 - (file_length % 128)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 128)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 128
times_to_iterate_total = times_to_iterate
current_key = masterkey
chunk_list = []
while times_to_iterate > 0:
#print "START KEY: ", current_key
current_key, current_key_to_xor = advance_cipher(current_key)
#print "KEY AFTER CA: ", current_key
#print "KEY TO XOR: ", current_key_to_xor
chunk_list.append(do_xor_on_hex(file_to_encrypt_hex[startlen:startlen+128],current_key_to_xor))
startlen += 128
times_to_iterate -= 1
if times_to_iterate % 1000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, uss = read_header_file(masterpassword, filename)
if dtv == 1:
is_correct = decrypt_file_1(filename, True, dtk, dtl)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_1(filename, testmode, decryption_master_key, decryption_length):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
current_key_to_xor_decrypt = ""
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 128
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key_decrypt = decryption_master_key
chunk_list_decrypt = []
while times_to_iterate_decrypt > 0:
current_key_decrypt, current_key_to_xor_decrypt = advance_cipher(current_key_decrypt)
chunk_list_decrypt.append(do_xor_on_hex(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+128],current_key_to_xor_decrypt))
startlen_decrypt += 128
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 1000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
def encrypt_file_2(filename, masterpassword):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
current_key_to_xor = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, iv = generate_header_file(masterpassword, file_length, filename, 2)
file_padding = 128 - (file_length % 128)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 128)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 128
times_to_iterate_total = times_to_iterate
current_key = masterkey
iv_hash = hashlib.sha512(iv).hexdigest()
current_plaintext_hash_feedback = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterkey, iv_hash, 100000))
chunk_list = []
while times_to_iterate > 0:
#print "START KEY: ", current_key
current_key, current_key_to_xor = advance_cipher_2(current_key, current_plaintext_hash_feedback)
#print "KEY AFTER CA: ", current_key
#print "KEY TO XOR: ", current_key_to_xor
current_plaintext_chunk = file_to_encrypt_hex[startlen:startlen+128]
current_plaintext_hash_feedback = hashlib.sha512(current_plaintext_chunk).hexdigest()
chunk_list.append(do_xor_on_hex(file_to_encrypt_hex[startlen:startlen+128],current_key_to_xor))
startlen += 128
times_to_iterate -= 1
if times_to_iterate % 1000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, div = read_header_file(masterpassword, filename)
if dtv == 2:
is_correct = decrypt_file_2(filename, True, dtk, dtl, div)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_2(filename, testmode, decryption_master_key, decryption_length, decryption_iv):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
current_key_to_xor_decrypt = ""
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 128
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key_decrypt = decryption_master_key
decryption_iv_hash = hashlib.sha512(decryption_iv).hexdigest()
current_plaintext_hash_feedback_decipher = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', decryption_master_key, decryption_iv_hash, 100000))
chunk_list_decrypt = []
while times_to_iterate_decrypt > 0:
current_key_decrypt, current_key_to_xor_decrypt = advance_cipher_2(current_key_decrypt, current_plaintext_hash_feedback_decipher)
current_deciphered_chunk = do_xor_on_hex(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+128],current_key_to_xor_decrypt)
chunk_list_decrypt.append(current_deciphered_chunk)
current_plaintext_hash_feedback_decipher = hashlib.sha512(current_deciphered_chunk).hexdigest()
startlen_decrypt += 128
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 1000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
def encrypt_file_3(filename, masterpassword, encmethod):
output_filename = filename + ".crypt"
file_to_encrypt = open(filename, "rb")
file_to_output = open(output_filename, "wb")
file_to_output_hex = ""
startlen = 0
file_to_encrypt_bin = file_to_encrypt.read()
file_to_encrypt_hex = binascii.hexlify(file_to_encrypt_bin)
file_length = len(file_to_encrypt_hex)
masterkey, iv = generate_header_file(masterpassword, file_length, filename, encmethod)
file_padding = 32 - (file_length % 32)
while file_padding > 0:
if file_padding >= 2:
file_to_encrypt_hex += format(rng.randint(0,255), '02x')
file_padding -= 2
else:
file_to_encrypt_hex += "0"
file_padding -= 1
file_checksum = hashlib.sha512(file_to_encrypt_hex[0:file_length]).hexdigest()
file_to_encrypt_hex += file_checksum
file_length = len(file_to_encrypt_hex)
file_to_encrypt.close()
print "Times to iterate (W/chk): " + str(file_length / 32)
print "Encrypted file checksum: ", file_checksum
times_to_iterate = file_length / 32
times_to_iterate_total = times_to_iterate
current_key = hashlib.sha256(masterkey).digest()
iv_hash = hashlib.sha512(iv+masterkey).hexdigest()
real_iv_to_use = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', masterkey, iv_hash, 10))
real_iv_to_use = binascii.unhexlify(real_iv_to_use[:32])
chunk_list = []
if encmethod == 3:
cipher = AES.new(current_key, AES.MODE_CBC, real_iv_to_use)
elif encmethod == 4:
icv = int(binascii.hexlify(real_iv_to_use),16)
ctr = Counter.new(128, initial_value=icv)
cipher = AES.new(current_key, AES.MODE_CTR, counter=ctr)
elif encmethod == 5:
cipher = AES.new(current_key, AES.MODE_CFB, real_iv_to_use)
elif encmethod == 6:
cipher = AES.new(current_key, AES.MODE_OFB, real_iv_to_use)
while times_to_iterate > 0:
current_plaintext_chunk = binascii.unhexlify(file_to_encrypt_hex[startlen:startlen+32])
chunk_list.append(binascii.hexlify(cipher.encrypt(current_plaintext_chunk)))
startlen += 32
times_to_iterate -= 1
if times_to_iterate % 15000 == 0:
print "Encryption Progress: ", (times_to_iterate_total - times_to_iterate) / float(times_to_iterate_total) * 100.0, "%"
#print file_to_output_hex
file_to_output_hex = "".join(chunk_list)
file_to_output.write(binascii.unhexlify(file_to_output_hex))
file_to_output.close()
dtk, dtl, dtv, div = read_header_file(masterpassword, filename)
if dtv == 3 or dtv == 4 or dtv == 5 or dtv == 6:
is_correct = decrypt_file_3(filename, True, dtk, dtl, div, dtv)
if is_correct == "File decrypted, checksum OK":
return "Encryption Done and Verified"
else:
return "ERROR!"
else:
return "BAD HEADER ERROR!"
def decrypt_file_3(filename, testmode, decryption_master_key, decryption_length, decryption_iv, encmethod):
filename_to_decrypt = filename + ".crypt"
file_to_decrypt = open(filename_to_decrypt, "rb")
file_to_decrypt_bin = file_to_decrypt.read()
file_to_decrypt_hex = binascii.hexlify(file_to_decrypt_bin)
file_to_decrypt.close()
file_to_decrypt_output_hex = ""
real_file_to_decrypt_output_hex = ""
decrypt_checksum = ""
checksum_ok = False
startlen_decrypt = 0
times_to_iterate_decrypt = len(file_to_decrypt_hex) / 32
times_to_iterate_decrypt_total = times_to_iterate_decrypt
current_key = hashlib.sha256(decryption_master_key).digest()
iv_hash = hashlib.sha512(decryption_iv+decryption_master_key).hexdigest()
real_iv_to_use = binascii.hexlify(hashlib.pbkdf2_hmac('sha512', decryption_master_key, iv_hash, 10))
real_iv_to_use = binascii.unhexlify(real_iv_to_use[:32])
chunk_list_decrypt = []
if encmethod == 3:
cipher = AES.new(current_key, AES.MODE_CBC, real_iv_to_use)
elif encmethod == 4:
icv = int(binascii.hexlify(real_iv_to_use),16)
ctr = Counter.new(128, initial_value=icv)
cipher = AES.new(current_key, AES.MODE_CTR, counter=ctr)
elif encmethod == 5:
cipher = AES.new(current_key, AES.MODE_CFB, real_iv_to_use)
elif encmethod == 6:
cipher = AES.new(current_key, AES.MODE_OFB, real_iv_to_use)
while times_to_iterate_decrypt > 0:
current_deciphered_chunk = cipher.decrypt(binascii.unhexlify(file_to_decrypt_hex[startlen_decrypt:startlen_decrypt+32]))
chunk_list_decrypt.append(binascii.hexlify(current_deciphered_chunk))
startlen_decrypt += 32
times_to_iterate_decrypt -= 1
if times_to_iterate_decrypt % 15000 == 0:
print "Decryption Progress: ", (times_to_iterate_decrypt_total - times_to_iterate_decrypt) / float(times_to_iterate_decrypt_total) * 100.0, "%"
file_to_decrypt_output_hex = "".join(chunk_list_decrypt)
decrypt_checksum = file_to_decrypt_output_hex[-128:]
real_file_to_decrypt_output_hex = file_to_decrypt_output_hex[0:int(decryption_length)]
print "Decrypted file checksum (read): ", decrypt_checksum
print "Decrypted file checksum (calculated): ", hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest()
if decrypt_checksum == hashlib.sha512(real_file_to_decrypt_output_hex).hexdigest():
checksum_ok = True
if testmode == False:
file_to_decrypt_output = open(filename, "wb")
file_to_decrypt_output.write(binascii.unhexlify(real_file_to_decrypt_output_hex))
file_to_decrypt_output.close()
if checksum_ok == True:
return "File decrypted, checksum OK"
else:
return "Wrong key, corrupted file or not a valid container"
print "Encryption Test Program r3.0"
print "by fabrizziop"
print "MIT licence"
what_to_do = int(raw_input("1: Encrypt, 2: Decrypt, 3: Change Password: "))
if what_to_do == 1:
mpas = str(raw_input("Master Password: "))
fnm = str(raw_input("File Name: "))
print "Methods:"
print "For AES methods, key is SHA-256 of master key, IV is 100k rounds SHA-512-HMAC PKBDF2 of master key and SHA-512 of master key+salt"
print "1: SHA512 stream, transpose, SHA512 again, then XOR"
print "2: SHA512 stream, transpose, append transposed SHA512 of plaintext chunk, SHA512 again, then XOR"
print "3: AES-256-CBC"
print "4: AES-256-CTR"
print "5: AES-256-CFB"
print "6: AES-256-OFB"
method = int(raw_input("Pick a method: "))
if method == 1:
print encrypt_file_1(fnm, mpas)
elif method == 2:
print encrypt_file_2(fnm, mpas)
elif method == 3 or method == 4 or method == 5 or method == 6:
print encrypt_file_3(fnm, mpas, method)
elif what_to_do == 2:
mpas = str(raw_input("Master Password: "))
fnm = str(raw_input("File Name: "))
dmk, dl, dv, dciv = read_header_file(mpas, fnm)
if dv == 1:
print "Method: SHA512 stream, transpose, SHA512 again, then XOR"
print decrypt_file_1(fnm, False, dmk, dl)
elif dv == 2:
print "Method: SHA512 stream, transpose, append transposed SHA512 of plaintext chunk, SHA512 again, then XOR"
print decrypt_file_2(fnm, False, dmk, dl, dciv)
elif dv == 3:
print "3: AES-256-CBC"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 4:
print "3: AES-256-CTR"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 5:
print "3: AES-256-CFB"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
elif dv == 6:
print "3: AES-256-OFB"
print decrypt_file_3(fnm, False, dmk, dl, dciv, dv)
else:
print "FILE NOT COMPATIBLE"
elif what_to_do == 3:
opas = str(raw_input("Old Password: "))
npas = str(raw_input("New Password: "))
fnm = str(raw_input("File Name: "))
print edit_header_file(opas, npas, fnm)
time.sleep(3)
|
|
# -*- coding: utf-8 -*-
'''Kaushik Tandon
July 21 2018 - August 13 2018
This program scrapes Wikipedia articles related to Artificial Intelligence. The goal is to build a knowledge graph
of topics related to AI and categorize them using The Brane's Knowledge Classification System of tags. This program
successfully creates nodes and links in a CSV file with a false positive rate of less than 10%.
'''
import requests
import csv
from BeautifulSoup import BeautifulSoup
import sys
import codecs
import re
import time
reload(sys)
sys.setdefaultencoding('utf8')
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
#Input files
database = 'database.csv' #Entire DB - currently the August backup
terms_to_collect_file = 'database_terms.csv' #Terms to gather table
avoid_terms_file = 'avoid.txt' #Terms to avoid table
avoid_categories_file = 'avoid_categories.txt' #categories that should not be scraped
#Output files
category_file = 'categories.txt' #Generated file of categories scraped
created_CSV_file = 'scrape.csv' #Created file after scraping
#Create a representation of a category in order to easily store and access important data
class Category:
def __init__(self,name,url,sub_categories,linked_pages,level,category_num):
self.name = name
        self.url = url
        self.sub_categories = sub_categories
        self.linked_pages = linked_pages
self.level = level
self.category_num = category_num
def getURL():
    ''' Ask the user which category URL to scrape - default is the AI category
Returns:
URL as string
'''
    url = raw_input("Enter URL of Wikipedia category page to scrape (or enter for default): ")
if len(url) < 2 or 'wikipedia' not in url:
url = 'https://en.wikipedia.org/wiki/Category:Artificial_intelligence'
#url = 'https://en.wikipedia.org/wiki/Category:Game_artificial_intelligence'
return url
def getLinksFromCategoryPage(page):
''' Given a category page, this method can extract the pages and the subcategories on each page
Example: https://en.wikipedia.org/wiki/Category:Artificial_intelligence should return 2 arrays, one with 326 pages and one with 37 category titles
Args:
page: The URL of the page to extract pages/subcategories from
Returns:
Two arrays - one with list of page urls, one with list of category urls
'''
#Page must be of form Category:Name
pages = []
sub_categories = []
soup_html = getHTML(page)
#Extract pages
a = soup_html.findAll('div',{'class': 'mw-category-group'})
for temp in a:
pageNames = extractPageNames(temp)
for pageName in pageNames:
ind1 = pageName.find('(')
ind2 = pageName.find('P')
ind3 = pageName.find("C")
ind4 = pageName.find(')')
num = bool(re.search(r'\d', pageName)) #Number in pageName
#Trying to catch pages of type (5 C, 40 P)
if(num and ind1 >= 0 and ind4 > 0 and (ind2 > 0 or ind3 > 0)):
continue
#Remove weird characters
pageName = ''.join([i if ord(i) < 128 else '' for i in pageName])
if(len(pageName) > 0):
pages.append('https://en.wikipedia.org/wiki/' + str(pageName.strip()))
#Check for additional pages
c = soup_html.find('div',{'id': 'mw-pages'})
if(c != None and len(c.findAll('a')) > 2):
elemToCheck = c.findAll('a')[1]
if(str(elemToCheck.text).strip().lower() == 'next page'):
more_page = 'https://en.wikipedia.org' + str(elemToCheck.get('href'))
additional_pages = extractAdditionalPages(more_page)
for page in additional_pages:
pages.append(page)
#Look for subcategories
b = soup_html.findAll('a',{'class': 'CategoryTreeLabel CategoryTreeLabelNs14 CategoryTreeLabelCategory'})
for sub in b:
sub = str(sub)
index = sub.find("Category:")
name = sub[index:sub.find('"',index)]
sub_categories.append('https://en.wikipedia.org/wiki/' + name.strip())
return pages,sub_categories
def extractAdditionalPages(page):
''' Helper method for getLinksFromCategoryPage() to handle pages which have more than 200 pages linked
Example url: https://en.wikipedia.org/w/index.php?title=Category:Artificial_intelligence&pagefrom=Leaf+Project%0AThe+Leaf+%28AI%29+Project#mw-pages
Args:
page: URL of 'next page' category page being scraped
Returns:
List of urls of pages in category on specific page
'''
additional_pages = list()
soup_html = getHTML(page)
a = soup_html.findAll('div',{'class': 'mw-category-group'})
for temp in a:
pageNames = extractPageNames(temp)
for pageName in pageNames:
#Trying to catch pages of type (5 C, 40 P) and ignore them
ind1 = pageName.find('(')
ind2 = pageName.find('P')
ind3 = pageName.find("C")
ind4 = pageName.find(')')
num = bool(re.search(r'\d', pageName))
if(num and ind1 >= 0 and ind4 > 0 and (ind2 > 0 or ind3 > 0)):
continue
pageName = ''.join([i if ord(i) < 128 else '' for i in pageName])
if(len(pageName) > 0):
additional_pages.append('https://en.wikipedia.org/wiki/' + str(pageName.strip()))
return additional_pages
def getHTML(url):
''' Uses BeautifulSoup to get the HTML for a page
Args:
url: URL of page to get HTML for
Returns:
Beautiful Soup object with HTML
'''
try:
r = requests.get(url)
return BeautifulSoup(r.text)
    except Exception:
print("Couldn't get HTML for: " + url)
def getTitle(soup_html):
''' Uses BeautifulSoup html to get the title of the article
Args:
soup_html: Beautiful Soup object with HTML (returned by getHTML())
Returns:
title of article or "Error"
'''
if(len(soup_html.findAll("h1", {"id": "firstHeading"})) == 0):
return "Error"
txt = soup_html.findAll("h1", {"id": "firstHeading"})[0].getText()
txt = ''.join([i if ord(i) < 128 else '-' for i in txt])
return txt
def extractSeeAlso(soup_html):
''' Uses BeautifulSoup html to get the see also categories from a Wikipedia article
Args:
soup_html: Beautiful Soup object with HTML (returned by getHTML())
Returns:
list of names of articles in the see also category (or empty list)
'''
seeAlso = list()
section = soup_html.find('span', id='See_also')
if section != None:
wrongUL = True
section = section.parent.findNext('ul')
count = 0
while(wrongUL):
count = count + 1
for litag in section.findAll('a', href=True):
if litag.get('href') == None and wrongUL:
continue
elif 'wiki' not in str(litag.get('href')) and wrongUL:
continue
else:
wrongUL = False
name = litag.text
if name == None:
continue
name = str(name).strip()
if('page does not exist' in name):
continue
if name in seeAlso:
continue
else:
seeAlso.append(name)
if(wrongUL):
section = section.parent.findNext('ul')
if(count == 5):
break
return seeAlso
def extractCategories(soup_html):
''' Uses BeautifulSoup html to get the categories of a Wikipedia article
Args:
soup_html: Beautiful Soup object with HTML (returned by getHTML())
Returns:
list of names of categories (or empty list)
'''
categories = []
a = soup_html.find('div',{'class': 'mw-normal-catlinks'})
if a != None:
for litag in a.findAll('li'):
categories.append(str(litag.text))
return categories
def extractReferences(soup_html):
''' Uses BeautifulSoup html to get the references of a Wikipedia article
Args:
soup_html: Beautiful Soup object with HTML (returned by getHTML())
Returns:
list of names of references (or empty list)
'''
references = []
a = soup_html.find('ol',{'class': 'references'})
if a != None:
for litag in a.findAll('li'):
references.append(str(litag.text))
return references
def isStub(soup_html):
''' Uses BeautifulSoup html to determine whether Wikipedia article is a stub
Args:
soup_html: Beautiful Soup object with HTML (returned by getHTML())
Returns:
True if article is a stub (should be skipped)
'''
a = soup_html.find('table',{'class': 'metadata plainlinks stub'})
if a != None:
return True
return False
def loadAvoidTerms():
''' Load the terms to avoid table from a predefined text file
Returns:
list of terms to avoid (lowercase)
'''
with open(avoid_terms_file) as f:
content = f.readlines()
content = [x.strip().lower() for x in content]
return content
def loadAvoidCategories():
''' Load the categories to avoid from a predefined text file
Returns:
list of categories to avoid (lowercase)
'''
with open(avoid_categories_file) as f:
content = f.readlines()
content = [x.strip().lower() for x in content]
return content
def loadGatherTerms():
''' Load the terms to gather table from a predefined csv file
Returns:
dictionary with key being lower case word (and plural versions) and value being the database ID
'''
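    # Illustrative example (the exact CSV layout is assumed): a line such as
    # "42,machine learning,ml" yields {'machine learning': '42',
    # 'machine learnings': '42', 'machine learninges': '42', 'ml': '42', ...}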
terms = dict()
with open(terms_to_collect_file) as f:
for line in f.readlines():
words = line.split(',')
temp_id = str(words[0])
for word in words:
word = word.strip().lower()
word2 = word + 's'
word3 = word + 'es'
if(len(word) > 0 and (word[0] < '0' or word[0] > '9')):
terms[word] = temp_id
terms[word2] = temp_id
terms[word3] = temp_id
return terms
def splitAndLower(words):
''' Returns a list of words ignoring parentheses and splitting to lowercase
Args:
words: list of words to handle
Returns:
list of words where each word is lowercase
'''
words = words.replace("("," ")
words = words.replace(")"," ")
words = words.lower().strip()
all_words = words.split(" ")
for i in range(len(all_words)):
all_words[i] = all_words[i].strip()
return all_words
def numCapitalsInTitle(title):
''' Determine the number of capitals in an article title
Args:
title: article title to check
Returns:
integer number of capitals in the title
'''
title = title.replace("("," ").replace(")"," ").strip()
all_words = title.split(" ")
numCap = 0
for word in all_words:
if len(word) > 0 and word[0].isupper():
numCap = numCap + 1
return numCap
def validArticleTitle(article_title,avoid_terms,gather_terms):
''' Determine if a Wikipedia article title is valid, or if the article should be skipped
Args:
article_title: title to check
avoid_terms: list of terms to avoid
gather_terms: dict of terms to gather
Returns:
True if article title is valid
'''
#check for partial match
words_in_title = splitAndLower(article_title)
for word in words_in_title:
if word.lower() in avoid_terms:
return False
#All individual words cannot be capital
allCapital = True
individualWords = article_title.split(" ")
if(len(individualWords) > 1):
for word in individualWords:
word = word.strip()
if word[0].islower():
allCapital = False
if word.lower() in gather_terms or word[:-1].lower() in gather_terms or (word+'s').lower() in gather_terms or (word+'es').lower() in gather_terms:
return True
if word[0] == '(' and word[1].islower():
allCapital = False
if(allCapital):
return False
#avoid_terms only contains lower case, so convert article_title to lower case for checking
article_title = article_title.lower()
#check for full title
if article_title in avoid_terms:
return False
#check for plural title
if article_title + 's' in avoid_terms or article_title + 'es' in avoid_terms:
return False
return True
def validCategoryName(name,invalidNames):
    ''' Determines whether a category name is valid and should be scraped
Args:
name: string name of category
invalidNames: list of invalid names loaded from file
Returns:
True if valid name, False if not
'''
invalidWords = ['researchers','video games','competitions','comic','film','history','fiction']
if name in invalidNames:
return False
for word in invalidWords:
        if word in name.lower() or word + 's' in name.lower():
return False
return True
#This is needed since the getText() method in beautiful soup returns some messy data here
def extractTextFromParagraph(paragraph):
''' Extract actual text from a paragraph element
Args:
paragraph: Beautiful Soup paragraph element
Returns:
string containing text in paragraph
'''
paragraph = str(paragraph)
string = ''
i = 0
while(i < len(paragraph)):
c = str(paragraph[i])
#Skip until end of tag
if c == '<':
i = paragraph.find('>',i)
elif c == '&':
if((paragraph.find(';',i+1)) != -1):
i = paragraph.find(';',i+1)
            #I actually want the 2nd occurrence, else I am left with the source number. However there may be a legitimate ;
if((paragraph.find(';',i+1)) != -1):
i = paragraph.find(';',i+1)
#Skip until end of bracket
elif c == '[' and paragraph.find(']',i) != -1:
i = paragraph.find(']',i)
#Good character
else:
            string += c
i = i + 1
#Replace all weird characters (mainly hyphens)
string =''.join([i if ord(i) < 128 else '-' for i in string])
return string.replace("--","-").strip()
def extractLinksFromParagraph(paragraph):
''' Extract any links in the paragraph in order to check for matches later
Args:
paragraph: Beautiful Soup paragraph element
Returns:
list of links in the paragraph
'''
titles = list()
a = (paragraph.findAll('a'))
for link in a:
link = str(link)
if('href' not in link):
continue
ind = link.find('>') + 1
if(ind == 0):
continue
text = link[ind:link.find('<',ind)]
titles.append(text.strip())
return titles
def extractPageNames(tags):
''' Helper method to extract the list of pages from a category page
Args:
tags: Beautiful Soup div element
Returns:
List of names of pages
'''
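    # The raw HTML string is scanned for the text between each 'title=...>' and
    # the following '<' (fragile, but matches Wikipedia's category markup here).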
names = []
tags = str(tags)
index = tags.find('>',tags.find('title='))
while index != -1 and index < len(tags) and tags.find('<',index) != -1:
names.append(tags[index+1:tags.find('<',index)])
index = tags.find('>',tags.find('title=',index))
return names
def getPotentialFirstNoun(paragraph, article_title):
''' Extract potential nouns to look at from the paragraph of the Wikipedia article
Args:
paragraph: Wikipedia article first paragraph
article_title: title of Wikipedia article
Returns:
        list of candidate noun phrases from the first sentence (possibly empty)
'''
nouns = list()
first_sentence = paragraph[0:paragraph.find('.')]
second_half = first_sentence#[len(article_title) + 1:]
if(second_half == None or len(second_half) <= 1):
return nouns
if(second_half[0] == '('):
second_half = second_half[second_half.find(')') + 2:]
if('(' in second_half):
second_half = second_half[0:second_half.find('(')] + second_half[second_half.find(')')+1:]
words = second_half.split(' ')
ind = 0
for word in words:
        if ('-' in word and 'human' not in word and 'machine' not in word and 'computer' not in word):
#words.remove(word)
#word1 = word.split('-')[0]
#word2 = word.split('-')[1]
#words.insert(ind,word1)
#words.insert(ind,word2)
continue
elif(',' in word):
#fix comma
words.remove(word)
word = word[:-1]
words.insert(ind,word)
ind = ind + 1
emptyList = list()
#Don't bother returning anything important since these verbs aren't there
if 'is' not in words and 'are' not in words and 'refer' not in words and 'refers' not in words and 'consist' not in words and 'consists' not in words and 'was' not in words and 'has' not in words:
return emptyList
for i in range(len(words)):
if(i <= 25):
ind1 = words[i].find('(')
ind2 = words[i].find(')')
if(ind1 != -1 and ind2 != -1):
continue
#nouns.append(words[i][ind1+1:ind2])
else:
nouns.append(words[i].strip())
#Return 1 word and 2 word phrases since some nouns in terms to gather table are 2 words
if(len(words) > 2):
for i in range(len(words) - 1):
if(i <= 25):
nouns.append(words[i].strip() + " " + words[i+1].strip())
return nouns
#Returns ID or '-1' if the title matches a database term
def database_match(article_title):
''' Determine if a Wikipedia article title is already in the database
Args:
article_title: title to check
Returns:
String ID if in database, '-1' if not
'''
#First load the terms
with open(database, 'r+') as f:
reader = csv.reader(f)
for row in reader:
database_id = row[0]
value = row[4]
            if article_title.lower().strip() == value.lower().strip():
return str(database_id)
return '-1'
def database_lookup(id):
    ''' Gets the name of the node with the given ID
Args:
id: database id
Returns:
Name of node, or 'Not found'
'''
with open(database, 'r+') as f:
reader = csv.reader(f)
for row in reader:
database_id = row[0]
value = row[4]
if str(database_id) == str(id):
return str(value)
return 'Not found'
def is_cluster(id):
''' Determine if the given id is a cluster
Args:
id: database id to check
Returns:
True if is cluster, False if not
'''
with open(database, 'r+') as f:
reader = csv.reader(f)
for row in reader:
if(len(row) >= 12):
database_id = row[0]
cl = str(row[12]).strip().lower()
if str(database_id) == str(id):
if cl == 'true':
return True
return False
def csv_match(article_title):
''' Determine if a Wikipedia article title matches any created node in the CSV file
Args:
article_title: title to check
Returns:
string id of node if matches created node, or '-1'
'''
with open(created_CSV_file,'r+') as f:
reader = csv.reader(f)
for row in reader:
if(str(row[0]) == 'CN'):
database_id = str(row[1])
title = row[2]
if article_title.lower() == title.lower():
return database_id
return '-1'
def create_link(columnB,columnC,isCluster, otherTitle):
''' Adds a link between 2 nodes to the CSV file
Args:
columnB: ID from database or csv that categorizes the column C node
columnC: ID from database that is categorized by the column B node
isCluster: Whether the column B node is a cluster
otherTitle: Noun/Title being used to verify accuracy
'''
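    # Illustrative row (values assumed): ['CL', '12', '345',
    # 'is categorised as', 'categorises', 'Machine learning', 'algorithm']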
if(not (linkExistsInCSV(columnB,columnC))):
with open(created_CSV_file, 'a+') as csvfile:
writer = csv.writer(csvfile,lineterminator = '\n')
if isCluster:
writer.writerow(['CL',str(columnB),str(columnC),'is categorised as','categorises',str(database_lookup(columnB)),str(otherTitle)])
else:
writer.writerow(['CL',str(columnB),str(columnC),'is related to','is related to',str(database_lookup(columnB)),str(otherTitle)])
def create_node(ID,title,description,noun,url):
''' Creates a node with given ID in the CSV file
Args:
ID: ID of node to create
title: name of node to create
        description: paragraph of node from Wikipedia article
noun: noun being used to categorize to help verify accuracy
url: url of Wikipedia article to help verify accuracy
Returns:
True if node is created, False if node already has been created
'''
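    # Illustrative row (values assumed): ['CN', '345', 'Backpropagation',
    # 'description', '<first paragraph>', '', 'algorithm', '<article url>']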
if(csv_match(title) == '-1'): #Node not already in CSV
with open(created_CSV_file, 'a+') as csvfile:
writer = csv.writer(csvfile,lineterminator = '\n')
writer.writerow(['CN',str(ID),title,'description',description,"",str(noun),str(url)])
return True
else:
print(title + " already exists " + str(ID))
return False
def linkExistsInCSV(columnB,columnC):
''' Determine if a link between 2 nodes already exists
Args:
columnB: 2nd column of CSV file ID - used to categorize the columnC node
columnC: 3rd column of CSV file ID - is categorized by the columnB node
Returns:
        True if link exists, False if it doesn't
'''
with open(created_CSV_file,'r+') as f:
reader = csv.reader(f)
for row in reader:
if(str(row[0]) == 'CL'):
b = str(row[1])
c = str(row[2])
if b == columnB and c == columnC:
return True
return False
def main():
#Load some things that may be needed later
myTime = time.time()
avoid_terms = loadAvoidTerms()
gather_terms = loadGatherTerms()
avoid_categories = loadAvoidCategories()
every_ever_category = []
urls = list()
#create files so that they exist
file = open(created_CSV_file, 'w+')
file.close()
file2 = open(category_file, 'w+')
file2.close()
#Prompt user for category url to start at
start_category = str(getURL())
#Default layer of AI is 0
init_layer = 0
cat_num = 1
#Build Category array - contains all categories. Will be used to get all URLs
category_name = start_category[start_category.find('Category:')+9:].strip().replace("_"," ").lower()
new_urls, sub_categories = getLinksFromCategoryPage(start_category)
every_ever_category.append(Category(category_name,start_category,sub_categories,new_urls,init_layer,cat_num))
with open(category_file,'r+') as f:
for current_category in every_ever_category:
#Don't want to go too far past AI
if(current_category.level >= 3):
continue
else:
for sub in current_category.sub_categories:
category_name = str(sub[sub.find('Category:')+9:]).strip().replace("_"," ").lower()
if(not validCategoryName(category_name,avoid_categories)):
continue
next_urls, next_categories = getLinksFromCategoryPage(sub)
layer = current_category.level + 1
append = True
if(len(next_urls) == 0 and len(next_categories) == 0):
continue
elif(len(next_urls) == 0 and layer == 3):
continue
#Check if category already appended -> don't want to append twice
for cat in every_ever_category:
if cat.name == category_name:
append = False
break
if(append):
cat_num = cat_num + 1
every_ever_category.append(Category(category_name,sub,next_categories,next_urls,layer,cat_num))
f.write(category_name + "\n")
print category_name, layer,cat_num
f.close()
#Load list of urls
for current_category in every_ever_category:
category_urls = current_category.linked_pages
for url in category_urls:
urls.append(url)
print len(every_ever_category)
#Start scraping a certain page
for i in every_ever_category:
print "Category:" + i.name + " has " + str(len(i.linked_pages)) + " pages "
#No longer needed, trying to save memory since there were some issues when running on the entire thing
del every_ever_category
num_articles = 0
id_count = 1
num_invalid = 0
for url in urls:
print url, id_count
soup = getHTML(url)
if(soup == None):
continue
#Don't bother if is stub
if(isStub(soup)):
print(url," is stub\n")
continue
#get title and first paragraph
article_title = getTitle(soup)
if article_title == 'Error':
continue
paragraph = ""
titles_in_paragraph = list()
every = soup.find('div',{'class': 'mw-parser-output'})
if every == None:
print("Error on: ",url)
continue
else:
every = every.findAll('p')
#Don't know which paragraph is actual - most likely 1st or 2nd
for p in every:
potentialParagraph = extractTextFromParagraph(p)
titles_in_paragraph = extractLinksFromParagraph(p)
#If the title is in the paragraph, then it is most likely legitimate
if article_title.lower() in potentialParagraph.lower():
paragraph = potentialParagraph
break
elif '(' in article_title and ')' in article_title:
if(article_title[:article_title.find('(')].strip().lower() in potentialParagraph.lower()):
paragraph = potentialParagraph
break
else:
flag = False
all_words = splitAndLower(article_title)
for w in all_words:
if not flag and w.lower().strip() in potentialParagraph.lower():
paragraph = potentialParagraph
flag = True
if flag:
break
num_articles = num_articles + 1
#Wikipedia error - should be a category, but not plural, ie social robot vs social robots
if('social' in article_title.lower() +'s' and 'robot' in article_title.lower()):
article_title = article_title + 's'
#Really annoying if there's latex in the paragraph, so just skipping those pages
if('{\displaystyle' in paragraph or 'alt=' in paragraph):
paragraph = ""
titles_in_paragraph = list()
#Determine if the article title is already in the database
database_id = database_match(article_title)
node_created = False
valid_title = True #If in database it is a valid title
if database_id == '-1':
#If the title is valid, we should try creating a node
if validArticleTitle(article_title,avoid_terms,gather_terms) and len(paragraph) > 0:
valid_title = True
database_id = str(id_count)
else:
num_invalid = num_invalid + 1
valid_title = False
else:
node_created = True
#Attempt to create and categorize a node if the title is valid
if(valid_title):
firstNouns = getPotentialFirstNoun(paragraph, article_title)
valid_noun = False
appeared = False
detected_noun = ""
            invalid_nouns = ['information','field','extraction','vocabulary','corpus','translation','programming','software','tree','system','data','technology',
                             'framework','language','device','network','activity','branch','approaches','business','way','area','domain','robot','study','studies',
                             'use','university','college','interface']
#Try to categorize by paragraph nouns
if(not valid_noun):
num_words_to_look_at = 6
noun_index = 0
#Attempt to categorize by noun and terms to gather table
for index, noun in enumerate(firstNouns):
noun = ' ' + str(noun).strip() + ' '
                if not appeared and noun in (' is ', ' are ', ' refer ', ' refers ', ' consist ', ' consists ', ' was ', ' has '):
appeared = True
detected_noun = noun.strip()
noun_index = index
noun = noun.strip()
#Don't care about 'was'
if appeared and detected_noun == 'was':
appeared = False
print("skipping since doesn't exist anymore")
break
#Skipping these for now
                if appeared and noun in ('metric', 'measurement', 'measure'):
appeared = False
print("skipping since type of metric/measurement/measure")
break
                #Valid verb found, can exit loop
if appeared:
break
#Look at first nouns from the index after the verb was detected
noun_subset = firstNouns[noun_index:]
if(len(noun_subset) > num_words_to_look_at):
noun_subset = noun_subset[:num_words_to_look_at + 1]
#Valid verb, so look for the noun
if(appeared):
for index in range(len(noun_subset)-1):
bigNoun = False
#Check what the next word is classified as -> helps make 2 word terms to gather work as well as ensure things like 'software company' are classified as company
noun = noun_subset[index].lower().strip()
next_noun = noun_subset[index+1].lower().strip()
#2 word match
if((noun + ' ' + next_noun) in gather_terms):
noun = noun + ' ' + next_noun
bigNoun = True
if not bigNoun:
if(next_noun in gather_terms and index != len(noun_subset)-2):
continue
elif(next_noun in gather_terms and index == len(noun_subset)-2):
if(firstNouns[noun_index+index+2] in gather_terms):
noun = firstNouns[noun_index+index+2]
else:
noun = next_noun
if noun in gather_terms and not valid_noun and appeared:
#Create node if it doesn't exist
if(not node_created):
database_id = str(id_count)
modifyID = create_node(database_id,article_title,paragraph,noun,url)
if(modifyID):
id_count = id_count + 1
else:
database_id = csv_match(article_title)
num_articles = num_articles - 1
node_created = True
#Create link if node exists
if(node_created):
term_id = gather_terms[noun]
isCluster = is_cluster(term_id)
create_link(term_id,database_id,isCluster,noun)
valid_noun = True
#If can't categorize by noun, attempt to categorize by title if there is only one capital in the word. Don't categorize by title if the noun is in invalid_nouns
if(not valid_noun and not node_created and numCapitalsInTitle(article_title) <= 1):
words_in_title = article_title.split(" ")
for title_word in words_in_title:
#Ignore '(' and ')' in title
if('(' in title_word and ')' in title_word):
title_word = title_word[title_word.find('(')+1:title_word.find(')')]
#Ignore "was company" or "was software company" as they no longer exist
if(not('was' in firstNouns and 'company' in firstNouns)):
if title_word.lower() in gather_terms and title_word.lower() not in invalid_nouns and title_word.lower()[:-1] not in invalid_nouns:
t_id = gather_terms[title_word.lower()]
#Create a node if node does not exist
if(not node_created):
modifyID = create_node(database_id,article_title,paragraph,title_word.lower(),url)
if(modifyID):
id_count = id_count + 1
else:
database_id = csv_match(article_title)
num_articles = num_articles - 1
node_created = True
#Create a link if there is a node
if(node_created):
isCluster = is_cluster(t_id)
create_link(t_id,database_id,isCluster,title_word.lower())
valid_noun = True
#Valid node and categorization -> Can look at see also, categories, and links in paragraph
if(valid_noun and node_created):
see_also_titles = extractSeeAlso(soup)
#Node must either be in database or csv file now
database_id = database_match(article_title)
if(database_id == '-1'):
database_id = csv_match(article_title)
#Check for matches with see also titles
for title in see_also_titles:
title_id = database_match(title)
if (title_id != '-1' and title_id != database_id):
isCluster = is_cluster(title_id)
create_link(title_id,database_id,isCluster,title)
csv_id = csv_match(title)
if(csv_id != '-1' and csv_id != database_id):
create_link(csv_id,database_id,False,title) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
#Check for matches with categories at bottom of Wikipedia article
category_titles = extractCategories(soup)
for title in category_titles:
title_id = str(database_match(title))
if (title_id != '-1' and title_id != database_id):
isCluster = is_cluster(title_id)
create_link(title_id,database_id,isCluster,title)
csv_id = csv_match(title)
if(csv_id != '-1' and csv_id != database_id):
create_link(csv_id,database_id,False,title) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
#Check for matches with links in first paragraph
for title in titles_in_paragraph:
title_id = str(database_match(title))
if (title_id != '-1' and title_id != database_id):
isCluster = is_cluster(title_id)
create_link(title_id,database_id,isCluster,title)
csv_id = csv_match(title)
if(csv_id != '-1' and csv_id != database_id):
create_link(csv_id,database_id,False,title) #Anything in the CSV is guaranteed to not be in the database and not be a cluster
print("Created: " + created_CSV_file)
print(str(id_count) + " nodes, " + str(num_invalid) + " invalid titles")
time2 = time.time()
print(str((time2-myTime)/60) + " minutes to run")
if __name__ == '__main__':
main()
|
|
# Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains an ant to run in the +x direction."""
import brax
from brax import jumpy as jp
from brax.envs import env
class Ant(env.Env):
"""Trains an ant to run in the +x direction."""
def __init__(self, **kwargs):
super().__init__(_SYSTEM_CONFIG, **kwargs)
def reset(self, rng: jp.ndarray) -> env.State:
"""Resets the environment to an initial state."""
rng, rng1, rng2 = jp.random_split(rng, 3)
qpos = self.sys.default_angle() + jp.random_uniform(
rng1, (self.sys.num_joint_dof,), -.1, .1)
qvel = jp.random_uniform(rng2, (self.sys.num_joint_dof,), -.1, .1)
qp = self.sys.default_qp(joint_angle=qpos, joint_velocity=qvel)
info = self.sys.info(qp)
obs = self._get_obs(qp, info)
reward, done, zero = jp.zeros(3)
metrics = {
'reward_ctrl_cost': zero,
'reward_contact_cost': zero,
'reward_forward': zero,
'reward_survive': zero,
}
return env.State(qp, obs, reward, done, metrics)
def step(self, state: env.State, action: jp.ndarray) -> env.State:
"""Run one timestep of the environment's dynamics."""
qp, info = self.sys.step(state.qp, action)
obs = self._get_obs(qp, info)
x_before = state.qp.pos[0, 0]
x_after = qp.pos[0, 0]
forward_reward = (x_after - x_before) / self.sys.config.dt
ctrl_cost = .5 * jp.sum(jp.square(action))
contact_cost = (0.5 * 1e-3 *
jp.sum(jp.square(jp.clip(info.contact.vel, -1, 1))))
survive_reward = jp.float32(1)
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
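    # terminate the episode when the torso height (z) leaves the healthy range [0.2, 1.0]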
done = jp.where(qp.pos[0, 2] < 0.2, x=jp.float32(1), y=jp.float32(0))
done = jp.where(qp.pos[0, 2] > 1.0, x=jp.float32(1), y=done)
state.metrics.update(
reward_ctrl_cost=ctrl_cost,
reward_contact_cost=contact_cost,
reward_forward=forward_reward,
reward_survive=survive_reward)
return state.replace(qp=qp, obs=obs, reward=reward, done=done)
def _get_obs(self, qp: brax.QP, info: brax.Info) -> jp.ndarray:
"""Observe ant body position and velocities."""
# some pre-processing to pull joint angles and velocities
(joint_angle,), (joint_vel,) = self.sys.joints[0].angle_vel(qp)
# qpos:
# Z of the torso (1,)
# orientation of the torso as quaternion (4,)
# joint angles (8,)
qpos = [qp.pos[0, 2:], qp.rot[0], joint_angle]
# qvel:
# velocity of the torso (3,)
# angular velocity of the torso (3,)
# joint angle velocities (8,)
qvel = [qp.vel[0], qp.ang[0], joint_vel]
# external contact forces:
# delta velocity (3,), delta ang (3,) * 10 bodies in the system
# Note that mujoco has 4 extra bodies tucked inside the Torso that Brax
# ignores
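    # total observation size: 13 (qpos) + 14 (qvel) + 6 * 10 (cfrc) = 87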
cfrc = [
jp.clip(info.contact.vel, -1, 1),
jp.clip(info.contact.ang, -1, 1)
]
# flatten bottom dimension
cfrc = [jp.reshape(x, x.shape[:-2] + (-1,)) for x in cfrc]
return jp.concatenate(qpos + qvel + cfrc)
_SYSTEM_CONFIG = """
bodies {
name: "$ Torso"
colliders {
capsule {
radius: 0.25
length: 0.5
end: 1
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 10
}
bodies {
name: "Aux 1"
colliders {
rotation { x: 90 y: -45 }
capsule {
radius: 0.08
length: 0.4428427219390869
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "$ Body 4"
colliders {
rotation { x: 90 y: -45 }
capsule {
radius: 0.08
length: 0.7256854176521301
end: -1
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "Aux 2"
colliders {
rotation { x: 90 y: 45 }
capsule {
radius: 0.08
length: 0.4428427219390869
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "$ Body 7"
colliders {
rotation { x: 90 y: 45 }
capsule {
radius: 0.08
length: 0.7256854176521301
end: -1
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "Aux 3"
colliders {
rotation { x: -90 y: 45 }
capsule {
radius: 0.08
length: 0.4428427219390869
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "$ Body 10"
colliders {
rotation { x: -90 y: 45 }
capsule {
radius: 0.08
length: 0.7256854176521301
end: -1
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "Aux 4"
colliders {
rotation { x: -90 y: -45 }
capsule {
radius: 0.08
length: 0.4428427219390869
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "$ Body 13"
colliders {
rotation { x: -90 y: -45 }
capsule {
radius: 0.08
length: 0.7256854176521301
end: -1
}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
}
bodies {
name: "Ground"
colliders {
plane {}
}
inertia { x: 1.0 y: 1.0 z: 1.0 }
mass: 1
frozen { all: true }
}
joints {
name: "$ Torso_Aux 1"
parent_offset { x: 0.2 y: 0.2 }
child_offset { x: -0.1 y: -0.1 }
parent: "$ Torso"
child: "Aux 1"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
angle_limit { min: -30.0 max: 30.0 }
rotation { y: -90 }
}
joints {
name: "Aux 1_$ Body 4"
parent_offset { x: 0.1 y: 0.1 }
child_offset { x: -0.2 y: -0.2 }
parent: "Aux 1"
child: "$ Body 4"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation: { z: 135 }
angle_limit {
min: 30.0
max: 70.0
}
}
joints {
name: "$ Torso_Aux 2"
parent_offset { x: -0.2 y: 0.2 }
child_offset { x: 0.1 y: -0.1 }
parent: "$ Torso"
child: "Aux 2"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { y: -90 }
angle_limit { min: -30.0 max: 30.0 }
}
joints {
name: "Aux 2_$ Body 7"
parent_offset { x: -0.1 y: 0.1 }
child_offset { x: 0.2 y: -0.2 }
parent: "Aux 2"
child: "$ Body 7"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { z: 45 }
angle_limit { min: -70.0 max: -30.0 }
}
joints {
name: "$ Torso_Aux 3"
parent_offset { x: -0.2 y: -0.2 }
child_offset { x: 0.1 y: 0.1 }
parent: "$ Torso"
child: "Aux 3"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { y: -90 }
angle_limit { min: -30.0 max: 30.0 }
}
joints {
name: "Aux 3_$ Body 10"
parent_offset { x: -0.1 y: -0.1 }
child_offset {
x: 0.2
y: 0.2
}
parent: "Aux 3"
child: "$ Body 10"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { z: 135 }
angle_limit { min: -70.0 max: -30.0 }
}
joints {
name: "$ Torso_Aux 4"
parent_offset { x: 0.2 y: -0.2 }
child_offset { x: -0.1 y: 0.1 }
parent: "$ Torso"
child: "Aux 4"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { y: -90 }
angle_limit { min: -30.0 max: 30.0 }
}
joints {
name: "Aux 4_$ Body 13"
parent_offset { x: 0.1 y: -0.1 }
child_offset { x: -0.2 y: 0.2 }
parent: "Aux 4"
child: "$ Body 13"
stiffness: 18000.0
angular_damping: 20
spring_damping: 80
rotation { z: 45 }
angle_limit { min: 30.0 max: 70.0 }
}
actuators {
name: "$ Torso_Aux 1"
joint: "$ Torso_Aux 1"
strength: 350.0
torque {}
}
actuators {
name: "Aux 1_$ Body 4"
joint: "Aux 1_$ Body 4"
strength: 350.0
torque {}
}
actuators {
name: "$ Torso_Aux 2"
joint: "$ Torso_Aux 2"
strength: 350.0
torque {}
}
actuators {
name: "Aux 2_$ Body 7"
joint: "Aux 2_$ Body 7"
strength: 350.0
torque {}
}
actuators {
name: "$ Torso_Aux 3"
joint: "$ Torso_Aux 3"
strength: 350.0
torque {}
}
actuators {
name: "Aux 3_$ Body 10"
joint: "Aux 3_$ Body 10"
strength: 350.0
torque {}
}
actuators {
name: "$ Torso_Aux 4"
joint: "$ Torso_Aux 4"
strength: 350.0
torque {}
}
actuators {
name: "Aux 4_$ Body 13"
joint: "Aux 4_$ Body 13"
strength: 350.0
torque {}
}
friction: 1.0
gravity { z: -9.8 }
angular_damping: -0.05
baumgarte_erp: 0.1
collide_include {
first: "$ Torso"
second: "Ground"
}
collide_include {
first: "$ Body 4"
second: "Ground"
}
collide_include {
first: "$ Body 7"
second: "Ground"
}
collide_include {
first: "$ Body 10"
second: "Ground"
}
collide_include {
first: "$ Body 13"
second: "Ground"
}
dt: 0.05
substeps: 10
"""
|
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the Partially Signed Transaction RPCs.
"""
import os
import json
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error, find_output
# Create one-input, one-output, no-fee transaction:
class PSBTTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = False
self.num_nodes = 3
# TODO: remove -txindex. Currently required for getrawtransaction call.
self.extra_args = [
["-txindex"],
["-txindex"],
["-txindex"]
]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Create and fund a raw tx for sending 10 DASH
psbtx1 = self.nodes[0].walletcreatefundedpsbt([], {self.nodes[2].getnewaddress():10})['psbt']
# Node 1 should not be able to add anything to it but still return the psbtx same as before
psbtx = self.nodes[1].walletprocesspsbt(psbtx1)['psbt']
assert_equal(psbtx1, psbtx)
# Sign the transaction and send
signed_tx = self.nodes[0].walletprocesspsbt(psbtx)['psbt']
final_tx = self.nodes[0].finalizepsbt(signed_tx)['hex']
self.nodes[0].sendrawtransaction(final_tx)
# Create p2sh and p2pkh addresses
pubkey0 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())['pubkey']
pubkey1 = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
pubkey2 = self.nodes[2].getaddressinfo(self.nodes[2].getnewaddress())['pubkey']
p2sh = self.nodes[1].addmultisigaddress(2, [pubkey0, pubkey1, pubkey2])['address']
p2pkh = self.nodes[1].getnewaddress()
# fund those addresses
rawtx = self.nodes[0].createrawtransaction([], {p2sh:10, p2pkh:10})
rawtx = self.nodes[0].fundrawtransaction(rawtx, {"changePosition":2})
signed_tx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])['hex']
txid = self.nodes[0].sendrawtransaction(signed_tx)
self.nodes[0].generate(6)
self.sync_all()
# Find the output pos
p2sh_pos = -1
p2pkh_pos = -1
decoded = self.nodes[0].decoderawtransaction(signed_tx)
for out in decoded['vout']:
if out['scriptPubKey']['addresses'][0] == p2sh:
p2sh_pos = out['n']
elif out['scriptPubKey']['addresses'][0] == p2pkh:
p2pkh_pos = out['n']
# spend single key from node 1
rawtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():9.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(rawtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[1].sendrawtransaction(self.nodes[1].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# feeRate of 0.1 DASH / KB produces a total fee slightly below -maxtxfee (~0.06650000):
res = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2pkh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 0.1})
assert_greater_than(res["fee"], 0.06)
assert_greater_than(0.07, res["fee"])
# feeRate of 10 DASH / KB produces a total fee well above -maxtxfee
# previously this was silently capped at -maxtxfee
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by -maxtxfee", self.nodes[1].walletcreatefundedpsbt, [{"txid":txid,"vout":p2pkh_pos},{"txid":txid,"vout":p2sh_pos},{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():29.99}, 0, {"feeRate": 10})
# partially sign multisig things with node 1
psbtx = self.nodes[1].walletcreatefundedpsbt([{"txid":txid,"vout":p2sh_pos}], {self.nodes[1].getnewaddress():9.99})['psbt']
walletprocesspsbt_out = self.nodes[1].walletprocesspsbt(psbtx)
psbtx = walletprocesspsbt_out['psbt']
assert_equal(walletprocesspsbt_out['complete'], False)
# partially sign with node 2. This should be complete and sendable
walletprocesspsbt_out = self.nodes[2].walletprocesspsbt(psbtx)
assert_equal(walletprocesspsbt_out['complete'], True)
self.nodes[2].sendrawtransaction(self.nodes[2].finalizepsbt(walletprocesspsbt_out['psbt'])['hex'])
# check that walletprocesspsbt fails to decode a non-psbt
rawtx = self.nodes[1].createrawtransaction([{"txid":txid,"vout":p2pkh_pos}], {self.nodes[1].getnewaddress():9.99})
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[1].walletprocesspsbt, rawtx)
# Convert a non-psbt to psbt and make sure we can decode it
rawtx = self.nodes[0].createrawtransaction([], {self.nodes[1].getnewaddress():10})
rawtx = self.nodes[0].fundrawtransaction(rawtx)
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Make sure that a psbt with signatures cannot be converted
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx['hex'])
assert_raises_rpc_error(-22, "Inputs must not have scriptSigs", self.nodes[0].converttopsbt, signedtx['hex'])
assert_raises_rpc_error(-22, "Inputs must not have scriptSigs", self.nodes[0].converttopsbt, signedtx['hex'], False)
# Unless we allow it to convert and strip signatures
self.nodes[0].converttopsbt(signedtx['hex'], True)
# Explicitly allow converting non-empty txs
new_psbt = self.nodes[0].converttopsbt(rawtx['hex'])
self.nodes[0].decodepsbt(new_psbt)
# Create outputs to nodes 1 and 2
node1_addr = self.nodes[1].getnewaddress()
node2_addr = self.nodes[2].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(node1_addr, 13)
txid2 = self.nodes[0].sendtoaddress(node2_addr, 13)
blockhash = self.nodes[0].generate(6)[0]
self.sync_all()
vout1 = find_output(self.nodes[1], txid1, 13, blockhash=blockhash)
vout2 = find_output(self.nodes[2], txid2, 13, blockhash=blockhash)
# Create a psbt spending outputs from nodes 1 and 2
psbt_orig = self.nodes[0].createpsbt([{"txid":txid1, "vout":vout1}, {"txid":txid2, "vout":vout2}], {self.nodes[0].getnewaddress():25.999})
# Update psbts, should only have data for one input and not the other
psbt1 = self.nodes[1].walletprocesspsbt(psbt_orig)['psbt']
psbt1_decoded = self.nodes[0].decodepsbt(psbt1)
assert psbt1_decoded['inputs'][0] and not psbt1_decoded['inputs'][1]
psbt2 = self.nodes[2].walletprocesspsbt(psbt_orig)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert not psbt2_decoded['inputs'][0] and psbt2_decoded['inputs'][1]
# Combine, finalize, and send the psbts
combined = self.nodes[0].combinepsbt([psbt1, psbt2])
finalized = self.nodes[0].finalizepsbt(combined)['hex']
self.nodes[0].sendrawtransaction(finalized)
self.nodes[0].generate(6)
self.sync_all()
# BIP 174 Test Vectors
# Check that unknown values are just passed through
unknown_psbt = "cHNidP8BAD8CAAAAAf//////////////////////////////////////////AAAAAAD/////AQAAAAAAAAAAA2oBAAAAAAAACg8BAgMEBQYHCAkPAQIDBAUGBwgJCgsMDQ4PAAA="
unknown_out = self.nodes[0].walletprocesspsbt(unknown_psbt)['psbt']
assert_equal(unknown_psbt, unknown_out)
# Open the data file
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/rpc_psbt.json'), encoding='utf-8') as f:
d = json.load(f)
invalids = d['invalid']
valids = d['valid']
creators = d['creator']
signers = d['signer']
combiners = d['combiner']
finalizers = d['finalizer']
extractors = d['extractor']
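        # Each list below exercises one BIP 174 role: creators build an empty
        # PSBT, signers add signatures, combiners merge parallel signings,
        # finalizers produce final scriptSigs, extractors yield the network tx.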
# Invalid PSBTs
for invalid in invalids:
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decodepsbt, invalid)
# Valid PSBTs
for valid in valids:
self.nodes[0].decodepsbt(valid)
# Creator Tests
for creator in creators:
created_tx = self.nodes[0].createpsbt(creator['inputs'], creator['outputs'])
assert_equal(created_tx, creator['result'])
# Signer tests
for i, signer in enumerate(signers):
self.nodes[2].createwallet("wallet{}".format(i))
wrpc = self.nodes[2].get_wallet_rpc("wallet{}".format(i))
for key in signer['privkeys']:
wrpc.importprivkey(key)
signed_tx = wrpc.walletprocesspsbt(signer['psbt'])['psbt']
assert_equal(signed_tx, signer['result'])
# Combiner test
for combiner in combiners:
combined = self.nodes[2].combinepsbt(combiner['combine'])
assert_equal(combined, combiner['result'])
# Empty combiner test
assert_raises_rpc_error(-8, "Parameter 'txs' cannot be empty", self.nodes[0].combinepsbt, [])
# Finalizer test
for finalizer in finalizers:
finalized = self.nodes[2].finalizepsbt(finalizer['finalize'], False)['psbt']
assert_equal(finalized, finalizer['result'])
# Extractor test
for extractor in extractors:
extracted = self.nodes[2].finalizepsbt(extractor['extract'], True)['hex']
assert_equal(extracted, extractor['result'])
# Test that psbts with p2pkh outputs are created properly
p2pkh = self.nodes[0].getnewaddress()
psbt = self.nodes[1].walletcreatefundedpsbt([], [{p2pkh : 1}], 0, {"includeWatching" : True}, True)
self.nodes[0].decodepsbt(psbt['psbt'])
# Test decoding error: invalid base64
assert_raises_rpc_error(-22, "TX decode failed invalid base64", self.nodes[0].decodepsbt, ";definitely not base64;")
# Send to all types of addresses
addr1 = self.nodes[1].getnewaddress()
txid1 = self.nodes[0].sendtoaddress(addr1, 11)
vout1 = find_output(self.nodes[0], txid1, 11)
addr2 = self.nodes[1].getnewaddress()
txid2 = self.nodes[0].sendtoaddress(addr2, 11)
vout2 = find_output(self.nodes[0], txid2, 11)
addr3 = self.nodes[1].getnewaddress()
txid3 = self.nodes[0].sendtoaddress(addr3, 11)
vout3 = find_output(self.nodes[0], txid3, 11)
self.sync_all()
# Update a PSBT with UTXOs from the node
# Inputs should not be filled because they are non-witness
psbt = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1},{"txid":txid2, "vout":vout2},{"txid":txid3, "vout":vout3}], {self.nodes[0].getnewaddress():32.999})
decoded = self.nodes[1].decodepsbt(psbt)
assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
updated = self.nodes[1].utxoupdatepsbt(psbt)
decoded = self.nodes[1].decodepsbt(updated)
assert "witness_utxo" not in decoded['inputs'][0] and "non_witness_utxo" not in decoded['inputs'][0]
assert "witness_utxo" not in decoded['inputs'][1] and "non_witness_utxo" not in decoded['inputs'][1]
assert "witness_utxo" not in decoded['inputs'][2] and "non_witness_utxo" not in decoded['inputs'][2]
# Two PSBTs with a common input should not be joinable
psbt1 = self.nodes[1].createpsbt([{"txid":txid1, "vout":vout1}], {self.nodes[0].getnewaddress():Decimal('10.999')})
assert_raises_rpc_error(-8, "exists in multiple PSBTs", self.nodes[1].joinpsbts, [psbt1, updated])
# Join two distinct PSBTs
addr4 = self.nodes[1].getnewaddress()
txid4 = self.nodes[0].sendtoaddress(addr4, 5)
vout4 = find_output(self.nodes[0], txid4, 5)
self.nodes[0].generate(6)
self.sync_all()
psbt2 = self.nodes[1].createpsbt([{"txid":txid4, "vout":vout4}], {self.nodes[0].getnewaddress():Decimal('4.999')})
psbt2 = self.nodes[1].walletprocesspsbt(psbt2)['psbt']
psbt2_decoded = self.nodes[0].decodepsbt(psbt2)
assert "final_scriptwitness" not in psbt2_decoded['inputs'][0] and "final_scriptSig" in psbt2_decoded['inputs'][0]
joined = self.nodes[0].joinpsbts([psbt, psbt2])
joined_decoded = self.nodes[0].decodepsbt(joined)
assert len(joined_decoded['inputs']) == 4 and len(joined_decoded['outputs']) == 2 and "final_scriptwitness" not in joined_decoded['inputs'][3] and "final_scriptSig" not in joined_decoded['inputs'][3]
if __name__ == '__main__':
PSBTTest().main()
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from oslo_log import log as logging
import sh
import six
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class Vcs(object):
def __init__(self, repo, sources_root):
self.repo = repo
self.sources_root = sources_root
if not os.path.exists(sources_root):
os.mkdir(sources_root)
else:
if not os.access(sources_root, os.W_OK):
raise Exception('Sources root folder %s is not writable' %
sources_root)
def fetch(self):
pass
def log(self, branch, head_commit_id):
pass
def get_last_id(self, branch):
pass
GIT_LOG_PARAMS = [
('commit_id', '%H'),
('date', '%at'),
('author_name', '%an'),
('author_email', '%ae'),
('subject', '%s'),
('message', '%b'),
]
GIT_LOG_FORMAT = ''.join([(r[0] + ':' + r[1] + '%n')
for r in GIT_LOG_PARAMS]) + 'diff_stat:'
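# GIT_LOG_FORMAT expands to
#   'commit_id:%H%ndate:%at%nauthor_name:%an%nauthor_email:%ae%nsubject:%s%nmessage:%b%ndiff_stat:'
# (%n is git's newline placeholder), so each commit in the `git log --pretty`
# output starts with labelled fields that GIT_LOG_PATTERN parses back out.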
DIFF_STAT_PATTERN = ('[^\d]+(\d+)\s+[^\s]*\s+changed'
'(,\s+(\d+)\s+([^\d\s]*)\s+(\d+)?)?')
GIT_LOG_PATTERN = re.compile(''.join([(r[0] + ':(.*?)\n')
for r in GIT_LOG_PARAMS]) +
'diff_stat:(?P<diff_stat>.+?)(?=commit|\Z)',
re.DOTALL)
CO_AUTHOR_PATTERN_RAW = ('(?P<author_name>.*?)\s*'
'<?(?P<author_email>[\w\.-]+@[\w\.-]+)>?')
CO_AUTHOR_PATTERN = re.compile(CO_AUTHOR_PATTERN_RAW, re.IGNORECASE)
MESSAGE_PATTERNS = {
'bug_id': re.compile(r'bug[\s#:]*(?P<id>\d+)', re.IGNORECASE),
'blueprint_id': re.compile(r'\b(?:blueprint|bp)\b[ \t]*[#:]?[ \t]*'
r'(?P<id>[a-z0-9-]+)', re.IGNORECASE),
'change_id': re.compile('Change-Id: (?P<id>I[0-9a-f]{40})', re.IGNORECASE),
'coauthor': re.compile(r'(?:Co-Authored-By|Also-By|Co-Author):'
r'\s*(?P<id>%s)\s' % CO_AUTHOR_PATTERN_RAW,
re.IGNORECASE)
}
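# Illustrative matches: "Closes bug #123456" -> bug_id '123456';
# "Implements blueprint my-feature" -> blueprint_id 'my-feature';
# "Co-Authored-By: Jane Doe <jane@example.org>" -> a coauthor entry.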
class Git(Vcs):
def __init__(self, repo, sources_root):
super(Git, self).__init__(repo, sources_root)
uri = self.repo['uri']
match = re.search(r'([^/]+)\.git$', uri)
if match:
self.folder = os.path.normpath(self.sources_root + '/' +
match.group(1))
else:
raise Exception('Unexpected uri %s for git' % uri)
self.release_index = {}
def _checkout(self, branch):
try:
sh.git('clean', '-d', '--force')
sh.git('reset', '--hard')
sh.git('checkout', 'origin/' + branch)
return True
except sh.ErrorReturnCode:
LOG.error('Unable to checkout branch %(branch)s from repo '
'%(uri)s. Ignore it',
{'branch': branch, 'uri': self.repo['uri']},
exc_info=True)
return False
def fetch(self):
LOG.debug('Fetching repo uri %s', self.repo['uri'])
if os.path.exists(self.folder):
os.chdir(self.folder)
try:
uri = str(
sh.git('config', '--get', 'remote.origin.url')).strip()
except sh.ErrorReturnCode:
LOG.error('Unable to get config for git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
return {}
if uri != self.repo['uri']:
LOG.warning('Repo uri %(uri)s differs from cloned %(old)s',
{'uri': self.repo['uri'], 'old': uri})
os.chdir('..')
shutil.rmtree(self.folder)
if not os.path.exists(self.folder):
os.chdir(self.sources_root)
try:
sh.git('clone', self.repo['uri'])
os.chdir(self.folder)
except sh.ErrorReturnCode:
LOG.error('Unable to clone git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
else:
os.chdir(self.folder)
try:
sh.git('fetch')
except sh.ErrorReturnCode:
LOG.error('Unable to fetch git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
return self._get_release_index()
def _get_release_index(self):
if not os.path.exists(self.folder):
return {}
LOG.debug('Get release index for repo uri: %s', self.repo['uri'])
os.chdir(self.folder)
if not self.release_index:
for release in self.repo.get('releases', []):
release_name = release['release_name'].lower()
if 'branch' in release:
branch = release['branch']
else:
branch = 'master'
if not self._checkout(branch):
continue
if 'tag_from' in release:
tag_range = release['tag_from'] + '..' + release['tag_to']
else:
tag_range = release['tag_to']
try:
git_log_iterator = sh.git('log', '--pretty=%H', tag_range,
_tty_out=False)
for commit_id in git_log_iterator:
self.release_index[commit_id.strip()] = release_name
except sh.ErrorReturnCode:
LOG.error('Unable to get log of git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
return self.release_index
def log(self, branch, head_commit_id):
LOG.debug('Parsing git log for repo uri %s', self.repo['uri'])
os.chdir(self.folder)
if not self._checkout(branch):
return
commit_range = 'HEAD'
if head_commit_id:
commit_range = head_commit_id + '..HEAD'
try:
output = sh.git('log', '--pretty=' + GIT_LOG_FORMAT, '--shortstat',
'-M', '--no-merges', commit_range, _tty_out=False,
_decode_errors='ignore', _encoding='utf8')
except sh.ErrorReturnCode:
LOG.error('Unable to get log of git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
return
for rec in re.finditer(GIT_LOG_PATTERN, six.text_type(output)):
i = 1
commit = {}
for param in GIT_LOG_PARAMS:
commit[param[0]] = rec.group(i)
i += 1
if not commit['author_email']:
# ignore commits with empty email (there are some < Essex)
continue
commit['author_email'] = utils.keep_safe_chars(
commit['author_email'])
diff_stat_str = rec.group('diff_stat')
diff_rec = re.search(DIFF_STAT_PATTERN, diff_stat_str)
if diff_rec:
files_changed = int(diff_rec.group(1))
lines_changed_group = diff_rec.group(2)
lines_changed = diff_rec.group(3)
deleted_or_inserted = diff_rec.group(4)
lines_deleted = diff_rec.group(5)
                if lines_changed_group:  # there are inserted or deleted lines
if not lines_deleted:
if deleted_or_inserted[0] == 'd': # deleted
lines_deleted = lines_changed
lines_changed = 0
else:
files_changed = 0
lines_changed = 0
lines_deleted = 0
commit['files_changed'] = files_changed
commit['lines_added'] = int(lines_changed or 0)
commit['lines_deleted'] = int(lines_deleted or 0)
for pattern_name, pattern in six.iteritems(MESSAGE_PATTERNS):
collection = set()
for item in re.finditer(pattern, commit['message']):
collection.add(item.group('id'))
if collection:
commit[pattern_name] = list(collection)
commit['date'] = int(commit['date'])
commit['module'] = self.repo['module']
commit['branches'] = set([branch])
if commit['commit_id'] in self.release_index:
commit['release'] = self.release_index[commit['commit_id']]
else:
commit['release'] = None
if commit['release'] == 'ignored':
# drop commits that are marked by 'ignored' release
continue
if 'blueprint_id' in commit:
commit['blueprint_id'] = [(commit['module'] + ':' + bp_name)
for bp_name
in commit['blueprint_id']]
if 'coauthor' in commit:
verified_coauthors = []
for coauthor in commit['coauthor']:
m = re.match(CO_AUTHOR_PATTERN, coauthor)
if m and utils.check_email_validity(
m.group("author_email")):
verified_coauthors.append(m.groupdict())
if verified_coauthors:
commit['coauthor'] = verified_coauthors
else:
del commit['coauthor'] # no valid authors
yield commit
def get_last_id(self, branch):
LOG.debug('Get head commit for repo uri: %s', self.repo['uri'])
os.chdir(self.folder)
if not self._checkout(branch):
return None
try:
return str(sh.git('rev-parse', 'HEAD')).strip()
except sh.ErrorReturnCode:
LOG.error('Unable to get HEAD for git repo %s. Ignore it',
self.repo['uri'], exc_info=True)
return None
def get_vcs(repo, sources_root):
uri = repo['uri']
LOG.debug('Factory is asked for VCS uri: %s', uri)
match = re.search(r'\.git$', uri)
if match:
return Git(repo, sources_root)
else:
LOG.warning('Unsupported VCS, fallback to dummy')
        return Vcs(repo, sources_root)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class DurationOperations(object):
"""DurationOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_null(
self, custom_headers=None, raw=False, **operation_config):
"""Get null duration value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyduration.models.ErrorException>`
"""
# Construct URL
url = '/duration/null'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def put_positive_duration(
self, duration_body, custom_headers=None, raw=False, **operation_config):
"""Put a positive duration value.
:param duration_body:
:type duration_body: timedelta
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyduration.models.ErrorException>`
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(duration_body, 'duration')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_positive_duration(
self, custom_headers=None, raw=False, **operation_config):
"""Get a positive duration value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyduration.models.ErrorException>`
"""
# Construct URL
url = '/duration/positiveduration'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_invalid(
self, custom_headers=None, raw=False, **operation_config):
"""Get an invalid duration value.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: timedelta or
 :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
 if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestsbodyduration.models.ErrorException>`
"""
# Construct URL
url = '/duration/invalid'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('duration', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
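# Hedged usage sketch (editor's addition, not generated code): how these
# duration operations might be exercised end to end. The `client.duration`
# attribute is an assumption based on the usual AutoRest client layout; any
# non-200 status raises ErrorException, and raw=True returns a
# ClientRawResponse wrapping the deserialized value.
def _example_duration_roundtrip(client):
    from datetime import timedelta
    assert client.duration.get_null() is None  # null body deserializes to None
    client.duration.put_positive_duration(timedelta(days=123, hours=22))
    value = client.duration.get_positive_duration()
    assert isinstance(value, timedelta)
    return value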
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This module contains the user-facing API for AutoGraph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from enum import Enum
from tensorflow.python.autograph.core import config
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.utils import py_func
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# TODO(mdan): Properly document the type hints.
# TODO(mdan): Reduce the type hint information to (module, type).
# (currently we require (module + class name, type))
# TODO(mdan): This should behave like to_graph (e.g. convert statically).
def convert(recursive=False, verbose=False):
"""Decorator that compiles a function to use TensorFlow ops.
The decorator is dynamic - it recompiles the target whenever the decorated
function is called. This means the parameter values are known at conversion time.
It also means that repeated calls with different types of parameters will be
correctly processed.
Args:
recursive: bool, whether to recursively convert any functions or classes
that the converted function may use.
verbose: bool, whether to output the compiled code in the logs.
Returns:
Callable, a decorator that converts the given function into an equivalent
function that uses TensorFlow ops.
"""
def decorator(f):
"""Decorator implementation."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
return converted_call(
f, None,
converter.ConversionOptions(
recursive=recursive,
verbose=verbose,
force_conversion=True,
optional_features=converter.Feature.ALL,
), *args, **kwargs)
wrapper = tf_decorator.make_decorator(f, wrapper)
# Sometimes the decorator is just desugared, making it impossible to detect.
# This attribute makes detection easier.
setattr(wrapper, '__pyct_is_compile_decorator', True)
return wrapper
return decorator
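# Hedged usage sketch (editor's addition): the dynamic decorator in action.
# This mirrors the square_if_positive example from the AutoGraph docs; the
# tensorflow import is local so the sketch has no import-time side effects.
def _example_convert_usage():
  import tensorflow as tf

  @convert()
  def square_if_positive(x):
    if x > 0:  # staged into a tf.cond by AutoGraph
      x = x * x
    else:
      x = 0.0
    return x

  return square_if_positive(tf.constant(-1.0))  # a Tensor, evaluates to 0.0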
class RunMode(Enum):
"""Specifies the way a converted function or method should be executed in TF.
The enum values have the following semantics:
* GRAPH: Call this function directly, as-is. This is suitable for functions
that were already designed for TF graphs and contain ops.
* PY_FUNC: Wrap this function into a py_func op. This is suitable for code
that will only run correctly in Python, for example code that renders
to the display, reads keyboard input, etc.
"""
GRAPH = 1
PY_FUNC = 2
def do_not_convert(run_as=RunMode.GRAPH, return_dtypes=None):
"""Decorator that suppresses the conversion of a function.
See also: docs/pyfunc_dtypes.md
Args:
run_as: RunMode, specifies how to use the function in TensorFlow.
return_dtypes: Optional[Iterable[ Union[tf.DType,
utils.py_func.MatchDType]]], the return data types of the converted
function, if run_as is RunMode.PY_FUNC. Ignored otherwise. May be set to
None if the function has no return values.
Returns:
Callable, a decorator that wraps the original function.
"""
def decorator(f):
"""Decorator implementation."""
@functools.wraps(f)
def graph_wrapper(*args, **kwargs):
return f(*args, **kwargs)
@functools.wraps(f)
def py_func_wrapper(*args, **kwargs):
if kwargs:
raise NotImplementedError('RunMode.PY_FUNC does not yet support kwargs')
# TODO(mdan): Add support for kwargs.
return py_func.wrap_py_func(
f, return_dtypes, args, kwargs, use_dummy_return=not return_dtypes)
if run_as == RunMode.GRAPH:
wrapper = graph_wrapper
elif run_as == RunMode.PY_FUNC:
wrapper = py_func_wrapper
else:
raise ValueError('unknown value for run_as: %s' % run_as)
# Sometimes the decorator is just desugared, making it impossible to detect.
# This attribute makes detection easier.
setattr(wrapper, '__pyct_is_compile_decorator', True)
return wrapper
return decorator
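# Hedged usage sketch (editor's addition): opting a function out of
# conversion. With RunMode.PY_FUNC the function still runs inside the graph
# through a py_func wrapper, so return_dtypes must describe its outputs; the
# function below is illustrative only.
def _example_do_not_convert_usage():
  import tensorflow as tf

  @do_not_convert(run_as=RunMode.PY_FUNC, return_dtypes=[tf.float32])
  def log_and_double(x):
    print('seen a value')  # plain Python side effect, legal under py_func
    return x * 2.0

  return log_and_double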
# TODO(mdan): Move to a private, undocumented module.
def converted_call(f, owner, options, *args, **kwargs):
"""Compiles a function call inline. For internal use only."""
if options.verbose:
logging.info('Converted call: {}; owner: {}'.format(f, owner))
if owner is not None:
if not isinstance(f, str):
raise ValueError(
'When owner is specified, the function name must be specified as'
' a string: {}'.format(f))
# Special case when the owner is a 'super' object. In that case lookups of
# dynamic attributes won't work. See
# inspect_utils.SuperWrapperForDynamicAttrs.
if isinstance(owner, super):
owner = inspect_utils.SuperWrapperForDynamicAttrs(owner)
f = getattr(owner, f)
# TODO(mdan): This needs cleanup.
# In particular, we may want to avoid renaming functions altogether.
if not options.force_conversion and conversion.is_whitelisted_for_graph(f):
return f(*args, **kwargs)
unknown_arg_value = object() # Sentinel for arguments of unknown value
if inspect_utils.isbuiltin(f):
return py_builtins.overload_of(f)(*args, **kwargs)
if tf_inspect.isfunction(f) or tf_inspect.ismethod(f):
# Regular functions
target_entity = f
arg_map_target = f
f_class = inspect_utils.getmethodclass(f)
if f_class is not None:
# If this is a method call, it may or may not include self.
#
# Example when self is included:
# converted_call(to_graph(foo.bar), foo)
#
# Example when self is not included:
# super(...).foo(args)
#
if owner is not None and (not args or args[0] is not owner):
effective_args = (owner,) + args
else:
effective_args = args
partial_types = (f_class,)
else:
effective_args = args
partial_types = ()
elif tf_inspect.isclass(f):
# Constructors
target_entity = f
arg_map_target = f.__init__
effective_args = args
partial_types = ()
elif hasattr(f, '__call__') and hasattr(f, '__class__'):
# Callable objects
target_entity = f.__call__
arg_map_target = f.__call__
effective_args = (f,) + args
partial_types = (f.__class__,)
else:
raise NotImplementedError('unknown callable type "%s"' % type(f))
arg_values = tf_inspect.getcallargs(arg_map_target, *args, **kwargs)
arg_types = {}
for name, arg in arg_values.items():
if arg is unknown_arg_value:
continue
arg_class = arg.__class__
arg_types[name] = (arg_class.__name__, arg_class)
# When called from within a decorator, this is the only indication that
# the function is a method - it appears that the decorator is applied
# before the method is bound.
if not partial_types:
if 'self' in arg_values:
if tf_inspect.isclass(arg_values['self'].__class__):
partial_types = (arg_values['self'].__class__,)
elif 'cls' in arg_values:
if tf_inspect.isclass(arg_values['cls']):
partial_types = (arg_values['cls'],)
converted_f = to_graph(
target_entity,
recursive=options.recursive,
verbose=options.verbose,
arg_values=arg_values,
arg_types=arg_types,
partial_types=partial_types,
strip_decorators=options.strip_decorators,
optional_features=options.optional_features)
return converted_f(*effective_args, **kwargs)
# TODO(mdan): Rename: to_ops?
# TODO(mdan): Look into overloading as function and decorator, like tfe.defun?
# TODO(mdan): Remove partial_types.
def to_graph(e,
recursive=True,
verbose=False,
arg_values=None,
arg_types=None,
partial_types=None,
strip_decorators=None,
optional_features=converter.Feature.ALL):
"""Converts a Python entity into equivalent code that uses TensorFlow ops.
Supported Python entities include:
* functions
* classes
Classes are converted by converting all their methods into a new class.
Args:
e: Union[Callable, Type], the Python entity to convert.
recursive: bool, whether to recursively convert any functions that the
converted function may call.
verbose: bool, whether to output the compiled code in the logs.
arg_values: Optional[Dict[Text, Any]], value hints for symbols including
function arguments.
arg_types: Optional[Dict[Text, Type]], type hints for symbols including
function arguments.
partial_types: Set[Type], reserved for internal use.
strip_decorators: Tuple[Callable], same as
ConversionOptions.strip_decorators.
optional_features: Union[Feature, Set[Feature]], same as
ConversionOptions.optional_features.
Returns:
Union[Callable, Type], the converted entity, which is the same kind as e
(that is, a function if e is a function, a class if e is a class, etc.) but
its code has been converted to use TF ops.
Raises:
ValueError: If the entity could not be converted.
"""
if strip_decorators is None:
strip_decorators = ()
strip_decorators += (convert, do_not_convert, converted_call)
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(
recursive=recursive,
verbose=verbose,
strip_decorators=strip_decorators,
optional_features=optional_features),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
_, name, namespace = conversion.entity_to_graph(e, program_ctx, arg_values,
arg_types)
nodes = []
for dep in reversed(program_ctx.conversion_order):
nodes.extend(program_ctx.dependency_cache[dep])
compiled_module, _ = compiler.ast_to_object(
nodes,
source_prefix=program_ctx.required_imports,
include_source_map=True)
# The compiled code should see everything the entry entity saw.
# TODO(mdan): This might not work well if the call tree spans modules?
for key, val in namespace.items():
# Avoid overwriting entities that have been transformed.
if key not in compiled_module.__dict__:
compiled_module.__dict__[key] = val
compiled = getattr(compiled_module, name)
# Need this so the source_mapping attribute is available for the context
# manager to access for runtime errors.
#
# Note that compiler.ast_to_object attaches the source map 'ag_source_map__'
# symbol to the compiled module.
# TODO(mdan): Record this statically in the generated code.
# TODO(mdan): Rename this attribute to 'autograph_info__'
source_map_attribute_name = 'ag_source_map'
if getattr(compiled, source_map_attribute_name, None) is not None:
raise ValueError('cannot convert %s because it has an attribute '
'"%s", which is reserved for AutoGraph.' %
(compiled, source_map_attribute_name))
setattr(compiled, source_map_attribute_name,
compiled_module.__dict__['ag_source_map__'])
return compiled
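# Hedged usage sketch (editor's addition): static, up-front conversion. Unlike
# the `convert` decorator, `to_graph` converts once and returns a new callable
# whose body is the generated TF code; collatz is a standard AutoGraph demo.
def _example_to_graph_usage():
  import tensorflow as tf

  def collatz(a):
    count = 0
    while a != 1:
      if a % 2 == 0:
        a = a // 2
      else:
        a = 3 * a + 1
      count += 1
    return count

  tf_collatz = to_graph(collatz)
  return tf_collatz(tf.constant(27))  # a Tensor computed with TF control flow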
def to_code(e,
recursive=True,
arg_values=None,
arg_types=None,
partial_types=None,
indentation=' '):
"""Returns the equivalent code that uses TensorFlow ops.
Also see: `to_graph`, `convert`
Args:
e: Union[Callable, Type], the Python entity to convert.
recursive: bool, whether to recursively convert any functions that the
converted function may call.
arg_values: Optional[Dict[Text, Any]], value hints for symbols including
function arguments.
arg_types: Optional[Dict[Text, Type]], type hints for symbols including
function arguments.
partial_types: Set[Type], reserved for internal use.
indentation: Text, what to use for each level of indentation.
Returns:
Text, the converted code.
"""
program_ctx = converter.ProgramContext(
options=converter.ConversionOptions(
recursive=recursive,
strip_decorators=(convert, do_not_convert, converted_call)),
partial_types=partial_types,
autograph_module=tf_inspect.getmodule(to_graph),
uncompiled_modules=config.DEFAULT_UNCOMPILED_MODULES)
conversion.entity_to_graph(e, program_ctx, arg_values, arg_types)
code = '\n'.join(
compiler.ast_to_source(program_ctx.dependency_cache[dep], indentation)
for dep in reversed(program_ctx.conversion_order))
return program_ctx.required_imports + '\n\n' + code
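# Hedged usage sketch (editor's addition): `to_code` runs the same conversion
# pipeline as `to_graph` but returns the generated source text, which is handy
# for inspecting what AutoGraph produced.
def _example_to_code_usage():
  def square_if_positive(x):
    if x > 0:
      x = x * x
    else:
      x = 0.0
    return x

  return to_code(square_if_positive)  # Text of the converted function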
|
|
"""Tree structure and hierarchical divisive algorithm for spectral clustering
Used in the paper:
@article{Gaidon2014,
author = {Gaidon, Adrien and Harchaoui, Zaid and Schmid, Cordelia},
title = {{Activity representation with motion hierarchies}},
journal = {IJCV},
year = {2014}
}
LICENSE: BSD
Copyrights: Adrien Gaidon, 2012-2014
"""
import sys
import heapq
import numpy as np
from scipy import sparse
from scipy.sparse.sparsetools import cs_graph_components
import pyflann
from sklearn.cluster import MiniBatchKMeans, KMeans
# The fixed internal parameters for clustering
INTERNAL_PARAMETERS = dict(
# generic ones
min_tube_size=100, # minimum points per cluster
max_tube_size=2000, # maximum points per cluster
min_k=2, # lower limit on number of tubes per video
# build_sym_geom_adjacency
min_geom_neighbors=10, # minimum number of geometrical neighbors
# spectral_clustering_division
n_threshs=10, # number of evenly-spaced thresholds to try
max_depth=62, # maximum depth for cluster-trees (-> max nodes = 2**h - 1)
min_evect_amplitude=1e-10, # min amplitude of proj on eigenvector to split
)
def spectral_embedding_nystrom(AB, ridge=1e-10, nvec=2, copy=True):
"""Approximate spectral embedding using the Nystrom approximation
Parameters
----------
AB: (n, n+m) array,
similarities between n sub-sampled points and all n+m points
(AB = [A; B], where A is assumed p.d.)
ridge: float,
small offset added to the diagonal of A for numerical stability
nvec: int, optional, default: 2,
number of embedding vectors to use (output dimensionality, nvec < n)
copy: boolean, optional, default: True,
work on a copy of AB or not
Returns
-------
E: (n+m, nvec) array,
the spectral embedding of all points
Raises
------
IndefiniteError: if A = AB[:n, :n] is not positive-definite
Notes
-----
- One-shot technique from [1]: assumes A is p.d.
Note that [1] has some mistakes that are corrected here.
- Cost in memory is at most 4 times the memory size of AB.
References
----------
[1] Spectral grouping using the Nystrom method,
Fowlkes, C. and Belongie, S. and Chung, F. and Malik, J.
PAMI 2004
"""
if copy:
AB = AB.copy() # XXX memory bottleneck
n = AB.shape[0]
#m = AB.shape[1] - n
assert nvec < n, "Too large number of embedding vectors (%d >= %d)" % (
nvec, n)
# make views of the blocks
A = AB[:, :n]
B = AB[:, n:]
# add a ridge for numerical stability as A is generally badly-conditioned
# XXX use QR decomposition of A for num stab (cf. stable GP)?
A[np.diag_indices_from(A)] += ridge
# normalize the components of AB
b_r = B.sum(axis=1)
pinvA = spd_pinv(A, check_stability=False)
d1 = A.sum(axis=1) + b_r
d2 = np.abs(B.sum(axis=0) + np.dot(np.dot(b_r, pinvA), B))
# Note: abs not required except when numerical problems
if np.any(d1 <= 0):
raise ValueError("numerical issue: negative or null d1 entries")
if np.any(d2 <= 0):
raise ValueError("numerical issue: negative or null d2 entries")
dhat = np.sqrt(1.0 / np.r_[d1, d2])[:, np.newaxis]
A *= np.dot(dhat[:n], dhat[:n].T)
B *= np.dot(dhat[:n], dhat[n:].T)
# square root of the pseudo-inverse
Asi = spd_pinv(A, square_root=True, check_stability=False)
# compute the embedding vectors
AsiB = np.dot(Asi, B) # XXX time & memory bottleneck (20% of total)
S = A + np.dot(AsiB, AsiB.T) # XXX bottleneck (20% of total)
QS, deltaS = None, None
for _i in range(4):
try:
QS, deltaS, _ = np.linalg.svd(S)
break
except np.linalg.LinAlgError:
_qridge = ridge * 10 ** _i
S[np.diag_indices_from(S)] += _qridge
sys.stderr.write(
"WARNING: SVD didn't converge:"
"added ridge {0:0.1e} to weird S matrix\n".format(_qridge))
if QS is None or deltaS is None:
raise ValueError("numerical issue: ridge too low or weird S")
if np.any(deltaS <= 0):
raise ValueError("numerical issue: negative or null deltaS entry")
_VT = np.dot(np.diag(1.0 / np.sqrt(deltaS)), np.dot(QS.T, Asi))
VT = np.dot(_VT, AB) # XXX time & memory bottleneck (20% of total)
# return the first nvec embedding vectors
if np.any(VT[0] == 0):
sys.stderr.write(
"WARNING: numerical issue: null first eigenvector entries\n")
# replace 0 entries by the mean
_m = np.mean(VT[0])
if _m == 0:
raise ValueError('numerical issue: first eigenvector is 0')
VT[0, VT[0] == 0] = _m
E = VT[1:nvec + 1] / VT[0][np.newaxis, :]
# small check
E = np.asarray_chkfinite(E.T)
return E
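# Hedged usage sketch (editor's addition): building the (n, n+m) block AB from
# an RBF kernel between n sub-sampled landmarks and all points. The kernel
# choice, sample sizes and bandwidth are illustrative assumptions, not
# prescriptions from the paper; a Gaussian kernel on distinct points makes
# A = AB[:, :n] symmetric positive-definite as required.
def _example_nystrom_embedding():
    n, m, sigma2 = 50, 450, 0.5
    pts = np.random.RandomState(0).randn(n + m, 2)
    landmarks = pts[:n]
    # squared euclidean distances between landmarks and all points, (n, n+m)
    d2 = ((landmarks[:, np.newaxis, :] - pts[np.newaxis, :, :]) ** 2).sum(axis=2)
    AB = np.exp(-d2 / (2.0 * sigma2))
    E = spectral_embedding_nystrom(AB, nvec=2)
    assert E.shape == (n + m, 2)
    return E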
def spectral_clustering_division(E, geoms, split_type="threshold", normalize_geoms=True):
"""Divisive hierarchical clustering + model selection
Recursively split in two by thresholding the eigenvectors in increasing
eigenvalue order (starting from the second smallest), until we get too small
tubes, then perform model selection to determine the optimal splits.
Parameters
----------
E: (n_pts, n_vec), array,
the spectral embedding of the points on the n_vec smallest eigen-vectors
(from the second smallest eigen-value)
geoms: (n_pts, 3) array,
array of global (x, y, t) positions of the point tracks
split_type: 'kmeans' or 'threshold' (default),
the bi-partitioning algorithm used to split nodes
Returns
-------
best_labels: (n_pts, ) array,
the found cluster memberships
int_paths: (n_pts, ) array,
use np.binary_repr(int_paths[i]) to get the string path of sample i
Note: root is the left-most '1', outliers have path 0
"""
global INTERNAL_PARAMETERS
n_pts, n_vec = E.shape
_n, _d = geoms.shape
assert _n == n_pts and _d == 3, "Invalid geoms (%s)" % (str(geoms.shape))
# limit on tube sizes
mts = int(INTERNAL_PARAMETERS['min_tube_size'])
Mts = int(INTERNAL_PARAMETERS['max_tube_size'])
# lower limit on the number of clusters
min_n_clusters = int(INTERNAL_PARAMETERS['min_k'])
# max allowed node depth
max_depth = int(INTERNAL_PARAMETERS['max_depth'])
# min eigenvector amplitude for split
min_evect_amplitude = float(INTERNAL_PARAMETERS['min_evect_amplitude'])
# number of thresholds to try when using thresholding splits
n_threshs = int(INTERNAL_PARAMETERS['n_threshs'])
# check degenerate case: just issue a warning and lower mts
if n_pts <= 2 * min_n_clusters * mts:
n_mts = int(max(1, n_pts / (2.0 * min_n_clusters)))
sys.stderr.write("WARNING: small video" +
"({} <= {}) ".format(n_pts, 2 * min_n_clusters * mts) +
": changing min_leaf_size to {}.\n".format(n_mts))
mts = n_mts
# get the normalized spatio-temporal positions
if normalize_geoms:
nrlz = np.array([640., 480., 1e2])
ngeoms = geoms.astype(np.float) / nrlz[np.newaxis, :]
ngeoms -= ngeoms.mean(axis=0)[np.newaxis, :]
else:
ngeoms = geoms - geoms.mean(axis=0)[np.newaxis, :]
# initialize the tree structure
stree = SpectralTree(
E, ngeoms, mts, Mts, min_n_clusters, max_depth, min_evect_amplitude,
split_type, n_threshs)
# recursively split the leaves in depth-first left-to-right order
stree.build()
return stree.labels, stree.int_paths
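# Hedged usage sketch (editor's addition): the divisive step on synthetic
# data. The toy embedding stands in for the output of
# spectral_embedding_nystrom, and the geometrical adjacency requires pyflann;
# sizes are chosen to exceed 2 * min_k * min_tube_size.
def _example_divisive_clustering():
    rng = np.random.RandomState(0)
    n_pts = 600
    # (x, y, t) positions in roughly video-sized ranges
    geoms = np.c_[640 * rng.rand(n_pts), 480 * rng.rand(n_pts),
                  100 * rng.rand(n_pts)]
    E = rng.randn(n_pts, 3)  # toy spectral embedding
    labels, int_paths = spectral_clustering_division(E, geoms)
    # np.binary_repr(int_paths[i]) is the root-to-leaf path of sample i
    return labels, int_paths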
# ==============================================================================
# Helper functions
# ==============================================================================
def spd_pinv(a, rcond=1e-10, square_root=False, check_stability=True):
""" Pseudo-inverse of a symetric positive-definite matrix
Parameters
----------
a: array_like, shape (N, N),
Symmetric (not checked) positive-definite matrix to be pseudo-inverted.
rcond: float, optional, default: 1e-10,
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
square_root: boolean, optional, default: False,
return the matrix square-root of the pseudo-inverse instead
Returns
-------
res: ndarray, shape (N, N)
The pseudo-inverse of `a`
or the (matrix) square-root of the pseudo-inverse.
Raises
------
IndefiniteError: if a is not positive-definite.
Notes
-----
Uses the eigen-decomposition of `a`.
Small modifications wrt numpy.linalg.pinv:
- uses the eigen-decomposition instead of the svd
- only the real part
- check for positive-definiteness and eventually numerical stability
"""
N, _N = a.shape
assert N == _N, "Matrix is not square!"
# get the eigen-decomposition
# w, v = np.linalg.eigh(a)
v, w, u = np.linalg.svd(a)
sort_index = np.argsort(w)
w = w[sort_index]
v = v[:,sort_index]
# check positive-definiteness
ev_min = w.min()
if ev_min <= 0:
msg = "Matrix is not positive-definite: min ev = {0}"
raise IndefiniteError(msg.format(ev_min))
# check stability of eigen-decomposition
if check_stability:
# XXX use a preconditioner?
if not np.allclose(a, np.dot(v, w[:, np.newaxis] * v.T)):
raise NumericalError(
"Instability in eigh (condition number={:g})".format(
(w.max() / w.min())))
# invert the "large enough" part of s
cutoff = rcond * w.max()
for i in range(N):
if w[i] > cutoff:
if square_root:
# square root of the pseudo-inverse
w[i] = np.sqrt(1. / w[i])
else:
w[i] = 1. / w[i]
else:
w[i] = 0.
# compute the pseudo-inverse (using broadcasting)
res = np.real(np.dot(v, w[:, np.newaxis] * v.T))
# check stability of pseudo-inverse
if check_stability:
if square_root:
pa = np.dot(res, res)
approx_a = np.dot(a, np.dot(pa, a))
msg = "Instability in square-root of pseudo-inverse"
else:
approx_a = np.dot(a, np.dot(res, a))
msg = "Instability in pseudo-inverse"
if not np.allclose(a, approx_a):
# be a bit lax by looking at the Mean Squared Error
mse = np.mean((a - approx_a) ** 2)
if mse > 1e-16:
raise NumericalError("{} (MSE={:g})".format(msg, mse))
return res
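# Hedged usage sketch (editor's addition): pseudo-inverting a small SPD matrix
# built from random data; the ridge keeps it comfortably positive-definite.
def _example_spd_pinv():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    a = np.dot(X.T, X) + 1e-3 * np.eye(5)  # symmetric positive-definite
    pinv_a = spd_pinv(a)
    assert np.allclose(np.dot(a, pinv_a), np.eye(5), atol=1e-6)
    # with square_root=True, the result b satisfies b.dot(b) == pinv(a)
    b = spd_pinv(a, square_root=True)
    assert np.allclose(np.dot(b, b), pinv_a, atol=1e-6)
    return pinv_a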
class IndefiniteError(Exception):
"""Error raised on problematic non-positive-definiteness"""
pass
class NumericalError(Exception):
"""Error raised on problems caused by numerical instability"""
pass
def build_geom_neighbor_graph(geoms, n_neighbors):
""" Computes the sparse CSR geometrical adjacency matrix gadj
Parameters
----------
geoms: (n_pts, d) array,
the geometrical info
n_neighbors: int,
number of neighbors
Returns
-------
gadj: (n_pts, n_pts) sparse CSR array,
the adjacency matrix
gadj[i,j] == 1 iff i and j are geometrical neighbors
Notes
-----
gadj might not be symmetric!
"""
n_pts = geoms.shape[0]
pyflann.set_distance_type('euclidean') # squared euclidean actually
fli = pyflann.FLANN()
build_params = dict(algorithm='kdtree', num_neighbors=n_neighbors)
gneighbs, _ = fli.nn(geoms, geoms, **build_params)
data = np.ones((n_pts, n_neighbors), dtype='u1')
indptr = np.arange(0, n_pts * n_neighbors + 1, n_neighbors, dtype=int)
gadj = sparse.csr_matrix(
(data.ravel(), gneighbs.ravel(), indptr), shape=(n_pts, n_pts))
return gadj
def build_sym_geom_adjacency(geoms, max_gnn=100):
""" Return the sparsest yet maximally connected symetric geometrical adjacency matrix
"""
global INTERNAL_PARAMETERS
min_gnn = INTERNAL_PARAMETERS['min_geom_neighbors']
assert min_gnn < max_gnn, "Too high minimum number of neighbors"
n_pts = geoms.shape[0]
for n_neighbors in range(min_gnn, max_gnn + 1):
# find the lowest number of NN s.t. the graph is not too disconnected
C = build_geom_neighbor_graph(geoms, n_neighbors)
neighbs = C.indices.reshape((n_pts, n_neighbors))
C = C + C.T
C.data[:] = 1
n_comp, _ = sparse.cs_graph_components(C)
if n_comp == 1:
print "# use n_neighbors=%d" % n_neighbors
break
elif n_comp < 1:
raise ValueError('Bug: n_comp=%d' % n_comp)
if n_comp > 1:
print "# use maximum n_neighbors=%d (%d components)" % (
n_neighbors, n_comp)
return n_comp, C, neighbs
class SplitError(Exception):
pass
def allclose_rows(X):
return np.sum(np.diff(X, axis=0) ** 2) < 1e-10
def get_kmeans_split(X):
""" Returns the list of row labels obtained by k-means with k == 2
"""
n_pts, n_dims = X.shape
# special case: all rows are the same: k-means will hold forever...
if allclose_rows(X):
# all vectors are equal: cannot split
sys.stderr.write('# WARNING: all rows are close\n')
sys.stderr.flush()
return None
if n_pts > 1e3:
model = MiniBatchKMeans(
n_clusters=2, init="k-means++", max_iter=30, batch_size=1000,
compute_labels=True, max_no_improvement=None, n_init=5)
else:
model = KMeans(n_clusters=2, init="k-means++", n_init=5, max_iter=100)
model.fit(X)
labels = model.labels_
return labels
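# Hedged usage sketch (editor's addition): bi-partitioning two well-separated
# Gaussian blobs; get_kmeans_split returns None when all rows are identical.
def _example_kmeans_split():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(30, 2), rng.randn(30, 2) + 10.0])
    labels = get_kmeans_split(X)
    assert labels is not None and set(labels) == set([0, 1])
    return labels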
class PriorityQueue(object):
""" Simple priority queue class on objects
Compares objects based on their "minus_priority" property (must have this attribute)
Implemented with a heap
"""
def __init__(self):
self._heap = []
def __len__(self):
return len(self._heap)
def push(self, obj):
""" Insert obj in the queue according to obj.minus_priority
"""
# wrap the object to allow for correct pop operation
# remember that in python it's a min-heap (not max!)
wrap_obj = (obj.minus_priority, len(self), obj)
# use insertion number to ensure we never compare based on obj itself!
# additionally resolves ties by popping earliest-inserted object
heapq.heappush(self._heap, wrap_obj)
def pop(self):
""" Returns the highest priority object in the queue
Ties are resolved by popping the object inserted first (FIFO).
"""
_, _, obj = heapq.heappop(self._heap)
return obj
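# Hedged usage sketch (editor's addition): pop order of the queue. The heap is
# a min-heap on minus_priority, so the *lowest* minus_priority (highest
# priority) pops first, and insertion order breaks ties (FIFO).
def _example_priority_queue():
    class _Item(object):
        def __init__(self, minus_priority):
            self.minus_priority = minus_priority
    pq = PriorityQueue()
    pq.push(_Item(2))
    pq.push(_Item(1))
    pq.push(_Item(1))  # tied with the previous item: pops after it
    assert pq.pop().minus_priority == 1
    return pq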
class SpectralNode(object):
""" A node used to split points by thresholding a single eigen-vector
Attributes
----------
ids: (n, ) array,
the (integer) indexes of points affected by this split
vec: int,
the eigen-vector number (dimension in the embedding) used for the split
score: float,
score of the node (reflects quality in terms of consistency and density)
name: string,
path of the node in string format
(e.g. root is '1', left child of root is '10')
has_children: boolean,
whether the node has children or not
(i.e. if it's a leaf or if it wasn't split yet)
thresh: float,
the threshold used to split along the projection on the selected eigen-vector
"""
def __init__(self, ids, vec, score=None, name=""):
""" A node corresponds to a split of points indexed by `ids`.
"""
self.size = len(ids)
self.ids = ids
self.vec = vec
self.score = 0. if score is None else score
self.name = name # binary string path: 0 for left, 1 for right
self.has_children = False
self.thresh = None
@property
def minus_priority(self):
""" Defines lexical order on nodes used to decide splits
In order of decreasing importance:
1) nodes where the split is on smaller eigen-vectors (more reliable)
2) nodes with the lowest parent score (highest gain expected),
3) bigger nodes first
Note that this property is the *opposite* of a priority
"""
#return (-self.size, self.vec, self.score) # kinda "depth-first"
#return (self.vec, self.score, -self.size) # kinda "breadth-first"
return (self.score, -self.size, self.vec) # kinda "depth-first with back-tracking"
def _ps(score):
""" Convenience function for score printing
"""
#s = "({0[0]:.3f}, {0[1]:.3f})".format(score)
s = "{0:.3f}".format(score)
return s
class SpectralTree(object):
""" Binary tree used for hierarchical divisive clustering of a spectral embedding
Attributes
----------
labels: (n_pts, ) array,
the cluster memberships according to the split up to the root (included)
n_clusters: int,
the number of clusters up to now
Notes
-----
The tree is only implicit.
"""
def __init__(self, E, ngeoms, min_leaf_size, max_leaf_size, min_leaves,
max_depth, min_evect_amplitude, split_type, n_threshs):
""" Initialize with empty tree
Parameters
----------
E: (n_pts, n_vec) array,
the spectral embedding of the points on n_vec eigen-vectors,
ngeoms: (n_pts, 3) array,
the spatio-temporal information of each point
(assumed to be normalized)
min_leaf_size: int,
the minimum size of a leaf
(don't split smaller nodes than this)
max_leaf_size: int,
the maximum size of a leaf
(always split for nodes bigger than this)
min_leaves: int,
minimum number of leaves for the full tree
(always split if less)
max_depth: int,
don't split nodes deeper than this (< 63)
min_evect_amplitude: float,
only used when thresholding to split
don't split a set of points along an eigenvector
with an amplitude (max-min) smaller than this
(e.g. 1e-10)
split_type: str,
"threshold": threshold individual eigenvectors to split a node
"kmeans": use k-means to bi-partition a node
n_threshs: int,
number of evenly-spaced in (0, 1) thresholds to try for splitting
"""
self.E = E
self.n_pts, self.n_vec = E.shape
self.ngeoms = ngeoms
self.min_leaf_size = min_leaf_size
self.max_leaf_size = max_leaf_size
self.min_leaves = min_leaves
self.max_depth = max(1, min(max_depth, 62))
self.min_evect_amplitude = float(min_evect_amplitude)
self.split_type = split_type
self.n_threshs = n_threshs
# checks
assert self.n_pts == self.ngeoms.shape[0], "Invalid geoms dimension"
assert self.split_type in ("threshold", "kmeans"), "Unknown split_type"
assert self.max_leaf_size >= self.min_leaf_size, "max_leaf_size < min_leaf_size"
assert min_evect_amplitude > 0, \
"min_evect_amplitude == {} <= 0".format(min_evect_amplitude)
# split-type specific treatments
if self.split_type == "kmeans":
# l2-normalize E
nrlz = np.sqrt((self.E ** 2).sum(axis=1))
mask = nrlz > 0
self.E[mask] /= nrlz[mask][:, np.newaxis]
elif self.split_type == "threshold":
# rescale projections to be between 0 and 1
self.E -= self.E.min(axis=0)[np.newaxis, :]
nrlz = self.E.max(axis=0)
mask = nrlz != 0
self.E[:, mask] /= nrlz[mask][np.newaxis, :]
# relative per-dim thresholds (min 10% - 90% split imbalance)
self.percentiles = np.linspace(0.10, 0.90, num=self.n_threshs)
# build the geom adjacency matrix (used for scoring)
_, self._gadj, self._gneighbs = build_sym_geom_adjacency(ngeoms)
def _get_tube_connectedness(self, tube_idxs):
""" Return the connectedness measure of the tube
Parameters
----------
tube_idxs: (tube_size, ) array,
the ids of the points in the tube we're interested in
Returns
-------
connectedness: float in [0, 1],
1/#connected components
"""
# extract the rows of self._gadj which are in the tube
ids = self._gadj.indices
iptr = self._gadj.indptr
sub_indices = np.hstack(
[ids[iptr[i]:iptr[i + 1]] for i in tube_idxs]).astype(ids.dtype)
sub_indptr = np.zeros_like(iptr)
sub_indptr[tube_idxs + 1] = iptr[tube_idxs + 1] - iptr[tube_idxs]
sub_indptr = np.cumsum(sub_indptr, dtype=iptr.dtype)
_conn_labs = np.empty((self.n_pts,), dtype=iptr.dtype)
num_conn = cs_graph_components(
self.n_pts, sub_indptr, sub_indices, _conn_labs)
assert num_conn > 0, "BUG: negative or null num_conn %d" % num_conn
connectedness = 1. / num_conn
return connectedness
def _get_tube_label_density(self, tube_idxs):
""" Return the average local label agreement of the tube
Parameters
----------
tube_idxs: (tube_size, ) array,
the ids of the points in the tube we're interested in
Returns
-------
density: float in [0, 1],
average ratio of geometrical neighbors in the tube
"""
# get the indexes of the nearest neighbors of all tube points
gneighbs = self._gneighbs[tube_idxs]
# count the number of neighbors in the tube
fbl = np.zeros((self.n_pts, ), dtype=bool)
fbl[tube_idxs] = True
nnt = fbl[gneighbs].sum()
assert nnt > len(
tube_idxs), "BUG: at least the points are in the tube!"
# get the overall ratio
density = float(nnt) / (gneighbs.shape[0] * gneighbs.shape[1])
return density
# XXX use numexpr and (x-y)**2 instead?
def _get_tube_inertia(self, tube_idxs):
""" Return the within-cluster variance (like in k-means)
Parameters
----------
tube_idxs: (tube_size, ) array,
the ids of the points in the tube we're interested in
Returns
-------
inertia: float,
the sum of square differences from the mean
"""
# get the features of the in-cluster points
X = self.E[tube_idxs]
# get the centroid
centroid = np.mean(X, axis=0)
# compute the sum of the squared norms
inertia = np.sum(X * X)
inertia += len(tube_idxs) * np.sum(centroid * centroid)
# compute the inner-products with the centroid
inertia -= 2 * np.sum(np.dot(X, centroid))
return inertia
# XXX critical part: find good scoring!
def get_tube_score(self, tube_idxs):
""" Return the score of a single cluster
Parameters
----------
tube_idxs: (tube_size, ) array,
the ids of the points in the tube we're interested in
Returns
-------
score: float,
the quality score (the higher the better) of the cluster
we use as score, the inverse of the number of connected components
"""
assert len(
tube_idxs) > 0, "BUG: #tube_idxs == {0}".format(len(tube_idxs))
# get the connectedness
tc = np.sqrt(self._get_tube_connectedness(tube_idxs))
return tc
def _get_candidate_thresholds(self, node, vec):
""" Return a list of pairs (n_vec, thresh) of a threshold applicable to
the n_vec'th dimension of the spectral embedding (eigenvector n_vec)
"""
if vec >= self.n_vec:
msg = "BUG: try to split on {0} which is after max_n_vec ({1})"
raise SplitError(msg.format(vec, self.n_vec))
# the projections on the selected eigen-vector
evs = self.E[node.ids, vec]
# get the thresholds
_scale = evs.max() - evs.min()
if _scale < self.min_evect_amplitude:
# not enough amplitude to split
used_threshs = []
else:
# get quantiles as thresholds
evs.sort()
_threshs = evs[(self.percentiles * (len(evs) - 1)).astype(int)]
# discard thresholds very close to each other
# (unstable: small change yields very different split)
used_threshs = [_threshs[0]] # always use the first one
for _t in _threshs[1:]:
if (_t - used_threshs[-1]) > 1e-2 * _scale:
# keep: gap between thresholds is more than 1% of total scale
used_threshs.append(_t)
if len(used_threshs) == 0:
msg = "WARNING: too small amplitude ({0:0.1e})"
msg += " or too close thresholds to split node {1} at vec {2}\n"
sys.stderr.write(msg.format(_scale, node.name, vec))
sys.stderr.flush()
return used_threshs
def _split_threshold(self, node):
"""Find the best split of a node by thresholding the corresponding eigen-vector
"""
# define the score to improve upon
if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:
# split only if min(children scores) > node.score
force_split = False
best_score = node.score
else:
# force split: just take the best (even if children are worse)
force_split = True
best_score = None
left, right = None, None
# iterate over embedding dimensions (first ones are more reliable)
# up to max_n_vec (included), until we found an improving split
for _vec in range(self.n_vec):
# get the candidate thresholds along this dimension
threshs = self._get_candidate_thresholds(node, _vec)
# look for an improving best split along this eigenvector
for _t in threshs:
# compute the split
below_thresh = self.E[node.ids, _vec] < _t
_lids = node.ids[below_thresh]
_rids = node.ids[np.logical_not(below_thresh)]
# check if the tubes are not too small
_nl, _nr = len(_lids), len(_rids)
is_valid = _nl >= self.min_leaf_size and _nr >= self.min_leaf_size
if is_valid:
# compute the score of the new tubes only
_sl = self.get_tube_score(_lids)
_sr = self.get_tube_score(_rids)
# get the score of this split
split_score = min(_sl, _sr)
if best_score is None or split_score > best_score:
# better split
best_score = split_score
node.has_children = True
node.thresh = _t
left = SpectralNode(
_lids, _vec, score=_sl, name=node.name + "0")
right = SpectralNode(
_rids, _vec, score=_sr, name=node.name + "1")
# check stopping criterion
if node.has_children:
# we found an improving split
if _vec > 0 or not force_split:
# found an improving non-forced split: stop here
break
return left, right
def _split_kmeans(self, node):
"""Find the best split of a node by using k-means with k=2 on the full embedding
"""
# bi-partition with k-means until children have enough samples or max outliers is reached
n_outliers = 0
ids = node.ids
left, right = None, None
# define the score to improve upon
if self.n_clusters >= self.min_leaves and node.size <= self.max_leaf_size:
# require an improvement of children
best_score = node.score
# limit outliers to smallest cluster possible
max_outliers = self.min_leaf_size
else:
# just take the best split (even if children are worse)
best_score = None
# no limit on outliers: always split
max_outliers = np.inf
# iterate until valid split or reached max outliers
while n_outliers < max_outliers:
labels = get_kmeans_split(self.E[ids])
if labels is None:
# could not split
break
# compute the split
_lids = ids[labels == 0]
_rids = ids[labels == 1]
# check if the tubes are not too small
_nl, _nr = len(_lids), len(_rids)
if _nl + _nr != len(ids):
raise SplitError("BUG in kmeans")
if _nl >= self.min_leaf_size and _nr >= self.min_leaf_size:
# both children are large enough
_sl = self.get_tube_score(_lids)
_sr = self.get_tube_score(_rids)
# get the score of this split
score = min(_sl, _sr)
# check if the split improves (each child has better score than the parent)
if best_score is None or score > best_score:
# register the split (vec is used to store depth in the tree)
node.has_children = True
best_score = score
left = SpectralNode(
_lids, node.vec + 1, score=_sl, name=node.name + "0")
right = SpectralNode(
_rids, node.vec + 1, score=_sr, name=node.name + "1")
break
elif _nl < self.min_leaf_size and _nr >= self.min_leaf_size:
# left child is too small: add as outlier
self.labels[_lids] = -1
n_outliers += _nl
# carry on with this subset
ids = _rids
elif _nr < self.min_leaf_size and _nl >= self.min_leaf_size:
# right child is too small: add as outlier
self.labels[_rids] = -1
n_outliers += _nr
# carry on with this subset
ids = _lids
else:
# both too small: node is a leaf
#msg = 'Both children are too small:'
#msg+= ' too many outliers ({0} >= max_outliers={1})'.format(n_outliers, max_outliers)
#msg+= ' or too small node size ({0})'.format(node.size)
#raise SplitError(msg)
break
return left, right
def _split_forced(self, node):
"""Force the split of a node, disregarding node size constraints
The split is not random but is obtained by cutting in 2 equally-sized
children sorted according to the projection along the first eigenvector.
The use of this function is only as a last resort to force a mandatory
split if normal splitting strategies have failed.
"""
# compute the split
_vec = 0
sorted_idxs = np.argsort(self.E[node.ids, _vec]).squeeze()
n = len(sorted_idxs) // 2
_lids = node.ids[sorted_idxs[:n]]
_rids = node.ids[sorted_idxs[n:]]
# compute the score of the new tubes only
_sl = self.get_tube_score(_lids)
_sr = self.get_tube_score(_rids)
# register the split
node.has_children = True
node.thresh = np.median(self.E[node.ids, _vec]) # arbitrary
# Note: median would not ensure equal size (because of duplicate values)
left = SpectralNode(_lids, _vec, score=_sl, name=node.name + "0")
right = SpectralNode(_rids, _vec, score=_sr, name=node.name + "1")
return left, right
def split(self, node):
"""Split a tree in two
Parameters
----------
node: SpectralNode object,
the node of the subtree we want to split
(contains the eigen-vector along which we split)
Returns
-------
left: SpectralNode object,
the root of the left subtree (None for leaves)
right: SpectralNode object,
the root of the right subtree (None for leaves)
Notes
-----
Additionally updates the labels and number of clusters.
"""
# check node was not already split
if node.has_children:
raise SplitError("BUG: node was already split")
# early stopping (only if enough nodes already)
if self.n_clusters >= self.min_leaves:
# make a leaf if too small to split
if node.size <= 2 * self.min_leaf_size:
return None, None
# special case: make a leaf if too deep already
if len(node.name) > self.max_depth:
# int(node.name, 2) is too big to be represented as a long (int64)
# if len(node.name) > 62
sys.stderr.write('# WARNING: early stopping too deep branch'
' {}\n'.format(node.name))
sys.stderr.flush()
return None, None
# bi-partition the node's samples
if self.split_type == "kmeans":
left, right = self._split_kmeans(node)
else:
left, right = self._split_threshold(node)
# check if we have two leaves or none
if (left is None and right is not None) or (left is not None and right is None):
raise SplitError(
"BUG: both children should be simultaneously"
"either None or not")
# check the post-conditions
if left is None or right is None:
# node is a leaf
if node.has_children:
raise SplitError("BUG: leaf node marked with (empty) children")
# check if it must have been split instead of being a leaf
if node.size > self.max_leaf_size:
# force the split
left, right = self._split_forced(node)
msg = 'WARNING: forced to split a must-split node that was'
msg += ' too big to be a leaf ({0} > max_leaf_size={1})\n'
sys.stderr.write(msg.format(node.size, self.max_leaf_size))
if self.n_clusters < self.min_leaves:
# force the split
left, right = self._split_forced(node)
msg = 'WARNING: forced to split a must-split node that had'
msg += ' not enough clusters ({0} < min_leaves={1})\n'
sys.stderr.write(msg.format(self.n_clusters, self.min_leaves))
# finalize the split
if node.has_children:
# update the labels of right child only (left keeps the same)
self.labels[right.ids] = self.n_clusters
self.n_clusters += 1
return left, right
def build(self, verbose=True):
"""Recursively split in two, starting from a cluster containing all points
The nodes to split are decided based on a priority queue (cf. SpectralNode).
"""
# initially: one cluster
self.labels = np.zeros((self.n_pts, ), dtype=int)
self.int_paths = np.zeros((self.n_pts, ), dtype=int)
self.n_clusters = 1
# create the root and add it to a FIFO queue of nodes to process
root = SpectralNode(
np.arange(self.n_pts), 0, name="1") # '1' by convention
to_split = PriorityQueue()
to_split.push(root)
# recursively split
#nrecs = 0
while len(to_split) > 0:
# get the node with highest priority
node = to_split.pop()
left, right = self.split(node)
# push to the priority queue
if node.has_children:
# node was split: push the children
to_split.push(left)
to_split.push(right)
else:
# node is a leaf: update the cluster tree paths for the concerned points
self.int_paths[node.ids] = int(node.name, 2)
# Note: outliers (not in node.ids) have default '0' path
# to save all partial labelings, do
#nrecs += 1
#np.save('labels_%04d_split_%s.npy' % (nrecs, node.name), self.labels)
if verbose:
self._print_split_infos(node, left, right, len(to_split))
# check we don't have a too small number of leaves
assert self.n_clusters >= self.min_leaves, \
"BUG: not enough clusters {0}".format(self.n_clusters)
def _print_split_infos(self, node, left, right, left_to_split):
""" Print DEBUG infos about the split of 'node' in 'left' and 'right'
"""
DEBUG_info = "#DEBUG n_clusters={n_clusters:04d} to_split={to_split:04d}"
infos = dict(n_clusters=self.n_clusters, to_split=left_to_split)
DEBUG_info += " score={score}"
infos['score'] = _ps(node.score)
if node.has_children:
# node was split
DEBUG_info += " vec={vec:04d} sl={sl} nl={nl:06d} sr={sr} nr={nr:06d}"
infos['vec'] = left.vec
infos['sl'] = _ps(left.score)
infos['nl'] = left.size
infos['sr'] = _ps(right.score)
infos['nr'] = right.size
else:
# node is a leaf
DEBUG_info += " LEAF" + ' ' * 42
DEBUG_info += " size={size:06d} path={path}"
infos['size'] = node.size
infos['path'] = node.name
print DEBUG_info.format(**infos)
sys.stdout.flush()
|
|
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import six
from oslo_config import cfg
from oslotest import base
from networking_cisco.apps.saf.common import config
from networking_cisco.apps.saf.common import constants
from networking_cisco.apps.saf.db import dfa_db_models as dbm
from networking_cisco.apps.saf.server import cisco_dfa_rest as cdr
from networking_cisco.apps.saf.server import dfa_events_handler as deh
from networking_cisco.apps.saf.server import dfa_fail_recovery as dfr
from networking_cisco.apps.saf.server import dfa_instance_api as dia
from networking_cisco.apps.saf.server.services.firewall.native import (
fw_mgr as fw_native)
FAKE_NETWORK_NAME = 'test_dfa_network'
FAKE_NETWORK_ID = '949fdd05-a26a-4819-a829-9fc2285de6ff'
FAKE_NETWORK_ID2 = '949fdd05-a26a-4819-a829-9fc2285de6fe'
FAKE_CFG_PROF_ID = '8c30f360ffe948109c28ab56f69a82e1'
FAKE_SEG_ID = 12345
FAKE_PROJECT_NAME = 'test_dfa_project'
FAKE_ORCH_ID = 'openstack'
FAKE_PROJECT_ID = 'aee5da7e699444889c662cf7ec1c8de7'
FAKE_PROJECT_ID2 = 'aee5da7e699444889c662cf7ec1c8d8'
FAKE_PROJECT_ID3 = 'aee5da7e699444889c662cf7ec1c8d9'
FAKE_CFG_PROFILE_NAME = 'defaultNetworkL2Profile'
FAKE_INSTANCE_NAME = 'test_dfa_instance'
FAKE_SUBNET_ID = '1a3c5ee1-cb92-4fd8-bff1-8312ac295d64'
FAKE_PORT_ID = 'ea0d92cf-d0cb-4ed2-bbcf-ed7c6aaea4cb'
FAKE_DEVICE_ID = '20305657-78b7-48f4-a7cd-1edf3edbfcad'
FAKE_SECURITY_GRP_ID = '4b5b387d-cf21-4594-b926-f5a5c602295f'
FAKE_MAC_ADDR = 'fa:16:3e:70:15:c4'
FAKE_IP_ADDR = '23.24.25.4'
FAKE_GW_ADDR = '23.24.25.1'
FAKE_DHCP_IP_START = '23.24.25.2'
FAKE_DHCP_IP_END = '23.24.25.254'
FAKE_HOST_ID = 'test_dfa_host'
FAKE_FWD_MODE = 'proxy-gateway'
FAKE_DCNM_USERNAME = 'cisco'
FAKE_DCNM_PASSWD = 'password'
FAKE_DCNM_IP = '1.1.2.2'
class FakeClass(object):
"""Fake class"""
@classmethod
def imitate(cls, *others):
for other in others:
for name in other.__dict__:
try:
setattr(cls, name, mock.Mock())
except (TypeError, AttributeError):
pass
return cls
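# Hedged usage sketch (editor's addition): `imitate` copies every attribute
# name of the given classes onto FakeClass as a mock.Mock, so tests can swap
# it in for a real base class and assert on the recorded calls. The method
# name below is only an assumption about DfaDBMixin's interface.
def _example_imitate_usage():
    fake = FakeClass.imitate(dbm.DfaDBMixin)
    fake.add_network_db('net-id')
    fake.add_network_db.assert_called_with('net-id')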
class FakeProject(object):
"""Fake Project class."""
def __init__(self, proj_id, name, dci_id, desc):
self.id = proj_id
self.name = name
self.dci_id = dci_id
self.description = desc
class TestDFAServer(base.BaseTestCase):
"""Test cases for DFA Server class."""
def setUp(self):
super(TestDFAServer, self).setUp()
# Mocking some modules
self.dcnmpatcher = mock.patch(cdr.__name__ + '.DFARESTClient')
self.mdcnm = self.dcnmpatcher.start()
self.keys_patcher = mock.patch(deh.__name__ + '.EventsHandler')
self.mkeys = self.keys_patcher.start()
self.inst_api_patcher = mock.patch(dia.__name__ + '.DFAInstanceAPI')
self.m_inst_api = self.inst_api_patcher.start()
self.module_patcher = mock.patch.dict('sys.modules',
{'pika': mock.Mock()})
self.module_patcher.start()
from networking_cisco.apps.saf.server import dfa_listen_dcnm as dld
from networking_cisco.apps.saf.server import dfa_server as ds
self.dld_patcher = mock.patch(dld.__name__ + '.DCNMListener')
self.dld = self.dld_patcher.start()
ds.DfaServer.__bases__ = (FakeClass.imitate(
dfr.DfaFailureRecovery, dbm.DfaDBMixin, fw_native.FwMgr),)
ds.DfaServer.get_all_projects.return_value = []
ds.DfaServer.get_all_networks.return_value = []
ds.DfaServer._setup_rpc = mock.Mock()
# TODO(padkrish) Have UT for this function. This may mean over-riding
# the mocking of get_segmentid_range of DCNM client to return a range.
ds.DfaServer.register_segment_dcnm = mock.Mock()
# Setting DCNM parameters.
cfg.CONF.set_override('dcnm_ip', FAKE_DCNM_IP, group='dcnm')
cfg.CONF.set_override('dcnm_user', FAKE_DCNM_USERNAME, group='dcnm')
cfg.CONF.set_override('dcnm_password', FAKE_DCNM_PASSWD, group='dcnm')
cfg.CONF.set_override('timeout_resp', 0.01, group='dcnm')
cfg.CONF.set_override('segmentation_id_min', 10000, group='dcnm')
cfg.CONF.set_override('segmentation_id_max', 20000, group='dcnm')
cfg.CONF.set_override('orchestrator_id', FAKE_ORCH_ID, group='dcnm')
self.cfg = config.CiscoDFAConfig().cfg
self.segid = int(self.cfg.dcnm.segmentation_id_min) + 10
self.seg_Drvr = mock.patch(
'networking_cisco.apps.saf.db.dfa_db_models.'
'DfaSegmentTypeDriver').start()
self.topologyDb = mock.patch(
'networking_cisco.apps.saf.db.dfa_db_models.'
'TopologyDiscoveryDb').start()
self.dfa_server = ds.DfaServer(self.cfg)
self.rpcb = ds.RpcCallBacks(self.dfa_server)
mock.patch.object(self.dfa_server, '_get_segmentation_id',
return_value=12345).start()
mock.patch.object(self.dfa_server.seg_drvr,
'allocate_segmentation_id',
return_value=12345).start()
self.dciid = str(123)
self.proj_desc = 'Unit Test Project'
projs = [
FakeProject(FAKE_PROJECT_ID, FAKE_PROJECT_NAME,
self.dciid, self.proj_desc)]
self.dfa_server.get_all_projects.return_value = projs
self.dfa_server._load_project_info_cache()
self.part_name = self.cfg.dcnm.default_partition_name
def _get_port_info(self):
port_info = {'port': {
'status': 'ACTIVE',
'binding:host_id': FAKE_HOST_ID,
'allowed_address_pairs': [],
'extra_dhcp_opts': [],
'device_owner': 'compute:nova',
'binding:profile': {},
'fixed_ips': [{'subnet_id': FAKE_SUBNET_ID,
'ip_address': FAKE_IP_ADDR}],
'id': FAKE_PORT_ID,
'security_groups': [FAKE_SECURITY_GRP_ID],
'device_id': FAKE_DEVICE_ID,
'name': '',
'admin_state_up': True,
'network_id': FAKE_NETWORK_ID,
'tenant_id': FAKE_PROJECT_ID,
'binding:vif_details': {'port_filter': True,
'ovs_hybrid_plug': True},
'binding:vnic_type': 'normal',
'binding:vif_type': 'ovs',
'mac_address': FAKE_MAC_ADDR}}
return port_info
def _load_network_info(self):
dnet = mock.Mock()
dnet.network_id = FAKE_NETWORK_ID
dnet.segmentation_id = self.segid
dnet.config_profile = FAKE_CFG_PROFILE_NAME
dnet.fwd_mod = FAKE_FWD_MODE
dnet.tenant_id = FAKE_PROJECT_ID
dnet.name = FAKE_NETWORK_NAME
self.dfa_server.get_all_networks.return_value = [dnet]
self.dfa_server._load_network_info()
def test_update_project_info_cache(self):
"""Test case for update project info."""
pid = FAKE_PROJECT_ID
name = FAKE_PROJECT_NAME
dciid = 1000
result = constants.RESULT_SUCCESS
self.dfa_server.update_project_info_cache(pid, dci_id=dciid,
name=name, opcode='add')
self.assertTrue(self.dfa_server.add_project_db.called)
self.assertFalse(self.dfa_server.update_project_entry.called)
self.assertFalse(self.dfa_server.del_project_db.called)
self.dfa_server.add_project_db.assert_called_with(pid, name,
dciid, result)
self.dfa_server.update_project_info_cache(pid, dci_id=dciid,
name=name,
opcode='update')
self.assertTrue(self.dfa_server.update_project_entry.called)
self.assertFalse(self.dfa_server.del_project_db.called)
self.dfa_server.update_project_entry.assert_called_with(pid, dciid,
result)
def test_project_create_func(self):
"""Test case for project create event."""
dciid = str(12345)
proj_desc = 'Unit Test Project'
proj_id = FAKE_PROJECT_ID2
proj = mock.Mock()
proj.name = FAKE_PROJECT_NAME
proj.description = proj_desc
part_name = self.cfg.dcnm.default_partition_name
self.dfa_server.keystone_event._service.projects.get.return_value = (
proj)
self.dfa_server.project_create_func(proj_id)
# Try it with DCI id
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + dciid
proj_id = FAKE_PROJECT_ID3
self.dfa_server.project_create_func(proj_id)
expected_calls = [
mock.call(FAKE_ORCH_ID, FAKE_PROJECT_NAME, part_name, None,
proj.description),
mock.call(FAKE_ORCH_ID, FAKE_PROJECT_NAME, part_name, dciid,
proj.description)]
self.assertEqual(
expected_calls,
self.dfa_server.dcnm_client.create_project.call_args_list)
def test_project_update_event(self):
"""Test case for project update event."""
proj_info = {'resource_info': FAKE_PROJECT_ID}
proj = mock.Mock()
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + self.dciid
proj.description = self.proj_desc
self.dfa_server.keystone_event._service.projects.get.return_value = (
proj)
self.dfa_server.project_update_event(proj_info)
# Project update event is called with the same parameters. It is
# expected that there is no call to update_project.
self.assertFalse(
self.dfa_server.dcnm_client.update_project.called)
# Try with updating the project by name.
proj.name = FAKE_PROJECT_NAME + 'new' + ':dci_id:' + self.dciid
self.dfa_server.project_update_event(proj_info)
self.assertFalse(
self.dfa_server.dcnm_client.update_project.called)
# Try with updating the dci_id of the project.
proj.name = FAKE_PROJECT_NAME + ':dci_id:' + str(124)
self.dfa_server.project_update_event(proj_info)
self.assertTrue(
self.dfa_server.dcnm_client.update_project.called)
expected_calls = [mock.call(FAKE_PROJECT_NAME,
self.cfg.dcnm.default_partition_name,
dci_id=str(124))]
self.assertEqual(
expected_calls,
self.dfa_server.dcnm_client.update_project.call_args_list)
def test_project_delete_event(self):
"""Test case for project delete event."""
proj_name = FAKE_PROJECT_NAME
proj_info = {'resource_info': FAKE_PROJECT_ID}
part_name = self.cfg.dcnm.default_partition_name
self.dfa_server.project_delete_event(proj_info)
# Check information sent to dcnm and api that deleting the entry from
# DB is called.
self.dfa_server.dcnm_client.delete_project.assert_called_with(
proj_name, part_name)
self.dfa_server.del_project_db.assert_called_with(FAKE_PROJECT_ID)
def test_network_create_func(self):
"""Test case for network create event."""
net = {'name': FAKE_NETWORK_NAME,
'tenant_id': FAKE_PROJECT_ID,
'id': FAKE_NETWORK_ID2}
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.get_config_profile_for_network.return_value = (
FAKE_CFG_PROFILE_NAME, FAKE_FWD_MODE)
self.dfa_server.get_network.return_value = None
self.dfa_server.network_create_func(net)
dfa_net = self.dfa_server.network[FAKE_NETWORK_ID2]
expected_calls = [mock.call(FAKE_NETWORK_ID2, dfa_net, 'openstack',
constants.SUBNET_PENDING)]
self.assertEqual(expected_calls,
self.dfa_server.add_network_db.call_args_list)
def test_subnet_create_event(self):
"""Test case for subnet create event."""
network_info = {'network':
{'name': FAKE_NETWORK_NAME,
'tenant_id': FAKE_PROJECT_ID,
'id': FAKE_NETWORK_ID2}}
subnet_info = {'subnet': {
'network_id': FAKE_NETWORK_ID2,
'tenant_id': FAKE_PROJECT_ID,
'allocation_pools': [
{'start': FAKE_DHCP_IP_START, 'end': FAKE_DHCP_IP_END}],
'gateway_ip': FAKE_GW_ADDR,
'ip_version': 4,
'cidr': FAKE_IP_ADDR + '/24',
'id': FAKE_SUBNET_ID}}
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.get_config_profile_for_network.return_value = (
FAKE_CFG_PROFILE_NAME, FAKE_FWD_MODE)
self.dfa_server.get_network.return_value = None
self.dfa_server.network_create_event(network_info)
fake_network = mock.Mock()
fake_network.source = 'dcnm'
fake_network.name = FAKE_NETWORK_NAME
fake_network.result = constants.SUBNET_PENDING
self.dfa_server.get_network.return_value = fake_network
self.dfa_server.subnet_create_event(subnet_info)
self.assertFalse(self.dfa_server.dcnm_client.create_network.called)
fake_network.source = 'openstack'
fake_network.result = constants.SUBNET_PENDING
self.dfa_server.subnet_create_event(subnet_info)
self.assertTrue(self.dfa_server.dcnm_client.create_network.called)
create_call = self.dfa_server.dcnm_client.create_network.call_args
arg1, arg2 = create_call
self.assertTrue(arg1[0] == FAKE_PROJECT_NAME)
self.assertTrue(
arg1[1].__dict__ == self.dfa_server.network[FAKE_NETWORK_ID2])
self.assertTrue(
arg1[2].__dict__ == self.dfa_server.subnet[FAKE_SUBNET_ID])
def test_network_delete_event(self):
"""Test case for network delete event."""
self._load_network_info()
network_info = {'network_id': FAKE_NETWORK_ID}
self.dfa_server.get_vms.return_value = []
self.dfa_server.network_delete_event(network_info)
self.assertTrue(self.dfa_server.dcnm_client.delete_network.called)
dcall = self.dfa_server.dcnm_client.delete_network.call_args
arg1, arg2 = dcall
self.assertTrue(arg1[0] == FAKE_PROJECT_NAME)
self.assertTrue(arg1[1].name == FAKE_NETWORK_NAME)
self.assertTrue(arg1[1].segmentation_id == self.segid)
self.dfa_server.seg_drvr.release_segmentation_id.assert_called_with(
self.segid)
self.assertTrue(self.dfa_server.delete_network_db.called)
def test_dcnm_network_create_event(self):
"""Test case for DCNM network create event."""
network_info = {'segmentation_id': FAKE_SEG_ID,
'project_name': FAKE_PROJECT_NAME,
'partition_name': self.part_name}
self.dfa_server.get_network_by_segid.return_value = None
self.dfa_server.get_project_id.return_value = FAKE_PROJECT_ID
dcnm_network = {'segmentId': FAKE_SEG_ID,
'profileName': FAKE_CFG_PROFILE_NAME,
'networkName': FAKE_NETWORK_NAME,
'organizationName': FAKE_PROJECT_NAME,
'dhcpScope': None,
'netmaskLength': 24,
'gateway': FAKE_GW_ADDR}
self.dfa_server.dcnm_client.get_network.return_value = dcnm_network
dcnmclnt = self.dfa_server.dcnm_client
dcnmclnt.config_profile_fwding_mode_get.return_value = FAKE_FWD_MODE
self.dfa_server.dcnm_network_create_event(network_info)
# Check the results.
self.dfa_server.dcnm_client.get_network.assert_called_with(
FAKE_PROJECT_NAME, FAKE_SEG_ID)
for netid, dcnmnet in six.iteritems(self.dfa_server.network):
self.dfa_server.add_network_db.assert_called_with(
netid, dcnmnet, 'DCNM', constants.RESULT_SUCCESS)
self.assertTrue(self.dfa_server.neutronclient.create_network.called)
net_ext_name = self.cfg.dcnm.dcnm_net_ext
call_args = self.dfa_server.neutronclient.create_network.call_args
cargs, ckwargs = call_args
net_name = ckwargs.get('body').get('network').get('name')
self.assertTrue(net_name == (
FAKE_NETWORK_NAME + net_ext_name + str(FAKE_SEG_ID)))
self.assertTrue(self.dfa_server.neutronclient.create_subnet.called)
def test_dcnm_network_delete_event(self):
"""Test case for DCNM network delete event."""
self._load_network_info()
network_info = {'segmentation_id': (
self.dfa_server.network[FAKE_NETWORK_ID]['segmentation_id'])}
dcnmnet = mock.Mock()
dcnmnet.network_id = FAKE_NETWORK_ID
self.dfa_server.get_network_by_segid.return_value = dcnmnet
self.dfa_server.dcnm_network_delete_event(network_info)
# Check the results.
self.assertTrue(self.dfa_server.network == {})
self.dfa_server.neutronclient.delete_network.assert_called_with(
FAKE_NETWORK_ID)
self.dfa_server.delete_network_db.assert_called_with(FAKE_NETWORK_ID)
def test_port_create_event(self):
"""Test case for port create event."""
port_info = self._get_port_info()
self._load_network_info()
self.dfa_server._inst_api.get_instance_for_uuid.return_value = (
FAKE_INSTANCE_NAME)
self.dfa_server.dcnm_dhcp = True
self.dfa_server.port_create_event(port_info)
# Check the output/calls
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
call_args = self.dfa_server.neutron_event.send_vm_info.call_args
cargs, ckwargs = call_args
self.assertTrue(cargs[0] == FAKE_HOST_ID)
self.assertTrue(str(self.dfa_server.port[FAKE_PORT_ID]) == cargs[1])
self.assertTrue(self.dfa_server.add_vms_db.called)
call_args = self.dfa_server.add_vms_db.call_args
cargs, ckwargs = call_args
self.assertTrue(self.dfa_server.port[FAKE_PORT_ID] == cargs[0])
self.assertTrue(constants.RESULT_SUCCESS == cargs[1])
def test_port_update_event(self):
"""Test case for port update event."""
port_info = self._get_port_info()
mvm = mock.Mock()
mvm.host = None
mvm.port_id = FAKE_PORT_ID
self.dfa_server.get_vm.return_value = mvm
self.dfa_server._inst_api.get_instance_for_uuid.return_value = (
FAKE_INSTANCE_NAME)
self.dfa_server.port_update_event(port_info)
# Check the results.
self.dfa_server.neutron_event.send_vm_info.assert_called_with(
port_info['port']['binding:host_id'],
str(self.dfa_server.port[port_info['port']['id']]))
params = dict(columns=dict(
instance_id=FAKE_DEVICE_ID.replace('-', ''),
host=port_info['port']['binding:host_id'],
result=constants.RESULT_SUCCESS,
name=FAKE_INSTANCE_NAME))
self.dfa_server.update_vm_db.assert_called_with(
port_info['port']['id'], **params)
    def test_delete_vm_function(self):
        """Test case for delete_vm_function."""
port_id = FAKE_PORT_ID
vm = mock.Mock()
vm.mac = FAKE_MAC_ADDR
vm.port_id = FAKE_PORT_ID
vm.segmentation_id = self.segid
        vm.network_id = FAKE_NETWORK_ID
vm.ip = FAKE_IP_ADDR
vm.gw_mac = FAKE_GW_ADDR
vm.instance_id = FAKE_DEVICE_ID
vm.fwd_mod = FAKE_FWD_MODE
vm.host = FAKE_HOST_ID
vm.name = FAKE_INSTANCE_NAME
self.dfa_server.get_vm.return_value = vm
        self.dfa_server.delete_vm_function(port_id, vm)
# Check the results.
self.dfa_server.delete_vm_db.assert_called_with(vm.port_id)
def test_send_vm_info(self):
"""Test send_send_vm_info"""
vm = mock.Mock()
vm.mac = FAKE_MAC_ADDR
vm.port_id = FAKE_PORT_ID
vm.segmentation_id = self.segid
        vm.network_id = FAKE_NETWORK_ID
vm.ip = FAKE_IP_ADDR
vm.gw_mac = FAKE_GW_ADDR
vm.instance_id = FAKE_DEVICE_ID
vm.fwd_mod = FAKE_FWD_MODE
vm.host = FAKE_HOST_ID
vm.name = FAKE_INSTANCE_NAME
vm_info = dict(status='down', vm_mac=vm.mac,
segmentation_id=vm.segmentation_id,
host=vm.host, port_uuid=vm.port_id,
net_uuid=vm.network_id,
oui=dict(ip_addr=vm.ip, vm_name=vm.name,
vm_uuid=vm.instance_id, gw_mac=vm.gw_mac,
fwd_mod=vm.fwd_mod, oui_id='cisco'))
return_value = self.dfa_server.send_vm_info(vm_info)
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
cargs, ckwargs = self.dfa_server.neutron_event.send_vm_info.call_args
self.assertEqual(FAKE_HOST_ID, cargs[0])
self.assertEqual(str(vm_info), cargs[1])
self.assertEqual(True, return_value)
def test_add_dhcp_port(self):
"""Test add dhcp port"""
self.dfa_server.get_vm.return_value = None
port_info = self._get_port_info().get("port")
self._load_network_info()
self.dfa_server._inst_api.get_instance_for_uuid.return_value = (
FAKE_INSTANCE_NAME)
self.dfa_server.dcnm_dhcp = False
self.dfa_server.add_dhcp_port(port_info)
# Check the output/calls
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
call_args = self.dfa_server.neutron_event.send_vm_info.call_args
cargs, ckwargs = call_args
self.assertEqual(FAKE_HOST_ID, cargs[0])
self.assertEqual(str(self.dfa_server.port[FAKE_PORT_ID]), cargs[1])
self.assertEqual(self.dfa_server.port[FAKE_PORT_ID]["oui"]["vm_name"],
"dhcp10010_4")
self.assertTrue(self.dfa_server.add_vms_db.called)
call_args = self.dfa_server.add_vms_db.call_args
cargs, ckwargs = call_args
self.assertEqual(self.dfa_server.port[FAKE_PORT_ID], cargs[0])
self.assertEqual(constants.RESULT_SUCCESS, cargs[1])
def test_correct_dhcp_ports(self):
"""Test case for port delete event."""
port_info = self._get_port_info().get("port")
port_info["device_owner"] = "network:dhcp"
ports_list_data = {"ports": [port_info]}
self.dfa_server.neutronclient.list_ports.return_value = ports_list_data
self.dfa_server.neutron_event._clients.get.return_value = True
self.dfa_server.add_dhcp_port = mock.Mock()
self.dfa_server.correct_dhcp_ports(FAKE_NETWORK_ID)
self.assertTrue(self.dfa_server.add_dhcp_port.called)
call_args = self.dfa_server.add_dhcp_port.call_args
cargs, ckwargs = call_args
self.assertEqual(FAKE_PORT_ID, cargs[0].get("id"))
def test_is_mand_arg_present_true(self):
"""Test the is_mand_arg_present function for True case. """
intf_dict = {'remote_port_id_mac': '00:11:22:33:44:55',
'remote_system_name': 'N6K-1'}
ret = self.rpcb.is_mand_arg_present(intf_dict)
self.assertTrue(ret)
def test_is_mand_arg_present_false(self):
"""Test the is_mand_arg_present function for False case. """
        # TODO: recheck whether this should be an OR condition instead
        # of an AND, i.e. whether both TLVs must be received.
intf_dict = {}
ret = self.rpcb.is_mand_arg_present(intf_dict)
self.assertFalse(ret)
def test_save_topo_disc_params_exist_mand(self):
"""Test the save_topo_disc_params function for exist, mandatory case.
This is for the case when config is already present in the DB and
        mandatory TLV's are present in the new config. This is the update
case.
"""
host = 'host1'
interface = 'eth2'
with mock.patch.object(self.rpcb, 'is_mand_arg_present',
return_value=True),\
mock.patch('oslo_serialization.jsonutils.loads') as jsut,\
mock.patch.object(self.rpcb.obj.topology_db,
'add_update_topology_db') as add_upd_mock,\
mock.patch.object(self.rpcb.obj.topology_db, 'query_topology_db',
return_value=[{}]):
jsut.return_value = {'host': host, 'protocol_interface': interface}
self.rpcb.save_topo_disc_params(None, None)
params = dict(columns={'heartbeat': None, 'host': host,
'protocol_interface': interface})
add_upd_mock.assert_called_with(**params)
def test_save_topo_disc_params_exist_nomand(self):
"""Test the save_topo_disc_* function for exist, non-mandatory case.
This is for the case when config is already present in the DB and
mandatory TLV's are not present in the new config. This is the delete
case.
"""
host = 'host1'
interface = 'eth2'
with mock.patch.object(self.rpcb, 'is_mand_arg_present',
return_value=False),\
mock.patch('oslo_serialization.jsonutils.loads') as jsut,\
mock.patch.object(self.rpcb.obj.topology_db,
'delete_topology_entry') as del_upd_mock,\
mock.patch.object(self.rpcb.obj.topology_db, 'query_topology_db',
return_value=[{}]):
jsut.return_value = {'host': host, 'protocol_interface': interface}
self.rpcb.save_topo_disc_params(None, None)
params = {'host': host, 'protocol_interface': interface}
del_upd_mock.assert_called_with(**params)
def test_save_topo_disc_params_nonexist_mand(self):
"""Test the save_topo_disc_* function for non-exist, mandatory case.
This is for the case when config is not present in the DB and
mandatory TLV's are present in the new config. This is the add
case.
"""
host = 'host1'
interface = 'eth2'
with mock.patch.object(self.rpcb, 'is_mand_arg_present',
return_value=True),\
mock.patch('networking_cisco.apps.saf.common.utils.'
'utc_time') as utc_mock,\
mock.patch('oslo_serialization.jsonutils.loads') as jsut,\
mock.patch.object(self.rpcb.obj.topology_db,
'add_update_topology_db') as add_upd_mock,\
mock.patch.object(self.rpcb.obj.topology_db, 'query_topology_db',
return_value=[]):
jsut.return_value = {'host': host, 'protocol_interface': interface}
utc_mock.return_value = 'Jan 1'
self.rpcb.save_topo_disc_params(None, None)
params = dict(columns={'created': 'Jan 1', 'heartbeat': 'Jan 1',
'host': host, 'protocol_interface': interface})
add_upd_mock.assert_called_with(**params)
def test_save_topo_disc_params_nonexist_nonmand(self):
"""Test the save_topo_disc_* function for non-exist, non-mand case.
This is for the case when config is not present in the DB and
mandatory TLV's are not present in the new config. This is the no-op
case.
"""
host = 'host1'
interface = 'eth2'
with mock.patch.object(self.rpcb, 'is_mand_arg_present',
return_value=False),\
mock.patch('networking_cisco.apps.saf.common.utils.'
'utc_time') as utc_mock,\
mock.patch('oslo_serialization.jsonutils.loads') as jsut,\
mock.patch.object(self.rpcb.obj.topology_db,
'add_update_topology_db') as add_upd_mock,\
mock.patch.object(self.rpcb.obj.topology_db, 'query_topology_db',
return_value=[]),\
mock.patch.object(self.rpcb.obj.topology_db,
'delete_topology_entry') as del_upd_mock:
jsut.return_value = {'host': host, 'protocol_interface': interface}
utc_mock.return_value = 'Jan 1'
self.rpcb.save_topo_disc_params(None, None)
add_upd_mock.assert_not_called()
del_upd_mock.assert_not_called()
def test_save_topo_disc_params_none_nonexist_nonmand(self):
"""Test the save_topo_disc_* func for none, non-exist, non-mand case.
This is for the case when config is not present in the DB and
mandatory TLV's are not present in the new config. The output returned
is None. This is the no-op case.
"""
host = 'host1'
interface = 'eth2'
with mock.patch.object(self.rpcb, 'is_mand_arg_present',
return_value=False),\
mock.patch('networking_cisco.apps.saf.common.utils.'
'utc_time') as utc_mock,\
mock.patch('oslo_serialization.jsonutils.loads') as jsut,\
mock.patch.object(self.rpcb.obj.topology_db,
'add_update_topology_db') as add_upd_mock,\
mock.patch.object(self.rpcb.obj.topology_db, 'query_topology_db',
return_value=None),\
mock.patch.object(self.rpcb.obj.topology_db,
'delete_topology_entry') as del_upd_mock:
jsut.return_value = {'host': host, 'protocol_interface': interface}
utc_mock.return_value = 'Jan 1'
self.rpcb.save_topo_disc_params(None, None)
add_upd_mock.assert_not_called()
del_upd_mock.assert_not_called()
def test_add_lbaas_port(self):
port_info = self._get_port_info()
port_id = port_info.get('port').get("id")
lb_id = "1111111111111111"
self.dfa_server.neutronclient.show_port.return_value = (port_info)
self.dfa_server.add_lbaas_port(port_id, lb_id)
# Check the output/calls
self.assertTrue(self.dfa_server.neutron_event.send_vm_info.called)
self.assertTrue(self.dfa_server.add_vms_db.called)
call_args = self.dfa_server.add_vms_db.call_args
cargs, ckwargs = call_args
self.assertEqual(self.dfa_server.port[FAKE_PORT_ID], cargs[0])
self.assertEqual(constants.RESULT_SUCCESS, cargs[1])
|
|
import yacron.job
import yacron.config
import asyncio
import pytest
import aiosmtplib
from unittest.mock import Mock, patch
import tempfile
import os
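# The cases below pin down StreamReader's save_limit semantics: at most
# save_limit lines are kept, excess lines are collapsed into a
# "[.... N lines discarded ...]" marker (the first line survives when
# save_limit >= 2), and any surviving stderr output marks the job as
# failed.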
@pytest.mark.parametrize(
"save_limit, input_lines, output, expected_failure",
[
(
10,
b"line1\nline2\nline3\nline4\n",
"line1\nline2\nline3\nline4\n",
True,
),
(
1,
b"line1\nline2\nline3\nline4\n",
" [.... 3 lines discarded ...]\nline4\n",
True,
),
(
2,
b"line1\nline2\nline3\nline4\n",
"line1\n [.... 2 lines discarded ...]\nline4\n",
True,
),
(0, b"line1\nline2\nline3\nline4\n", "", True),
(0, b"", "", False),
],
)
def test_stream_reader(save_limit, input_lines, output, expected_failure):
loop = asyncio.get_event_loop()
fake_stream = asyncio.StreamReader()
reader = yacron.job.StreamReader(
"cronjob-1", "stderr", fake_stream, "", save_limit
)
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: foo
schedule: "* * * * *"
captureStderr: true
""",
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
async def producer(fake_stream):
fake_stream.feed_data(input_lines)
fake_stream.feed_eof()
job._stderr_reader = reader
job.retcode = 0
loop.run_until_complete(
asyncio.gather(producer(fake_stream), job._read_job_streams())
)
out = job.stderr
assert (out, job.failed) == (output, expected_failure)
def test_stream_reader_long_line():
loop = asyncio.get_event_loop()
fake_stream = asyncio.StreamReader()
reader = yacron.job.StreamReader(
"cronjob-1", "stderr", fake_stream, "", 500
)
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: foo
schedule: "* * * * *"
captureStderr: true
""",
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
async def producer(fake_stream):
fake_stream.feed_data(b"one line\n")
fake_stream.feed_data(b"long line:" + b"1234567890" * 10_000)
fake_stream.feed_data(b"\n")
fake_stream.feed_data(b"another line\n")
fake_stream.feed_eof()
job._stderr_reader = reader
job.retcode = 0
loop.run_until_complete(
asyncio.gather(producer(fake_stream), job._read_job_streams())
)
out = job.stderr
assert out == "one line\nanother line\n"
A_JOB = """
jobs:
- name: test
command: ls
schedule: "* * * * *"
onSuccess:
report:
mail:
from: example@foo.com
to: example@bar.com
smtpHost: smtp1
smtpPort: 1025
subject: >
Cron job '{{name}}' {% if success %}completed{%
else %}failed{% endif %}
password:
value: foobar
username: thisisme
tls: false
starttls: true
body: |
{% if stdout and stderr -%}
STDOUT:
---
{{stdout}}
---
STDERR:
{{stderr}}
{% elif stdout -%}
{{stdout}}
{% elif stderr -%}
{{stderr}}
{% else -%}
(no output was captured)
{% endif %}
"""
@pytest.mark.parametrize(
"success, stdout, stderr, subject, body",
[
(
True,
"out",
"err",
"Cron job 'test' completed",
"STDOUT:\n---\nout\n---\nSTDERR:\nerr\n",
),
(
False,
"out",
"err",
"Cron job 'test' failed",
"STDOUT:\n---\nout\n---\nSTDERR:\nerr\n",
),
(
False,
None,
None,
"Cron job 'test' failed",
"(no output was captured)\n",
),
(False, None, "err", "Cron job 'test' failed", "err\n"),
(False, "out", None, "Cron job 'test' failed", "out\n"),
],
)
def test_report_mail(success, stdout, stderr, subject, body):
config, _, _ = yacron.config.parse_config_string(A_JOB, "")
job_config = config[0]
print(job_config.onSuccess["report"])
job = Mock(
config=job_config,
stdout=stdout,
stderr=stderr,
template_vars={
"name": job_config.name,
"success": success,
"stdout": stdout,
"stderr": stderr,
},
)
mail = yacron.job.MailReporter()
loop = asyncio.get_event_loop()
connect_calls = []
start_tls_calls = []
login_calls = []
messages_sent = []
async def connect(self):
connect_calls.append(self)
async def starttls(self):
start_tls_calls.append(self)
async def login(self, username, password):
login_calls.append((username, password))
async def send_message(self, message):
messages_sent.append(message)
real_init = aiosmtplib.SMTP.__init__
smtp_init_args = None
def init(self, *args, **kwargs):
nonlocal smtp_init_args
smtp_init_args = args, kwargs
real_init(self, *args, **kwargs)
with patch("aiosmtplib.SMTP.__init__", init), patch(
"aiosmtplib.SMTP.connect", connect
), patch("aiosmtplib.SMTP.send_message", send_message), patch(
"aiosmtplib.SMTP.login", login
), patch(
"aiosmtplib.SMTP.starttls", starttls
):
loop.run_until_complete(
mail.report(success, job, job_config.onSuccess["report"])
)
assert smtp_init_args == (
(),
{"hostname": "smtp1", "port": 1025, "use_tls": False},
)
assert len(connect_calls) == 1
assert len(start_tls_calls) == 1
assert login_calls == [("thisisme", "foobar")]
assert len(messages_sent) == 1
message = messages_sent[0]
assert message["From"] == "example@foo.com"
assert message["To"] == "example@bar.com"
assert message["Subject"] == subject
assert message.get_payload() == body
@pytest.mark.parametrize(
"success, dsn_from, body, extra, expected_dsn, fingerprint, "
"level_in, level_out",
[
(
True,
"value",
"Cron job 'test' completed\n\n(job failed because reasons)"
"\n\nSTDOUT:\n---\nout\n---\nSTDERR:\nerr\n",
{
"job": "test",
"exit_code": 0,
"command": "ls",
"shell": "/bin/sh",
"success": True,
},
"http://xxx:yyy@sentry/1",
["test"],
"warning",
"warning",
),
(
False,
"file",
"Cron job 'test' failed\n\n(job failed because reasons)"
"\n\nSTDOUT:\n---\nout\n---\nSTDERR:\nerr\n",
{
"job": "test",
"exit_code": 0,
"command": "ls",
"shell": "/bin/sh",
"success": False,
},
"http://xxx:yyy@sentry/2",
["test"],
None,
"error",
),
(
False,
"envvar",
"Cron job 'test' failed\n\n(job failed because reasons)"
"\n\nSTDOUT:\n---\nout\n---\nSTDERR:\nerr\n",
{
"job": "test",
"exit_code": 0,
"command": "ls",
"shell": "/bin/sh",
"success": False,
},
"http://xxx:yyy@sentry/3",
["test"],
None,
"error",
),
],
)
def test_report_sentry(
success,
dsn_from,
body,
extra,
expected_dsn,
fingerprint,
level_in,
level_out,
tmpdir,
monkeypatch,
):
config, _, _ = yacron.config.parse_config_string(A_JOB, "")
job_config = config[0]
p = tmpdir.join("sentry-secret-dsn")
p.write("http://xxx:yyy@sentry/2")
monkeypatch.setenv("TEST_SENTRY_DSN", "http://xxx:yyy@sentry/3")
if dsn_from == "value":
job_config.onSuccess["report"]["sentry"] = {
"dsn": {
"value": "http://xxx:yyy@sentry/1",
"fromFile": None,
"fromEnvVar": None,
}
}
elif dsn_from == "file":
job_config.onSuccess["report"]["sentry"] = {
"dsn": {"value": None, "fromFile": str(p), "fromEnvVar": None}
}
elif dsn_from == "envvar":
job_config.onSuccess["report"]["sentry"] = {
"dsn": {
"value": None,
"fromFile": None,
"fromEnvVar": "TEST_SENTRY_DSN",
}
}
else:
raise AssertionError
job_config.onSuccess["report"]["sentry"][
"body"
] = yacron.config.DEFAULT_CONFIG["onFailure"]["report"]["sentry"]["body"]
job_config.onSuccess["report"]["sentry"]["fingerprint"] = ["{{ name }}"]
if level_in is not None:
job_config.onSuccess["report"]["sentry"]["level"] = level_in
job = Mock(
config=job_config,
stdout="out",
stderr="err",
retcode=0,
template_vars={
"fail_reason": "reasons",
"name": job_config.name,
"success": success,
"stdout": "out",
"stderr": "err",
},
)
loop = asyncio.get_event_loop()
transports = []
class FakeSentryTransport:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.messages_sent = []
def capture_event(self, event_opt):
self.messages_sent.append(event_opt)
def kill(self):
pass
def flush(self, *args, **kwargs):
pass
def make_transport(*args, **kwargs):
transport = FakeSentryTransport(*args, **kwargs)
transports.append(transport)
return transport
monkeypatch.setattr("sentry_sdk.client.make_transport", make_transport)
sentry = yacron.job.SentryReporter()
loop.run_until_complete(
sentry.report(success, job, job_config.onSuccess["report"])
)
for transport in transports:
assert transport.args[0].get("dsn") == expected_dsn
messages_sent = [
msg for transport in transports for msg in transport.messages_sent
]
assert len(messages_sent) == 1
msg = messages_sent[0]
msg1 = {
key: msg[key] for key in {"message", "level", "fingerprint", "extra"}
}
msg1["extra"].pop("sys.argv", "")
assert msg1 == {
"message": body,
"level": level_out,
"fingerprint": fingerprint,
"extra": extra,
}
def test_report_shell():
stdout, stderr = None, None
with tempfile.TemporaryDirectory() as tmp:
out_file_path = os.path.join(tmp, "unit_test_file")
config, _, _ = yacron.config.parse_config_string(
f"""
jobs:
- name: test
command: echo "foobar" && exit 123
schedule: "* * * * *"
onFailure:
report:
shell:
command: echo "Error code $YACRON_RETCODE" >> {out_file_path}
""",
"",
)
job_config = config[0]
job = Mock(
config=job_config,
stdout=stdout,
stderr=stderr,
template_vars={
"name": job_config.name,
"success": False,
"stdout": stdout,
"stderr": stderr,
},
retcode=123,
fail_reason="",
failed=True,
)
shell_reporter = yacron.job.ShellReporter()
loop = asyncio.get_event_loop()
loop.run_until_complete(
shell_reporter.report(False, job, job_config.onFailure["report"])
)
assert os.path.isfile(out_file_path)
with open(out_file_path, "r") as file:
data = file.read()
assert data.strip() == "Error code 123"
@pytest.mark.parametrize(
"shell, command, expected_type, expected_args",
[
("", "Civ 6", "shell", (b"Civ 6",)),
("", ["echo", "hello"], "exec", (b"echo", b"hello")),
("bash", 'echo "hello"', "exec", (b"bash", b"-c", b'echo "hello"')),
],
)
def test_job_run(monkeypatch, shell, command, expected_type, expected_args):
shell_commands = []
exec_commands = []
async def create_subprocess_common(*args, **kwargs):
stdout = asyncio.StreamReader()
stderr = asyncio.StreamReader()
stdout.feed_data(b"out\n")
stdout.feed_eof()
stderr.feed_data(b"err\n")
stderr.feed_eof()
proc = Mock(stdout=stdout, stderr=stderr)
async def wait():
return
proc.wait = wait
return proc
async def create_subprocess_shell(*args, **kwargs):
shell_commands.append((args, kwargs))
return await create_subprocess_common(*args, **kwargs)
async def create_subprocess_exec(*args, **kwargs):
exec_commands.append((args, kwargs))
return await create_subprocess_common(*args, **kwargs)
monkeypatch.setattr(
"asyncio.create_subprocess_exec", create_subprocess_exec
)
monkeypatch.setattr(
"asyncio.create_subprocess_shell", create_subprocess_shell
)
if isinstance(command, list):
command_snippet = "\n".join(
[" command:"] + [" - " + arg for arg in command]
)
else:
command_snippet = " command: " + command
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
{command}
schedule: "* * * * *"
shell: {shell}
captureStderr: true
captureStdout: true
environment:
- key: FOO
value: bar
""".format(
command=command_snippet, shell=shell
),
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
async def run(job):
await job.start()
await job.wait()
loop = asyncio.get_event_loop()
loop.run_until_complete(run(job))
if shell_commands:
run_type = "shell"
assert len(shell_commands) == 1
args, kwargs = shell_commands[0]
elif exec_commands:
run_type = "exec"
assert len(exec_commands) == 1
args, kwargs = exec_commands[0]
else:
raise AssertionError
assert kwargs["env"]["FOO"] == "bar"
assert run_type == expected_type
assert args == expected_args
def test_execution_timeout():
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: |
echo "hello"
sleep 1
echo "world"
executionTimeout: 0.25
schedule: "* * * * *"
captureStderr: false
captureStdout: true
""",
"",
)
job_config = config[0]
async def test(job):
await job.start()
await job.wait()
return job.stdout
job = yacron.job.RunningJob(job_config, None)
loop = asyncio.get_event_loop()
stdout = loop.run_until_complete(test(job))
assert stdout == "hello\n"
def test_error1():
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: echo "hello"
schedule: "* * * * *"
""",
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
loop = asyncio.get_event_loop()
loop.run_until_complete(job.start())
with pytest.raises(RuntimeError):
loop.run_until_complete(job.start())
def test_error2():
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: echo "hello"
schedule: "* * * * *"
""",
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
loop = asyncio.get_event_loop()
with pytest.raises(RuntimeError):
loop.run_until_complete(job.wait())
def test_error3():
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: echo "hello"
schedule: "* * * * *"
""",
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
loop = asyncio.get_event_loop()
with pytest.raises(RuntimeError):
loop.run_until_complete(job.cancel())
@pytest.mark.parametrize("command", ['echo "hello"', "exit 1"])
def test_statsd(command):
loop = asyncio.get_event_loop()
received = []
async def run():
class UDPServerProtocol:
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
print("Statsd UDP packet received:", data)
message = data.decode()
received.extend(m for m in message.split("\n") if m)
def connection_lost(*_):
pass
listen = loop.create_datagram_endpoint(
UDPServerProtocol, local_addr=("127.0.0.1", 0)
)
transport, protocol = await listen
host, port = transport.get_extra_info("sockname")
print("Listening UDP on %s:%s" % (host, port))
config, _, _ = yacron.config.parse_config_string(
"""
jobs:
- name: test
command: {command}
schedule: "* * * * *"
statsd:
host: 127.0.0.1
port: {port}
prefix: the.prefix
""".format(
port=port, command=command
),
"",
)
job_config = config[0]
job = yacron.job.RunningJob(job_config, None)
await job.start()
await job.wait()
await asyncio.sleep(0.05)
transport.close()
await asyncio.sleep(0.05)
return job
job = loop.run_until_complete(run())
assert received
assert len(received) == 4
assert "the.prefix.start" in received[0]
assert any("the.prefix.stop" in r for r in received[1:])
success = 0 if job.failed else 1
assert any("the.prefix.success:%i" % success in r for r in received[1:])
assert any("the.prefix.duration" in r for r in received[1:])
|
|
# -*- coding: utf-8 -*-
import yaml
import json
import functools
import operator
import requests
from flask import Blueprint, request, g, jsonify
from rowboat.redis import rdb
from rowboat.util.decos import authed
from rowboat.models.guild import Guild, GuildConfigChange, GuildEmoji
from rowboat.models.user import User, Infraction
from rowboat.models.message import Message, Command
from rowboat.constants import USER_MENTION_RE
guilds = Blueprint('guilds', __name__, url_prefix='/api/guilds')
def serialize_user(u):
return {
'user_id': str(u.user_id),
'username': u.username,
'discriminator': u.discriminator,
}
def with_guild(f=None):
def deco(f):
@authed
@functools.wraps(f)
def func(*args, **kwargs):
try:
if g.user.admin:
guild = Guild.select().where(Guild.guild_id == kwargs.pop('gid')).get()
guild.role = 'admin'
else:
guild = Guild.select(
Guild,
Guild.config['web'][str(g.user.user_id)].alias('role')
).where(
(Guild.guild_id == kwargs.pop('gid')) &
(~(Guild.config['web'][str(g.user.user_id)] >> None))
).get()
return f(guild, *args, **kwargs)
except Guild.DoesNotExist:
return 'Invalid Guild', 404
return func
if f and callable(f):
return deco(f)
return deco
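# Usage sketch (illustrative): with_guild resolves the <gid> URL
# argument to a Guild row, checking the caller's web role, and passes
# it as the first positional argument. guild_example is a hypothetical
# endpoint, not part of this module:
#
#   @guilds.route('/<gid>/example')
#   @with_guild
#   def guild_example(guild):
#       return jsonify({'role': guild.role})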
def search_users(query=None):
queries = []
if query.isdigit():
queries.append((User.user_id == query))
q = USER_MENTION_RE.findall(query)
    if q and q[0].isdigit():
        return [q[0]]
else:
queries.append((User.username ** u'%{}%'.format(query.replace('%', ''))))
if '#' in query:
username, discrim = query.rsplit('#', 1)
if discrim.isdigit():
queries.append((
(User.username == username) &
(User.discriminator == int(discrim))))
users = User.select().where(reduce(operator.or_, queries))
if len(users) == 0:
return []
return map(lambda i: i.user_id, users[:25])
@guilds.route('/<gid>')
@with_guild
def guild_get(guild):
return jsonify(guild.serialize())
@guilds.route('/<gid>', methods=['DELETE'])
@with_guild
def guild_delete(guild):
if not g.user.admin:
return '', 401
guild.emit('GUILD_DELETE')
return '', 204
@guilds.route('/<gid>/config')
@with_guild
def guild_config(guild):
return jsonify({
'contents': str(guild.config_raw).decode('latin-1') if guild.config_raw else yaml.safe_dump(guild.config),
})
@guilds.route('/<gid>/config', methods=['POST'])
@with_guild
def guild_z_config_update(guild):
if guild.role not in ['admin', 'editor']:
return 'Missing Permissions', 403
# Calculate users diff
try:
data = yaml.load(request.json['config'])
    except Exception:
return 'Invalid YAML', 400
before = sorted(guild.config.get('web', {}).items(), key=lambda i: i[0])
after = sorted([(str(k), v) for k, v in data.get('web', {}).items()], key=lambda i: i[0])
if guild.role != 'admin' and before != after:
return 'Invalid Access', 403
role = data.get('web', {}).get(g.user.user_id) or data.get('web', {}).get(str(g.user.user_id))
if guild.role != role and not g.user.admin:
print g.user.admin
return 'Cannot change your own permissions', 400
try:
guild.update_config(g.user.user_id, request.json['config'])
return '', 200
except Guild.DoesNotExist:
return 'Invalid Guild', 404
except Exception as e:
return 'Invalid Data: %s' % e, 400
CAN_FILTER = ['id', 'user_id', 'actor_id', 'type', 'reason', 'actor', 'user']
CAN_SORT = ['id', 'user_id', 'actor_id', 'created_at', 'expires_at', 'type']
@guilds.route('/<gid>/infractions')
@with_guild
def guild_infractions(guild):
user = User.alias()
actor = User.alias()
page = int(request.values.get('page', 1))
if page < 1:
page = 1
limit = int(request.values.get('limit', 1000))
if limit < 1 or limit > 1000:
limit = 1000
q = Infraction.select(Infraction, user, actor).join(
user,
on=((Infraction.user_id == user.user_id).alias('user'))
).switch(Infraction).join(
actor,
on=((Infraction.actor_id == actor.user_id).alias('actor'))
)
queries = []
if 'filtered' in request.values:
filters = json.loads(request.values['filtered'])
for f in filters:
if f['id'] not in CAN_FILTER:
continue
if f['id'] == 'type':
queries.append(Infraction.type_ == Infraction.Types.get(f['value']))
elif f['id'] == 'reason':
queries.append(Infraction.reason ** ('%' + f['value'].lower().replace('%', '') + '%'))
elif f['id'] == 'user':
queries.append(Infraction.user_id.in_(search_users(f['value'])))
elif f['id'] == 'actor':
queries.append(Infraction.actor_id.in_(search_users(f['value'])))
else:
queries.append(getattr(Infraction, f['id']) == f['value'])
if queries:
q = q.where(
(Infraction.guild_id == guild.guild_id) &
reduce(operator.and_, queries)
)
else:
q = q.where((Infraction.guild_id == guild.guild_id))
sorted_fields = []
if 'sorted' in request.values:
sort = json.loads(request.values['sorted'])
for s in sort:
if s['id'] not in CAN_SORT:
continue
if s['desc']:
sorted_fields.append(
getattr(Infraction, s['id']).desc()
)
else:
sorted_fields.append(
getattr(Infraction, s['id'])
)
results = {
"pages": len(q) // limit,
"infractions": []
}
if sorted_fields:
q = q.order_by(*sorted_fields)
else:
q = q.order_by(Infraction.id.desc())
q = q.paginate(
page,
limit,
)
results["infractions"] = [i.serialize(guild=guild, user=i.user, actor=i.actor) for i in q]
return jsonify(results)
@guilds.route('/<gid>/config/history')
@with_guild
def guild_config_history(guild):
def serialize(gcc):
return {
'user': serialize_user(gcc.user_id),
'before': str(gcc.before_raw).decode('latin-1'),
'after': str(gcc.after_raw).decode('latin-1'),
'created_at': gcc.created_at.isoformat(),
}
q = GuildConfigChange.select(GuildConfigChange, User).join(
User, on=(User.user_id == GuildConfigChange.user_id),
).where(GuildConfigChange.guild_id == guild.guild_id).order_by(
GuildConfigChange.created_at.desc()
).paginate(int(request.values.get('page', 1)), 25)
return jsonify(map(serialize, q))
@guilds.route('/<gid>/stats/messages', methods=['GET'])
@with_guild()
def guild_stats_messages(guild):
def serialize(gcc):
return {
"day": gcc[0],
"count": int(gcc[1]),
}
unit = request.values.get('unit', 'days')
amount = int(request.values.get('amount', 7))
sql = '''
SELECT date, coalesce(count, 0) AS count
FROM
generate_series(
NOW() - interval %s,
NOW(),
%s
) AS date
LEFT OUTER JOIN (
SELECT date_trunc(%s, timestamp) AS dt, count(*) AS count
FROM messages
WHERE
timestamp >= (NOW() - interval %s) AND
timestamp < (NOW()) AND
guild_id=%s
GROUP BY dt
) results
ON (date_trunc(%s, date) = results.dt);
'''
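    # The %s placeholders above are filled positionally: the series
    # span, the series step, the date_trunc unit for the message
    # buckets, the WHERE-clause span, the guild id, and the date_trunc
    # unit used to join the generated series.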
tuples = list(Message.raw(
sql,
'{} {}'.format(amount, unit),
'1 {}'.format(unit),
unit,
'{} {}'.format(amount, unit),
guild.guild_id,
unit
).tuples())
return jsonify(map(serialize, tuples))
@guilds.route('/<gid>/stats/self', methods=['GET'])
@with_guild()
def guild_stats_self(guild):
def serialize_user(gcc):
for i in gcc:
user_raw = '''
SELECT username, discriminator
FROM users
WHERE
user_id=%s AND
bot=false;
'''
user = list(User.raw(
user_raw,
i[1]
).tuples())
if user:
return {
'user': {
'username': user[0][0],
'discrim': str(user[0][1]),
'id': i[1]
},
'user_count': int(i[0]),
}
return {
'user': 'N/A',
'user_count': 0,
}
def serialize_emoji(gcc):
for i in gcc:
emoji_raw = '''
SELECT emoji_id
FROM guild_emojis
WHERE
emoji_id=%s AND
guild_id=%s;
'''
emoji = list(GuildEmoji.raw(
emoji_raw,
i[0],
guild.guild_id
).tuples())
if emoji:
return str(emoji[0][0])
return '230870076126003200'
data = json.loads(rdb.get('web:guild:{}:stats'.format(guild.guild_id)) or '{}')
if not data:
# Totals
totals_messages = Message.select(Message.id).where(
(Message.guild_id==guild.guild_id)
).count()
totals_infractions = Infraction.select(Infraction.id).where(
(Infraction.guild_id==guild.guild_id)
).count()
# Peaks
## Messages
peaks_messages_raw = '''
SELECT count(id), author_id
FROM
messages
WHERE
guild_id=%s
GROUP BY author_id
ORDER BY count DESC
LIMIT 5;
'''
peaks_messages = list(Message.raw(
peaks_messages_raw,
guild.guild_id
).tuples())
## Infractions
peaks_infractions_raw = '''
SELECT count(id), user_id
FROM
infractions
WHERE
guild_id=%s
GROUP BY user_id
ORDER BY count DESC
LIMIT 5;
'''
peaks_infractions = list(Infraction.raw(
peaks_infractions_raw,
guild.guild_id
).tuples())
## Emoji
peaks_emoji_raw = '''
SELECT id, count(*)
FROM (
SELECT unnest(emojis) as id
FROM messages
WHERE guild_id=%s and
cardinality(emojis) > 0
) q
GROUP BY 1
ORDER BY 2 DESC
LIMIT 5
'''
peaks_emoji = list(Message.raw(
peaks_emoji_raw,
guild.guild_id
).tuples())
## Command
peaks_command_raw = '''
SELECT count(c.command), c.command
FROM
commands c
INNER JOIN messages m
ON (c.message_id = m.id)
WHERE
m.guild_id=%s
GROUP BY 2
ORDER BY 1 DESC
LIMIT 1;
'''
peaks_command = list(Command.raw(
peaks_command_raw,
guild.guild_id
).tuples())
        totals_messages = totals_messages or 0
        totals_infractions = totals_infractions or 0
if peaks_messages:
pm = serialize_user(peaks_messages)
else:
pm = {
'user': 'N/A',
'user_count': 0,
}
if peaks_infractions:
pi = serialize_user(peaks_infractions)
else:
pi = {
'user': 'N/A',
'user_count': 0,
}
if peaks_emoji:
anim = False
peaks_emoji_id = serialize_emoji(peaks_emoji)
url = 'https://discordapp.com/api/emojis/{}.gif'.format(peaks_emoji_id)
r = requests.get(url)
try:
r.raise_for_status()
anim = True
except requests.HTTPError:
pass
if anim:
peaks_emoji_ext = 'gif'
else:
peaks_emoji_ext = 'png'
else:
peaks_emoji_id = '230870076126003200'
peaks_emoji_ext = 'png'
if peaks_command:
            peaks_command = peaks_command[0][1]
else:
peaks_command = 'N/A'
data = {'totals': {
'messages': totals_messages,
'infractions': totals_infractions,
},
'peaks': {
'messages': pm,
'infractions': pi,
'emoji': {
'id': peaks_emoji_id,
'ext': peaks_emoji_ext,
},
'command': peaks_command,
},
}
rdb.setex('web:guild:{}:stats'.format(guild.guild_id), json.dumps(data), 600)
return jsonify(data)
|
|
#
# script.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Scripts
Functionality to build scripts, as well as SignatureHash(). Script evaluation
is in bitcoin.core.scripteval
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
bchr = chr
bord = ord
if sys.version > '3':
long = int
bchr = lambda x: bytes([x])
bord = lambda x: x
import copy
import struct
import bitcoin.core
import bitcoin.core.bignum
MAX_SCRIPT_SIZE = 10000
MAX_SCRIPT_ELEMENT_SIZE = 520
MAX_SCRIPT_OPCODES = 201
OPCODE_NAMES = {}
_opcode_instances = []
class CScriptOp(int):
"""A single script opcode"""
__slots__ = []
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bchr(len(d)) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bchr(len(d)) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n-1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1+1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super(CScriptOp, cls).__new__(cls, n))
return _opcode_instances[n]
# Populate opcode instance table
for n in range(0xff+1):
CScriptOp(n)
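# Since CScriptOp.__new__ interns instances, equal opcodes are the same
# object: CScriptOp(0x76) is CScriptOp(0x76) holds once this loop runs.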
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE=OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_NOP2 = CScriptOp(0xb1)
OP_NOP3 = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# template matching params
OP_SMALLINTEGER = CScriptOp(0xfa)
OP_PUBKEYS = CScriptOp(0xfb)
OP_PUBKEYHASH = CScriptOp(0xfd)
OP_PUBKEY = CScriptOp(0xfe)
OP_INVALIDOPCODE = CScriptOp(0xff)
VALID_OPCODES = {
OP_1NEGATE,
OP_RESERVED,
OP_1,
OP_2,
OP_3,
OP_4,
OP_5,
OP_6,
OP_7,
OP_8,
OP_9,
OP_10,
OP_11,
OP_12,
OP_13,
OP_14,
OP_15,
OP_16,
OP_NOP,
OP_VER,
OP_IF,
OP_NOTIF,
OP_VERIF,
OP_VERNOTIF,
OP_ELSE,
OP_ENDIF,
OP_VERIFY,
OP_RETURN,
OP_TOALTSTACK,
OP_FROMALTSTACK,
OP_2DROP,
OP_2DUP,
OP_3DUP,
OP_2OVER,
OP_2ROT,
OP_2SWAP,
OP_IFDUP,
OP_DEPTH,
OP_DROP,
OP_DUP,
OP_NIP,
OP_OVER,
OP_PICK,
OP_ROLL,
OP_ROT,
OP_SWAP,
OP_TUCK,
OP_CAT,
OP_SUBSTR,
OP_LEFT,
OP_RIGHT,
OP_SIZE,
OP_INVERT,
OP_AND,
OP_OR,
OP_XOR,
OP_EQUAL,
OP_EQUALVERIFY,
OP_RESERVED1,
OP_RESERVED2,
OP_1ADD,
OP_1SUB,
OP_2MUL,
OP_2DIV,
OP_NEGATE,
OP_ABS,
OP_NOT,
OP_0NOTEQUAL,
OP_ADD,
OP_SUB,
OP_MUL,
OP_DIV,
OP_MOD,
OP_LSHIFT,
OP_RSHIFT,
OP_BOOLAND,
OP_BOOLOR,
OP_NUMEQUAL,
OP_NUMEQUALVERIFY,
OP_NUMNOTEQUAL,
OP_LESSTHAN,
OP_GREATERTHAN,
OP_LESSTHANOREQUAL,
OP_GREATERTHANOREQUAL,
OP_MIN,
OP_MAX,
OP_WITHIN,
OP_RIPEMD160,
OP_SHA1,
OP_SHA256,
OP_HASH160,
OP_HASH256,
OP_CODESEPARATOR,
OP_CHECKSIG,
OP_CHECKSIGVERIFY,
OP_CHECKMULTISIG,
OP_CHECKMULTISIGVERIFY,
OP_NOP1,
OP_NOP2,
OP_NOP3,
OP_NOP4,
OP_NOP5,
OP_NOP6,
OP_NOP7,
OP_NOP8,
OP_NOP9,
OP_NOP10,
OP_SMALLINTEGER,
OP_PUBKEYS,
OP_PUBKEYHASH,
OP_PUBKEY,
}
OPCODE_NAMES.update({
OP_0 : 'OP_0',
OP_PUSHDATA1 : 'OP_PUSHDATA1',
OP_PUSHDATA2 : 'OP_PUSHDATA2',
OP_PUSHDATA4 : 'OP_PUSHDATA4',
OP_1NEGATE : 'OP_1NEGATE',
OP_RESERVED : 'OP_RESERVED',
OP_1 : 'OP_1',
OP_2 : 'OP_2',
OP_3 : 'OP_3',
OP_4 : 'OP_4',
OP_5 : 'OP_5',
OP_6 : 'OP_6',
OP_7 : 'OP_7',
OP_8 : 'OP_8',
OP_9 : 'OP_9',
OP_10 : 'OP_10',
OP_11 : 'OP_11',
OP_12 : 'OP_12',
OP_13 : 'OP_13',
OP_14 : 'OP_14',
OP_15 : 'OP_15',
OP_16 : 'OP_16',
OP_NOP : 'OP_NOP',
OP_VER : 'OP_VER',
OP_IF : 'OP_IF',
OP_NOTIF : 'OP_NOTIF',
OP_VERIF : 'OP_VERIF',
OP_VERNOTIF : 'OP_VERNOTIF',
OP_ELSE : 'OP_ELSE',
OP_ENDIF : 'OP_ENDIF',
OP_VERIFY : 'OP_VERIFY',
OP_RETURN : 'OP_RETURN',
OP_TOALTSTACK : 'OP_TOALTSTACK',
OP_FROMALTSTACK : 'OP_FROMALTSTACK',
OP_2DROP : 'OP_2DROP',
OP_2DUP : 'OP_2DUP',
OP_3DUP : 'OP_3DUP',
OP_2OVER : 'OP_2OVER',
OP_2ROT : 'OP_2ROT',
OP_2SWAP : 'OP_2SWAP',
OP_IFDUP : 'OP_IFDUP',
OP_DEPTH : 'OP_DEPTH',
OP_DROP : 'OP_DROP',
OP_DUP : 'OP_DUP',
OP_NIP : 'OP_NIP',
OP_OVER : 'OP_OVER',
OP_PICK : 'OP_PICK',
OP_ROLL : 'OP_ROLL',
OP_ROT : 'OP_ROT',
OP_SWAP : 'OP_SWAP',
OP_TUCK : 'OP_TUCK',
OP_CAT : 'OP_CAT',
OP_SUBSTR : 'OP_SUBSTR',
OP_LEFT : 'OP_LEFT',
OP_RIGHT : 'OP_RIGHT',
OP_SIZE : 'OP_SIZE',
OP_INVERT : 'OP_INVERT',
OP_AND : 'OP_AND',
OP_OR : 'OP_OR',
OP_XOR : 'OP_XOR',
OP_EQUAL : 'OP_EQUAL',
OP_EQUALVERIFY : 'OP_EQUALVERIFY',
OP_RESERVED1 : 'OP_RESERVED1',
OP_RESERVED2 : 'OP_RESERVED2',
OP_1ADD : 'OP_1ADD',
OP_1SUB : 'OP_1SUB',
OP_2MUL : 'OP_2MUL',
OP_2DIV : 'OP_2DIV',
OP_NEGATE : 'OP_NEGATE',
OP_ABS : 'OP_ABS',
OP_NOT : 'OP_NOT',
OP_0NOTEQUAL : 'OP_0NOTEQUAL',
OP_ADD : 'OP_ADD',
OP_SUB : 'OP_SUB',
OP_MUL : 'OP_MUL',
OP_DIV : 'OP_DIV',
OP_MOD : 'OP_MOD',
OP_LSHIFT : 'OP_LSHIFT',
OP_RSHIFT : 'OP_RSHIFT',
OP_BOOLAND : 'OP_BOOLAND',
OP_BOOLOR : 'OP_BOOLOR',
OP_NUMEQUAL : 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY : 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL : 'OP_NUMNOTEQUAL',
OP_LESSTHAN : 'OP_LESSTHAN',
OP_GREATERTHAN : 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL : 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL : 'OP_GREATERTHANOREQUAL',
OP_MIN : 'OP_MIN',
OP_MAX : 'OP_MAX',
OP_WITHIN : 'OP_WITHIN',
OP_RIPEMD160 : 'OP_RIPEMD160',
OP_SHA1 : 'OP_SHA1',
OP_SHA256 : 'OP_SHA256',
OP_HASH160 : 'OP_HASH160',
OP_HASH256 : 'OP_HASH256',
OP_CODESEPARATOR : 'OP_CODESEPARATOR',
OP_CHECKSIG : 'OP_CHECKSIG',
OP_CHECKSIGVERIFY : 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG : 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY : 'OP_CHECKMULTISIGVERIFY',
OP_NOP1 : 'OP_NOP1',
OP_NOP2 : 'OP_NOP2',
OP_NOP3 : 'OP_NOP3',
OP_NOP4 : 'OP_NOP4',
OP_NOP5 : 'OP_NOP5',
OP_NOP6 : 'OP_NOP6',
OP_NOP7 : 'OP_NOP7',
OP_NOP8 : 'OP_NOP8',
OP_NOP9 : 'OP_NOP9',
OP_NOP10 : 'OP_NOP10',
OP_SMALLINTEGER : 'OP_SMALLINTEGER',
OP_PUBKEYS : 'OP_PUBKEYS',
OP_PUBKEYHASH : 'OP_PUBKEYHASH',
OP_PUBKEY : 'OP_PUBKEY',
OP_INVALIDOPCODE : 'OP_INVALIDOPCODE',
})
OPCODES_BY_NAME = {
'OP_0' : OP_0,
'OP_PUSHDATA1' : OP_PUSHDATA1,
'OP_PUSHDATA2' : OP_PUSHDATA2,
'OP_PUSHDATA4' : OP_PUSHDATA4,
'OP_1NEGATE' : OP_1NEGATE,
'OP_RESERVED' : OP_RESERVED,
'OP_1' : OP_1,
'OP_2' : OP_2,
'OP_3' : OP_3,
'OP_4' : OP_4,
'OP_5' : OP_5,
'OP_6' : OP_6,
'OP_7' : OP_7,
'OP_8' : OP_8,
'OP_9' : OP_9,
'OP_10' : OP_10,
'OP_11' : OP_11,
'OP_12' : OP_12,
'OP_13' : OP_13,
'OP_14' : OP_14,
'OP_15' : OP_15,
'OP_16' : OP_16,
'OP_NOP' : OP_NOP,
'OP_VER' : OP_VER,
'OP_IF' : OP_IF,
'OP_NOTIF' : OP_NOTIF,
'OP_VERIF' : OP_VERIF,
'OP_VERNOTIF' : OP_VERNOTIF,
'OP_ELSE' : OP_ELSE,
'OP_ENDIF' : OP_ENDIF,
'OP_VERIFY' : OP_VERIFY,
'OP_RETURN' : OP_RETURN,
'OP_TOALTSTACK' : OP_TOALTSTACK,
'OP_FROMALTSTACK' : OP_FROMALTSTACK,
'OP_2DROP' : OP_2DROP,
'OP_2DUP' : OP_2DUP,
'OP_3DUP' : OP_3DUP,
'OP_2OVER' : OP_2OVER,
'OP_2ROT' : OP_2ROT,
'OP_2SWAP' : OP_2SWAP,
'OP_IFDUP' : OP_IFDUP,
'OP_DEPTH' : OP_DEPTH,
'OP_DROP' : OP_DROP,
'OP_DUP' : OP_DUP,
'OP_NIP' : OP_NIP,
'OP_OVER' : OP_OVER,
'OP_PICK' : OP_PICK,
'OP_ROLL' : OP_ROLL,
'OP_ROT' : OP_ROT,
'OP_SWAP' : OP_SWAP,
'OP_TUCK' : OP_TUCK,
'OP_CAT' : OP_CAT,
'OP_SUBSTR' : OP_SUBSTR,
'OP_LEFT' : OP_LEFT,
'OP_RIGHT' : OP_RIGHT,
'OP_SIZE' : OP_SIZE,
'OP_INVERT' : OP_INVERT,
'OP_AND' : OP_AND,
'OP_OR' : OP_OR,
'OP_XOR' : OP_XOR,
'OP_EQUAL' : OP_EQUAL,
'OP_EQUALVERIFY' : OP_EQUALVERIFY,
'OP_RESERVED1' : OP_RESERVED1,
'OP_RESERVED2' : OP_RESERVED2,
'OP_1ADD' : OP_1ADD,
'OP_1SUB' : OP_1SUB,
'OP_2MUL' : OP_2MUL,
'OP_2DIV' : OP_2DIV,
'OP_NEGATE' : OP_NEGATE,
'OP_ABS' : OP_ABS,
'OP_NOT' : OP_NOT,
'OP_0NOTEQUAL' : OP_0NOTEQUAL,
'OP_ADD' : OP_ADD,
'OP_SUB' : OP_SUB,
'OP_MUL' : OP_MUL,
'OP_DIV' : OP_DIV,
'OP_MOD' : OP_MOD,
'OP_LSHIFT' : OP_LSHIFT,
'OP_RSHIFT' : OP_RSHIFT,
'OP_BOOLAND' : OP_BOOLAND,
'OP_BOOLOR' : OP_BOOLOR,
'OP_NUMEQUAL' : OP_NUMEQUAL,
'OP_NUMEQUALVERIFY' : OP_NUMEQUALVERIFY,
'OP_NUMNOTEQUAL' : OP_NUMNOTEQUAL,
'OP_LESSTHAN' : OP_LESSTHAN,
'OP_GREATERTHAN' : OP_GREATERTHAN,
'OP_LESSTHANOREQUAL' : OP_LESSTHANOREQUAL,
'OP_GREATERTHANOREQUAL' : OP_GREATERTHANOREQUAL,
'OP_MIN' : OP_MIN,
'OP_MAX' : OP_MAX,
'OP_WITHIN' : OP_WITHIN,
'OP_RIPEMD160' : OP_RIPEMD160,
'OP_SHA1' : OP_SHA1,
'OP_SHA256' : OP_SHA256,
'OP_HASH160' : OP_HASH160,
'OP_HASH256' : OP_HASH256,
'OP_CODESEPARATOR' : OP_CODESEPARATOR,
'OP_CHECKSIG' : OP_CHECKSIG,
'OP_CHECKSIGVERIFY' : OP_CHECKSIGVERIFY,
'OP_CHECKMULTISIG' : OP_CHECKMULTISIG,
'OP_CHECKMULTISIGVERIFY' : OP_CHECKMULTISIGVERIFY,
'OP_NOP1' : OP_NOP1,
'OP_NOP2' : OP_NOP2,
'OP_NOP3' : OP_NOP3,
'OP_NOP4' : OP_NOP4,
'OP_NOP5' : OP_NOP5,
'OP_NOP6' : OP_NOP6,
'OP_NOP7' : OP_NOP7,
'OP_NOP8' : OP_NOP8,
'OP_NOP9' : OP_NOP9,
'OP_NOP10' : OP_NOP10,
'OP_SMALLINTEGER' : OP_SMALLINTEGER,
'OP_PUBKEYS' : OP_PUBKEYS,
'OP_PUBKEYHASH' : OP_PUBKEYHASH,
'OP_PUBKEY' : OP_PUBKEY,
}
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super(CScriptTruncatedPushDataError, self).__init__(msg)
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
    Note that this means that indexing does *not* work as you might
    expect - you'll get individual bytes rather than opcodes. This
    format was chosen for efficiency so that the general case would not
    require creating a lot of little CScriptOp objects.
iter(script) however does iterate by opcode.
"""
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bchr(other)
elif isinstance(other, (int, long)):
if 0 <= other <= 16:
other = bytes(bchr(CScriptOp.encode_op_n(other)))
elif other == -1:
other = bytes(bchr(OP_1NEGATE))
else:
other = CScriptOp.encode_op_pushdata(bitcoin.core.bignum.bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# Do the coercion outside of the try block so that errors in it are
# noticed.
other = self.__coerce_instance(other)
try:
# bytes.__add__ always returns bytes instances unfortunately
return CScript(super(CScript, self).__add__(other))
except TypeError:
raise TypeError('Can not add a %r instance to a CScript' % other.__class__)
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super(CScript, cls).__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super(CScript, cls).__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
        Yields tuples of (opcode, data, sop_idx) so that the different
        possible PUSHDATA encodings can be accurately distinguished, as
        well as the exact byte index (sop_idx) at which each opcode
        starts.
"""
i = 0
while i < len(self):
sop_idx = i
opcode = bord(self[i])
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = bord(self[i])
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = bord(self[i]) + (bord(self[i+1]) << 8) + (bord(self[i+2]) << 16) + (bord(self[i+3]) << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i+datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
        Returns either a CScriptOp instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
# For Python3 compatibility add b before strings so testcases don't
# need to change
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % bitcoin.core.b2x(o)
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def is_p2sh(self):
"""Test if the script is a p2sh scriptPubKey
Note that this test is consensus-critical.
"""
return (len(self) == 23 and
bord(self[0]) == OP_HASH160 and
bord(self[1]) == 0x14 and
bord(self[22]) == OP_EQUAL)
def is_push_only(self):
"""Test if the script only contains pushdata ops
Note that this test is consensus-critical.
"""
for (op, op_data, idx) in self.raw_iter():
# Note how OP_RESERVED is considered a pushdata op.
if op > OP_16:
return False
return True
def is_unspendable(self):
"""Test if the script is provably unspendable"""
return (len(self) > 0 and
bord(self[0]) == OP_RETURN)
def is_valid(self):
"""Return True if the script is valid, False otherwise
The script is valid if all PUSHDATA's are valid; invalid opcodes do not
make is_valid() return False.
"""
try:
list(self)
except CScriptInvalidError:
return False
return True
def to_p2sh_scriptPubKey(self, checksize=True):
"""Create P2SH scriptPubKey from this redeemScript
That is, create the P2SH scriptPubKey that requires this script as a
redeemScript to spend.
checksize - Check if the redeemScript is larger than the 520-byte max
pushdata limit; raise ValueError if limit exceeded.
Since a >520-byte PUSHDATA makes EvalScript() fail, it's not actually
possible to redeem P2SH outputs with redeem scripts >520 bytes.
"""
if checksize and len(self) > MAX_SCRIPT_ELEMENT_SIZE:
raise ValueError("redeemScript exceeds max allowed size; P2SH output would be unspendable")
return CScript([OP_HASH160, bitcoin.core.Hash160(self), OP_EQUAL])
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
                    n += 20  # MAX_PUBKEYS_PER_MULTISIG
lastOpcode = opcode
return n
SCRIPT_VERIFY_P2SH = object()
SCRIPT_VERIFY_STRICTENC = object()
SCRIPT_VERIFY_EVEN_S = object()
SCRIPT_VERIFY_NOCACHE = object()
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
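# A hashtype combines one of the three base modes with an optional
# SIGHASH_ANYONECANPAY flag, e.g. SIGHASH_SINGLE | SIGHASH_ANYONECANPAY
# == 0x83; RawSignatureHash() masks with 0x1f to recover the base mode.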
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
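# Illustrative behaviour (minimal sketch): occurrences of sig are only
# matched at opcode boundaries, and the matched bytes are dropped, e.g.
#   FindAndDelete(CScript([b'\x01\x02', OP_DUP]),
#                 CScript([b'\x01\x02'])) == CScript([OP_DUP])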
def RawSignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
If you're just writing wallet software you probably want SignatureHash()
instead.
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = copy.deepcopy(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for i in range(outIdx):
txtmp.vout.append(bitcoin.core.CTxOut())
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize()
s += struct.pack(b"<I", hashtype)
hash = bitcoin.core.Hash(s)
return (hash, None)
def SignatureHash(script, txTo, inIdx, hashtype):
"""Calculate a signature hash
'Cooked' version that checks if inIdx is out of bounds - this is *not*
consensus-correct behavior, but is what you probably want for general
wallet use.
"""
(h, err) = RawSignatureHash(script, txTo, inIdx, hashtype)
if err is not None:
raise ValueError(err)
return h
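# Hedged usage sketch: the digest a signature for input 0 must commit to
# under SIGHASH_ALL. `tx` is assumed to be this package's CTransaction and
# `script_pubkey` the scriptPubKey of the output being spent.
def _example_sighash(script_pubkey, tx):
    return SignatureHash(script_pubkey, tx, 0, SIGHASH_ALL)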
|
|
#!/usr/bin/env python
'''farm.py - process data stream on cluster
===========================================
Purpose
-------
This script reads data from stdin and splits it into independent
chunks to be executed on a cluster.
Usage
-----
As a basic, but not very useful example, the following command will
take the input of the file ``go.txt``, split the file by the contents
of the first column and execute a perl command on them::
cat go.txt | farm.py --split-at-column=1 perl -p -e "s/GO/gaga/"
Type::
python farm.py --help
for command line help.
Documentation
-------------
The input on stdin is split for embarrassingly parallel jobs. The
``--split-at`` options describe how standard input is to be split. A
temporary directory is created in the current directory. This
directory has to be visible on the cluster nodes and accessible under
the same name.
The output is written to stdout. Results are returned in the same
order as they are submitted. The script implements a few generic ways
to combine tabular output, for example to avoid duplicating header
lines. The script is also able to handle multiple outputs for jobs.
On error, error messages are echoed and nothing is returned. The
temporary directory is not deleted to allow manual recovery.
Examples
--------
The following command will split the file "go" at the first column and
execute the command perl -p -e "s/GO/gaga/"::
cat go | farm.py --split-at-column=1 perl -p -e "s/GO/gaga/"
The following command will split a fasta file at each entry and
compute an approximate sequence length::
cat genome.fasta | farm.py --split-at-regex="^>(\S+)" "wc -c"
The following command will split a fasta file at every 10 sequences::
cat genome.fasta | farm.py --split-at-regex="^>(\S+)" --chunk-size=10 "wc -c"
.. todo::
implement continuation of jobs
implement better error messages
use sge array jobs for job control
Command line options
--------------------
'''
import os
import sys
import re
import glob
import subprocess
import tempfile
import shutil
import stat
from multiprocessing.pool import Pool, ThreadPool
import CGAT.Experiment as E
import CGAT.IOTools as IOTools
import CGAT.Blat as Blat
from CGATPipelines.Pipeline import Cluster as Cluster
try:
import drmaa
HAS_DRMAA = True
except (ImportError, RuntimeError):
HAS_DRMAA = False
def chunk_iterator_lines(infile, args, prefix, use_header=False):
"""split by lines."""
chunk_size = args[0]
n = 0
filename = "%s/%010i.in" % (prefix, n)
outfile = IOTools.openFile(filename, "w")
header = None
for line in infile:
if line[0] == "#":
continue
if not header and n == 0 and use_header:
header = line
outfile.write(header)
continue
n += 1
if n % chunk_size == 0:
outfile.close()
yield filename
filename = "%s/%010i.in" % (prefix, n)
outfile = IOTools.openFile(filename, "w")
if header:
outfile.write(header)
outfile.write(line)
outfile.close()
yield filename
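# Hedged usage sketch: chunk stdin into files of 1000 lines each under
# `prefix` (the programmatic equivalent of `--split-at-lines=1000`), relying
# on the IOTools import at the top of this script.
def _example_split_by_lines(prefix):
    for filename in chunk_iterator_lines(sys.stdin, (1000,), prefix):
        print(filename)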
def chunk_iterator_column(infile, args, prefix, use_header=False):
"""split at column.
The table need not be sorted by this column.
If max_files is given, at most that many files are created and the
column tags are distributed among them in order of first appearance.
"""
column, max_files = args
files = IOTools.FilePool()
header = False
if max_files:
map_tag2file = {}
for line in infile:
if line[0] == "#":
continue
if not header and use_header:
files.setHeader(line)
header = True
continue
key = line[:-1].split("\t")[column]
if max_files:
if key in map_tag2file:
key = map_tag2file[key]
else:
n = "%010i" % (len(map_tag2file) % max_files)
map_tag2file[key] = n
key = n
files.write("%s/%s.in" % (prefix, key), line)
for filename, count in list(files.items()):
E.info("created file %s with %i items" % (filename, count))
yield filename
def chunk_iterator_regex_group(infile, args, prefix, use_header=False):
"""group by regular expression is true.
Entries need to be consecutive.
"""
rex = args[0]
column = args[1]
chunk_size = args[2]
last = None
header = None
n = chunk_size
outfile = None
filename = None
for line in infile:
if line[0] == "#":
continue
if not header and use_header:
header = line
continue
try:
this = rex.search(line[:-1]).groups()[0]
except (IndexError, AttributeError):
# no match (AttributeError) or no capture group (IndexError):
# keep the line in the current chunk
if outfile:
outfile.write(line)
continue
if last != this and n >= chunk_size:
if last:
outfile.close()
yield filename
last = this
filename = "%s/%s.in" % (prefix, this)
outfile = IOTools.openFile(filename, "w")
if header:
outfile.write(header)
n = 0
outfile.write(line)
n += 1
if outfile:
outfile.close()
yield filename
def chunk_iterator_regex_split(infile, args, prefix, use_header=False):
"""split where regular expression is true.
"""
rex = args[0]
chunk_size = args[2]
max_lines = args[3]
nlines = 0
n = 0
filename = "%s/%010i.in" % (prefix, n)
outfile = IOTools.openFile(filename, "w")
for line in infile:
if line[0] == "#":
continue
if rex.search(line[:-1]):
if n > 0 and (n % chunk_size == 0 or
(max_lines and nlines > max_lines)):
outfile.close()
yield filename
filename = "%s/%010i.in" % (prefix, n)
outfile = IOTools.openFile(filename, "w")
nlines = 0
n += 1
outfile.write(line)
nlines += 1
outfile.close()
yield filename
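# Hedged usage sketch: split a FASTA stream into files of 10 records each,
# the programmatic equivalent of the docstring example
# `--split-at-regex="^>(\S+)" --chunk-size=10`.
def _example_split_fasta(prefix):
    rex = re.compile(r"^>(\S+)")
    # args = (regex, unused, chunk_size, max_lines)
    for filename in chunk_iterator_regex_split(sys.stdin, (rex, 0, 10, None), prefix):
        print(filename)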
def chunk_iterator_psl_overlap(infile, args, prefix, use_header=False):
"""iterate over overlapping entries in a psl file."""
iterator = Blat.BlatIterator(infile)
processed_contigs = set()
merge_distance = args[0]
last_sbjct_id = None
sbjct_end = 0
outfile = None
filename = None
while 1:
match = next(iterator)
if match is None:
break
if match.mSbjctId != last_sbjct_id or \
match.mSbjctFrom >= (sbjct_end + merge_distance):
if last_sbjct_id:
outfile.close()
yield filename
if last_sbjct_id != match.mSbjctId and \
match.mSbjctId in processed_contigs:
raise ValueError(
"input not sorted correctly (contig,start): "
"already encountered %s\n%s" %
(match.mSbjctId, str(match)))
last_sbjct_id = match.mSbjctId
processed_contigs.add(last_sbjct_id)
sbjct_start = match.mSbjctFrom
sbjct_end = match.mSbjctTo
# open a new chunk file; include the start coordinate so that several
# chunks of the same contig get distinct filenames
filename = "%s/%s_%i.in" % (prefix, match.mSbjctId, match.mSbjctFrom)
outfile = IOTools.openFile(filename, "w")
if match.mSbjctFrom < sbjct_start:
raise ValueError(
"input not sorted correctly (contig,start): "
"%i < %i\n%s" %
(match.mSbjctFrom, sbjct_start, str(match)))
sbjct_end = max(match.mSbjctTo, sbjct_end)
outfile.write(str(match) + "\n")
if outfile:
outfile.close()
yield filename
class MapperGlobal:
def __init__(self, pattern="%06i"):
self.mMap = {}
assert "%" in pattern, "please supply a pattern"
self.mPattern = pattern
def __call__(self, fn, id):
if id not in self.mMap:
self.mMap[id] = self.mPattern % (len(self.mMap) + 1)
return self.mMap[id]
class MapperLocal:
def __init__(self, pattern="%06i"):
self.mMap = {}
assert "%" in pattern, "please supply a pattern"
self.mPattern = pattern
def __call__(self, fn, id):
key = "%s-%s" % (fn, id)
if key not in self.mMap:
self.mMap[key] = self.mPattern % (len(self.mMap) + 1)
return self.mMap[key]
class MapperEmpty:
def __init__(self):
pass
def __call__(self, fn, id):
return id
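# Hedged sketch of the three mappers used for --renumber: MapperGlobal hands
# out one new id per key across all files, MapperLocal one per (file, key)
# pair, and MapperEmpty passes ids through unchanged.
def _example_mappers():
    g = MapperGlobal(pattern="id%06i")
    assert g("a.out", "geneA") == "id000001"
    assert g("b.out", "geneA") == "id000001"  # same key, file ignored
    l = MapperLocal(pattern="id%06i")
    assert l("a.out", "geneA") == "id000001"
    assert l("b.out", "geneA") == "id000002"  # keyed by (file, id)
    assert MapperEmpty()("a.out", "geneA") == "geneA"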
class ResultBuilder:
"""the default result builder for table formatted output.
field_index :
"""
def __init__(self,
mapper=None,
field_index=None,
field_name=None,
header_regex=None):
self.mMapper = mapper
self.mFieldIndex = field_index
self.mFieldName = field_name
self.header = None
self.nfields = None
self.header_regex = header_regex
def parseHeader(self, infile, outfile, options):
"""parse header in infile."""
# skip comments until header
while 1:
l = infile.readline()
if not l:
break
if self.header_regex:
if self.header_regex.search(l):
break
elif l[0] != "#":
break
options.stdlog.write(l)
# print only the first header and check if
# all the headers are the same.
if self.header:
if self.header != l:
raise ValueError(
"inconsistent header in file %s\n"
"got=%s\nexpected=%s" % (infile, l, self.header))
else:
outfile.write(l)
self.header = l
self.nfields = l.count("\t")
if self.nfields == 0:
E.warn("only single column in header: %s" % l[:-1])
if self.mFieldIndex is None and self.mFieldName:
try:
self.mFieldIndex = self.header.split(
"\t").index(self.mFieldName)
except ValueError:
E.warn("no mapping, can not find field %s in %s" %
(self.mFieldName, self.header))
self.mFieldName = None
E.debug(
"substituting field: %s, %s" %
(self.mFieldName, self.mFieldIndex))
def __call__(self, filenames, outfile, options):
for fi, fn in filenames:
E.debug("# merging %s" % fn)
infile = IOTools.openFile(fn, "r")
if options.output_header:
self.parseHeader(infile, outfile, options)
for l in infile:
nfields = l.count("\t")
if l[0] == "#":
options.stdlog.write(l)
elif self.nfields is not None and nfields != self.nfields:
# validate number of fields in row, raise warning
# for those not matching and skip.
E.warn(
"# line %s has unexpected number of fields: %i != %i" %
(l[:-1], nfields, self.nfields))
else:
if self.mFieldIndex is not None:
data = l[:-1].split("\t")
try:
data[self.mFieldIndex] = self.mMapper(
fi, data[self.mFieldIndex])
except IndexError:
raise IndexError(
"can not find field %i in %s" %
(self.mFieldIndex, l))
l = "\t".join(data) + "\n"
outfile.write(l)
infile.close()
class ResultBuilderPSL(ResultBuilder):
"""Result builder for psl tables. Here, column 9,
the query id, is substituted."""
def __init__(self, *args, **kwargs):
ResultBuilder.__init__(self, *args, **kwargs)
self.mFieldIndex = 9
self.mFirst = True
def parseHeader(self, infile, outfile, options):
"""parse header in infile."""
# skip comments until header
while 1:
l = infile.readline()
if not l or l[0] != "#":
break
options.stdlog.write(l)
if l.startswith("psLayout version 3"):
if self.mFirst:
outfile.write(l)
for x in range(0, 4):
l = infile.readline()
outfile.write(l)
self.mFirst = False
else:
for x in range(0, 4):
l = infile.readline()
class ResultBuilderFasta(ResultBuilder):
def __init__(self, *args, **kwargs):
ResultBuilder.__init__(self, *args, **kwargs)
def __call__(self, filenames, outfile, options):
for fi, fn in filenames:
infile = IOTools.openFile(fn, "r")
for l in infile:
if l[0] == "#":
options.stdlog.write(l)
continue
elif l[0] == ">":
x = re.search(">(\S+)", l[:-1])
id = self.mMapper(fi, x.groups()[0])
l = ">%s%s" % (id, l[x.end(0):])
outfile.write(l)
infile.close()
class ResultBuilderBinary(ResultBuilder):
'''simply concatenate output files (without any parsing).'''
def __init__(self, *args, **kwargs):
ResultBuilder.__init__(self, *args, **kwargs)
def __call__(self, filenames, outfile, options):
for fi, fn in filenames:
shutil.copyfileobj(IOTools.openFile(fn, "r"), outfile)
class ResultBuilderCopies(ResultBuilder):
'''create indexed copiers.'''
def __init__(self, *args, **kwargs):
ResultBuilder.__init__(self, *args, **kwargs)
def __call__(self, filenames, outfile, options):
idx = 0
base, ext = os.path.splitext(outfile.name)
for fi, fn in filenames:
idx += 1
shutil.copyfile(fn, base + ".%i" % idx + ext)
class ResultBuilderLog(ResultBuilder):
"""processor for log files."""
def __init__(self, *args, **kwargs):
ResultBuilder.__init__(self, *args, **kwargs)
def __call__(self, filenames, outfile, options):
for fi, fn in filenames:
infile = IOTools.openFile(fn, "r")
outfile.write(
"######### logging output for %s ###################\n" % fi)
for l in infile:
outfile.write(l)
infile.close()
def runCommand(data):
filename, cmd, options, tmpdir, subdirs = data
if subdirs:
outdir = "%s.dir/" % (filename)
os.mkdir(outdir)
cmd = re.sub("%DIR%", outdir, cmd)
x = re.search("'--log=(\S+)'", cmd) or re.search("'--L\s+(\S+)'", cmd)
if x:
logfile = filename + ".log"
cmd = cmd[:x.start()] + "--log=%s" % logfile + cmd[x.end():]
else:
logfile = filename + ".out"
# working directory - needs to be the one from which
# the script is called to resolve input files.
cwd = os.getcwd()
if "<(" in cmd or "|" in cmd:
if "'" in cmd:
raise ValueError(
"advanced bash syntax `<()` combined with single quotes")
cmd = """/bin/bash -c '%s'""" % cmd
if "|" in cmd:
if r"\|" not in cmd:
E.warn(
"pipes (`|`) within command need to be escaped, "
"otherwise jobs run on submit host")
c = '%s -v "BASH_ENV=%s" -q %s -p %i %s %s' % (options.cluster_cmd,
options.bashrc,
options.cluster_queue,
options.cluster_priority,
options.cluster_options,
cmd)
iteration = 0
while 1:
iteration += 1
if iteration > 1:
E.info("%s: re-submitting command (repeat=%i): %s" %
(filename, iteration, c))
else:
E.info("%s: submitting command: %s" % (filename, c))
infile = IOTools.openFile(filename, "r")
outfile = IOTools.openFile(filename + ".out", "w")
errfile = IOTools.openFile(filename + ".err", "a")
retcode = subprocess.call(c,
shell=True,
stdin=infile,
stdout=outfile,
stderr=errfile,
cwd=cwd,
close_fds=True)
infile.close()
outfile.close()
errfile.close()
if hasFinished(retcode, filename, options.output_tag, logfile):
break
if iteration > options.resubmit:
E.warn("%s: giving up executing command: retcode=%i" %
(filename, retcode))
break
E.warn("%s: error while executing command: retcode=%i" %
(filename, retcode))
return (retcode, filename, cmd, logfile, iteration)
def hasFinished(retcode, filename, output_tag, logfile):
"""check if a run has finished."""
E.info("checking status of job %s with returncode %i" %
(filename, retcode))
if retcode != 0:
try:
if not output_tag or not re.search(output_tag,
IOTools.getLastLine(logfile)):
return False
except IOError:
E.warn("could not read output_tag from files %s" % (logfile))
return False
return True
def runDRMAA(data, environment):
'''run jobs in data using drmaa to connect to the cluster.'''
# SNS: Error detection now taken care of with Cluster.py
# expandStatement function
# working directory - needs to be the one from which
# the script is called to resolve input files.
cwd = os.getcwd()
session = drmaa.Session()
session.initialize()
jobids = []
kwargs = {}
for filename, cmd, options, tmpdir, subdirs in data:
from_stdin, to_stdout = True, True
if subdirs:
outdir = "%s.dir/" % (filename)
os.mkdir(outdir)
cmd = re.sub("%DIR%", outdir, cmd)
x = re.search("'--log=(\S+)'", cmd) or re.search("'--L\s+(\S+)'", cmd)
if x:
logfile = filename + ".log"
cmd = cmd[:x.start()] + "--log=%s" % logfile + cmd[x.end():]
else:
logfile = filename + ".out"
if "%STDIN%" in cmd:
cmd = re.sub("%STDIN%", filename, cmd)
from_stdin = False
if "%STDOUT%" in cmd:
cmd = re.sub("%STDOUT%", filename + ".out", cmd)
to_stdout = False
cmd = " ".join(re.sub("\t+", " ", cmd).split("\n"))
E.info("running statement:\n%s" % cmd)
job_script = tempfile.NamedTemporaryFile(dir=os.getcwd(), delete=False, mode="w+t")
job_script.write("#!/bin/bash\n") # -l -O expand_aliases\n" )
job_script.write(Cluster.expandStatement(cmd) + "\n")
job_script.close()
job_path = os.path.abspath(job_script.name)
os.chmod(job_path, stat.S_IRWXG | stat.S_IRWXU)
# get session for process - only one is permitted
job_name = os.path.basename(kwargs.get("outfile", "farm.py"))
options_dict = vars(options)
options_dict["workingdir"] = os.getcwd()
if options.job_memory:
job_memory = options.job_memory
elif options.cluster_memory_default:
job_memory = options.cluster_memory_default
else:
job_memory = "2G"
jt = Cluster.setupDrmaaJobTemplate(session, options_dict,
job_name, job_memory)
jt.remoteCommand = job_path
# update the environment
jt.jobEnvironment = os.environ.copy()
jt.jobEnvironment.update({'BASH_ENV': os.path.join(os.environ['HOME'],
'.bashrc')})
# SNS: Native specification setting abstracted
# to Pipeline/Cluster.setupDrmaaJobTemplate()
# use stdin for data
if from_stdin:
jt.inputPath = ":" + filename
# set paths.
# later: allow redirection of stdout and stderr to files
# could this even be across hosts?
if to_stdout:
jt.outputPath = ":" + filename + ".out"
else:
jt.outputPath = ":" + filename + ".stdout"
jt.errorPath = ":" + filename + ".err"
jobid = session.runJob(jt)
jobids.append((jobid, job_path, filename, cmd, logfile))
E.debug("%i jobs have been submitted" % len(jobids))
results = []
for jobid, job_path, filename, cmd, logfile in jobids:
try:
retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
except Exception as msg:
# ignore message 24 in PBS
# code 24: drmaa: Job finished but resource usage information
# and/or termination status could not be provided.":
if not msg.message.startswith("code 24"):
raise
retval = None
if retval and retval.exitStatus != 0:
raise OSError("Child was terminated by signal %i: \n%s\n" %
(retval.exitStatus, cmd))
# store a plain return code so the result tuples match runCommand()'s
results.append((retval.exitStatus if retval else 0, filename, cmd, logfile, 1))
os.unlink(job_path)
session.deleteJobTemplate(jt)
session.exit()
return results
def getOptionParser():
"""create parser and add options."""
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"--split-at-lines", dest="split_at_lines", type="int",
help="split jobs according to line number [%default].")
parser.add_option(
"--split-at-column", dest="split_at_column", type="int",
help="split jobs according to column. Columns start at number 1 "
"and the input should be sorted by this column [%default].")
parser.add_option(
"--group-by-regex", dest="group_by_regex", type="string",
help="group jobs according to a regular expression [%default].")
parser.add_option(
"--split-at-regex", dest="split_at_regex", type="string",
help="split jobs according to a regular expression [%default].")
parser.add_option(
"--split-at-tag", dest="split_at_tag", type="int",
help="split a file at a tag [%default].")
parser.add_option(
"--chunk-size", dest="chunksize", type="int",
help="when splitting at regex or tag, aggregate x entries [%default].")
parser.add_option(
"--debug", dest="debug", action="store_true",
help="debug mode. Do not delete temporary file [%default].")
parser.add_option(
"--dry-run", dest="dry_run", action="store_true",
help="dry run. Do not split input and simply forward stdin to stdout. "
"Useful for debugging the command [%default].")
parser.add_option(
"--input-header", dest="input_header", action="store_true",
help="The input stream contains a table header. "
"This header is replicated for each job [%default].")
parser.add_option(
"--output-header", dest="output_header", action="store_true",
help="The output jobs contain a table header. "
"The header is removed for each job except for the first [%default].")
parser.add_option(
"--output-regex-header", dest="output_regex_header", type="string",
help="Regular expression for header (in stdout stream). Any lines "
"before the first line matching this regular expression are ignored"
"[%default].")
parser.add_option(
"--output-tag", dest="output_tag", type="string",
help="The output jobs contain a tag in the last line denoting "
"job completion. If the unix return value denotes an error, the "
"presence of this tag is checked [%default].")
parser.add_option(
"--subdirs", dest="subdirs", action="store_true",
help="Run within separate subdirs for jobs. This permits "
"multiple output streams. Use a placeholder %DIR% if you supply "
"the ouput pattern as a command line option [%default].")
parser.add_option(
"-T", "--temp-dir", dest="tmpdir", type="string",
help="Temporary directory to be used. Default is the current "
"directory [%default].")
parser.add_option("--max-files", dest="max_files", type="int",
help="create at most x files [%default].")
parser.add_option(
"--max-lines", dest="max_lines", type="int",
help="in addition to splitting into chunksize, also split if "
"more than max-lines is reached [%default].")
parser.add_option(
"--renumber", dest="renumber", type="string",
help="renumber ids consecutively, supply a pattern [%default].")
parser.add_option(
"--renumber-column", dest="renumber_column", type="string",
action="append",
help="specify column to renumber. The format is regex:column, "
"for example csv:1 or csv:id [%default].")
parser.add_option(
"-r", "--reduce", dest="reduce", type="string", action="append",
help="Add reduce functions for specific files. The format is "
"file:reducer. The default reducer is 'table' for all files "
"[%default].")
parser.add_option(
"-m", "--map", dest="map", type="string", action="append",
help="Map specific columns in tables. The format is "
"file:column:pattern, for example .table:1:%06i [%default].")
parser.add_option(
"--resume", dest="resume", type="string",
help="resume aborted run from files in dir [%default]")
parser.add_option(
"--collect", dest="collect", type="string",
help="collect files in dir and process as normally "
"[%default]")
parser.add_option(
"--is-binary", dest="binary", action="store_true",
help="the output is binary - files are concatenated "
"without parsing [%default]")
parser.add_option(
"--resubmit", dest="resubmit", type="int",
help="if a job fails, automatically resubmit # times. Set to 0 "
"in order to disable resubmission [%default]")
parser.add_option(
"--fail", dest="resubmit", action="store_false",
help="if a job fails, do not resubmit [%default]")
parser.add_option(
"--bashrc", dest="bashrc", type="string",
help="bashrc file to use [%default]")
parser.add_option(
"--method", dest="method", type="choice",
choices=("multiprocessing", "threads", "drmaa"),
help="method to submit jobs [%default]")
parser.add_option(
"--job-memory", dest="job_memory", type="string",
help="per-job memory requirement."
"Unit must be specified, eg. 100M, 1G ")
parser.add_option(
"-e", "--env", dest="environment", type="string", action="append",
help="environment variables to be passed to the jobs [%default]")
parser.add_option(
"--output-filename-pattern", dest="output_pattern", type="string",
help="Pattern for secondary output filenames. Should contain a '%s' "
"[%default].")
parser.set_defaults(
split_at_lines=None,
split_at_column=None,
split_at_regex=None,
group_by_regex=None,
split_at_tag=None,
chunksize=100,
cluster_cmd='qrsh -cwd -now n',
bashrc="~/.bashrc",
input_header=False,
output_header=False,
output_regex_header=None,
debug=False,
dry_run=False,
tmpdir="./",
subdirs=False,
renumber=None,
output_tag="# job finished",
map=[],
reduce=[],
resume=None,
renumber_column=[],
resubmit=5,
collect=None,
method="drmaa",
job_memory=None,
max_files=None,
max_lines=None,
binary=False,
environment=[],
output_pattern="%s",
)
# stop parsing options at the first argument
parser.disable_interspersed_args()
return parser
def main(argv=None):
parser = getOptionParser()
(options, args) = E.Start(parser, add_cluster_options=True)
if len(args) == 0:
raise ValueError(
"command line argument missing - see usage information")
options.renumber_column = [x.split(":") for x in options.renumber_column]
cmd = args[0]
if len(args) > 1:
cmd += " '" + "' '".join(args[1:]) + "'"
if options.dry_run:
cmd = re.sub("%DIR%", "", cmd)
retcode = subprocess.call(cmd,
shell=True,
stdin=sys.stdin,
stdout=sys.stdout,
cwd=os.getcwd(),
close_fds=True)
E.Stop()
sys.exit(0)
failed_requests = []
started_requests = []
niterations = 0
if not options.collect:
tmpdir = os.path.abspath(tempfile.mkdtemp(dir=options.tmpdir))
E.info(" working in directory %s" % tmpdir)
if options.split_at_lines:
chunk_iterator = chunk_iterator_lines
args = (options.split_at_lines,)
elif options.split_at_column:
chunk_iterator = chunk_iterator_column
args = (options.split_at_column - 1, options.max_files)
elif options.split_at_regex:
chunk_iterator = chunk_iterator_regex_split
args = (re.compile(options.split_at_regex),
0,
options.chunksize,
options.max_lines)
elif options.group_by_regex:
chunk_iterator = chunk_iterator_regex_group
args = (re.compile(options.group_by_regex), 0, options.chunksize)
else:
raise ValueError("please specify a way to chunk input data")
data = [(x, cmd, options, None, options.subdirs)
for x in chunk_iterator(
options.stdin,
args,
prefix=tmpdir,
use_header=options.input_header)]
started_requests = [(x[0], x[0] + ".out") for x in data]
if len(data) == 0:
E.warn("no data received")
E.Stop()
sys.exit(0)
if options.method == "multiprocessing":
pool = Pool(options.cluster_num_jobs)
results = pool.map(runCommand, data, chunksize=1)
elif options.method == "drmaa":
results = runDRMAA(data, environment=options.environment)
elif options.method == "threads":
pool = ThreadPool(options.cluster_num_jobs)
results = pool.map(runCommand, data, chunksize=1)
niterations = 0
for retcode, filename, cmd, logfile, iterations in results:
niterations += iterations
if not hasFinished(retcode, filename, options.output_tag, logfile):
failed_requests.append((filename, cmd))
else:
tmpdir = options.collect
started_requests = [(x[:-4], x) for x in glob.glob(tmpdir + "/*.out")]
E.info("collecting %i files from %s" % (len(started_requests),
tmpdir))
if failed_requests:
for fn, cmd in failed_requests:
E.error("failed request: filename= %s, cmd= %s" % (fn, cmd))
else:
E.info("building result from %i parts" % len(started_requests))
if options.renumber:
mapper = MapperLocal(pattern=options.renumber)
else:
mapper = MapperEmpty()
# deal with stdout
name = None
index = None
for pattern, column in options.renumber_column:
if re.search(pattern, "stdout"):
try:
index = int(column) - 1
except ValueError:
name = column
break
if options.binary:
ResultBuilderBinary()(started_requests, options.stdout, options)
else:
regex = None
if options.output_regex_header:
regex = re.compile(options.output_regex_header)
ResultBuilder(mapper=mapper,
field_index=index,
field_name=name,
header_regex=regex
)(started_requests, options.stdout, options)
# deal with logfiles : combine them into a single file
rr = re.search("'--log=(\S+)'", cmd) or re.search("'--L\s+(\S+)'", cmd)
if rr:
E.info("logging output goes to %s" % rr.groups()[0])
logfile = IOTools.openFile(rr.groups()[0], "a")
ResultBuilderLog()(
[(x[0], "%s.log" % x[0]) for x in started_requests],
logfile,
options)
logfile.close()
# deal with other files
if options.subdirs:
files = glob.glob("%s/*.dir/*" % tmpdir)
# remove directory
filenames = set([os.path.basename(x) for x in files])
xx = len(".out")
for filename in filenames:
_, filetype = os.path.splitext(filename)
name = None
index = None
for pattern, column in options.renumber_column:
if re.search(pattern, filename):
try:
index = int(column) - 1
except ValueError:
name = column
break
if options.binary:
builder = ResultBuilderBinary(mapper=mapper)
elif filetype in (".fa", ".fasta"):
builder = ResultBuilderFasta(mapper=mapper)
elif filetype in (".mali", ):
builder = ResultBuilderFasta(mapper=MapperEmpty())
elif filetype in (".psl"):
builder = ResultBuilderPSL(mapper=mapper)
elif filetype in (".gtf", ".gff"):
builder = ResultBuilderGFF(
mapper=mapper, field_index=index, field_name=name)
elif filetype in (".png"):
builder = ResultBuilderCopies(mapper=mapper)
else:
builder = ResultBuilder(
mapper=mapper, field_index=index, field_name=name)
E.debug("chose the following builder for %s: %s: %s" %
(filename, filetype, str(builder)))
E.info("collecting results for %s" % filename)
input_filenames = []
for fi, fn in started_requests:
fn = fn[:-xx] + ".dir/" + filename
if os.path.exists(fn):
input_filenames.append((fi, fn))
E.info("output of %i files goes to %s" %
(len(filenames), filename))
outfile = IOTools.openFile(
options.output_pattern % filename, "w")
builder(input_filenames, outfile, options)
outfile.close()
if not options.debug and (not options.resume or not options.collect):
if len(failed_requests) == 0:
E.info("removing directory %s" % tmpdir)
shutil.rmtree(tmpdir)
else:
E.info("directory %s not removed due to %i failed jobs" %
(tmpdir, len(failed_requests)))
E.info("job control: nstarted=%i, nfinished=%i, nerrors=%i, nrepeats=%i" %
(len(started_requests),
len(started_requests) - len(failed_requests),
len(failed_requests),
niterations))
E.Stop()
if __name__ == '__main__':
sys.exit(main())
|
|
from math import log
import numpy as np
def aggregate_sum(input_thing, classes, universal_set):
if type(input_thing) == list:
return sum(input_thing)
elif type(input_thing) == dict:
output = {}
for key in input_thing:
output[key] = sum(input_thing[key])
return output
else:
raise AttributeError('Expected dictionary or list as first argument')
def aggregate_weighted_sum(input_thing, classes, universal_set):
n = len(universal_set)
weights = [(len(cl.train_members) + len(cl.validate_members)) * 1.0 / n for cl in classes]
n_classes = len(classes)
if type(input_thing) == list:
running_sum = 0
for i in range(n_classes):
running_sum += weights[i] * input_thing[i]
return running_sum
elif type(input_thing) == dict:
return_dict = {}
for key in input_thing:
running_sum = 0
for i in range(n_classes):
running_sum += weights[i] * input_thing[key][i]
return_dict[key] = running_sum
return return_dict
else:
raise AttributeError('Expected dictionary or list as first argument')
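# Hedged worked example: aggregate_sum collapses the per-class weight list of
# a single midpoint, or a dict of such lists keyed by midpoint; the classes
# and universal_set arguments are only consulted by aggregate_weighted_sum.
def _example_aggregate_sum():
    assert aggregate_sum([1, 2, 3], None, None) == 6
    assert aggregate_sum({'m1': [1, 2], 'm2': [3]}, None, None) == {'m1': 3, 'm2': 3}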
def get_calculation_method(method_name):
if method_name == 'tf':
return calculate_importance_tf
elif method_name == 'chi':
return calculate_importance_chi
elif method_name == 'ig':
return calculate_importance_ig
elif method_name == 'gr':
return calculate_importance_gr
elif method_name == 'idf':
return calculate_importance_idf
elif method_name == 'delta':
return calculate_importance_delta
elif method_name == 'rf':
return calculate_importance_rf
elif method_name == 'okapi':
return calculate_importance_okapi
else:
raise Exception('Unknown weighting method')
def get_aggregation_method(method_name):
if method_name == 'sum':
return aggregate_sum
elif method_name == 'weighted_sum':
return aggregate_weighted_sum
else:
raise Exception('Unknown aggregation method')
def calculate_importances(midpoints, classes, universal_set, method, degrees=None, avgdegree=None):
n = len(universal_set)
importance_calculator = get_calculation_method(method)
return_dict = {}
for midpoint in midpoints:
if degrees is None:
return_dict[midpoint] = importance_calculator(classes, universal_set, midpoints[midpoint], n)
else:
return_dict[midpoint] = importance_calculator(classes, universal_set, midpoints[midpoint], n, degrees=degrees, avgdegree=avgdegree)
return return_dict
def calculate_importance_tf(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using term frequency weighting.
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
return [1.0 / len(classes) for _ in classes]
def np_calculate_importance_tf(predicted, label_matrix):
return (1.0 / label_matrix.shape[1]) * np.ones(label_matrix.shape[1])
def calculate_importance_chi(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using chi-squared weighting.
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(chi_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def np_calculate_importance_chi(predicted, label_matrix, actual_pos_nums):
tp = predicted * label_matrix
predicted_pos_num = np.count_nonzero(predicted) # TODO: speed this up!
tp_nums = np.ones((1, label_matrix.shape[0])).dot(tp)
fp_nums = predicted_pos_num - tp_nums
fn_nums = actual_pos_nums - tp_nums
tn_nums = label_matrix.shape[0] - tp_nums - fp_nums - fn_nums
tmp = tp_nums * tn_nums - fp_nums * fn_nums
# TODO: alternative: tp_nums = count something greater than 0.
top = tmp * tmp
bot = predicted_pos_num * (fn_nums + tn_nums) * actual_pos_nums * (tn_nums + fp_nums)
# bot_zeros = np.where(bot == 0)[0]
# bot[bot_zeros] = 1
# if not np.all(top[bot_zeros] == 0):
# raise Exception('Error in chi implementation')
bot[bot == 0] = 1
res = top / bot
return res
def calculate_importance_ig(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using IG (information gain) weighting
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(ig_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def calculate_importance_gr(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using the GR (gain ratio)
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
actual_pos_num = label.not_test_members_num
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(gr_value(actual_pos_num, predicted_pos_num, tp, n))
return return_list
def calculate_importance_okapi(classes, universal_set, linked_nodes, n, degrees=None, avgdegree=None):
"""
Calculates importance of a single midpoint using Okapi BM25-style weighting.
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:param degrees: Mapping from node to its degree
:param avgdegree: Average node degree
:return: List of per-node weight vectors, one per label in class
"""
k1 = 1.5
b = 0.75
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
idf = log((n - predicted_pos_num + 0.5) / (predicted_pos_num + 0.5))  # BM25 idf; note: not applied to the returned weights
return_vec = np.zeros((len(linked_nodes), 1))
for i, linked_node in enumerate(linked_nodes):
return_vec[i] = (k1 + 1) / (1 + k1 * (1 - b + b * degrees[linked_node] / avgdegree))
return [return_vec for _ in classes]
def calculate_importance_idf(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using idf weighting
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
idf = log(n * 1.0 / (1 + predicted_pos_num))
return_list = [idf for _ in classes]
return return_list
def calculate_importance_delta(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using delta-idf weighting
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
predicted_neg_num = n - predicted_pos_num
return_list = []
for label in classes:
if label is None:
continue
actual_pos_num = label.not_test_members_num
actual_neg_num = n - actual_pos_num
diff = actual_pos_num * 1.0 / (predicted_pos_num + 1) - actual_neg_num * 1.0 / (predicted_neg_num + 1)
return_list.append(abs(diff))
return return_list
def calculate_importance_rf(classes, universal_set, linked_nodes, n, **kwargs):
"""
Calculates importance of a single midpoint using rf weighting
:param classes: List of all classes
:param universal_set: Set of all indices to consider
:param linked_nodes: Set of all nodes linked by the midpoint
:param n: Number of elements of universal set
:return: List of weights of the midpoint for each label in class
"""
predicted_pos = universal_set.intersection(linked_nodes)
predicted_pos_num = len(predicted_pos)
return_list = []
for label in classes:
if label is None:
continue
actual_pos = label.not_test_members
tp = len(predicted_pos.intersection(actual_pos))
return_list.append(rf_value(predicted_pos_num, tp))
return return_list
def rf_value(predicted_pos_num, tp):
fp = predicted_pos_num - tp
return log(2 + tp * 1.0 / max(1, fp), 2)
def ig_value(actual_pos_num, predicted_pos_num, tp, n):
fp = predicted_pos_num - tp
fn = actual_pos_num - tp
tn = n - tp - fp - fn
tpp = tp * 1.0 / n
tnp = tn * 1.0 / n
fpp = fp * 1.0 / n
fnp = fn * 1.0 / n
r = 0
if tp > 0:
r += tpp * log(tp * n * 1.0 / (actual_pos_num * predicted_pos_num), 2)
if fn > 0:
r += fnp * log(fn * n * 1.0 / (actual_pos_num * (n - predicted_pos_num)), 2)
if fp > 0:
r += fpp * log(fp * n * 1.0 / ((n - actual_pos_num) * predicted_pos_num), 2)
if tn > 0:
r += tnp * log(tn * n * 1.0 / ((n - actual_pos_num) * (n - predicted_pos_num)), 2)
assert r >= 0
return r
def gr_value(actual_pos_num, predicted_pos_num, tp, n):
pp = actual_pos_num * 1.0 / n
if pp == 1 or pp == 0:
return 0
return ig_value(actual_pos_num, predicted_pos_num, tp, n) / (- pp * log(pp, 2) - (1 - pp) * log((1 - pp), 2))
def chi_value(actual_pos_num, predicted_pos_num, tp, n):
fp = predicted_pos_num - tp
fn = actual_pos_num - tp
tn = n - tp - fp - fn
tmp = tp * tn - fp * fn
top = tmp * tmp
bot = predicted_pos_num * (fn + tn) * actual_pos_num * (tn + fp)
if bot > 0:
return top * 1.0 / bot
elif bot == 0:
return 0
else:
raise Exception("Error in chi implementation.")
|
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
import logging
from datetime import datetime, timedelta
from sqlalchemy import Table, Column, Integer, Float, String, Unicode, Boolean, DateTime
from sqlalchemy.schema import ForeignKey, Index
from sqlalchemy.orm import relation
from flexget import db_schema, plugin
from flexget.db_schema import UpgradeImpossible
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.log import log_once
from flexget.utils.imdb import ImdbSearch, ImdbParser, extract_id, make_url
from flexget.utils.database import with_session
SCHEMA_VER = 7
Base = db_schema.versioned_base('imdb_lookup', SCHEMA_VER)
# association tables
genres_table = Table('imdb_movie_genres', Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('genre_id', Integer, ForeignKey('imdb_genres.id')),
Index('ix_imdb_movie_genres', 'movie_id', 'genre_id'))
Base.register_table(genres_table)
actors_table = Table('imdb_movie_actors', Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('actor_id', Integer, ForeignKey('imdb_actors.id')),
Index('ix_imdb_movie_actors', 'movie_id', 'actor_id'))
Base.register_table(actors_table)
directors_table = Table('imdb_movie_directors', Base.metadata,
Column('movie_id', Integer, ForeignKey('imdb_movies.id')),
Column('director_id', Integer, ForeignKey('imdb_directors.id')),
Index('ix_imdb_movie_directors', 'movie_id', 'director_id'))
Base.register_table(directors_table)
class Movie(Base):
__tablename__ = 'imdb_movies'
id = Column(Integer, primary_key=True)
title = Column(Unicode)
original_title = Column(Unicode)
url = Column(String, index=True)
# many-to-many relations
genres = relation('Genre', secondary=genres_table, backref='movies')
actors = relation('Actor', secondary=actors_table, backref='movies')
directors = relation('Director', secondary=directors_table, backref='movies')
languages = relation('MovieLanguage', order_by='MovieLanguage.prominence')
score = Column(Float)
votes = Column(Integer)
year = Column(Integer)
plot_outline = Column(Unicode)
mpaa_rating = Column(String, default='')
photo = Column(String)
# updated time, so we can grab new rating counts after 48 hours
# set a default, so existing data gets updated with a rating
updated = Column(DateTime)
@property
def imdb_id(self):
return extract_id(self.url)
@property
def expired(self):
"""
:return: True if movie details are considered to be expired, i.e. in need of an update
"""
if self.updated is None:
log.debug('updated is None: %s' % self)
return True
refresh_interval = 2
if self.year:
# Make sure age is not negative
age = max((datetime.now().year - self.year), 0)
refresh_interval += age * 5
log.debug('movie `%s` age %i expires in %i days' % (self.title, age, refresh_interval))
return self.updated < datetime.now() - timedelta(days=refresh_interval)
def __repr__(self):
return '<Movie(name=%s,votes=%s,year=%s)>' % (self.title, self.votes, self.year)
class MovieLanguage(Base):
__tablename__ = 'imdb_movie_languages'
movie_id = Column(Integer, ForeignKey('imdb_movies.id'), primary_key=True)
language_id = Column(Integer, ForeignKey('imdb_languages.id'), primary_key=True)
prominence = Column(Integer)
language = relation('Language')
def __init__(self, language, prominence=None):
self.language = language
self.prominence = prominence
class Language(Base):
__tablename__ = 'imdb_languages'
id = Column(Integer, primary_key=True)
name = Column(Unicode)
def __init__(self, name):
self.name = name
class Genre(Base):
__tablename__ = 'imdb_genres'
id = Column(Integer, primary_key=True)
name = Column(String)
def __init__(self, name):
self.name = name
class Actor(Base):
__tablename__ = 'imdb_actors'
id = Column(Integer, primary_key=True)
imdb_id = Column(String)
name = Column(Unicode)
def __init__(self, imdb_id, name=None):
self.imdb_id = imdb_id
self.name = name
class Director(Base):
__tablename__ = 'imdb_directors'
id = Column(Integer, primary_key=True)
imdb_id = Column(String)
name = Column(Unicode)
def __init__(self, imdb_id, name=None):
self.imdb_id = imdb_id
self.name = name
class SearchResult(Base):
__tablename__ = 'imdb_search'
id = Column(Integer, primary_key=True)
title = Column(Unicode, index=True)
url = Column(String)
fails = Column(Boolean, default=False)
queried = Column(DateTime)
@property
def imdb_id(self):
return extract_id(self.url)
def __init__(self, title, url=None):
self.title = title
self.url = url
self.queried = datetime.now()
def __repr__(self):
return '<SearchResult(title=%s,url=%s,fails=%s)>' % (self.title, self.url, self.fails)
log = logging.getLogger('imdb_lookup')
@db_schema.upgrade('imdb_lookup')
def upgrade(ver, session):
# v5 We may have cached bad data due to imdb changes, just wipe everything. GitHub #697
# v6 The association tables were not cleared on the last upgrade, clear again. GitHub #714
# v7 Another layout change cached bad data. GitHub #729
if ver is None or ver <= 6:
raise UpgradeImpossible('Resetting imdb_lookup caches because bad data may have been cached.')
return ver
class ImdbLookup(object):
"""
Retrieves imdb information for entries.
Example:
imdb_lookup: yes
Also provides imdb lookup functionality to all other imdb related plugins.
"""
field_map = {
'imdb_url': 'url',
'imdb_id': lambda movie: extract_id(movie.url),
'imdb_name': 'title',
'imdb_original_name': 'original_title',
'imdb_photo': 'photo',
'imdb_plot_outline': 'plot_outline',
'imdb_score': 'score',
'imdb_votes': 'votes',
'imdb_year': 'year',
'imdb_genres': lambda movie: [genre.name for genre in movie.genres],
'imdb_languages': lambda movie: [lang.language.name for lang in movie.languages],
'imdb_actors': lambda movie: dict((actor.imdb_id, actor.name) for actor in movie.actors),
'imdb_directors': lambda movie: dict((director.imdb_id, director.name) for director in movie.directors),
'imdb_mpaa_rating': 'mpaa_rating',
# Generic fields filled by all movie lookup plugins:
'movie_name': 'title',
'movie_year': 'year'}
schema = {'type': 'boolean'}
@plugin.priority(130)
def on_task_metainfo(self, task, config):
if not config:
return
for entry in task.entries:
self.register_lazy_fields(entry)
def register_lazy_fields(self, entry):
entry.register_lazy_func(self.lazy_loader, self.field_map)
def lazy_loader(self, entry):
"""Does the lookup for this entry and populates the entry fields."""
try:
self.lookup(entry)
except plugin.PluginError as e:
log_once(str(e.value).capitalize(), logger=log)
@with_session
def imdb_id_lookup(self, movie_title=None, raw_title=None, session=None):
"""
Perform faster lookup providing just imdb_id.
Falls back to using basic lookup if data cannot be found from cache.
.. note::
API will be changed, it's dumb to return None on errors AND
raise PluginError on others
:param movie_title: Name of the movie
:param raw_title: Raw entry title
:return: imdb id or None
:raises PluginError: Failure reason
"""
if movie_title:
log.debug('imdb_id_lookup: trying with title: %s' % movie_title)
movie = session.query(Movie).filter(Movie.title == movie_title).first()
if movie:
log.debug('--> success! got %s returning %s' % (movie, movie.imdb_id))
return movie.imdb_id
if raw_title:
log.debug('imdb_id_lookup: trying cache with: %s' % raw_title)
result = session.query(SearchResult).filter(SearchResult.title == raw_title).first()
if result:
# this title is hopeless, give up ..
if result.fails:
return None
log.debug('--> success! got %s returning %s' % (result, result.imdb_id))
return result.imdb_id
if raw_title:
# last hope with hacky lookup
fake_entry = Entry(raw_title, '')
self.lookup(fake_entry)
return fake_entry['imdb_id']
@plugin.internet(log)
@with_session
def lookup(self, entry, search_allowed=True, session=None):
"""
Perform imdb lookup for entry.
:param entry: Entry instance
:param search_allowed: Allow fallback to search
:raises PluginError: Failure reason
"""
from flexget.manager import manager
if entry.get('imdb_id', eval_lazy=False):
log.debug('No title passed. Lookup for %s' % entry['imdb_id'])
elif entry.get('imdb_url', eval_lazy=False):
log.debug('No title passed. Lookup for %s' % entry['imdb_url'])
elif entry.get('title', eval_lazy=False):
log.debug('lookup for %s' % entry['title'])
else:
raise plugin.PluginError('looking up IMDB for entry failed, no title, imdb_url or imdb_id passed.')
# if imdb_id is included, build the url.
if entry.get('imdb_id', eval_lazy=False) and not entry.get('imdb_url', eval_lazy=False):
entry['imdb_url'] = make_url(entry['imdb_id'])
# make sure imdb url is valid
if entry.get('imdb_url', eval_lazy=False):
imdb_id = extract_id(entry['imdb_url'])
if imdb_id:
entry['imdb_url'] = make_url(imdb_id)
else:
log.debug('imdb url %s is invalid, removing it' % entry['imdb_url'])
del (entry['imdb_url'])
# no imdb_url, check if there is cached result for it or if the
# search is known to fail
if not entry.get('imdb_url', eval_lazy=False):
result = session.query(SearchResult).filter(SearchResult.title == entry['title']).first()
if result:
# TODO: 1.2 this should really be checking task.options.retry
if result.fails and not manager.options.execute.retry:
# this movie cannot be found, not worth trying again ...
log.debug('%s will fail lookup' % entry['title'])
raise plugin.PluginError('IMDB lookup failed for %s' % entry['title'])
else:
if result.url:
log.trace('Setting imdb url for %s from db' % entry['title'])
entry['imdb_id'] = result.imdb_id
entry['imdb_url'] = result.url
movie = None
# no imdb url, but information required, try searching
if not entry.get('imdb_url', eval_lazy=False) and search_allowed:
log.verbose('Searching from imdb `%s`' % entry['title'])
search = ImdbSearch()
search_name = entry.get('movie_name', entry['title'], eval_lazy=False)
search_result = search.smart_match(search_name)
if search_result:
entry['imdb_url'] = search_result['url']
# store url for this movie, so we don't have to search on every run
result = SearchResult(entry['title'], entry['imdb_url'])
session.add(result)
session.commit()
log.verbose('Found %s' % (entry['imdb_url']))
else:
log_once('IMDB lookup failed for %s' % entry['title'], log, logging.WARN, session=session)
# store FAIL for this title
result = SearchResult(entry['title'])
result.fails = True
session.add(result)
session.commit()
raise plugin.PluginError('Title `%s` lookup failed' % entry['title'])
# check if this imdb page has been parsed & cached
movie = session.query(Movie).filter(Movie.url == entry['imdb_url']).first()
# If we have a movie from cache, we are done
if movie and not movie.expired:
entry.update_using_map(self.field_map, movie)
return
# Movie was not found in cache, or was expired
if movie is not None:
if movie.expired:
log.verbose('Movie `%s` details expired, refreshing ...' % movie.title)
# Remove the old movie, we'll store another one later.
session.query(MovieLanguage).filter(MovieLanguage.movie_id == movie.id).delete()
session.query(Movie).filter(Movie.url == entry['imdb_url']).delete()
session.commit()
# search and store to cache
if 'title' in entry:
log.verbose('Parsing imdb for `%s`' % entry['title'])
else:
log.verbose('Parsing imdb for `%s`' % entry['imdb_id'])
try:
movie = self._parse_new_movie(entry['imdb_url'], session)
except UnicodeDecodeError:
log.error('Unable to determine encoding for %s. Installing chardet library may help.' %
entry['imdb_url'])
# store cache so this will not be tried again
movie = Movie()
movie.url = entry['imdb_url']
session.add(movie)
session.commit()
raise plugin.PluginError('UnicodeDecodeError')
except ValueError as e:
# TODO: might be a little too broad catch, what was this for anyway? ;P
if manager.options.debug:
log.exception(e)
raise plugin.PluginError('Invalid parameter: %s' % entry['imdb_url'], log)
for att in ['title', 'score', 'votes', 'year', 'genres', 'languages', 'actors', 'directors', 'mpaa_rating']:
log.trace('movie.%s: %s' % (att, getattr(movie, att)))
# Update the entry fields
entry.update_using_map(self.field_map, movie)
def _parse_new_movie(self, imdb_url, session):
"""
Get Movie object by parsing imdb page and save movie into the database.
:param imdb_url: IMDB url
:param session: Session to be used
:return: Newly added Movie
"""
parser = ImdbParser()
parser.parse(imdb_url)
# store to database
movie = Movie()
movie.photo = parser.photo
movie.title = parser.name
movie.original_title = parser.original_name
movie.score = parser.score
movie.votes = parser.votes
movie.year = parser.year
movie.mpaa_rating = parser.mpaa_rating
movie.plot_outline = parser.plot_outline
movie.url = imdb_url
for name in parser.genres:
genre = session.query(Genre).filter(Genre.name == name).first()
if not genre:
genre = Genre(name)
movie.genres.append(genre) # pylint:disable=E1101
for index, name in enumerate(parser.languages):
language = session.query(Language).filter(Language.name == name).first()
if not language:
language = Language(name)
movie.languages.append(MovieLanguage(language, prominence=index))
for imdb_id, name in parser.actors.items():
actor = session.query(Actor).filter(Actor.imdb_id == imdb_id).first()
if not actor:
actor = Actor(imdb_id, name)
movie.actors.append(actor) # pylint:disable=E1101
for imdb_id, name in parser.directors.items():
director = session.query(Director).filter(Director.imdb_id == imdb_id).first()
if not director:
director = Director(imdb_id, name)
movie.directors.append(director) # pylint:disable=E1101
# so that we can track how long since we've updated the info later
movie.updated = datetime.now()
session.add(movie)
return movie
@property
def movie_identifier(self):
"""Returns the plugin main identifier type"""
return 'imdb_id'
@event('plugin.register')
def register_plugin():
plugin.register(ImdbLookup, 'imdb_lookup', api_ver=2, groups=['movie_metainfo'])
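# Hedged illustration of how field_map entries resolve (a simplified
# stand-in for Entry.update_using_map, which is FlexGet internals): string
# values are read as Movie attributes, callables are invoked with the Movie.
def _example_resolve_field_map(movie):
    resolved = {}
    for entry_field, getter in ImdbLookup.field_map.items():
        resolved[entry_field] = getter(movie) if callable(getter) else getattr(movie, getter)
    return resolved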
|
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from time import sleep
from typing import Any, Dict, List, Optional, Union
import __main__
import numpy as np
import torch
import torch.distributed
from torch.nn.parallel.distributed import DistributedDataParallel
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.distributed import LightningDistributed
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.distributed import prepare_for_backward
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import (
_FAIRSCALE_AVAILABLE,
_HYDRA_AVAILABLE,
_TORCH_GREATER_EQUAL_1_7,
_TORCH_GREATER_EQUAL_1_8,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
rank_zero_deprecation,
rank_zero_warn,
)
from pytorch_lightning.utilities.distributed import (
distributed_available,
init_ddp_connection,
rank_zero_only,
ReduceOp,
sync_ddp_if_available,
)
from pytorch_lightning.utilities.exceptions import DeadlockDetectedException, MisconfigurationException
from pytorch_lightning.utilities.seed import reset_seed
from pytorch_lightning.utilities.types import STEP_OUTPUT
if _TORCH_GREATER_EQUAL_1_10:
from torch.distributed.optim import DistributedOptimizer, PostLocalSGDOptimizer, ZeroRedundancyOptimizer
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path
if _TORCH_GREATER_EQUAL_1_8:
from pytorch_lightning.utilities.distributed import register_ddp_comm_hook
if _TORCH_GREATER_EQUAL_1_10:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
import torch.distributed.algorithms.model_averaging.averagers as averagers
log = logging.getLogger(__name__)
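# Hedged usage sketch (illustrative, not part of this module): in this
# release line a DDPPlugin instance (the class is defined just below) can be
# passed to the Trainer alongside ``accelerator="ddp"`` so that extra keyword
# arguments such as ``find_unused_parameters`` flow through to
# DistributedDataParallel. ``gpus=2`` is an assumed 2-GPU setup.
def _example_trainer_with_ddp_plugin():
    import pytorch_lightning as pl
    return pl.Trainer(
        gpus=2,
        accelerator="ddp",
        plugins=[DDPPlugin(find_unused_parameters=False)],
    )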
class DDPPlugin(ParallelPlugin):
"""
Plugin for multi-process single-device training on one or multiple nodes.
The master process in each node spawns N-1 child processes via :func:`subprocess.Popen`,
where N is the number of devices (e.g. GPU) per node.
It is very similar to how :mod:`torch.distributed.launch` launches processes.
"""
distributed_backend = "ddp"
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
num_nodes: Optional[int] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
checkpoint_io: Optional[CheckpointIO] = None,
sync_batchnorm: Optional[bool] = None,
ddp_comm_state: Optional[object] = None,
ddp_comm_hook: Optional[callable] = None,
ddp_comm_wrapper: Optional[callable] = None,
model_averaging_period: Optional[int] = None,
**kwargs: Union[Any, Dict[str, Any]],
) -> None:
super().__init__(
parallel_devices=parallel_devices,
cluster_environment=cluster_environment,
checkpoint_io=checkpoint_io,
)
self.interactive_ddp_procs = []
if num_nodes is not None:
rank_zero_deprecation(
"Argument `num_nodes` in `DDPPlugin` is deprecated in v1.4, and will be removed in v1.6."
" Notice that it will be overriden by the trainer setting."
)
self._num_nodes = num_nodes or 1
if sync_batchnorm is not None:
rank_zero_deprecation(
"Argument `sync_batchnorm` in `DDPPlugin` is deprecated in v1.4, and will be removed in v1.6."
" Notice that it will be overriden by the trainer setting."
)
self._sync_batchnorm = sync_batchnorm or False
self.dist = LightningDistributed()
self.num_processes = len(self.parallel_devices) if self.parallel_devices is not None else 0
self._ddp_kwargs = kwargs
self._task_idx = None
self._ddp_comm_state = ddp_comm_state
self._ddp_comm_hook = ddp_comm_hook
self._ddp_comm_wrapper = ddp_comm_wrapper
self._model_averaging_period = model_averaging_period
self._pids: Optional[List[int]] = None
self._sync_dir: Optional[str] = None
self.set_world_ranks()
@property
def is_distributed(self) -> bool:
return True
@property
def root_device(self) -> torch.device:
return self.parallel_devices[self.local_rank]
@property
def num_nodes(self) -> int:
return self._num_nodes
@num_nodes.setter
def num_nodes(self, num_nodes: int) -> None:
# world ranks depend on num_nodes, so resetting num_nodes requires resetting the world ranks as well
self._num_nodes = num_nodes
self.set_world_ranks()
@property
def sync_batchnorm(self) -> bool:
return self._sync_batchnorm
@sync_batchnorm.setter
def sync_batchnorm(self, sync_batchnorm: bool) -> None:
self._sync_batchnorm = sync_batchnorm
@property
def task_idx(self) -> Optional[int]:
rank_zero_deprecation(
f"`{self.__class__.__name__}.task_idx` is deprecated in v1.4 and will be removed in v1.6. Use "
f"`{self.__class__.__name__}.local_rank` instead."
)
return self._task_idx
@task_idx.setter
def task_idx(self, task_idx: int) -> None:
self._task_idx = task_idx
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)
return distributed_sampler_kwargs
@property
def _is_single_process_single_device(self) -> bool:
return True
def setup_environment(self) -> None:
# start the other scripts
if not self.cluster_environment.creates_children():
self._call_children_scripts()
# set the task idx
self.task_idx = self.cluster_environment.local_rank()
self.setup_distributed()
def _call_children_scripts(self):
# bookkeeping of spawned processes
self._check_can_spawn_children()
# DDP Environment variables
os.environ["MASTER_ADDR"] = self.cluster_environment.master_address()
os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
# allow the user to pass the node rank
os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank())
os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank())
# Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c`
# See https://docs.python.org/3/reference/import.html#main-spec
if __main__.__spec__ is None: # pragma: no cover
# Script called as `python a/b/c.py`
# when user is using hydra find the absolute path
path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path
# pull out the commands used to run the script and resolve the abs file path
command = sys.argv
try:
full_path = path_lib(command[0])
except Exception:
full_path = os.path.abspath(command[0])
command[0] = full_path
# use the same Python interpreter that is actually running
command = [sys.executable] + command
else: # Script called as `python -m a.b.c`
command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:]
# the visible devices tell us how many GPUs we want to use.
# by the time the trainer script reaches this point the devices have already been scoped,
# so to launch the child scripts we leave CUDA_VISIBLE_DEVICES alone
# and forward the selected GPUs via environment variables
if self.parallel_devices is None:
raise MisconfigurationException("you selected (distributed_backend = ddp) but did not set Trainer(gpus=?)")
os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}"
self.interactive_ddp_procs = []
for local_rank in range(1, self.num_processes):
env_copy = os.environ.copy()
env_copy["LOCAL_RANK"] = f"{local_rank}"
# remove env var if global seed not set
if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy:
del env_copy["PL_GLOBAL_SEED"]
# start process
# if hydra is available and initialized, make sure to set the cwd correctly
cwd: Optional[str] = None
if _HYDRA_AVAILABLE:
if HydraConfig.initialized():
cwd = get_original_cwd()
os_cwd = f'"{os.getcwd()}"'
command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"]
proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
self.interactive_ddp_procs.append(proc)
# starting all processes at once can cause issues with dataloaders;
# stagger the launches with a random delay of 1-5 seconds
delay = np.random.uniform(1, 5, 1)[0]
sleep(delay)
def setup_distributed(self):
reset_seed()
# determine which process we are and world size
self.set_world_ranks()
# set warning rank
rank_zero_only.rank = self.global_rank
# set up server using proc 0's ip address
# try to init at most 20 times in case ports are taken
# where to store ip_table
init_ddp_connection(self.cluster_environment, self.torch_distributed_backend)
# set the ranks and devices
self.dist.rank = self.global_rank
self.dist.device = self.root_device
def _check_can_spawn_children(self):
if self.local_rank != 0:
raise RuntimeError(
"Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen."
" Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user,"
" 2) `ClusterEnvironment.creates_children()` incorrectly implemented."
)
def set_world_ranks(self) -> None:
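# With P = num_processes per node, the global rank below is
# node_rank * P + local_rank, giving each process a unique index in
# [0, num_nodes * P) == [0, world_size).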
if self.cluster_environment is None:
return
self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
rank_zero_only.rank = self.cluster_environment.global_rank()
def pre_configure_ddp(self):
# if unset, default `find_unused_parameters` to `True`
# Many models require setting this parameter to `True`, as there are corner cases
# where not all parameter backward hooks fire in the autograd engine even when `requires_grad` is `True`.
# This flag does come with a performance hit, so disable it where possible.
self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
# todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
if (
_TORCH_GREATER_EQUAL_1_7
and not self.lightning_module.automatic_optimization
and not self._ddp_kwargs.get("find_unused_parameters", False)
):
rank_zero_warn(
"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
"to properly work with DDP."
)
self._ddp_kwargs["find_unused_parameters"] = True
def _register_ddp_hooks(self) -> None:
# In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode
# Since 1.9, DDP communication hooks can work on all backends.
if _TORCH_GREATER_EQUAL_1_9 or (
_TORCH_GREATER_EQUAL_1_8 and self.on_gpu and self._is_single_process_single_device
):
register_ddp_comm_hook(
model=self._model,
ddp_comm_state=self._ddp_comm_state,
ddp_comm_hook=self._ddp_comm_hook,
ddp_comm_wrapper=self._ddp_comm_wrapper,
)
# Post-localSGD is only available after 1.9,
# and `torch.distributed.optim` package currently is not available on Windows.
if (
_TORCH_GREATER_EQUAL_1_10
and isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState)
and self.lightning_module.trainer.state.fn == TrainerFn.FITTING
):
self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter)
def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int):
optimizers = self.lightning_module.trainer.optimizers
if self._model_averaging_period is None:
raise ValueError(
"Post-localSGD algorithm is used, " "but model averaging period is not provided to DDP plugin."
)
averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps)
for x, optimizer in enumerate(optimizers):
if isinstance(optimizer, LightningOptimizer):
optimizer = optimizer._optimizer
if (
isinstance(optimizer, DistributedOptimizer)
or isinstance(optimizer, ZeroRedundancyOptimizer)
or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS))
):
raise ValueError(
f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer."
)
if isinstance(optimizer, PostLocalSGDOptimizer):
continue
optim_class = type(optimizer)
post_localSGD_optimizer = PostLocalSGDOptimizer(
params=optimizer.param_groups,
optimizer_class=optim_class,
averager=averager,
**optimizer.defaults,
)
optimizers[x] = post_localSGD_optimizer
del optimizer
trainer = self.lightning_module.trainer
trainer.optimizers = optimizers
trainer.convert_to_lightning_optimizers()
def configure_ddp(self) -> None:
self.pre_configure_ddp()
self._model = DistributedDataParallel(
LightningDistributedModule(self.model), device_ids=self.determine_ddp_device_ids(), **self._ddp_kwargs
)
self._register_ddp_hooks()
def determine_ddp_device_ids(self):
if self.root_device.type == "cpu":
return None
return [self.root_device.index]
def pre_dispatch(self):
# share ddp pids to all processes
self._share_information_to_prevent_deadlock()
# move the model to the correct device
self.model_to_device()
if self.sync_batchnorm:
self.model = self.configure_sync_batchnorm(self.model)
# skip wrapping the model if we are not fitting as no gradients need to be exchanged
trainer_fn = self.lightning_module.trainer.state.fn
if trainer_fn == TrainerFn.FITTING:
self.configure_ddp()
def post_dispatch(self) -> None:
self.cluster_environment.teardown()
def barrier(self, *args, **kwargs) -> None:
if not distributed_available():
return
if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl":
torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())
else:
torch.distributed.barrier()
def broadcast(self, obj: object, src: int = 0) -> object:
return self.dist.broadcast(obj)
def pre_backward(self, closure_loss: torch.Tensor) -> None:
"""Run before precision plugin executes backward"""
if not self.lightning_module.automatic_optimization:
prepare_for_backward(self.model, closure_loss)
def model_to_device(self):
self.model.to(self.root_device)
def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = "mean") -> torch.Tensor:
"""
Reduces a tensor from several distributed processes to one aggregated tensor.
Args:
tensor: the tensor to sync and reduce
group: the process group to gather results from. Defaults to all processes (world)
reduce_op: the reduction operation. Defaults to 'mean'/'avg'.
Can also be a string 'sum' to calculate the sum during reduction.
Return:
the reduced value; if the input was not a tensor, it is returned unchanged
"""
if isinstance(tensor, torch.Tensor):
tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
return tensor
def training_step(self, *args, **kwargs) -> Optional[Any]:
return self.model(*args, **kwargs)
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
if isinstance(self.model, DistributedDataParallel):
# used when calling `trainer.fit`
return self.model(*args, **kwargs)
else:
# used when calling `trainer.validate`
return self.lightning_module.validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
return self.lightning_module.test_step(*args, **kwargs)
def predict_step(self, *args, **kwargs) -> Any:
return self.lightning_module.predict_step(*args, **kwargs)
def post_training_step(self):
if not self.lightning_module.automatic_optimization:
self.model.require_backward_grad_sync = True
@classmethod
def register_plugins(cls, plugin_registry: Dict) -> None:
plugin_registry.register(
"ddp_find_unused_parameters_false",
cls,
description="DDP Plugin with `find_unused_parameters` as False",
find_unused_parameters=False,
)
def _share_information_to_prevent_deadlock(self):
self._share_pids()
# there should be a unique sync_dir per node.
if self.local_rank == 0:
# create a temporary directory used to synchronize processes on deadlock.
self._sync_dir = tempfile.mkdtemp()
sync_dirs = []
global_node_rank_zero = 0
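# Broadcast the sync_dir from each node's rank-zero process in turn
# (global ranks 0, P, 2P, ... for P processes per node) and keep the
# entry belonging to this node.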
for _ in range(self.num_nodes):
sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero))
global_node_rank_zero += self.world_size // self.num_nodes
self._sync_dir = sync_dirs[self.node_rank]
def _share_pids(self):
"""
Make all DDP processes aware of every process's pid.
"""
self.barrier()
pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device))
pids = pids.cpu().numpy().tolist()
self._pids = pids if isinstance(pids, list) else [pids]
def reconciliate_processes(self, trace: str):
if self.world_size < 2:
return
sync_dir = self._sync_dir
if not sync_dir:
rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.")
return
# The cluster may be configured to periodically purge the `/tmp`
# directory, in which case `sync_dir` may not exist anymore at this
# point. Idempotently create it to ensure its existence.
Path(sync_dir).mkdir(parents=True, exist_ok=True)
# save a file locally.
torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl"))
# sleep for a short time
time.sleep(3)
# return if all processes wrote a file in the `sync_dir`.
# todo (tchaton) Add support for non-shared file-system which will fail.
if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes):
return
for pid in self._pids:
if pid != os.getpid():
os.kill(pid, signal.SIGKILL)
shutil.rmtree(sync_dir)
raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}")
def teardown(self) -> None:
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
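# Hedged usage sketch (illustration only; `MyLightningModule` is a
# hypothetical model, the rest is the public API used above):
#
#     import pytorch_lightning as pl
#
#     trainer = pl.Trainer(
#         gpus=2,
#         plugins=[DDPPlugin(find_unused_parameters=False)],
#     )
#     trainer.fit(MyLightningModule())
#
# This is equivalent to selecting the registered
# "ddp_find_unused_parameters_false" plugin alias defined above.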
|
|
#!/usr/bin/python3
#
# Copyright (C) 2009 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for unittesting the confd client module"""
import socket
import unittest
from ganeti import confd
from ganeti import constants
from ganeti import errors
import ganeti.confd.client
import testutils
class ResettableMock(object):
def __init__(self, *args, **kwargs):
self.Reset()
def Reset(self):
pass
class MockLogger(ResettableMock):
def Reset(self):
self.debug_count = 0
self.warn_count = 0
self.error_count = 0
def debug(self, string):
self.debug_count += 1
def warning(self, string):
self.warn_count += 1
def error(self, string):
self.error_count += 1
class MockConfdAsyncUDPClient(ResettableMock):
def Reset(self):
self.send_count = 0
self.last_address = ''
self.last_port = -1
self.last_payload = ''
def enqueue_send(self, address, port, payload):
self.send_count += 1
self.last_payload = payload
self.last_port = port
self.last_address = address
class MockCallback(ResettableMock):
def Reset(self):
self.call_count = 0
self.last_up = None
def __call__(self, up):
"""Callback
@type up: L{ConfdUpcallPayload}
@param up: the upcall payload
"""
self.call_count += 1
self.last_up = up
class MockTime(ResettableMock):
def Reset(self):
self.mytime = 1254213006.5175071
def time(self):
return self.mytime
def increase(self, delta):
self.mytime += delta
class _BaseClientTest:
"""Base class for client tests"""
mc_list = None
new_peers = None
family = None
def setUp(self):
self.mock_time = MockTime()
confd.client.time = self.mock_time
confd.client.ConfdAsyncUDPClient = MockConfdAsyncUDPClient
self.logger = MockLogger()
hmac_key = "mykeydata"
self.callback = MockCallback()
self.client = confd.client.ConfdClient(hmac_key, self.mc_list,
self.callback, logger=self.logger)
def testRequest(self):
req1 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.assertNotEqual(req1.rsalt, req2.rsalt)
self.assertEqual(req1.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertEqual(req2.protocol, constants.CONFD_PROTOCOL_VERSION)
self.assertRaises(errors.ConfdClientError, confd.client.ConfdClientRequest,
type=-33)
def testClientSend(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Cannot send the same request twice
self.assertRaises(errors.ConfdClientError, self.client.SendRequest, req)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
# A coverage larger than the number of peers is rejected
self.assertRaises(errors.ConfdClientError, self.client.SendRequest,
req2, coverage=15)
self.assertEqual(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE)
# Send with max coverage
self.client.SendRequest(req2, coverage=-1)
self.assertEqual(self.client._socket.send_count,
constants.CONFD_DEFAULT_REQ_COVERAGE + len(self.mc_list))
self.assertTrue(self.client._socket.last_address in self.mc_list)
def testClientExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
# Make a couple of seconds pass ;)
self.mock_time.increase(2)
# Now sending the second request
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT - 1)
# First request should be expired, second one should not
self.client.ExpireRequests()
self.assertEqual(self.callback.call_count, 1)
self.assertEqual(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEqual(self.callback.last_up.salt, req.rsalt)
self.assertEqual(self.callback.last_up.orig_request, req)
self.mock_time.increase(3)
self.assertEqual(self.callback.call_count, 1)
self.client.ExpireRequests()
self.assertEqual(self.callback.call_count, 2)
self.assertEqual(self.callback.last_up.type, confd.client.UPCALL_EXPIRE)
self.assertEqual(self.callback.last_up.salt, req2.rsalt)
self.assertEqual(self.callback.last_up.orig_request, req2)
def testClientCascadeExpire(self):
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT + 1)
req2 = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req2)
self.assertEqual(self.callback.call_count, 1)
def testUpdatePeerList(self):
self.client.UpdatePeerList(self.new_peers)
self.assertEqual(self.client._peers, self.new_peers)
req = confd.client.ConfdClientRequest(type=constants.CONFD_REQ_PING)
self.client.SendRequest(req)
self.assertEqual(self.client._socket.send_count, len(self.new_peers))
self.assertTrue(self.client._socket.last_address in self.new_peers)
def testSetPeersFamily(self):
self.client._SetPeersAddressFamily()
self.assertEqual(self.client._family, self.family)
mixed_peers = ["192.0.2.99", "2001:db8:beef::13"]
self.client.UpdatePeerList(mixed_peers)
self.assertRaises(errors.ConfdClientError,
self.client._SetPeersAddressFamily)
class TestIP4Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["192.0.2.1",
"192.0.2.2",
"192.0.2.3",
"192.0.2.4",
"192.0.2.5",
"192.0.2.6",
"192.0.2.7",
"192.0.2.8",
"192.0.2.9",
]
new_peers = ["198.51.100.1", "198.51.100.2"]
family = socket.AF_INET
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
class TestIP6Client(unittest.TestCase, _BaseClientTest):
"""Client tests"""
mc_list = ["2001:db8::1",
"2001:db8::2",
"2001:db8::3",
"2001:db8::4",
"2001:db8::5",
"2001:db8::6",
"2001:db8::7",
"2001:db8::8",
"2001:db8::9",
]
new_peers = ["2001:db8:beef::11", "2001:db8:beef::12"]
family = socket.AF_INET6
def setUp(self):
unittest.TestCase.setUp(self)
_BaseClientTest.setUp(self)
if __name__ == "__main__":
testutils.GanetiTestProgram()
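# Hedged sketch of the time-control pattern used by the expiry tests above
# (illustration only; `client` and `req` stand for a ConfdClient and a
# pending request): swapping confd.client.time for MockTime lets a test
# cross CONFD_CLIENT_EXPIRE_TIMEOUT without real waiting.
#
#     mock_time = MockTime()
#     confd.client.time = mock_time
#     client.SendRequest(req)
#     mock_time.increase(constants.CONFD_CLIENT_EXPIRE_TIMEOUT + 1)
#     client.ExpireRequests()   # req is now reported via UPCALL_EXPIRE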
|
|
#!/usr/bin/env python
'An interactive fiction system offering control over the narrative discourse.'
__author__ = 'Nick Montfort'
__copyright__ = 'Copyright 2011 Nick Montfort'
__license__ = 'ISC'
__version__ = '0.5.0.0'
__status__ = 'Development'
import sys
import os
import time
import optparse
import clarifier
import command_map
import discourse_model
import joker
import microplanner
import preparer
import presenter
import recognizer
import reply_planner
import world_model
class Multistream(object):
'Encapsulates multiple output streams.'
def __init__(self, streams, log=None):
self.streams = streams
self.log = log
def close(self):
"""Close each of the streams.
If one or more of the streams returns some exit status, the maximum
value is returned by this method."""
overall_status = None
for stream in self.streams:
status = stream.close()
if status is not None:
overall_status = max(overall_status, status)
return overall_status
def write(self, string):
'Write string to each of the streams.'
for stream in self.streams:
stream.write(string)
def start_log(out_streams):
'Open a log file named with the next available integer.'
log_files = [os.path.splitext(l)[0] for l in os.listdir('logs/') if
os.path.splitext(l)[1] == '.log']
if len(log_files) == 0:
latest = 0
else:
latest = max([int(log_file) for log_file in log_files])
log_file = 'logs/' + str(latest + 1) + '.log'
try:
log = open(log_file, 'w')
except IOError, err:
msg = ('Unable to open log file "' + log_file + '" for ' +
'writing due to this error: ' + str(err))
raise joker.StartupError(msg)
# So that we output to the screen and the log file:
out_streams.streams.append(log)
# And indicate that this stream is the log file:
out_streams.log = log
presenter.present('\nLogged to: ' + log_file + '\nSession started ' +
time.strftime("%Y-%m-%d %H:%M:%S"), out_streams)
return out_streams
def initialize(if_file, spin_files, out_streams):
'Load all files and present the header and prologue.'
for startup_string in joker.session_startup(__version__):
presenter.center(startup_string, out_streams)
fiction = joker.load_fiction(if_file, ['discourse', 'items'],
discourse_model.FICTION_DEFAULTS)
presenter.center('fiction: ' + if_file, out_streams)
world = world_model.World(fiction)
world.set_concepts(fiction.concepts)
for i in dir(fiction):
if i[:8] == 'COMMAND_':
setattr(command_map, i.partition('_')[2], getattr(fiction, i))
delattr(fiction, i)
for (key, value) in discourse_model.SPIN_DEFAULTS.items():
if key not in fiction.discourse['spin']:
fiction.discourse['spin'][key] = value
while len(spin_files) > 0:
next_file = spin_files.pop(0)
new_spin = joker.load_spin(fiction.discourse['spin'], next_file)
fiction.discourse['spin'].update(new_spin)
presenter.center('spin: ' + next_file, out_streams)
presenter.present('\n', out_streams)
presenter.present('', out_streams)
discourse = discourse_model.Discourse(fiction.discourse)
reply = joker.show_frontmatter(discourse)
if 'prologue' in discourse.metadata:
reply += '\n\n' + joker.show_prologue(discourse.metadata)
presenter.present(reply, out_streams)
return (world, discourse)
def handle_input(user_input, world, discourse, in_stream, out_streams):
"""Deal with input obtained, sending it to the appropriate module.
The commanded character's concept is used when trying to recognize
commands."""
c_concept = world.concept[discourse.spin['commanded']]
user_input = recognizer.recognize(user_input, discourse, c_concept)
if user_input.unrecognized:
user_input = clarifier.clarify(user_input, c_concept, discourse,
in_stream, out_streams)
if user_input.command:
user_input, id_list, world = simulator(user_input, world,
discourse.spin['commanded'])
if hasattr(world.item['@cosmos'], 'update_spin'):
discourse.spin = world.item['@cosmos'].update_spin(world,
discourse)
spin = discourse.spin
if hasattr(world.item['@cosmos'], 'use_spin'):
spin = world.item['@cosmos'].use_spin(world, discourse.spin)
f_concept = world.concept[spin['focalizer']]
tale, discourse = teller(id_list, f_concept, discourse)
presenter.present(tale, out_streams)
elif user_input.directive:
texts, world, discourse = joker.joke(user_input.normal, world,
discourse)
for text in texts:
if text is not None:
presenter.present(text, out_streams)
discourse.input_list.update(user_input)
return (user_input, world, discourse)
def each_turn(world, discourse, in_stream, out_streams):
'Obtain and process input, if the session is interactive.'
if discourse.spin['commanded'] is None:
if hasattr(world.item['@cosmos'], 'interval'):
world.item['@cosmos'].interval()
_, id_list, world = simulator(None, world,
discourse.spin['commanded'])
focal_concept = world.concept[discourse.spin['focalizer']]
reply_text, discourse = teller(id_list, focal_concept, discourse)
presenter.present(reply_text, out_streams)
else:
if (hasattr(discourse, 'initial_inputs') and
len(discourse.initial_inputs) > 0):
input_string = discourse.initial_inputs.pop(0)
user_input = preparer.tokenize(input_string, discourse.separator)
presenter.present('[> ' + input_string, out_streams, '', '')
else:
user_input = preparer.prepare(discourse.separator,
discourse.typo.prompt, in_stream,
out_streams)
# After each input, present a newline all by itself.
presenter.present('\n', out_streams, '', '')
while len(user_input.tokens) > 0 and world.running:
(user_input, world, discourse) = handle_input(user_input, world,
discourse, in_stream,
out_streams)
presenter.present(discourse.input_list.show(1),
out_streams.log)
return (world, discourse)
def simulator(user_input, world, commanded, actions_to_do=None):
'Simulate the IF world using the Action from user input.'
if actions_to_do is None:
actions_to_do = []
done_list = []
start_time = world.ticks
for tag in world.item:
if (world.item[tag].actor and not tag == commanded and
world.item[tag].alive):
# The commanded character does not act automatically. That is,
# his, her, or its "act" method is not called.
new_actions = world.item[tag].act(command_map, world.concept[tag])
actions_to_do.extend(new_actions)
if commanded is not None and user_input is not None:
commanded = world.item[commanded]
c_action = commanded.do_command(user_input.normal, command_map, world)
if c_action is not None:
c_action.cause = '"' + ' '.join(user_input.normal) + '"'
actions_to_do.append(c_action)
if user_input is not None:
user_input.caused = c_action.id
current_time = start_time
while len(actions_to_do) > 0 and world.running:
action = actions_to_do.pop(0)
to_be_done = action.do(world)
done_list.append(action.id)
if action.final:
world.running = False
actions_to_do = to_be_done + actions_to_do
if action.end > current_time:
world.advance_clock(action.end - current_time)
current_time = action.end
return user_input, done_list, world
def teller(id_list, concept, discourse):
'Narrate actions based on the concept. Update the discourse.'
reply_plan = reply_planner.plan(id_list, concept, discourse)
section = microplanner.specify(reply_plan, concept, discourse)
output = section.realize(concept, discourse)
return output, discourse
def parse_command_line(argv):
'Improved option/argument parsing and help thanks to Andrew Plotkin.'
parser = optparse.OptionParser(usage='[options] fiction.py [ spin.py ... ]')
parser.add_option('--auto', dest='autofile',
help='read inputs from FILE', metavar='FILE')
parser.add_option('--nodebug', action='store_false', dest='debug',
help='disable debugging directives',
default=True)
opts, args = parser.parse_args(argv[1:])
if not args:
parser.print_usage()
msg = ('At least one argument (the fiction file name) is ' +
'needed; any other file names are processed in order ' +
'as spin files.')
raise joker.StartupError(msg)
return opts, args
def main(argv, in_stream=sys.stdin, out_stream=sys.stdout):
"Set up a session and run Curveship's main loop."
return_code = 0
try:
out_streams = Multistream([out_stream])
opts, args = parse_command_line(argv)
out_streams = start_log(out_streams)
world, discourse = initialize(args[0], args[1:], out_streams)
discourse.debug = opts.debug
if opts.autofile is not None:
auto = open(opts.autofile, 'r')
discourse.initial_inputs = auto.readlines()
auto.close()
if len(world.act) > 0:
_, id_list, world = simulator(None, world,
discourse.spin['commanded'],
world.act.values())
focal_concept = world.concept[discourse.spin['focalizer']]
reply_text, discourse = teller(id_list, focal_concept, discourse)
presenter.present(reply_text, out_streams)
while world.running:
previous_time = time.time()
world, discourse = each_turn(world, discourse, in_stream,
out_streams)
out_streams.log.write('#' + str(time.time() - previous_time))
except joker.StartupError, err:
presenter.present(err.msg, Multistream([sys.stderr]))
return_code = 2
except KeyboardInterrupt, err:
presenter.present('\n', out_streams)
return_code = 2
except EOFError, err:
presenter.present('\n', out_streams)
return_code = 2
finally:
in_stream.close()
out_streams.close()
return return_code
if __name__ == '__main__':
sys.exit(main(sys.argv))
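# Hedged usage sketch (the file names are hypothetical): the first
# positional argument is the fiction file and any further arguments are
# applied in order as spin files, per parse_command_line() above.
#
#     python curveship.py fiction/cloak.py spin/told_by_hero.py
#     python curveship.py --auto session.txt fiction/cloak.py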
|
|
from DistributedMinigameAI import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM
from direct.fsm import State
from direct.actor import Actor
import DivingGameGlobals
import random
import types
class DistributedDivingGameAI(DistributedMinigameAI):
fishProportions = []
for i in range(6):
fishProportions.append([])
n = 100
fishProportions[0].append(([0, 0.8],
[0.8, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.8],
[0.8, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.7],
[0.7, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([0, 0.5],
[0.5, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[0].append(([n, 0.5],
[0.5, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.8],
[0.8, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.8],
[0.8, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.7],
[0.7, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[1].append(([0, 0.7],
[0.7, 0.9],
[n, n],
[n, n],
[n, n],
[0.9, 1]))
fishProportions[1].append(([0, 0.4],
[0.4, 0.8],
[n, n],
[n, n],
[n, n],
[0.8, 1]))
fishProportions[1].append(([n, 0.3],
[0.3, 0.6],
[n, n],
[n, n],
[n, n],
[0.6, 1]))
fishProportions[2].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.6],
[0.6, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.6],
[0.6, 0.8],
[n, n],
[0.8, 1],
[n, n],
[n, n]))
fishProportions[2].append(([0, 0.5],
[0.5, 0.7],
[n, n],
[0.7, 0.9],
[n, n],
[0.9, 1]))
fishProportions[2].append(([0, 0.2],
[0.2, 0.4],
[n, n],
[0.4, 0.75],
[n, n],
[0.75, 1]))
fishProportions[2].append(([n, 0.2],
[0.2, 0.6],
[n, n],
[0.6, 0.8],
[n, n],
[0.8, 1]))
fishProportions[3].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.6],
[0.6, 1],
[n, n],
[n, n],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.6],
[0.6, 0.8],
[n, n],
[0.95, 1],
[n, n],
[n, n]))
fishProportions[3].append(([0, 0.5],
[0.5, 0.7],
[n, n],
[0.7, 0.85],
[0.9, 0.95],
[0.95, 1]))
fishProportions[3].append(([0, 0.2],
[0.2, 0.4],
[n, n],
[0.4, 0.75],
[0.75, 0.85],
[0.85, 1]))
fishProportions[3].append(([n, 0.2],
[0.2, 0.6],
[n, n],
[0.6, 0.8],
[n, n],
[0.8, 1]))
fishProportions[4].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[4].append(([0, 0.45],
[0.45, 0.9],
[n, n],
[0.9, 1],
[n, n],
[n, n]))
fishProportions[4].append(([0, 0.2],
[0.2, 0.5],
[n, n],
[0.5, 0.95],
[0.95, 1],
[n, n]))
fishProportions[4].append(([0, 0.1],
[0.1, 0.3],
[n, n],
[0.3, 0.75],
[0.75, 0.8],
[0.8, 1]))
fishProportions[4].append(([n, n],
[0, 0.15],
[n, n],
[0.15, 0.4],
[n, n],
[0.4, 1]))
fishProportions[4].append(([n, n],
[n, n],
[n, n],
[0, 0.4],
[n, n],
[0.6, 1]))
fishProportions[5].append(([0, 0.7],
[0.7, 0.9],
[0.9, 1],
[n, n],
[n, n],
[n, n]))
fishProportions[5].append(([0, 0.45],
[0.45, 0.9],
[n, n],
[0.9, 1],
[n, n],
[n, n]))
fishProportions[5].append(([0, 0.2],
[0.2, 0.5],
[n, n],
[0.5, 0.95],
[0.95, 1],
[n, n]))
fishProportions[5].append(([0, 0.1],
[0.1, 0.3],
[n, n],
[0.3, 0.75],
[0.75, 0.8],
[0.8, 1]))
fishProportions[5].append(([n, n],
[0, 0.15],
[n, n],
[0.15, 0.4],
[n, n],
[0.4, 1]))
fishProportions[5].append(([n, n],
[n, n],
[n, n],
[0, 0.4],
[n, n],
[0.6, 1]))
difficultyPatternsAI = {ToontownGlobals.ToontownCentral: [3.5, fishProportions[0], 1.5],
ToontownGlobals.DonaldsDock: [3.0, fishProportions[1], 1.8],
ToontownGlobals.DaisyGardens: [2.5, fishProportions[2], 2.1],
ToontownGlobals.MinniesMelodyland: [2.0, fishProportions[3], 2.4],
ToontownGlobals.TheBrrrgh: [2.0, fishProportions[4], 2.7],
ToontownGlobals.DonaldsDreamland: [1.5, fishProportions[5], 3.0]}
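# Each safezone entry above is (fish spawn interval in seconds,
# spawner-indexed fish-proportion table, score reward modifier): later
# playgrounds spawn fish faster and pay out more per treasure.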
def __init__(self, air, minigameId):
try:
self.DistributedDivingGameAI_initialized
except AttributeError:
self.DistributedDivingGameAI_initialized = 1
DistributedMinigameAI.__init__(self, air, minigameId)
self.gameFSM = ClassicFSM.ClassicFSM('DistributedDivingGameAI', [State.State('inactive', self.enterInactive, self.exitInactive, ['swimming']), State.State('swimming', self.enterSwimming, self.exitSwimming, ['cleanup']), State.State('cleanup', self.enterCleanup, self.exitCleanup, ['inactive'])], 'inactive', 'inactive')
self.addChildGameFSM(self.gameFSM)
self.__timeBase = globalClockDelta.localToNetworkTime(globalClock.getRealTime())
def delete(self):
self.notify.debug('delete')
del self.gameFSM
DistributedMinigameAI.delete(self)
def setGameReady(self):
self.notify.debug('setGameReady')
self.sendUpdate('setTrolleyZone', [self.trolleyZone])
for avId in self.scoreDict.keys():
self.scoreDict[avId] = 0
self.treasureHolders = [0] * self.numPlayers
self.SPAWNTIME = self.difficultyPatternsAI[self.getSafezoneId()][0]
self.proportion = self.difficultyPatternsAI[self.getSafezoneId()][1]
self.REWARDMOD = self.difficultyPatternsAI[self.getSafezoneId()][2]
DistributedMinigameAI.setGameReady(self)
self.spawnings = []
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings.append(Sequence(Func(self.spawnFish, i), Wait(self.SPAWNTIME + random.random()), Func(self.spawnFish, i), Wait(self.SPAWNTIME - 0.5 + random.random())))
self.spawnings[i].loop()
def setGameStart(self, timestamp):
self.notify.debug('setGameStart')
DistributedMinigameAI.setGameStart(self, timestamp)
self.gameFSM.request('swimming')
self.scoreTracking = {}
for avId in self.scoreDict.keys():
self.scoreTracking[avId] = [0,
0,
0,
0,
0]
def getCrabMoving(self, crabId, crabX, dir):
timestamp = globalClockDelta.getFrameNetworkTime()
rand1 = int(random.random() * 10)
rand2 = int(random.random() * 10)
self.sendUpdate('setCrabMoving', [crabId,
timestamp,
rand1,
rand2,
crabX,
dir])
def treasureRecovered(self):
if not hasattr(self, 'scoreTracking'):
return
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.treasureRecovered: invalid avId')
return
if avId not in self.treasureHolders:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.treasureRecovered: tried to recover without holding treasure')
return
self.treasureHolders[self.treasureHolders.index(avId)] = 0
timestamp = globalClockDelta.getFrameNetworkTime()
newSpot = int(random.random() * 30)
self.scoreTracking[avId][4] += 1
for someAvId in self.scoreDict.keys():
if someAvId == avId:
self.scoreDict[avId] += 10 * (self.REWARDMOD * 0.25)
self.scoreDict[someAvId] += 10 * (self.REWARDMOD * 0.75 / float(len(self.scoreDict.keys())))
self.sendUpdate('incrementScore', [avId, newSpot, timestamp])
def hasScoreMult(self):
return 0
def setGameAbort(self):
self.notify.debug('setGameAbort')
taskMgr.remove(self.taskName('gameTimer'))
if self.gameFSM.getCurrentState():
self.gameFSM.request('cleanup')
DistributedMinigameAI.setGameAbort(self)
def gameOver(self):
self.notify.debug('gameOver')
self.gameFSM.request('cleanup')
DistributedMinigameAI.gameOver(self)
trackingString = 'MiniGame Stats : Diving Game'
trackingString += '\nDistrict:%s' % self.getSafezoneId()
for avId in self.scoreTracking.keys():
trackingString = trackingString + '\navId:%s fishHits:%s crabHits:%s treasureCatches:%s treasureDrops:%s treasureRecoveries:%s Score: %s' % (avId,
self.scoreTracking[avId][0],
self.scoreTracking[avId][1],
self.scoreTracking[avId][2],
self.scoreTracking[avId][3],
self.scoreTracking[avId][4],
self.scoreDict[avId])
#jjkoletar: why. do we care atm? self.air.writeServerEvent('MiniGame Stats', None, trackingString)
return
def enterInactive(self):
self.notify.debug('enterInactive')
def exitInactive(self):
pass
def getTimeBase(self):
return self.__timeBase
def enterSwimming(self):
self.notify.debug('enterSwimming')
duration = 65.0
taskMgr.doMethodLater(duration, self.timerExpired, self.taskName('gameTimer'))
def timerExpired(self, task):
self.notify.debug('timer expired')
for avId in self.scoreDict.keys():
if self.scoreDict[avId] < 5:
self.scoreDict[avId] = 5
self.gameOver()
return Task.done
def exitSwimming(self):
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings[i].pause()
def enterCleanup(self):
self.notify.debug('enterCleanup')
for i in range(DivingGameGlobals.NUM_SPAWNERS):
self.spawnings[i].finish()
del self.spawnings
self.gameFSM.request('inactive')
def exitCleanup(self):
pass
def pickupTreasure(self, chestId):
if not hasattr(self, 'scoreTracking'):
return
timestamp = globalClockDelta.getFrameNetworkTime()
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.pickupTreasure: invalid avId')
return
if avId in self.treasureHolders:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.pickupTreasure: already holding treasure')
return
if not (0 <= chestId < len(self.treasureHolders)):
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.pickupTreasure: invalid chest requested (#%d)' % chestId)
return
if self.treasureHolders[chestId]:
# This chest is already held by someone else. Because this can happen
# during normal play (race conditions if two Toons swim into the treasure
# simultaneously) we do not log a suspicious event and silently ignore it.
return
self.scoreTracking[avId][2] += 1
self.treasureHolders[chestId] = avId
self.sendUpdate('setTreasureGrabbed', [avId, chestId])
def spawnFish(self, spawnerId):
timestamp = globalClockDelta.getFrameNetworkTime()
props = self.proportion[spawnerId]
num = random.random()
for i in range(len(props)):
prop = props[i]
low = prop[0]
high = prop[1]
if num > low and num <= high:
offset = int(10 * random.random())
self.sendUpdate('fishSpawn', [timestamp,
i,
spawnerId,
offset])
return
def handleCrabCollision(self, status):
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.handleCrabCollision: invalid avId')
return
timestamp = globalClockDelta.getFrameNetworkTime()
self.scoreTracking[avId][1] += 1
if status == 'normal' or status == 'treasure':
timestamp = globalClockDelta.getFrameNetworkTime()
self.sendUpdate('performCrabCollision', [avId, timestamp])
self.dropTreasure()
def handleFishCollision(self, spawnId, spawnerId, status):
avId = self.air.getAvatarIdFromSender()
if avId not in self.avIdList:
self.air.writeServerEvent('suspicious', avId=avId, issue='DivingGameAI.handleFishCollision: invalid avId')
return
timestamp = globalClockDelta.getFrameNetworkTime()
self.scoreTracking[avId][0] += 1
self.sendUpdate('performFishCollision', [avId,
spawnId,
spawnerId,
timestamp])
self.dropTreasure()
def dropTreasure(self):
avId = self.air.getAvatarIdFromSender()
timestamp = globalClockDelta.getFrameNetworkTime()
if avId in self.treasureHolders:
self.treasureHolders[self.treasureHolders.index(avId)] = 0
self.scoreTracking[avId][3] += 1
self.sendUpdate('setTreasureDropped', [avId, timestamp])
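# Note on spawnFish() above: each row of the proportion table holds
# cumulative (low, high] ranges over a single random.random() draw,
# indexed by fish type. With n = 100, the (100, 100) entries can never
# match, which is how fish types are disabled for a given spawner.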
|
|
from pandac.PandaModules import *
from toontown.toonbase.ToontownGlobals import *
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
import random
from otp.level import DistributedLevel
from direct.directnotify import DirectNotifyGlobal
import CountryClubRoomBase, CountryClubRoom
import FactoryEntityCreator
import CountryClubRoomSpecs
from otp.level import LevelSpec, LevelConstants
from toontown.toonbase import TTLocalizer
if __dev__:
from otp.level import EditorGlobals
def getCountryClubRoomReadyPostName(doId):
return 'countryClubRoomReady-%s' % doId
class DistributedCountryClubRoom(DistributedLevel.DistributedLevel, CountryClubRoomBase.CountryClubRoomBase, CountryClubRoom.CountryClubRoom):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubRoom')
EmulateEntrancePoint = False
def __init__(self, cr):
DistributedLevel.DistributedLevel.__init__(self, cr)
CountryClubRoomBase.CountryClubRoomBase.__init__(self)
CountryClubRoom.CountryClubRoom.__init__(self)
self.suitIds = []
self.suits = []
self.reserveSuits = []
self.joiningReserves = []
self.suitsInitialized = 0
self.goonClipPlanes = {}
self.countryClub = None
return
def createEntityCreator(self):
return FactoryEntityCreator.FactoryEntityCreator(level=self)
def generate(self):
self.notify.debug('generate')
DistributedLevel.DistributedLevel.generate(self)
def delete(self):
del self.countryClub
DistributedLevel.DistributedLevel.delete(self)
CountryClubRoom.CountryClubRoom.delete(self)
self.ignoreAll()
def setCountryClubId(self, countryClubId):
self.notify.debug('countryClubId: %s' % countryClubId)
CountryClubRoomBase.CountryClubRoomBase.setCountryClubId(self, countryClubId)
def setRoomId(self, roomId):
self.notify.debug('roomId: %s' % roomId)
CountryClubRoomBase.CountryClubRoomBase.setRoomId(self, roomId)
def setRoomNum(self, num):
self.notify.debug('roomNum: %s' % num)
CountryClubRoom.CountryClubRoom.setRoomNum(self, num)
def levelAnnounceGenerate(self):
self.notify.debug('levelAnnounceGenerate')
DistributedLevel.DistributedLevel.levelAnnounceGenerate(self)
specModule = CountryClubRoomSpecs.getCountryClubRoomSpecModule(self.roomId)
roomSpec = LevelSpec.LevelSpec(specModule)
if __dev__:
typeReg = self.getCountryClubEntityTypeReg()
roomSpec.setEntityTypeReg(typeReg)
DistributedLevel.DistributedLevel.initializeLevel(self, roomSpec)
def getReadyPostName(self):
return getCountryClubRoomReadyPostName(self.doId)
def privGotSpec(self, levelSpec):
if __dev__:
if not levelSpec.hasEntityTypeReg():
typeReg = self.getCountryClubEntityTypeReg()
levelSpec.setEntityTypeReg(typeReg)
DistributedLevel.DistributedLevel.privGotSpec(self, levelSpec)
base.localAvatar.setH(-90)
CountryClubRoom.CountryClubRoom.enter(self)
self.acceptOnce('leavingCountryClub', self.announceLeaving)
bboard.post(self.getReadyPostName())
def fixupLevelModel(self):
CountryClubRoom.CountryClubRoom.setGeom(self, self.geom)
CountryClubRoom.CountryClubRoom.initFloorCollisions(self)
def setCountryClub(self, countryClub):
self.countryClub = countryClub
def setBossConfronted(self, avId):
self.countryClub.setBossConfronted(avId)
def setDefeated(self):
self.notify.info('setDefeated')
from toontown.coghq import DistributedCountryClub
messenger.send(DistributedCountryClub.DistributedCountryClub.WinEvent)
def initVisibility(self, *args, **kw):
pass
def shutdownVisibility(self, *args, **kw):
pass
def lockVisibility(self, *args, **kw):
pass
def unlockVisibility(self, *args, **kw):
pass
def enterZone(self, *args, **kw):
pass
def updateVisibility(self, *args, **kw):
pass
def setVisibility(self, *args, **kw):
pass
def resetVisibility(self, *args, **kw):
pass
def handleVisChange(self, *args, **kw):
pass
def forceSetZoneThisFrame(self, *args, **kw):
pass
def getParentTokenForEntity(self, entId):
if __dev__:
pass
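# Pack the room number and entity id into a single token; rooms are
# assumed to use entity ids below one million, so tokens cannot collide.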
return 1000000 * self.roomNum + entId
def enterLtNotPresent(self):
CountryClubRoom.CountryClubRoom.enterLtNotPresent(self)
if __dev__:
bboard.removeIfEqual(EditorGlobals.EditTargetPostName, self)
self.ignore('f2')
def enterLtPresent(self):
CountryClubRoom.CountryClubRoom.enterLtPresent(self)
if __dev__:
bboard.post(EditorGlobals.EditTargetPostName, self)
if self.countryClub is not None:
self.countryClub.currentRoomName = CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName[self.roomId]
def printPos(self = self):
thisZone = self.getZoneNode(LevelConstants.UberZoneEntId)
pos = base.localAvatar.getPos(thisZone)
h = base.localAvatar.getH(thisZone)
roomName = CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName[self.roomId]
print 'countryClub pos: %s, h: %s, room: %s' % (repr(pos), h, roomName)
if self.countryClub is not None:
floorNum = self.countryClub.floorNum
else:
floorNum = '???'
posStr = 'X: %.3f' % pos[0] + '\nY: %.3f' % pos[1] + '\nZ: %.3f' % pos[2] + '\nH: %.3f' % h + '\ncountryClubId: %s' % self.countryClubId + '\nfloor: %s' % floorNum + '\nroomId: %s' % self.roomId + '\nroomName: %s' % roomName
base.localAvatar.setChatAbsolute(posStr, CFThought | CFTimeout)
return
self.accept('f2', printPos)
return
def handleSOSPanel(self, panel):
avIds = []
for avId in self.avIdList:
if base.cr.doId2do.get(avId):
avIds.append(avId)
panel.setFactoryToonIdList(avIds)
def disable(self):
self.notify.debug('disable')
CountryClubRoom.CountryClubRoom.exit(self)
if hasattr(self, 'suits'):
del self.suits
if hasattr(self, 'relatedObjectMgrRequest') and self.relatedObjectMgrRequest:
self.cr.relatedObjectMgr.abortRequest(self.relatedObjectMgrRequest)
del self.relatedObjectMgrRequest
bboard.remove(self.getReadyPostName())
DistributedLevel.DistributedLevel.disable(self)
def setSuits(self, suitIds, reserveSuitIds):
oldSuitIds = list(self.suitIds)
self.suitIds = suitIds
self.reserveSuitIds = reserveSuitIds
def reservesJoining(self):
pass
def getCogSpec(self, cogId):
cogSpecModule = CountryClubRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.CogData[cogId]
def getReserveCogSpec(self, cogId):
cogSpecModule = CountryClubRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.ReserveCogData[cogId]
def getBattleCellSpec(self, battleCellId):
cogSpecModule = CountryClubRoomSpecs.getCogSpecModule(self.roomId)
return cogSpecModule.BattleCells[battleCellId]
def getFloorOuchLevel(self):
return 8
def getTaskZoneId(self):
return self.countryClubId
def getBossTaunt(self):
return TTLocalizer.CountryClubBossTaunt
def getBossBattleTaunt(self):
return TTLocalizer.CountryClubBossBattleTaunt
def __str__(self):
if hasattr(self, 'roomId'):
return '%s %s: %s' % (self.__class__.__name__, self.roomId, CountryClubRoomSpecs.BossbotCountryClubRoomId2RoomName[self.roomId])
else:
return 'DistributedCountryClubRoom'
def __repr__(self):
return str(self)
def forceOuch(self, penalty):
self.setOuch(penalty)
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from mock import call
from openstackclient.tests.unit.volume.v2 import fakes as volume_fakes
from openstackclient.volume.v2 import consistency_group_snapshot
class TestConsistencyGroupSnapshot(volume_fakes.TestVolume):
def setUp(self):
super(TestConsistencyGroupSnapshot, self).setUp()
# Get a shortcut to the CgsnapshotManager Mock
self.cgsnapshots_mock = (
self.app.client_manager.volume.cgsnapshots)
self.cgsnapshots_mock.reset_mock()
self.consistencygroups_mock = (
self.app.client_manager.volume.consistencygroups)
self.consistencygroups_mock.reset_mock()
class TestConsistencyGroupSnapshotCreate(TestConsistencyGroupSnapshot):
_consistency_group_snapshot = (
volume_fakes.
FakeConsistencyGroupSnapshot.
create_one_consistency_group_snapshot()
)
consistency_group = (
volume_fakes.FakeConsistencyGroup.create_one_consistency_group())
columns = (
'consistencygroup_id',
'created_at',
'description',
'id',
'name',
'status',
)
data = (
_consistency_group_snapshot.consistencygroup_id,
_consistency_group_snapshot.created_at,
_consistency_group_snapshot.description,
_consistency_group_snapshot.id,
_consistency_group_snapshot.name,
_consistency_group_snapshot.status,
)
def setUp(self):
super(TestConsistencyGroupSnapshotCreate, self).setUp()
self.cgsnapshots_mock.create.return_value = (
self._consistency_group_snapshot)
self.consistencygroups_mock.get.return_value = (
self.consistency_group)
# Get the command object to test
self.cmd = (consistency_group_snapshot.
CreateConsistencyGroupSnapshot(self.app, None))
def test_consistency_group_snapshot_create(self):
arglist = [
'--consistency-group', self.consistency_group.id,
'--description', self._consistency_group_snapshot.description,
self._consistency_group_snapshot.name,
]
verifylist = [
('consistency_group', self.consistency_group.id),
('description', self._consistency_group_snapshot.description),
('snapshot_name', self._consistency_group_snapshot.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.consistencygroups_mock.get.assert_called_once_with(
self.consistency_group.id)
self.cgsnapshots_mock.create.assert_called_once_with(
self.consistency_group.id,
name=self._consistency_group_snapshot.name,
description=self._consistency_group_snapshot.description,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
def test_consistency_group_snapshot_create_no_consistency_group(self):
arglist = [
'--description', self._consistency_group_snapshot.description,
self._consistency_group_snapshot.name,
]
verifylist = [
('description', self._consistency_group_snapshot.description),
('snapshot_name', self._consistency_group_snapshot.name),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.consistencygroups_mock.get.assert_called_once_with(
self._consistency_group_snapshot.name)
self.cgsnapshots_mock.create.assert_called_once_with(
self.consistency_group.id,
name=self._consistency_group_snapshot.name,
description=self._consistency_group_snapshot.description,
)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
class TestConsistencyGroupSnapshotDelete(TestConsistencyGroupSnapshot):
consistency_group_snapshots = (
volume_fakes.FakeConsistencyGroupSnapshot.
create_consistency_group_snapshots(count=2)
)
def setUp(self):
super(TestConsistencyGroupSnapshotDelete, self).setUp()
self.cgsnapshots_mock.get = (
volume_fakes.FakeConsistencyGroupSnapshot.
get_consistency_group_snapshots(self.consistency_group_snapshots)
)
self.cgsnapshots_mock.delete.return_value = None
# Get the command object to test
self.cmd = (consistency_group_snapshot.
DeleteConsistencyGroupSnapshot(self.app, None))
def test_consistency_group_snapshot_delete(self):
arglist = [
self.consistency_group_snapshots[0].id
]
verifylist = [
("consistency_group_snapshot",
[self.consistency_group_snapshots[0].id])
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
self.cgsnapshots_mock.delete.assert_called_once_with(
self.consistency_group_snapshots[0].id)
self.assertIsNone(result)
def test_multiple_consistency_group_snapshots_delete(self):
arglist = []
for c in self.consistency_group_snapshots:
arglist.append(c.id)
verifylist = [
('consistency_group_snapshot', arglist),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
result = self.cmd.take_action(parsed_args)
calls = []
for c in self.consistency_group_snapshots:
calls.append(call(c.id))
self.cgsnapshots_mock.delete.assert_has_calls(calls)
self.assertIsNone(result)
class TestConsistencyGroupSnapshotList(TestConsistencyGroupSnapshot):
consistency_group_snapshots = (
volume_fakes.FakeConsistencyGroupSnapshot.
create_consistency_group_snapshots(count=2)
)
consistency_group = (
volume_fakes.FakeConsistencyGroup.create_one_consistency_group()
)
columns = [
'ID',
'Status',
'Name',
]
columns_long = [
'ID',
'Status',
'ConsistencyGroup ID',
'Name',
'Description',
'Created At',
]
data = []
for c in consistency_group_snapshots:
data.append((
c.id,
c.status,
c.name,
))
data_long = []
for c in consistency_group_snapshots:
data_long.append((
c.id,
c.status,
c.consistencygroup_id,
c.name,
c.description,
c.created_at,
))
def setUp(self):
super(TestConsistencyGroupSnapshotList, self).setUp()
self.cgsnapshots_mock.list.return_value = (
self.consistency_group_snapshots)
self.consistencygroups_mock.get.return_value = self.consistency_group
# Get the command to test
self.cmd = (
consistency_group_snapshot.
ListConsistencyGroupSnapshot(self.app, None)
)
def test_consistency_group_snapshot_list_without_options(self):
arglist = []
verifylist = [
("all_projects", False),
("long", False),
("status", None),
("consistency_group", None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
search_opts = {
'all_tenants': False,
'status': None,
'consistencygroup_id': None,
}
self.cgsnapshots_mock.list.assert_called_once_with(
detailed=True, search_opts=search_opts)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
def test_consistency_group_snapshot_list_with_long(self):
arglist = [
"--long",
]
verifylist = [
("all_projects", False),
("long", True),
("status", None),
("consistency_group", None),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
search_opts = {
'all_tenants': False,
'status': None,
'consistencygroup_id': None,
}
self.cgsnapshots_mock.list.assert_called_once_with(
detailed=True, search_opts=search_opts)
self.assertEqual(self.columns_long, columns)
self.assertEqual(self.data_long, list(data))
def test_consistency_group_snapshot_list_with_options(self):
arglist = [
"--all-project",
"--status", self.consistency_group_snapshots[0].status,
"--consistency-group", self.consistency_group.id,
]
verifylist = [
("all_projects", True),
("long", False),
("status", self.consistency_group_snapshots[0].status),
("consistency_group", self.consistency_group.id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
search_opts = {
'all_tenants': True,
'status': self.consistency_group_snapshots[0].status,
'consistencygroup_id': self.consistency_group.id,
}
self.consistencygroups_mock.get.assert_called_once_with(
self.consistency_group.id)
self.cgsnapshots_mock.list.assert_called_once_with(
detailed=True, search_opts=search_opts)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, list(data))
class TestConsistencyGroupSnapshotShow(TestConsistencyGroupSnapshot):
_consistency_group_snapshot = (
volume_fakes.
FakeConsistencyGroupSnapshot.
create_one_consistency_group_snapshot()
)
columns = (
'consistencygroup_id',
'created_at',
'description',
'id',
'name',
'status',
)
data = (
_consistency_group_snapshot.consistencygroup_id,
_consistency_group_snapshot.created_at,
_consistency_group_snapshot.description,
_consistency_group_snapshot.id,
_consistency_group_snapshot.name,
_consistency_group_snapshot.status,
)
def setUp(self):
super(TestConsistencyGroupSnapshotShow, self).setUp()
self.cgsnapshots_mock.get.return_value = (
self._consistency_group_snapshot)
self.cmd = (consistency_group_snapshot.
ShowConsistencyGroupSnapshot(self.app, None))
def test_consistency_group_snapshot_show(self):
arglist = [
self._consistency_group_snapshot.id
]
verifylist = [
("consistency_group_snapshot", self._consistency_group_snapshot.id)
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
columns, data = self.cmd.take_action(parsed_args)
self.cgsnapshots_mock.get.assert_called_once_with(
self._consistency_group_snapshot.id)
self.assertEqual(self.columns, columns)
self.assertEqual(self.data, data)
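# Note on the pattern used throughout this module: each test builds an
# argv-style `arglist` and the expected parsed (name, value) pairs in
# `verifylist`, validates parsing via check_parser(), then calls
# take_action() and asserts on the manager-mock calls and the returned
# (columns, data).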
|
|
# -*- coding: utf-8 -*-
from datetime import datetime
import magento
import logging
import xmlrpclib
import socket
from trytond.pool import PoolMeta, Pool
from trytond.transaction import Transaction
from trytond.pyson import Eval
from trytond.model import ModelView, ModelSQL, fields
from .api import OrderConfig
__metaclass__ = PoolMeta
__all__ = ['Channel', 'MagentoTier']
MAGENTO_STATES = {
'invisible': ~(Eval('source') == 'magento'),
'required': Eval('source') == 'magento'
}
INVISIBLE_IF_NOT_MAGENTO = {
'invisible': ~(Eval('source') == 'magento'),
}
logger = logging.getLogger('magento')
def batch(iterable, n=1):
l = len(iterable)
for ndx in range(0, l, n):
yield iterable[ndx:min(ndx + n, l)]
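# Example: list(batch([1, 2, 3, 4, 5], n=2)) -> [[1, 2], [3, 4], [5]]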
class Channel:
"""
Sale Channel model
"""
__name__ = 'sale.channel'
# Instance
magento_url = fields.Char(
"Magento Site URL", states=MAGENTO_STATES, depends=['source']
)
magento_api_user = fields.Char(
"API User", states=MAGENTO_STATES, depends=['source']
)
magento_api_key = fields.Char(
"API Key", states=MAGENTO_STATES, depends=['source']
)
magento_carriers = fields.One2Many(
"magento.instance.carrier", "channel", "Carriers / Shipping Methods",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_order_prefix = fields.Char(
'Sale Order Prefix',
help="This helps to distinguish between orders from different channels",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
# website
magento_website_id = fields.Integer(
'Website ID', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_website_name = fields.Char(
'Website Name', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_website_code = fields.Char(
'Website Code', readonly=True,
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_root_category_id = fields.Integer(
'Root Category ID', states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_store_name = fields.Char(
'Store Name', readonly=True, states=INVISIBLE_IF_NOT_MAGENTO,
depends=['source']
)
magento_store_id = fields.Integer(
'Store ID', readonly=True, states=INVISIBLE_IF_NOT_MAGENTO,
depends=['source']
)
#: Checking this will make sure that only the done shipments which have a
#: carrier and tracking reference are exported.
magento_export_tracking_information = fields.Boolean(
'Export tracking information', help='Checking this will make sure'
' that only the done shipments which have a carrier and tracking '
'reference are exported. This will update carrier and tracking '
'reference on magento for the exported shipments as well.',
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_taxes = fields.One2Many(
"sale.channel.magento.tax", "channel", "Taxes",
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
magento_price_tiers = fields.One2Many(
'sale.channel.magento.price_tier', 'channel', 'Default Price Tiers',
states=INVISIBLE_IF_NOT_MAGENTO, depends=['source']
)
product_listings = fields.One2Many(
'product.product.channel_listing', 'channel', 'Product Listings',
)
magento_payment_gateways = fields.One2Many(
'magento.instance.payment_gateway', 'channel', 'Payments',
)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(Channel, cls).__setup__()
cls._sql_constraints += [
(
'unique_magento_channel',
'UNIQUE(magento_url, magento_website_id, magento_store_id)',
'This store is already added'
)
]
        cls._error_messages.update({
            "connection_error": "Incorrect API Settings!\n"
                "Please check and correct the API settings on the channel.",
            "multiple_channels": 'Selected operation can be done only for one'
                ' channel at a time',
            'invalid_magento_channel':
                'Current channel does not belong to Magento!'
})
cls._buttons.update({
'import_magento_carriers': {
'invisible': Eval('source') != 'magento'
},
'configure_magento_connection': {
'invisible': Eval('source') != 'magento'
}
})
def validate_magento_channel(self):
"""
Make sure channel source is magento
"""
if self.source != 'magento':
self.raise_user_error('invalid_magento_channel')
@classmethod
def get_source(cls):
"""
Get the source
"""
res = super(Channel, cls).get_source()
res.append(('magento', 'Magento'))
return res
@staticmethod
def default_magento_order_prefix():
"""
Sets default value for magento order prefix
"""
return 'mag_'
@staticmethod
def default_magento_root_category_id():
"""
        Sets the default root category id to 1, the id of Magento's
        default root category
"""
return 1
def get_taxes(self, rate):
"Return list of tax records with the given rate"
for mag_tax in self.magento_taxes:
if mag_tax.tax_percent == rate:
return list(mag_tax.taxes)
return []
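    # A small example of the lookup above (hypothetical data): with a
    # magento tax tier whose tax_percent == 20.0, get_taxes(20.0) returns
    # that tier's taxes as a list, while an unmatched rate returns [].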
def import_order_states(self):
"""
Import order states for magento channel
Downstream implementation for channel.import_order_states
"""
if self.source != 'magento':
return super(Channel, self).import_order_states()
with Transaction().set_context({'current_channel': self.id}):
# Import order states
with OrderConfig(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as order_config_api:
order_states_data = order_config_api.get_states()
for code, name in order_states_data.iteritems():
self.create_order_state(code, name)
@classmethod
@ModelView.button_action('magento.wizard_configure_magento')
def configure_magento_connection(cls, channels):
"""
Configure magento connection for current channel
:param channels: List of active records of channels
"""
pass
def test_magento_connection(self):
"""
Test magento connection and display appropriate message to user
"""
# Make sure channel belongs to magento
self.validate_magento_channel()
try:
with magento.API(
self.magento_url, self.magento_api_user,
self.magento_api_key
):
return
except (
xmlrpclib.Fault, IOError, xmlrpclib.ProtocolError, socket.timeout
):
self.raise_user_error("connection_error")
@classmethod
@ModelView.button_action('magento.wizard_import_magento_carriers')
def import_magento_carriers(cls, channels):
"""
Import carriers/shipping methods from magento for channels
:param channels: Active record list of magento channels
"""
InstanceCarrier = Pool().get('magento.instance.carrier')
for channel in channels:
channel.validate_magento_channel()
with Transaction().set_context({'current_channel': channel.id}):
with OrderConfig(
channel.magento_url, channel.magento_api_user,
channel.magento_api_key
) as order_config_api:
mag_carriers = order_config_api.get_shipping_methods()
InstanceCarrier.create_all_using_magento_data(mag_carriers)
@classmethod
def get_current_magento_channel(cls):
"""Helper method to get the current magento_channel.
"""
channel = cls.get_current_channel()
# Make sure channel belongs to magento
channel.validate_magento_channel()
return channel
def import_products(self):
"""
Import products for this magento channel
Downstream implementation for channel.import_products
"""
if self.source != 'magento':
return super(Channel, self).import_products()
self.import_category_tree()
with Transaction().set_context({'current_channel': self.id}):
with magento.Product(
self.magento_url, self.magento_api_user, self.magento_api_key
) as product_api:
# TODO: Implement pagination and import each product as async
# task
magento_products = product_api.list()
products = []
for magento_product in magento_products:
products.append(self.import_product(magento_product['sku']))
return products
def import_product(self, sku, product_data=None):
"""
Import specific product for this magento channel
Downstream implementation for channel.import_product
"""
Product = Pool().get('product.product')
Listing = Pool().get('product.product.channel_listing')
if self.source != 'magento':
return super(Channel, self).import_product(sku, product_data)
if not sku:
            # SKU is required; cannot continue without it
return
# Sanitize SKU
sku = sku.strip()
products = Product.search([
('code', '=', sku),
])
listings = Listing.search([
('product.code', '=', sku),
('channel', '=', self)
])
if not products or not listings:
# Either way we need the product data from magento. Make that
# dreaded API call.
with magento.Product(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as product_api:
product_data = product_api.info(sku, identifierType="sku")
# XXX: sanitize product_data, sometimes product sku may
# contain trailing spaces
product_data['sku'] = product_data['sku'].strip()
# Create a product since there is no match for an existing
# product with the SKU.
if not products:
product = Product.create_from(self, product_data)
else:
product, = products
if not listings:
Listing.create_from(self, product_data)
else:
product = products[0]
return product
def import_category_tree(self):
"""
        Imports the category tree and creates categories in the same
        hierarchy as on Magento
"""
Category = Pool().get('product.category')
self.validate_magento_channel()
with Transaction().set_context({'current_channel': self.id}):
with magento.Category(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as category_api:
category_tree = category_api.tree(
self.magento_root_category_id
)
Category.create_tree_using_magento_data(category_tree)
def import_orders(self):
"""
Downstream implementation of channel.import_orders
:return: List of active record of sale imported
"""
if self.source != 'magento':
return super(Channel, self).import_orders()
new_sales = []
with Transaction().set_context({'current_channel': self.id}):
order_states = self.get_order_states_to_import()
            order_states_to_import_in = [
                state.code for state in order_states
            ]
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
                # Search orders matching this store and the states to
                # import; each matching order is fetched with info() and
                # imported below
                order_filter = {
                    'store_id': {'=': self.magento_store_id},
                    'state': {'in': order_states_to_import_in},
                }
self.write([self], {
'last_order_import_time': datetime.utcnow()
})
page = 1
has_next = True
orders_summaries = []
while has_next:
# XXX: Pagination is only available in
# magento extension >= 1.6.1
                    api_res = order_api.search(
                        filters=order_filter, limit=3000, page=page
                    )
has_next = api_res['hasNext']
page += 1
orders_summaries.extend(api_res['items'])
for order_summary in orders_summaries:
new_sales.append(self.import_order(order_summary))
return new_sales
def import_order(self, order_info):
"Downstream implementation to import sale order from magento"
if self.source != 'magento':
return super(Channel, self).import_order(order_info)
Sale = Pool().get('sale.sale')
sale = Sale.find_using_magento_data(order_info)
if sale:
return sale
with Transaction().set_context({'current_channel': self.id}):
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
order_data = order_api.info(order_info['increment_id'])
return Sale.create_using_magento_data(order_data)
@classmethod
def export_order_status_to_magento_using_cron(cls):
"""
        Export sales order status to magento for all magento channels,
        run periodically by cron
"""
channels = cls.search([('source', '=', 'magento')])
for channel in channels:
channel.export_order_status()
def export_order_status(self):
"""
        Export sale order status to magento for this channel.
If last export time is defined, export only those orders which are
updated after last export time.
:return: List of active records of sales exported
"""
Sale = Pool().get('sale.sale')
if self.source != 'magento':
return super(Channel, self).export_order_status()
exported_sales = []
        domain = [('channel', '=', self.id)]
        if self.last_order_export_time:
            # Append to the domain instead of replacing it, so the channel
            # filter is kept when narrowing down by write_date
            domain.append(
                ('write_date', '>=', self.last_order_export_time)
            )
sales = Sale.search(domain)
self.last_order_export_time = datetime.utcnow()
self.save()
for sale in sales:
exported_sales.append(sale.export_order_status_to_magento())
return exported_sales
@classmethod
def export_shipment_status_to_magento_using_cron(cls):
"""
Export Shipment status for shipments using cron
"""
channels = cls.search([('source', '=', 'magento')])
for channel in channels:
channel.export_shipment_status_to_magento()
def export_shipment_status_to_magento(self):
"""
Exports shipment status for shipments to magento, if they are shipped
:return: List of active record of shipment
"""
Shipment = Pool().get('stock.shipment.out')
Sale = Pool().get('sale.sale')
SaleLine = Pool().get('sale.line')
self.validate_magento_channel()
sale_domain = [
('channel', '=', self.id),
('shipment_state', '=', 'sent'),
('magento_id', '!=', None),
('shipments', '!=', None),
]
if self.last_shipment_export_time:
sale_domain.append(
('write_date', '>=', self.last_shipment_export_time)
)
sales = Sale.search(sale_domain)
self.last_shipment_export_time = datetime.utcnow()
self.save()
updated_sales = set([])
for sale in sales:
# Get the increment id from the sale reference
            increment_id = sale.reference[len(self.magento_order_prefix):]
for shipment in sale.shipments:
try:
# Some checks to make sure that only valid shipments are
# being exported
if shipment.is_tracking_exported_to_magento or \
shipment.state != 'done' or \
shipment.magento_increment_id:
continue
updated_sales.add(sale)
with magento.Shipment(
self.magento_url, self.magento_api_user,
self.magento_api_key
) as shipment_api:
item_qty_map = {}
for move in shipment.outgoing_moves:
if isinstance(move.origin, SaleLine) \
and move.origin.magento_id:
                                # This is done because there can be multiple
                                # lines with the same product and they need
                                # to be sent as a sum of quantities
item_qty_map.setdefault(
str(move.origin.magento_id), 0
)
item_qty_map[str(move.origin.magento_id)] += \
move.quantity
shipment_increment_id = shipment_api.create(
order_increment_id=increment_id,
items_qty=item_qty_map
)
Shipment.write(list(sale.shipments), {
'magento_increment_id': shipment_increment_id,
})
if self.magento_export_tracking_information and (
hasattr(shipment, 'tracking_number') and
hasattr(shipment, 'carrier') and
shipment.tracking_number and shipment.carrier
):
with Transaction().set_context(
current_channel=self.id):
shipment.export_tracking_info_to_magento()
            except xmlrpclib.Fault as fault:
if fault.faultCode == 102:
# A shipment already exists for this order,
# we cannot do anything about it.
# Maybe it was already exported earlier or was created
# separately on magento
# Hence, just continue
continue
return updated_sales
def export_product_prices(self):
"""
Exports tier prices of products from tryton to magento for this channel
:return: List of products
"""
if self.source != 'magento':
return super(Channel, self).export_product_prices()
ChannelListing = Pool().get('product.product.channel_listing')
price_domain = [
('channel', '=', self.id),
]
if self.last_product_price_export_time:
price_domain.append([
'OR', [(
'product.write_date', '>=',
self.last_product_price_export_time
)], [(
'product.template.write_date', '>=',
self.last_product_price_export_time
)]
])
product_listings = ChannelListing.search(price_domain)
self.last_product_price_export_time = datetime.utcnow()
self.save()
for listing in product_listings:
            # Use the price tiers from the product listing if it defines
            # any; otherwise fall back to the default price tiers on this
            # channel
price_tiers = listing.price_tiers or self.magento_price_tiers
price_data = []
for tier in price_tiers:
if hasattr(tier, 'product_listing'):
                    # The tier belongs to a product listing, which exposes
                    # a function field for price; use it directly
price = tier.price
else:
                    # The tier comes from the channel's default tiers and
                    # has no product of its own, so compute the price for
                    # the product currently in the loop
price = self.price_list.compute(
None, listing.product, listing.product.list_price,
tier.quantity, self.default_uom
)
price_data.append({
'qty': tier.quantity,
'price': float(price),
})
            # Push the computed tier prices to magento
with magento.ProductTierPrice(
self.magento_url, self.magento_api_user, self.magento_api_key
) as tier_price_api:
tier_price_api.update(
listing.product_identifier, price_data,
identifierType="productID"
)
return len(product_listings)
def get_default_tryton_action(self, code, name):
"""
Returns tryton order state for magento state
        :param code: Code of the magento state
        :param name: Name of the magento state
:return: A dictionary of tryton state and shipment and invoice methods
"""
if self.source != 'magento':
return super(Channel, self).get_default_tryton_action(code, name)
if code in ('new', 'holded'):
return {
'action': 'process_manually',
'invoice_method': 'order',
'shipment_method': 'order'
}
elif code in ('pending_payment', 'payment_review'):
return {
'action': 'import_as_past',
'invoice_method': 'order',
'shipment_method': 'invoice'
}
elif code in ('closed', 'complete'):
return {
'action': 'import_as_past',
'invoice_method': 'order',
'shipment_method': 'order'
}
elif code == 'processing':
return {
'action': 'process_automatically',
'invoice_method': 'order',
'shipment_method': 'order'
}
else:
return {
'action': 'do_not_import',
'invoice_method': 'manual',
'shipment_method': 'manual'
}
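    # A hedged illustration of the mapping above: code 'processing' yields
    # {'action': 'process_automatically', 'invoice_method': 'order',
    # 'shipment_method': 'order'}, while any unrecognized code falls
    # through to 'do_not_import' with manual methods.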
def update_order_status(self):
"Downstream implementation of order_status update"
Sale = Pool().get('sale.sale')
if self.source != 'magento':
return super(Channel, self).update_order_status()
sales = Sale.search([
('channel', '=', self.id),
('state', 'in', ('confirmed', 'processing')),
])
order_ids = [sale.reference for sale in sales]
for order_ids_batch in batch(order_ids, 50):
with magento.Order(
self.magento_url, self.magento_api_user, self.magento_api_key
) as order_api:
orders_data = order_api.info_multi(order_ids_batch)
for i, order_data in enumerate(orders_data):
if order_data.get('isFault'):
if order_data['faultCode'] == '100':
# 100: Requested order not exists.
# TODO: Remove order from channel or add some
# exception.
pass
                        logger.warning(
                            "Order %s: %s %s", order_ids_batch[i],
                            order_data['faultCode'], order_data['faultMessage']
                        )
continue
sale, = Sale.search([
('reference', '=', order_data['increment_id'])
])
sale.update_order_status_from_magento(order_data=order_data)
class MagentoTier(ModelSQL, ModelView):
"""Price Tiers for store
This model stores the default price tiers to be used while sending
tier prices for a product from Tryton to Magento.
    The product listing has a similar table; if that table has no entries,
    these default tiers are used.
"""
__name__ = 'sale.channel.magento.price_tier'
channel = fields.Many2One(
'sale.channel', 'Magento Store', required=True, readonly=True,
domain=[('source', '=', 'magento')]
)
quantity = fields.Float('Quantity', required=True)
@classmethod
def __setup__(cls):
"""
Setup the class before adding to pool
"""
super(MagentoTier, cls).__setup__()
cls._sql_constraints += [
(
'channel_quantity_unique', 'UNIQUE(channel, quantity)',
'Quantity in price tiers must be unique for a channel'
)
]
#!/usr/bin/env python
"""Helper functionality for gui testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import atexit
import binascii
import functools
import logging
import os
import threading
import time
from absl import flags
from future.builtins import range
from future.moves.urllib import parse as urlparse
import portpicker
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.common import action_chains
from selenium.webdriver.common import keys
from selenium.webdriver.support import select
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import crypto as rdf_crypto
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import compatibility
from grr_response_proto import tests_pb2
from grr_response_server import data_store
from grr_response_server import flow_base
from grr_response_server import foreman_rules
from grr_response_server import output_plugin
from grr_response_server.databases import db
from grr_response_server.flows.general import processes
from grr_response_server.flows.general import transfer
from grr_response_server.gui import api_auth_manager
from grr_response_server.gui import api_call_router_with_approval_checks
from grr_response_server.gui import webauth
from grr_response_server.gui import wsgiapp_testlib
from grr_response_server.gui.api_plugins import user as api_user
from grr_response_server.rdfvalues import flow_runner as rdf_flow_runner
from grr_response_server.rdfvalues import objects as rdf_objects
from grr.test_lib import acl_test_lib
from grr.test_lib import action_mocks
from grr.test_lib import artifact_test_lib as ar_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
from grr.test_lib import vfs_test_lib
flags.DEFINE_string(
"chrome_driver_path", None,
"Path to the chrome driver binary. If not set, webdriver "
"will search on PATH for the binary.")
flags.DEFINE_string(
"chrome_binary_path", None,
"Path to the Chrome binary. If not set, webdriver will search for "
"Chrome on PATH.")
flags.DEFINE_bool(
"use_headless_chrome", False, "If set, run Chrome driver in "
"headless mode. Useful when running tests in a window-manager-less "
"environment.")
flags.DEFINE_bool(
"disable_chrome_sandboxing", False,
"Whether to disable chrome sandboxing (e.g when running in a Docker "
"container).")
# An increasing sequence of times.
TIME_0 = test_lib.FIXED_TIME
TIME_1 = TIME_0 + rdfvalue.Duration.From(1, rdfvalue.DAYS)
TIME_2 = TIME_1 + rdfvalue.Duration.From(1, rdfvalue.DAYS)
def DateString(t):
return t.Format("%Y-%m-%d")
def DateTimeString(t):
return t.Format("%Y-%m-%d %H:%M:%S")
def CreateFileVersions(client_id):
"""Add new versions for a file."""
content_1 = b"Hello World"
content_2 = b"Goodbye World"
# This file already exists in the fixture at TIME_0, we write a
# later version.
CreateFileVersion(
client_id, "fs/os/c/Downloads/a.txt", content_1, timestamp=TIME_1)
CreateFileVersion(
client_id, "fs/os/c/Downloads/a.txt", content_2, timestamp=TIME_2)
return (content_1, content_2)
def CreateFileVersion(client_id, path, content=b"", timestamp=None):
"""Add a new version for a file."""
if timestamp is None:
timestamp = rdfvalue.RDFDatetime.Now()
with test_lib.FakeTime(timestamp):
path_type, components = rdf_objects.ParseCategorizedPath(path)
client_path = db.ClientPath(client_id, path_type, components)
vfs_test_lib.CreateFile(client_path, content=content)
def CreateFolder(client_id, path, timestamp):
"""Creates a VFS folder."""
with test_lib.FakeTime(timestamp):
path_type, components = rdf_objects.ParseCategorizedPath(path)
path_info = rdf_objects.PathInfo()
path_info.path_type = path_type
path_info.components = components
path_info.directory = True
data_store.REL_DB.WritePathInfos(client_id, [path_info])
def SeleniumAction(f):
"""Decorator to do multiple attempts in case of WebDriverException."""
@functools.wraps(f)
def Decorator(*args, **kwargs):
delay = 0.2
num_attempts = 15
cur_attempt = 0
while True:
try:
return f(*args, **kwargs)
except exceptions.WebDriverException as e:
logging.warning("Selenium raised %s", utils.SmartUnicode(e))
cur_attempt += 1
if cur_attempt == num_attempts:
raise
time.sleep(delay)
return Decorator
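# Usage sketch (hypothetical method name): any UI action wrapped with
# @SeleniumAction is retried up to 15 times, 0.2s apart, whenever Selenium
# raises WebDriverException, e.g.:
#
#   @SeleniumAction
#   def OpenSettings(self):
#     self.driver.find_element_by_id("settings").click()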
class DisabledHttpErrorChecksContextManager(object):
"""Context manager to be returned by test's DisabledHttpErrorChecks call."""
def __init__(self, test):
self.test = test
def __enter__(self):
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
self.test.ignore_http_errors = False
self.test.driver.execute_script("window.grrInterceptedHTTPErrors_ = []")
class GRRSeleniumTest(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
"""Baseclass for selenium UI tests."""
# Default duration (in seconds) for WaitUntil.
duration = 5
# Time to wait between polls for WaitUntil.
sleep_time = 0.2
# This is the global selenium handle.
driver = None
# Base url of the Admin UI
base_url = None
@staticmethod
def _TearDownSelenium():
"""Tear down Selenium session."""
try:
if GRRSeleniumTest.driver:
GRRSeleniumTest.driver.quit()
except Exception as e: # pylint: disable=broad-except
logging.exception(e)
@staticmethod
def _SetUpSelenium(port):
"""Set up Selenium session."""
atexit.register(GRRSeleniumTest._TearDownSelenium)
GRRSeleniumTest.base_url = ("http://localhost:%s" % port)
# pylint: disable=unreachable
os.environ.pop("http_proxy", None)
options = webdriver.ChromeOptions()
if flags.FLAGS.chrome_binary_path:
options.binary_location = flags.FLAGS.chrome_binary_path
options.add_argument("--disable-notifications")
if flags.FLAGS.use_headless_chrome:
options.add_argument("--headless")
options.add_argument("--window-size=1400,1080")
if flags.FLAGS.disable_chrome_sandboxing:
options.add_argument("--no-sandbox")
if flags.FLAGS.chrome_driver_path:
GRRSeleniumTest.driver = webdriver.Chrome(
flags.FLAGS.chrome_driver_path, chrome_options=options)
else:
GRRSeleniumTest.driver = webdriver.Chrome(chrome_options=options)
# TODO(user): Hack! This is needed to allow downloads in headless mode.
# Remove this code when upstream Python ChromeDriver implementation has
# send_command implemented.
#
# See
# https://stackoverflow.com/questions/45631715/downloading-with-chrome-headless-and-selenium
# and the code in setUp().
# pylint: disable=protected-access
GRRSeleniumTest.driver.command_executor._commands["send_command"] = (
"POST", "/session/$sessionId/chromium/send_command")
# pylint: enable=protected-access
# pylint: enable=unreachable
_selenium_set_up_lock = threading.RLock()
_selenium_set_up_done = False
@classmethod
def setUpClass(cls):
super(GRRSeleniumTest, cls).setUpClass()
with GRRSeleniumTest._selenium_set_up_lock:
if not GRRSeleniumTest._selenium_set_up_done:
port = portpicker.pick_unused_port()
logging.info("Picked free AdminUI port %d.", port)
# Start up a server in another thread
GRRSeleniumTest._server_trd = wsgiapp_testlib.ServerThread(
port, name="SeleniumServerThread")
GRRSeleniumTest._server_trd.StartAndWaitUntilServing()
GRRSeleniumTest._SetUpSelenium(port)
GRRSeleniumTest._selenium_set_up_done = True
def InstallACLChecks(self):
"""Installs AccessControlManager and stubs out SendEmail."""
acrwac = api_call_router_with_approval_checks
# Clear the cache of the approvals-based router.
acrwac.ApiCallRouterWithApprovalChecks.ClearCache()
name = compatibility.GetName(acrwac.ApiCallRouterWithApprovalChecks)
config_overrider = test_lib.ConfigOverrider({"API.DefaultRouter": name})
config_overrider.Start()
self.addCleanup(config_overrider.Stop)
# Make sure ApiAuthManager is initialized with this configuration setting.
api_auth_manager.InitializeApiAuthManager()
def _CheckJavascriptErrors(self):
errors = self.driver.execute_script(
"return (() => {const e = window.grrInterceptedJSErrors_ || []; "
"window.grrInterceptedJSErrors_ = []; return e;})();")
msgs = []
for e in errors:
msg = "[javascript]: %s" % e
logging.error(msg)
msgs.append(msg)
if msgs:
self.fail("Javascript error encountered during test: %s" %
"\n\t".join(msgs))
def DisableHttpErrorChecks(self):
self.ignore_http_errors = True
return DisabledHttpErrorChecksContextManager(self)
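  # Usage sketch: wrap requests that are expected to fail so that
  # CheckBrowserErrors() does not fail the test, e.g.:
  #
  #   with self.DisableHttpErrorChecks():
  #     self.Open("/#/some-failing-view")  # hypothetical URL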
def GetHttpErrors(self):
return self.driver.execute_script(
"return (() => {const e = window.grrInterceptedHTTPErrors_ || []; "
"window.grrInterceptedHTTPErrors_ = []; return e;})();")
def _CheckHttpErrors(self):
if self.ignore_http_errors:
return
msgs = []
for e in self.GetHttpErrors():
msg = "[http]: {!r}".format(e)
logging.error(msg)
msgs.append(msg)
if msgs:
self.fail("HTTP request failed during test: %s" % "\n\t".join(msgs))
def CheckBrowserErrors(self):
self._CheckJavascriptErrors()
self._CheckHttpErrors()
def WaitUntil(self, condition_cb, *args):
self.CheckBrowserErrors()
for _ in range(int(self.duration / self.sleep_time)):
try:
res = condition_cb(*args)
if res:
return res
      # Raise in case of a test-related error (e.g. a failing assertion).
except self.failureException:
raise
# The element might not exist yet and selenium could raise here. (Also
# Selenium raises Exception not StandardError).
except Exception as e: # pylint: disable=broad-except
logging.warning("Selenium raised %s", utils.SmartUnicode(e))
self.CheckBrowserErrors()
time.sleep(self.sleep_time)
self.fail(
"condition %s %s not met, body is: %s" %
(condition_cb, args, self.driver.find_element_by_tag_name("body").text))
def _FindElements(self, selector):
selector_type, effective_selector = selector.split("=", 1)
if selector_type != "css":
raise ValueError(
"Only CSS selector is supported for querying multiple elements.")
elems = self.driver.execute_script(
"return $(\"" + effective_selector.replace("\"", "\\\"") + "\");")
return [e for e in elems if e.is_displayed()]
def _FindElement(self, selector):
try:
selector_type, effective_selector = selector.split("=", 1)
except ValueError:
effective_selector = selector
selector_type = None
if selector_type == "css":
elems = self.driver.execute_script(
"return $(\"" + effective_selector.replace("\"", "\\\"") + "\");")
elems = [e for e in elems if e.is_displayed()]
if not elems:
raise exceptions.NoSuchElementException()
else:
return elems[0]
elif selector_type == "link":
links = self.driver.find_elements_by_partial_link_text(effective_selector)
for l in links:
if l.text.strip() == effective_selector:
return l
raise exceptions.NoSuchElementException()
elif selector_type == "xpath":
return self.driver.find_element_by_xpath(effective_selector)
elif selector_type == "id":
return self.driver.find_element_by_id(effective_selector)
elif selector_type == "name":
return self.driver.find_element_by_name(effective_selector)
elif selector_type is None:
if effective_selector.startswith("//"):
return self.driver.find_element_by_xpath(effective_selector)
else:
return self.driver.find_element_by_id(effective_selector)
else:
raise ValueError("unknown selector type %s" % selector_type)
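  # Selector syntax accepted by _FindElement, with illustrative values:
  #   "css=div.panel"     jQuery CSS lookup, first visible match
  #   "link=Manage"       partial-link-text lookup, exact text match
  #   "xpath=//div[@id]"  XPath lookup
  #   "id=content", "name=q"
  #   bare "//div" or "content" fall back to xpath or id respectively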
@SeleniumAction
def Open(self, url):
    # In GRR Selenium tests calling Open() implies page refresh.
    # We make sure that browser/webdriver is not confused by the fact that
    # only the fragment part of the URL (after the '#' symbol) changes,
    # since a confused WebDriver tends to get stuck.
self.driver.get("data:.")
self.driver.get(self.base_url + url)
@SeleniumAction
def Refresh(self):
self.driver.refresh()
@SeleniumAction
def Back(self):
self.driver.back()
@SeleniumAction
def Forward(self):
self.driver.forward()
def WaitUntilNot(self, condition_cb, *args):
self.WaitUntil(lambda: not condition_cb(*args))
def GetPageTitle(self):
return self.driver.title
def IsElementPresent(self, target):
try:
self._FindElement(target)
return True
except exceptions.NoSuchElementException:
return False
def GetCurrentUrlPath(self):
url = urlparse.urlparse(self.driver.current_url)
result = url.path
if url.fragment:
result += "#" + url.fragment
return result
def GetElement(self, target):
try:
return self._FindElement(target)
except exceptions.NoSuchElementException:
return None
def GetVisibleElement(self, target):
try:
element = self._FindElement(target)
if element.is_displayed():
return element
except exceptions.NoSuchElementException:
pass
return None
def IsTextPresent(self, text):
return self.AllTextsPresent([text])
def AllTextsPresent(self, texts):
body = self.driver.find_element_by_tag_name("body").text
for text in texts:
if utils.SmartUnicode(text) not in body:
return False
return True
def IsVisible(self, target):
element = self.GetElement(target)
return element and element.is_displayed()
def GetText(self, target):
element = self.WaitUntil(self.GetVisibleElement, target)
return element.text.strip()
def GetValue(self, target):
return self.GetAttribute(target, "value")
def GetAttribute(self, target, attribute):
element = self.WaitUntil(self.GetVisibleElement, target)
return element.get_attribute(attribute)
def IsUserNotificationPresent(self, contains_string):
self.Click("css=#notification_button")
self.WaitUntil(self.IsElementPresent, "css=grr-user-notification-dialog")
self.WaitUntilNot(
self.IsElementPresent,
"css=grr-user-notification-dialog:contains('Loading...')")
notifications_text = self.GetText("css=grr-user-notification-dialog")
self.Click("css=grr-user-notification-dialog button:contains('Close')")
return contains_string in notifications_text
def GetJavaScriptValue(self, js_expression):
return self.driver.execute_script(js_expression)
def _WaitForAjaxCompleted(self):
self.WaitUntilEqual(
[], self.GetJavaScriptValue,
"return (window.$ && $('body') && $('body').injector && "
"$('body').injector().get('$http').pendingRequests) || []")
@SeleniumAction
def Type(self, target, text, end_with_enter=False):
element = self.WaitUntil(self.GetVisibleElement, target)
element.clear()
element.send_keys(text)
if end_with_enter:
element.send_keys(keys.Keys.ENTER)
# We experienced that Selenium sometimes swallows the last character of the
# text sent. Raising an exception here will just retry in that case.
if not end_with_enter:
if text != self.GetValue(target):
raise exceptions.WebDriverException("Send_keys did not work correctly.")
@SeleniumAction
def Click(self, target):
# Selenium clicks elements by obtaining their position and then issuing a
# click action in the middle of this area. This may lead to misclicks when
# elements are moving. Make sure that they are stationary before issuing
# the click action (specifically, using the bootstrap "fade" class that
# slides dialogs in is highly discouraged in combination with .Click()).
# Since Selenium does not know when the page is ready after AJAX calls, we
# need to wait for AJAX completion here to be sure that all event handlers
# are attached to their respective DOM elements.
self._WaitForAjaxCompleted()
element = self.WaitUntil(self.GetVisibleElement, target)
element.click()
@SeleniumAction
def MoveMouseTo(self, target):
self._WaitForAjaxCompleted()
element = self.WaitUntil(self.GetVisibleElement, target)
action_chains.ActionChains(self.driver).move_to_element(element).perform()
@SeleniumAction
def DoubleClick(self, target):
# Selenium clicks elements by obtaining their position and then issuing a
# click action in the middle of this area. This may lead to misclicks when
# elements are moving. Make sure that they are stationary before issuing
# the click action (specifically, using the bootstrap "fade" class that
# slides dialogs in is highly discouraged in combination with
# .DoubleClick()).
# Since Selenium does not know when the page is ready after AJAX calls, we
# need to wait for AJAX completion here to be sure that all event handlers
# are attached to their respective DOM elements.
self._WaitForAjaxCompleted()
element = self.WaitUntil(self.GetVisibleElement, target)
action_chains.ActionChains(self.driver).double_click(element).perform()
@SeleniumAction
def Select(self, target, label):
element = self.WaitUntil(self.GetVisibleElement, target)
select.Select(element).select_by_visible_text(label)
def GetSelectedLabel(self, target):
element = self.WaitUntil(self.GetVisibleElement, target)
return select.Select(element).first_selected_option.text.strip()
def IsChecked(self, target):
return self.WaitUntil(self.GetVisibleElement, target).is_selected()
def GetCssCount(self, target):
if not target.startswith("css="):
raise ValueError("invalid target for GetCssCount: " + target)
return len(self._FindElements(target))
def WaitUntilEqual(self, target, condition_cb, *args):
condition_value = None
for _ in range(int(self.duration / self.sleep_time)):
try:
condition_value = condition_cb(*args)
if condition_value == target:
return True
      # Raise in case of a test-related error (e.g. a failing assertion).
except self.failureException:
raise
# The element might not exist yet and selenium could raise here. (Also
# Selenium raises Exception not StandardError).
except Exception as e: # pylint: disable=broad-except
logging.warning("Selenium raised %s", utils.SmartUnicode(e))
time.sleep(self.sleep_time)
self.fail("condition %s(%s) not met (expected=%s, got_last_time=%s)" %
(condition_cb, args, target, condition_value))
def WaitUntilContains(self, target, condition_cb, *args):
data = ""
target = utils.SmartUnicode(target)
for _ in range(int(self.duration / self.sleep_time)):
try:
data = condition_cb(*args)
if target in data:
return True
      # Raise in case of a test-related error (e.g. a failing assertion).
except self.failureException:
raise
# The element might not exist yet and selenium could raise here.
except Exception as e: # pylint: disable=broad-except
logging.warning("Selenium raised %s", utils.SmartUnicode(e))
time.sleep(self.sleep_time)
self.fail("condition not met. got: %r, does not contain: %s" %
(data, target))
def setUp(self):
super(GRRSeleniumTest, self).setUp()
# Used by CheckHttpErrors
self.ignore_http_errors = False
self.token.username = u"gui_user"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
# Make the user use the advanced gui so we can test it.
data_store.REL_DB.WriteGRRUser(
self.token.username, ui_mode=api_user.GUISettings.UIMode.ADVANCED)
artifact_patcher = ar_test_lib.PatchDatastoreOnlyArtifactRegistry()
artifact_patcher.start()
self.addCleanup(artifact_patcher.stop)
self.InstallACLChecks()
if flags.FLAGS.use_headless_chrome:
params = {
"cmd": "Page.setDownloadBehavior",
"params": {
"behavior": "allow",
"downloadPath": self.temp_dir
}
}
result = self.driver.execute("send_command", params)
if result["status"] != 0:
raise RuntimeError("can't set Page.setDownloadBehavior: %s" % result)
def tearDown(self):
self.CheckBrowserErrors()
super(GRRSeleniumTest, self).tearDown()
def WaitForNotification(self, username):
sleep_time = 0.2
iterations = 50
for _ in range(iterations):
try:
pending_notifications = data_store.REL_DB.ReadUserNotifications(
username, state=rdf_objects.UserNotification.State.STATE_PENDING)
if pending_notifications:
return
except IOError:
pass
time.sleep(sleep_time)
self.fail("Notification for user %s never sent." % username)
class GRRSeleniumHuntTest(hunt_test_lib.StandardHuntTestMixin, GRRSeleniumTest):
"""Common functionality for hunt gui tests."""
def _CreateForemanClientRuleSet(self):
return foreman_rules.ForemanClientRuleSet(rules=[
foreman_rules.ForemanClientRule(
rule_type=foreman_rules.ForemanClientRule.Type.REGEX,
regex=foreman_rules.ForemanRegexClientRule(
field="CLIENT_NAME", attribute_regex="GRR"))
])
def _CreateHuntWithDownloadedFile(self):
hunt = self.CreateSampleHunt(
path=os.path.join(self.base_path, "test.plist"), client_count=1)
self.RunHunt(
client_ids=self.client_ids,
client_mock=action_mocks.FileFinderClientMock())
return hunt
def CheckState(self, state):
self.WaitUntil(self.IsElementPresent, "css=div[state=\"%s\"]" % state)
def CreateSampleHunt(self,
path=None,
stopped=False,
output_plugins=None,
client_limit=0,
client_count=10,
creator=None):
self.client_ids = self.SetupClients(client_count)
self.hunt_urn = self.StartHunt(
flow_runner_args=rdf_flow_runner.FlowRunnerArgs(
flow_name=compatibility.GetName(transfer.GetFile)),
flow_args=transfer.GetFileArgs(
pathspec=rdf_paths.PathSpec(
path=path or "/tmp/evil.txt",
pathtype=rdf_paths.PathSpec.PathType.OS,
)),
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=output_plugins or [],
client_rate=0,
client_limit=client_limit,
creator=creator or self.token.username,
paused=stopped)
return self.hunt_urn
def CreateGenericHuntWithCollection(self, values=None):
self.client_ids = self.SetupClients(1)
CreateFileVersion(self.client_ids[0], "fs/os/c/bin/bash")
if values is None:
values = [
rdfvalue.RDFURN("aff4:/sample/1"),
rdfvalue.RDFURN("aff4:/%s/fs/os/c/bin/bash" % self.client_ids[0]),
rdfvalue.RDFURN("aff4:/sample/3")
]
hunt_urn = self.StartHunt(
client_rule_set=self._CreateForemanClientRuleSet(),
output_plugins=[],
creator=self.token.username)
self.AddResultsToHunt(hunt_urn, self.client_ids[0], values)
return hunt_urn, self.client_ids[0]
class SearchClientTestBase(hunt_test_lib.StandardHuntTestMixin,
GRRSeleniumTest):
def CreateSampleHunt(self, description, creator=None):
return self.StartHunt(description=description, paused=True, creator=creator)
class RecursiveTestFlowArgs(rdf_structs.RDFProtoStruct):
protobuf = tests_pb2.RecursiveTestFlowArgs
class RecursiveTestFlow(flow_base.FlowBase):
"""A test flow which starts some subflows."""
args_type = RecursiveTestFlowArgs
# If a flow doesn't have a category, it can't be started/terminated by a
# non-supervisor user when FullAccessControlManager is used.
category = "/Test/"
def Start(self):
if self.args.depth < 2:
for i in range(2):
self.Log("Subflow call %d", i)
self.CallFlow(
compatibility.GetName(self.__class__),
depth=self.args.depth + 1,
next_state="End")
class FlowWithOneLogStatement(flow_base.FlowBase):
"""Flow that logs a single statement."""
def Start(self):
self.Log("I do log.")
class FlowWithOneStatEntryResult(flow_base.FlowBase):
"""Test flow that calls SendReply once with a StatEntry value."""
def Start(self):
self.SendReply(
rdf_client_fs.StatEntry(
pathspec=rdf_paths.PathSpec(
path="/some/unique/path",
pathtype=rdf_paths.PathSpec.PathType.OS)))
class FlowWithOneNetworkConnectionResult(flow_base.FlowBase):
"""Test flow that calls SendReply once with a NetworkConnection value."""
def Start(self):
self.SendReply(rdf_client_network.NetworkConnection(pid=42))
class FlowWithOneHashEntryResult(flow_base.FlowBase):
"""Test flow that calls SendReply once with a HashEntry value."""
def Start(self):
hash_result = rdf_crypto.Hash(
sha256=binascii.unhexlify(
"9e8dc93e150021bb4752029ebbff51394aa36f069cf19901578e4f06017acdb5"),
sha1=binascii.unhexlify("6dd6bee591dfcb6d75eb705405302c3eab65e21a"),
md5=binascii.unhexlify("8b0a15eefe63fd41f8dc9dee01c5cf9a"))
self.SendReply(hash_result)
class DummyOutputPlugin(output_plugin.OutputPlugin):
"""Output plugin that does nothing."""
name = "dummy"
description = "Dummy do do."
args_type = processes.ListProcessesArgs
def ProcessResponses(self, state, responses):
pass
"""
Dummy client runner
This module implements a stand-alone launcher for stress-testing
an Evennia game. It will launch any number of fake clients. These
clients will log into the server and start doing random operations.
How these operations should be customized and weighted depends on
the type of game being tested. A default action module for testing
plain Evennia is included.
Please note that you shouldn't run this on a production server!
Launch the program without any arguments or options to see a
full step-by-step setup help.
Basically (for testing default Evennia):
- Use an empty/testing database.
- set PERMISSION_PLAYER_DEFAULT = "Builders"
- start server, eventually with profiling active
- launch this client runner
If you want to customize the runner's client actions
(because you changed the cmdset, need to better match
your use cases, or want to add more actions), you can
change which actions are used by adding a path to
DUMMYRUNNER_ACTIONS_MODULE = <path.to.your.module>
in your settings. See utils.dummyrunner_actions.py
for instructions on how to define this module.
"""
import os, sys, time, random
from optparse import OptionParser
from twisted.conch import telnet
from twisted.internet import reactor, protocol
# from twisted.application import internet, service
# from twisted.web import client
from twisted.internet.task import LoopingCall
# Tack on the root evennia directory to the python path and initialize django settings
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
os.environ["DJANGO_SETTINGS_MODULE"] = "game.settings"
#from game import settings
#try:
# from django.conf import settings as settings2
# settings2.configure()
#except RuntimeError:
# pass
#finally:
# del settings2
from django.conf import settings
from src.utils import utils
HELPTEXT = """
Usage: dummyrunner.py [-h][-v][-V] [nclients]
DO NOT RUN THIS ON A PRODUCTION SERVER! USE A CLEAN/TESTING DATABASE!
This stand-alone program launches dummy telnet clients against a
running Evennia server. The idea is to mimic real players logging in
and repeatedly doing resource-heavy commands so as to stress test the
game. It uses the default command set to log in and issue commands, so
if that was customized, some of the functionality will not be tested
(it will not fail, the commands will just not be recognized). The
running clients will create new objects and rooms all over the place
as part of their running, so using a clean/testing database is
strongly recommended.
Setup:
1) set up a fresh/clean database (if using sqlite, just safe-copy
away your real evennia.db3 file and create a new one with
manage.py)
2) in game/settings.py, add
PERMISSION_PLAYER_DEFAULT="Builders"
3a) Start Evennia like normal.
3b) If you want profiling, start Evennia like this instead:
python runner.py -S start
this will start Evennia under cProfiler with output server.prof.
4) run this dummy runner:
   python dummyrunner.py <nr_of_clients> [timestep] [port]
Default is to connect one client to port 4000, using a 5 second
timestep. Increase the number of clients and shorten the
timestep (minimum is 1s) to further stress the game.
You can stop the dummy runner with Ctrl-C.
5) Log on and determine if the game remains responsive despite the
heavier load. Note that if you do profiling, there is an
additional overhead from the profiler too!
6) If you use profiling, let the game run long enough to gather
data, then stop the server. You can inspect the server.prof file
from a python prompt (see Python's manual on cProfiler).
"""
# number of clients to launch if no input is given on command line
DEFAULT_NCLIENTS = 1
# time between each 'tick', in seconds, if not set on command
# line. All launched clients will be called upon to possibly do an
# action with this frequency.
DEFAULT_TIMESTEP = 2
# chance of a client performing an action, per timestep. This helps to
# spread out usage randomly, like it would be in reality.
CHANCE_OF_ACTION = 0.05
# Port to use, if not specified on command line
DEFAULT_PORT = settings.TELNET_PORTS[0]
#------------------------------------------------------------
# Helper functions
#------------------------------------------------------------
def idcounter():
    "generates an endless sequence of increasing id numbers"
idcount = 0
while True:
idcount += 1
yield idcount
OID = idcounter()
CID = idcounter()
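# Usage sketch: OID.next() returns 1, then 2, and so on; OID and CID count
# independently, providing object ids and client ids unique to this run.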
def makeiter(obj):
    "ensures the return value is iterable by wrapping non-iterables in a list"
if not hasattr(obj, '__iter__'):
return [obj]
return obj
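# e.g. makeiter("look") returns ["look"], while makeiter(["look", "dig"])
# is returned unchanged.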
#------------------------------------------------------------
# Client classes
#------------------------------------------------------------
class DummyClient(telnet.StatefulTelnetProtocol):
"""
Handles connection to a running Evennia server,
mimicking a real player by sending commands on
a timer.
"""
def connectionMade(self):
# public properties
self.cid = CID.next()
self.istep = 0
self.exits = [] # exit names created
self.objs = [] # obj names created
self._report = ""
self._cmdlist = [] # already stepping in a cmd definition
self._ncmds = 0
self._actions = self.factory.actions
self._echo_brief = self.factory.verbose == 1
self._echo_all = self.factory.verbose == 2
#print " ** client %i connected." % self.cid
reactor.addSystemEventTrigger('before', 'shutdown', self.logout)
# start client tick
d = LoopingCall(self.step)
d.start(self.factory.timestep, now=True).addErrback(self.error)
def dataReceived(self, data):
"Echo incoming data to stdout"
if self._echo_all:
print data
    def connectionLost(self, reason):
        "losing the connection"
#print " ** client %i lost connection." % self.cid
def error(self, err):
"error callback"
print err
    def counter(self):
        "produces an id that is unique also across clients"
return OID.next()
def logout(self):
"Causes the client to log out of the server. Triggered by ctrl-c signal."
cmd, report = self._actions[1](self)
print "client %i %s (%s actions)" % (self.cid, report, self.istep)
self.sendLine(cmd)
def step(self):
"""
Perform a step. This is called repeatedly by the runner
and causes the client to issue commands to the server.
This holds all "intelligence" of the dummy client.
"""
if random.random() > CHANCE_OF_ACTION:
return
if not self._cmdlist:
# no cmdlist in store, get a new one
if self.istep == 0:
cfunc = self._actions[0]
else: # random selection using cumulative probabilities
rand = random.random()
cfunc = [func for cprob, func in self._actions[2] if cprob >= rand][0]
# assign to internal cmdlist
cmd, self._report = cfunc(self)
self._cmdlist = list(makeiter(cmd))
self._ncmds = len(self._cmdlist)
# output
if self.istep == 0 and not (self._echo_brief or self._echo_all):
print "client %i %s" % (self.cid, self._report)
elif self.istep == 0 or self._echo_brief or self._echo_all:
print "client %i %s (%i/%i)" % (self.cid, self._report, self._ncmds-(len(self._cmdlist)-1), self._ncmds)
# launch the action by popping the first element from cmdlist (don't hide tracebacks)
self.sendLine(str(self._cmdlist.pop(0)))
self.istep += 1 # only steps up if an action is taken
class DummyFactory(protocol.ClientFactory):
protocol = DummyClient
def __init__(self, actions, timestep, verbose):
"Setup the factory base (shared by all clients)"
self.actions = actions
self.timestep = timestep
self.verbose = verbose
#------------------------------------------------------------
# Access method:
# Starts clients and connects them to a running server.
#------------------------------------------------------------
def start_all_dummy_clients(actions, nclients=1, timestep=5, telnet_port=4000, verbose=0):
# validating and preparing the action tuple
# make sure the probabilities add up to 1
pratio = 1.0 / sum(tup[0] for tup in actions[2:])
flogin, flogout, probs, cfuncs = actions[0], actions[1], [tup[0] * pratio for tup in actions[2:]], [tup[1] for tup in actions[2:]]
    # create cumulative probabilities for the random actions
cprobs = [sum(v for i,v in enumerate(probs) if i<=k) for k in range(len(probs))]
# rebuild a new, optimized action structure
actions = (flogin, flogout, zip(cprobs, cfuncs))
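    # Worked example (hypothetical weights): actions[2:] == ((1, f1), (3, f2))
    # normalizes to probs [0.25, 0.75] and cprobs [0.25, 1.0]; each client
    # then picks the first func whose cumulative value >= a random draw.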
# setting up all clients (they are automatically started)
factory = DummyFactory(actions, timestep, verbose)
for i in range(nclients):
reactor.connectTCP("localhost", telnet_port, factory)
# start reactor
reactor.run()
#------------------------------------------------------------
# Command line interface
#------------------------------------------------------------
if __name__ == '__main__':
# parsing command line with default vals
parser = OptionParser(usage="%prog [options] <nclients> [timestep, [port]]",
description="This program requires some preparations to run properly. Start it without any arguments or options for full help.")
parser.add_option('-v', '--verbose', action='store_const', const=1, dest='verbose',
default=0,help="echo brief description of what clients do every timestep.")
parser.add_option('-V', '--very-verbose', action='store_const',const=2, dest='verbose',
default=0,help="echo all client returns to stdout (hint: use only with nclients=1!)")
options, args = parser.parse_args()
nargs = len(args)
nclients = DEFAULT_NCLIENTS
timestep = DEFAULT_TIMESTEP
port = DEFAULT_PORT
try:
        if not args: raise Exception
if nargs > 0: nclients = max(1, int(args[0]))
if nargs > 1: timestep = max(1, int(args[1]))
if nargs > 2: port = int(args[2])
except Exception:
print HELPTEXT
sys.exit()
# import the ACTION tuple from a given module
try:
action_modpath = settings.DUMMYRUNNER_ACTIONS_MODULE
except AttributeError:
# use default
action_modpath = "src.utils.dummyrunner.dummyrunner_actions"
actions = utils.variable_from_module(action_modpath, "ACTIONS")
print "Connecting %i dummy client(s) to port %i using a %i second timestep ... " % (nclients, port, timestep)
t0 = time.time()
start_all_dummy_clients(actions, nclients, timestep, port,
verbose=options.verbose)
ttot = time.time() - t0
print "... dummy client runner finished after %i seconds." % ttot
########
# Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
import os
import mock
import json
import tempfile
import unittest
from cloudify_rest_client.cluster import ClusterState, ClusterNode
from cloudify_rest_client.exceptions import NotClusterMaster
from ... import env
from .test_base import CliCommandTest
from ...exceptions import CloudifyCliError
from ...commands.cluster import (_wait_for_cluster_initialized,
pass_cluster_client)
from ...execution_events_fetcher import WAIT_FOR_EXECUTION_SLEEP_INTERVAL
class WaitForClusterTest(unittest.TestCase):
def test_polls_until_done(self):
"""The CLI stops polling when the cluster is initialized."""
client = mock.Mock()
# prepare a mock "cluster.status()" method that will return
# initialized = False on the first 4 calls, and initialized = True
# on the 5th call
client.cluster.status = mock.Mock(
side_effect=[ClusterState({'initialized': False})] * 4 +
[ClusterState({'initialized': True})])
with mock.patch('cloudify_cli.commands.cluster.time') as mock_time:
mock_time.time.return_value = 0
status = _wait_for_cluster_initialized(client)
self.assertEqual(5, len(client.cluster.status.mock_calls))
self.assertTrue(status['initialized'])
def test_stops_at_timeout(self):
"""If the cluster is never started, polling stops at timeout."""
timeout = 900
clock = {'time': 1000}
client = mock.Mock()
client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': False}))
def _mock_sleep(n):
clock['time'] += n
def _mock_time():
return clock['time']
with mock.patch('cloudify_cli.commands.cluster.time') as mock_time:
mock_time.sleep = mock.Mock(side_effect=_mock_sleep)
mock_time.time = _mock_time
with self.assertRaises(CloudifyCliError) as cm:
_wait_for_cluster_initialized(client, timeout=timeout)
self.assertIn('timed out', cm.exception.message.lower())
# there should be (timeout//interval) time.sleep(interval) calls,
# so the total time waited is equal to timeout
self.assertEqual(timeout // WAIT_FOR_EXECUTION_SLEEP_INTERVAL,
len(mock_time.sleep.mock_calls))
def test_passes_log_cursor(self):
# prepare mock status responses containing logs. The first status
# contains logs that end at cursor=1, so the next call needs to provide
# since='1'. The 2nd status has cursor=2 and 3, so the next call needs
# to be since='3'. The next call returns no logs at all, so since stays
# '3'.
status_responses = [
ClusterState({
'initialized': False,
'logs': [{'timestamp': 1, 'message': 'a', 'cursor': '1'}]
}),
ClusterState({
'initialized': False,
'logs': [{'timestamp': 1, 'message': 'a', 'cursor': '2'},
{'timestamp': 1, 'message': 'a', 'cursor': '3'}]
}),
ClusterState({'initialized': False}),
ClusterState({
'initialized': True,
'logs': [{'timestamp': 1, 'message': 'a', 'cursor': '4'}]
})
]
client = mock.Mock()
client.cluster.status = mock.Mock(side_effect=status_responses)
with mock.patch('cloudify_cli.commands.cluster.time') as mock_time:
mock_time.time.return_value = 1000
_wait_for_cluster_initialized(client, logger=mock.Mock())
self.assertEqual(4, len(client.cluster.status.mock_calls))
since_passed = [kw['since'] for _, _, kw in
client.cluster.status.mock_calls]
self.assertEqual([None, '1', '3', '3'], since_passed)
class ClusterStartTest(CliCommandTest):
def setUp(self):
super(ClusterStartTest, self).setUp()
self.use_manager()
def test_already_in_cluster(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.invoke('cfy cluster start --cluster-host-ip 1.2.3.4',
'already part of a Cloudify Manager cluster')
def test_start_success(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
ClusterState({'initialized': True}),
])
self.client.cluster.start = mock.Mock()
outcome = self.invoke('cfy cluster start --cluster-host-ip 1.2.3.4')
self.assertIn('cluster started', outcome.logs)
def test_start_success_with_logs(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
ClusterState({
'initialized': False,
'logs': [{'timestamp': 1, 'message': 'one log message',
'cursor': '1'}]
}),
ClusterState({'initialized': True}),
])
self.client.cluster.start = mock.Mock()
with mock.patch('cloudify_cli.commands.cluster.time') as mock_time:
mock_time.time.return_value = 1000
outcome = self.invoke(
'cfy cluster start --cluster-host-ip 1.2.3.4')
self.assertIn('cluster started', outcome.logs)
self.assertIn('one log message', outcome.logs)
def test_start_error(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
ClusterState({'error': 'some error happened'}),
])
self.client.cluster.start = mock.Mock()
self.invoke('cfy cluster start --cluster-host-ip 1.2.3.4',
'some error happened')
def test_profile_updated(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
ClusterState({'initialized': True}),
])
self.client.cluster.start = mock.Mock()
outcome = self.invoke('cfy cluster start --cluster-host-ip 1.2.3.4')
self.assertIn('cluster started', outcome.logs)
self.assertEqual(1, len(env.profile.cluster))
self.assertEqual(env.profile.manager_ip,
env.profile.cluster[0]['manager_ip'])
class ClusterNodesTest(CliCommandTest):
def setUp(self):
super(ClusterNodesTest, self).setUp()
self.use_manager()
def test_list_nodes(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'name': 'node name 1', 'host_ip': '1.2.3.4'})
])
outcome = self.invoke('cfy cluster nodes list')
self.assertIn('node name 1', outcome.output)
def test_list_not_initialized(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': False}))
self.invoke('cfy cluster nodes list',
'not part of a Cloudify Manager cluster')
def test_set_node_cert(self):
env.profile.cluster = [{'name': 'm1', 'manager_ip': '1.2.3.4'}]
with tempfile.NamedTemporaryFile() as f:
self.invoke('cfy cluster nodes set-certificate m1 {0}'
.format(f.name))
def test_set_node_cert_doesnt_exist(self):
env.profile.cluster = [{'name': 'm1', 'manager_ip': '1.2.3.4'}]
self.invoke('cfy cluster nodes set-certificate m1 /tmp/not-a-file',
'does not exist')
def test_set_node_cert_no_such_node(self):
env.profile.cluster = [{'name': 'm1', 'manager_ip': '1.2.3.4'}]
with tempfile.NamedTemporaryFile() as f:
self.invoke('cfy cluster nodes set-certificate not-a-node {0}'
.format(f.name),
'not found in the cluster profile')
def test_get_node(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.client.cluster.nodes.details = mock.Mock(return_value={
'id': 'm1',
'options': {
'option1': 'value1'
}
})
outcome = self.invoke('cluster nodes get m1')
self.assertIn('value1', outcome.output)
def test_get_node_json(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.client.cluster.nodes.details = mock.Mock(return_value={
'id': 'm1',
'options': {
'option1': 'value1'
}
})
outcome = self.invoke('cluster nodes get m1 --json')
parsed = json.loads(outcome.output)
self.assertEqual(parsed['options'], {'option1': 'value1'})
class ClusterJoinTest(CliCommandTest):
def setUp(self):
super(ClusterJoinTest, self).setUp()
self.use_manager()
self.master_profile = env.ProfileContext()
self.master_profile.manager_ip = 'master_profile'
self.master_profile.cluster = [{'manager_ip': '1.2.3.4'}]
self.master_profile.save()
def test_join_success(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
NotClusterMaster('not cluster master')
])
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'host_ip': '10.10.1.10', 'online': True})
])
self.client.cluster.nodes.add = mock.Mock(return_value=ClusterNode({
'credentials': 'abc'
}))
self.client.cluster.join = mock.Mock()
outcome = self.invoke('cfy cluster join {0}'
.format(self.master_profile.manager_ip))
self.assertIn('joined cluster', outcome.logs)
def test_join_profile_updated(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
NotClusterMaster('not cluster master')
])
self.client.cluster.nodes.add = mock.Mock(return_value=ClusterNode({
'credentials': 'abc'
}))
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'host_ip': '10.10.1.10', 'online': True})
])
self.client.cluster.join = mock.Mock()
outcome = self.invoke('cfy cluster join {0}'
.format(self.master_profile.manager_ip))
self.assertIn('joined cluster', outcome.logs)
master_profile = env.get_profile_context('master_profile')
self.assertEqual(2, len(master_profile.cluster))
self.assertEqual(env.profile.manager_ip,
master_profile.cluster[1]['manager_ip'])
def test_join_origin_profile_updated(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
NotClusterMaster('not cluster master')
])
self.client.cluster.nodes.add = mock.Mock(return_value=ClusterNode({
'credentials': 'abc'
}))
with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write(b'cert or key here\n')
self.addCleanup(os.unlink, f.name)
self.master_profile.cluster[0]['ssh_key'] = f.name
self.master_profile.save()
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'host_ip': '10.10.1.10', 'online': True})
])
self.client.cluster.join = mock.Mock()
outcome = self.invoke('cfy cluster join {0}'
.format(self.master_profile.manager_ip))
self.assertIn('joined cluster', outcome.logs)
self.assertEqual(2, len(env.profile.cluster))
joined_node = env.profile.cluster[1]
self.assertEqual('10.10.1.10', joined_node['manager_ip'])
master_node = env.profile.cluster[0]
self.assertIn('ssh_key', master_node)
# check that the master's ssh key was copied to the local profile's
# workdir
self.assertTrue(master_node['ssh_key'].startswith(env.profile.workdir))
def test_join_duplicate_name(self):
self.client.cluster.status = mock.Mock(side_effect=[
ClusterState({'initialized': False}),
ClusterState({}),
])
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'host_ip': '10.10.1.10', 'online': True, 'name': 'n'})
])
self.client.cluster.join = mock.Mock()
self.invoke('cfy cluster join {0} --cluster-node-name n'
.format(self.master_profile.manager_ip),
'is already a member of the cluster')
class UpdateProfileTest(CliCommandTest):
def setUp(self):
super(UpdateProfileTest, self).setUp()
self.use_manager()
env.profile.cluster = [{'manager_ip': env.profile.manager_ip,
'name': 'master'}]
env.profile.save()
def test_nodes_added_to_profile(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'name': 'master',
'host_ip': env.profile.manager_ip}),
ClusterNode({'name': 'node name 1', 'host_ip': '1.2.3.4'}),
ClusterNode({'name': 'node name 2', 'host_ip': '5.6.7.8'})
])
self.client.cluster.join = mock.Mock()
outcome = self.invoke('cfy cluster update-profile')
self.assertIn('Adding cluster node 1.2.3.4', outcome.logs)
self.assertIn('Adding cluster node 5.6.7.8', outcome.logs)
self.assertEqual(env.profile.cluster, [
{'manager_ip': env.profile.manager_ip, 'name': 'master'},
{'manager_ip': '1.2.3.4', 'name': 'node name 1'},
{'manager_ip': '5.6.7.8', 'name': 'node name 2'}
])
def test_nodes_removed_from_profile(self):
self.client.cluster.status = mock.Mock(
return_value=ClusterState({'initialized': True}))
self.client.cluster.nodes.list = mock.Mock(return_value=[
ClusterNode({'name': 'node name 1', 'host_ip': '1.2.3.4'}),
ClusterNode({'name': 'node name 2', 'host_ip': '5.6.7.8'})
])
self.client.cluster.join = mock.Mock()
self.invoke('cfy cluster update-profile')
self.assertEqual(env.profile.cluster, [
{'manager_ip': '1.2.3.4', 'name': 'node name 1'},
{'manager_ip': '5.6.7.8', 'name': 'node name 2'}
])
def test_set_node_cert(self):
env.profile.cluster.append({'manager_ip': '1.2.3.4', 'name': 'node2'})
env.profile.save()
with tempfile.NamedTemporaryFile() as f:
self.invoke('cfy cluster nodes set-certificate {0} {1}'
.format('master', f.name))
with tempfile.NamedTemporaryFile() as f2:
self.invoke('cfy cluster nodes set-certificate {0} {1}'
.format('node2', f2.name))
self.assertEqual(env.profile.cluster[0]['cert'], f.name)
self.assertEqual(env.profile.cluster[1]['cert'], f2.name)
class PassClusterClientTest(unittest.TestCase):
def test_pass_cluster_client_not_initialized(self):
@pass_cluster_client()
def _f(client):
pass
mock_client = mock.Mock()
mock_client.cluster.status.return_value = \
ClusterState({'initialized': False})
with mock.patch('cloudify_cli.env.get_rest_client',
return_value=mock_client):
with self.assertRaises(CloudifyCliError) as cm:
_f()
mock_client.cluster.status.assert_any_call()
self.assertIn('not part of a Cloudify Manager cluster',
str(cm.exception))
def test_pass_cluster_client_initialized(self):
@pass_cluster_client()
def _f(client):
pass
mock_client = mock.Mock()
mock_client.cluster.status.return_value = \
ClusterState({'initialized': True})
with mock.patch('cloudify_cli.env.get_rest_client',
return_value=mock_client):
_f()
mock_client.cluster.status.assert_any_call()
|
|
"""
Copyright 2015 Basho Technologies, Inc.
This file is provided to you under the Apache License,
Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import re
from six import PY2
from riak import RiakError
from riak.util import lazy_property, bytes_to_str
if PY2:
from urllib import quote_plus, urlencode
else:
from urllib.parse import quote_plus, urlencode
class RiakHttpResources(object):
"""
Methods for RiakHttpTransport related to URL generation, i.e.
creating the proper paths.
"""
def ping_path(self):
return mkpath(self.riak_kv_wm_ping)
def stats_path(self):
return mkpath(self.riak_kv_wm_stats)
def mapred_path(self, **options):
return mkpath(self.riak_kv_wm_mapred, **options)
def bucket_list_path(self, bucket_type=None, **options):
query = {'buckets': True}
query.update(options)
if self.riak_kv_wm_bucket_type and bucket_type:
return mkpath("/types", quote_plus(bucket_type),
"buckets", **query)
elif self.riak_kv_wm_buckets:
return mkpath("/buckets", **query)
else:
return mkpath(self.riak_kv_wm_raw, **query)
def bucket_properties_path(self, bucket, bucket_type=None, **options):
if self.riak_kv_wm_bucket_type and bucket_type:
return mkpath("/types", quote_plus(bucket_type), "buckets",
quote_plus(bucket), "props", **options)
elif self.riak_kv_wm_buckets:
return mkpath("/buckets", quote_plus(bucket),
"props", **options)
else:
query = options.copy()
query.update(props=True, keys=False)
return mkpath(self.riak_kv_wm_raw, quote_plus(bucket), **query)
def bucket_type_properties_path(self, bucket_type, **options):
return mkpath("/types", quote_plus(bucket_type), "props",
**options)
def key_list_path(self, bucket, bucket_type=None, **options):
query = {'keys': True, 'props': False}
query.update(options)
if self.riak_kv_wm_bucket_type and bucket_type:
return mkpath("/types", quote_plus(bucket_type), "buckets",
quote_plus(bucket), "keys", **query)
if self.riak_kv_wm_buckets:
return mkpath("/buckets", quote_plus(bucket), "keys",
**query)
else:
return mkpath(self.riak_kv_wm_raw, quote_plus(bucket), **query)
def object_path(self, bucket, key=None, bucket_type=None, **options):
if key:
key = quote_plus(key)
if self.riak_kv_wm_bucket_type and bucket_type:
return mkpath("/types", quote_plus(bucket_type), "buckets",
quote_plus(bucket), "keys", key, **options)
elif self.riak_kv_wm_buckets:
return mkpath("/buckets", quote_plus(bucket), "keys",
key, **options)
else:
return mkpath(self.riak_kv_wm_raw, quote_plus(bucket), key,
**options)
def index_path(self, bucket, index, start, finish=None, bucket_type=None,
**options):
if not self.riak_kv_wm_buckets:
raise RiakError("Indexes are unsupported by this Riak node")
if finish is not None:
finish = quote_plus(str(finish))
if self.riak_kv_wm_bucket_type and bucket_type:
return mkpath("/types", quote_plus(bucket_type),
"buckets", quote_plus(bucket),
"index", quote_plus(index), quote_plus(str(start)),
finish, **options)
else:
return mkpath("/buckets", quote_plus(bucket),
"index", quote_plus(index), quote_plus(str(start)),
finish, **options)
def search_index_path(self, index=None, **options):
"""
Builds a Yokozuna search index URL.
:param index: optional name of a yz index
:type index: string
        :param options: optional dict of additional query arguments
        :type options: dict
        :rtype: string
"""
if not self.yz_wm_index:
raise RiakError("Yokozuna search is unsupported by this Riak node")
        if index:
            index = quote_plus(index)
return mkpath(self.yz_wm_index, "index", index, **options)
def search_schema_path(self, index, **options):
"""
Builds a Yokozuna search Solr schema URL.
:param index: a name of a yz solr schema
:type index: string
        :param options: optional dict of additional query arguments
        :type options: dict
        :rtype: string
"""
if not self.yz_wm_schema:
raise RiakError("Yokozuna search is unsupported by this Riak node")
return mkpath(self.yz_wm_schema, "schema", quote_plus(index),
**options)
def solr_select_path(self, index, query, **options):
if not self.riak_solr_searcher_wm and not self.yz_wm_search:
raise RiakError("Search is unsupported by this Riak node")
qs = {'q': query, 'wt': 'json', 'fl': '*,score'}
qs.update(options)
if index:
index = quote_plus(index)
return mkpath("/solr", index, "select", **qs)
def solr_update_path(self, index):
if not self.riak_solr_searcher_wm:
raise RiakError("Riak Search 1 is unsupported by this Riak node")
if index:
index = quote_plus(index)
return mkpath(self.riak_solr_indexer_wm, index, "update")
def counters_path(self, bucket, key, **options):
if not self.riak_kv_wm_counter:
raise RiakError("Counters are unsupported by this Riak node")
return mkpath(self.riak_kv_wm_buckets, quote_plus(bucket), "counters",
quote_plus(key), **options)
def datatypes_path(self, bucket_type, bucket, key=None, **options):
if not self.bucket_types():
raise RiakError("Datatypes are unsupported by this Riak node")
if key:
key = quote_plus(key)
return mkpath("/types", quote_plus(bucket_type), "buckets",
quote_plus(bucket), "datatypes", key, **options)
# Feature detection overrides
def bucket_types(self):
return self.riak_kv_wm_bucket_type is not None
def index_term_regex(self):
if self.riak_kv_wm_bucket_type is not None:
return True
else:
return super(RiakHttpResources, self).index_term_regex()
# Resource root paths
@lazy_property
def riak_kv_wm_bucket_type(self):
if 'riak_kv_wm_bucket_type' in self.resources:
return "/types"
@lazy_property
def riak_kv_wm_buckets(self):
if 'riak_kv_wm_buckets' in self.resources:
return "/buckets"
@lazy_property
def riak_kv_wm_raw(self):
return self.resources.get('riak_kv_wm_raw') or "/riak"
@lazy_property
def riak_kv_wm_link_walker(self):
return self.resources.get('riak_kv_wm_linkwalker') or "/riak"
@lazy_property
def riak_kv_wm_mapred(self):
return self.resources.get('riak_kv_wm_mapred') or "/mapred"
@lazy_property
def riak_kv_wm_ping(self):
return self.resources.get('riak_kv_wm_ping') or "/ping"
@lazy_property
def riak_kv_wm_stats(self):
return self.resources.get('riak_kv_wm_stats') or "/stats"
@lazy_property
def riak_solr_searcher_wm(self):
return self.resources.get('riak_solr_searcher_wm')
@lazy_property
def riak_solr_indexer_wm(self):
return self.resources.get('riak_solr_indexer_wm')
@lazy_property
def riak_kv_wm_counter(self):
return self.resources.get('riak_kv_wm_counter')
@lazy_property
def yz_wm_search(self):
return self.resources.get('yz_wm_search')
@lazy_property
def yz_wm_extract(self):
return self.resources.get('yz_wm_extract')
@lazy_property
def yz_wm_schema(self):
return self.resources.get('yz_wm_schema')
@lazy_property
def yz_wm_index(self):
return self.resources.get('yz_wm_index')
@lazy_property
def resources(self):
return self.get_resources()
def mkpath(*segments, **query):
"""
Constructs the path & query portion of a URI from path segments
and a dict.
"""
# Remove empty segments (e.g. no key specified)
segments = [bytes_to_str(s) for s in segments if s is not None]
# Join the segments into a path
pathstring = '/'.join(segments)
# Remove extra slashes
pathstring = re.sub('/+', '/', pathstring)
# Add the query string if it exists
_query = {}
for key in query:
if query[key] in [False, True]:
_query[key] = str(query[key]).lower()
elif query[key] is not None:
if PY2 and isinstance(query[key], unicode):
_query[key] = query[key].encode('utf-8')
else:
_query[key] = query[key]
if len(_query) > 0:
pathstring += "?" + urlencode(_query)
if not pathstring.startswith('/'):
pathstring = '/' + pathstring
return pathstring
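# A minimal usage sketch for mkpath (hypothetical values, not part of the
# original module): None segments are dropped, repeated slashes collapse,
# booleans are lowercased in the query, and a leading slash is guaranteed:
#
#   >>> mkpath("/buckets", "mybucket", "keys", None, keys=True)
#   '/buckets/mybucket/keys?keys=true'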
|
|
# -*- coding: utf-8 -*-
import sys
__all__ = ["ConflictLogBase", "ConflictLogXlsx",
"ConflictLogExcel"]
HAVE_EXCEL_API = False
HAVE_XLSX_WRITER = False
if sys.platform == "win32":
try:
from win32com.client import gencache
from win32com.client import DispatchWithEvents
HAVE_EXCEL_API = True
except ImportError:
pass
elif sys.platform == "linux":
try:
from pywpsrpc.rpcetapi import createEtRpcInstance, etapi
HAVE_EXCEL_API = True
except ImportError:
pass
if not HAVE_EXCEL_API:
try:
import openpyxl
HAVE_XLSX_WRITER = True
except ImportError:
pass
class MergeInfo:
def __init__(self, local, remote, author):
self.local = local
self.remote = remote
self.author = author
class ConflictLogBase:
def __init__(self):
# the starting row to log
self._curRow = 6
def addFile(self, file):
return False
def addCommit(self, commit):
return False
def save(self):
pass
def setResolveMethod(self, file, desc):
pass
def setMergeInfo(self, info):
pass
class ConflictLogXlsx(ConflictLogBase):
def __init__(self, logFile):
super().__init__()
self.logFile = logFile
self.book = openpyxl.load_workbook(logFile)
self.sheet = self.book.active
self._curFile = None
def addFile(self, path):
self._curFile = path
return True
def addCommit(self, commit):
if self._curFile:
self._curRow += 1
cell = "A%s" % self._curRow
self.sheet[cell] = self._curFile
self._curFile = None
msg = '{} ("{}", {}, {})'.format(
commit["sha1"],
commit["subject"],
commit["author"],
commit["date"])
cell = "{}{}".format("B" if commit["branchA"] else "C", self._curRow)
self.sheet[cell].alignment = openpyxl.styles.Alignment(
wrap_text=True, vertical="center")
text = self.sheet[cell].value
if text:
text += "\r\n" + msg
else:
text = msg
self.sheet[cell].value = text
return True
def save(self):
self.book.save(self.logFile)
def setResolveMethod(self, file, desc):
self._curRow += 1
cell = "A%s" % self._curRow
self.sheet[cell] = file
cell = "D%s" % self._curRow
self.sheet[cell] = desc
def setMergeInfo(self, info):
self.sheet["B1"] = info.local
self.sheet["B2"] = info.remote
self.sheet["B3"] = info.author
class WorkbookEvents:
def OnBeforeClose(self, cancel):
return True
class ConflictLogExcel(ConflictLogBase):
def __init__(self, logFile):
super().__init__()
self._curFile = None
self._isWin = sys.platform == "win32"
self.app = None
self.book = None
self.sheet = None
self.logFile = logFile
self._rpc = None
self._mergeInfo = None
self._ensureExcel()
def _ensureExcel(self):
if not HAVE_EXCEL_API or self.sheet:
return
try:
if self._isWin:
if not self.app:
self.app = gencache.EnsureDispatch("Excel.Application")
self.app.Visible = True
self.book = DispatchWithEvents(
self.app.Workbooks.Open(self.logFile),
WorkbookEvents)
else:
if not self._rpc:
_, self._rpc = createEtRpcInstance()
if not self.app:
_, self.app = self._rpc.getEtApplication()
self.app.Visible = True
_, self.book = self.app.Workbooks.Open(self.logFile)
self._rpc.registerEvent(self.app,
etapi.DIID_AppEvents,
"WorkbookBeforeClose",
self._onWorkbookBeforeClose)
self.sheet = self.book.Sheets[1]
if self._mergeInfo is not None:
self._setMergeInfo(self._mergeInfo)
self._mergeInfo = None
except Exception:
pass
def addFile(self, file):
self._curFile = file
return True
def addCommit(self, commit):
self._ensureExcel()
if not self.sheet:
return False
if self._curFile:
self._curRow += 1
self._setCellValue("A%s" % self._curRow, self._curFile)
self._curFile = None
msg = '{} ("{}", {}, {})'.format(
commit["sha1"],
commit["subject"],
commit["author"],
commit["date"])
cell = "{}{}".format("B" if commit["branchA"] else "C", self._curRow)
if not self._setCellValue(cell, msg, True):
return False
self.book.Save()
return True
def setResolveMethod(self, file, desc):
self._curRow += 1
self._setCellValue("A%s" % self._curRow, file)
self._setCellValue("D%s" % self._curRow, desc)
def setMergeInfo(self, info):
        # delay setting it, to work around buggy WPS on Linux
if not self.book:
self._mergeInfo = info
else:
self._setMergeInfo(info)
def _setMergeInfo(self, info):
self._setCellValue("B1", info.local)
self._setCellValue("B2", info.remote)
self._setCellValue("B3", info.author)
def _setCellValue(self, cell, value, append=False):
rg = self.sheet.Range(cell)
rg.WrapText = True
text = rg.Value
if text and append:
text += "\r\n" + value
else:
text = value
rg.Value = text
return True
    def _onWorkbookBeforeClose(self, workbook):
        # don't allow closing the doc
        return workbook == self.book
|
|
"""
Basic uniform mesh refinement functions.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.discrete.fem import Mesh
from six.moves import range
def refine_2_3(mesh_in):
"""
    Refines mesh out of triangles by cutting each edge in half
and making 4 new finer triangles out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 1)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres]
o1 = mesh_in.n_nod
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('2_3')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 3)) + o1
c = nm.c_[conn, e_nodes].T
new_conn = nm.vstack([c[0], c[3], c[5],
c[3], c[4], c[5],
c[1], c[4], c[3],
c[2], c[5], c[4]]).T
new_conn = new_conn.reshape((4 * n_el, 3))
new_mat_id = cmesh.cell_groups.repeat(4)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
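# Local numbering sketch for the splitting above, assuming the cmesh edge
# ordering (0-1, 1-2, 2-0), so that columns 3, 4, 5 of ``c`` are the
# corresponding edge midpoints:
#
#         2
#        / \
#       5   4       coarse triangle (0, 1, 2) -> children
#      / \ / \      (0, 3, 5), (3, 4, 5), (1, 4, 3), (2, 5, 4)
#     0---3---1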
def refine_2_4(mesh_in):
"""
    Refines mesh out of quadrilaterals by cutting each edge in
half and making 4 new finer quadrilaterals out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 1)
# Unique element centres.
centres = cmesh.get_centroids(cmesh.dim)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres, centres]
o1 = mesh_in.n_nod
o2 = o1 + e_centres.shape[0]
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('2_4')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 4)) + o1
nodes = nm.arange(n_el) + o2
c = nm.c_[conn, e_nodes, nodes].T
new_conn = nm.vstack([c[0], c[4], c[8], c[7],
c[1], c[5], c[8], c[4],
c[2], c[6], c[8], c[5],
c[3], c[7], c[8], c[6]]).T
new_conn = new_conn.reshape((4 * n_el, 4))
new_mat_id = cmesh.cell_groups.repeat(4)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
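# The quadrilateral case follows the same pattern, assuming edge ordering
# (0-1, 1-2, 2-3, 3-0): columns 4-7 of ``c`` are the edge midpoints and
# column 8 is the cell centre, so each child quad joins one original
# vertex, its two adjacent edge midpoints and the centre.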
def refine_3_4(mesh_in):
"""
Refines tetrahedra by cutting each edge in half and making 8 new
finer tetrahedra out of one coarser one. Old nodal coordinates come
first in `coors`, then the new ones. The new tetrahedra are similar
to the old one, no degeneration is supposed to occur as at most 3
congruence classes of tetrahedra appear, even when re-applied
iteratively (provided that `conns` are not modified between two
applications - ordering of vertices in tetrahedra matters not only
for positivity of volumes).
References:
    - Juergen Bey: Simplicial grid refinement: on Freudenthal's algorithm and
      the optimal number of congruence classes, Numer. Math. 85 (2000),
no. 1, 1--29, or
- Juergen Bey: Tetrahedral grid refinement, Computing 55 (1995),
no. 4, 355--378, or
http://citeseer.ist.psu.edu/bey95tetrahedral.html
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 2)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres]
o1 = mesh_in.n_nod
cc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
conn = mesh_in.get_conn('3_4')
n_el = conn.shape[0]
e_nodes = cc.indices.reshape((n_el, 6)) + o1
c = nm.c_[conn, e_nodes].T
new_conn = nm.vstack([c[0], c[4], c[6], c[7],
c[4], c[1], c[5], c[8],
c[6], c[5], c[2], c[9],
c[7], c[8], c[9], c[3],
c[4], c[6], c[7], c[8],
c[4], c[6], c[8], c[5],
c[6], c[7], c[8], c[9],
c[6], c[5], c[9], c[8]]).T
new_conn = new_conn.reshape((8 * n_el, 4))
new_mat_id = cmesh.cell_groups.repeat(8)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
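# Structure of the splitting above (a sketch): columns 4-9 of ``c`` are the
# six edge midpoints; the first four children are the corner tetrahedra,
# each keeping one original vertex, and the remaining four subdivide the
# inner octahedron, as in Bey's red refinement.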
def refine_3_8(mesh_in):
"""
    Refines hexahedral mesh by cutting each edge in half and
    making 8 new finer hexahedra out of one coarser one.
"""
cmesh = mesh_in.cmesh
# Unique edge centres.
e_centres = cmesh.get_centroids(cmesh.dim - 2)
# Unique face centres.
f_centres = cmesh.get_centroids(cmesh.dim - 1)
# Unique element centres.
centres = cmesh.get_centroids(cmesh.dim)
# New coordinates after the original ones.
coors = nm.r_[mesh_in.coors, e_centres, f_centres, centres]
o1 = mesh_in.n_nod
o2 = o1 + e_centres.shape[0]
o3 = o2 + f_centres.shape[0]
ecc = cmesh.get_conn(cmesh.dim, cmesh.dim - 2)
fcc = cmesh.get_conn(cmesh.dim, cmesh.dim - 1)
conn = mesh_in.get_conn('3_8')
n_el = conn.shape[0]
st = nm.vstack
e_nodes = ecc.indices.reshape((n_el, 12)) + o1
f_nodes = fcc.indices.reshape((n_el, 6)) + o2
nodes = nm.arange(n_el) + o3
c = nm.c_[conn, e_nodes, f_nodes, nodes].T
new_conn = st([c[0], c[8], c[20], c[11], c[16], c[22], c[26], c[21],
c[1], c[9], c[20], c[8], c[17], c[24], c[26], c[22],
c[2], c[10], c[20], c[9], c[18], c[25], c[26], c[24],
c[3], c[11], c[20], c[10], c[19], c[21], c[26], c[25],
c[4], c[15], c[23], c[12], c[16], c[21], c[26], c[22],
c[5], c[12], c[23], c[13], c[17], c[22], c[26], c[24],
c[6], c[13], c[23], c[14], c[18], c[24], c[26], c[25],
c[7], c[14], c[23], c[15], c[19], c[25], c[26], c[21]]).T
new_conn = new_conn.reshape((8 * n_el, 8))
new_mat_id = cmesh.cell_groups.repeat(8)
mesh = Mesh.from_data(mesh_in.name + '_r', coors, None, [new_conn],
[new_mat_id], mesh_in.descs )
return mesh
def refine_reference(geometry, level):
"""
Refine reference element given by `geometry`.
Notes
-----
The error edges must be generated in the order of the connectivity
of the previous (lower) level.
"""
from sfepy.discrete.fem import FEDomain
from sfepy.discrete.fem.geometry_element import geometry_data
gcoors, gconn = geometry.coors, geometry.conn
if level == 0:
return gcoors, gconn, None
gd = geometry_data[geometry.name]
conn = nm.array([gd.conn], dtype=nm.int32)
mat_id = conn[:, 0].copy()
mat_id[:] = 0
mesh = Mesh.from_data('aux', gd.coors, None, [conn],
[mat_id], [geometry.name])
domain = FEDomain('aux', mesh)
for ii in range(level):
domain = domain.refine()
coors = domain.mesh.coors
conn = domain.get_conn()
n_el = conn.shape[0]
if geometry.name == '2_3':
aux_conn = conn.reshape((n_el // 4, 4, 3))
ir = [[0, 1, 2], [2, 2, 3], [3, 3, 0]]
ic = [[0, 0, 0], [0, 1, 0], [0, 1, 0]]
elif geometry.name == '2_4':
aux_conn = conn.reshape((n_el // 4, 4, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 3, 0], [0, 0, 2], [3, 3, 1]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 1, 0], [1, 2, 1], [1, 2, 1]]
elif geometry.name == '3_4':
aux_conn = conn.reshape((n_el // 8, 8, 4))
ir = [[0, 0, 1], [1, 1, 2], [2, 0, 0], [3, 1, 1], [3, 2, 2], [3, 0, 0]]
ic = [[0, 1, 1], [1, 2, 2], [2, 2, 0], [3, 3, 1], [3, 3, 2], [3, 3, 0]]
elif geometry.name == '3_8':
aux_conn = conn.reshape((n_el // 8, 8, 8))
ir = [[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[0, 0, 1], [1, 1, 2], [2, 2, 3], [3, 0, 0], [0, 0, 2], [0, 0, 1],
[4, 4, 5], [5, 5, 6], [6, 6, 7], [7, 4, 4], [4, 4, 6], [4, 4, 5],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [3, 3, 7],
[0, 0, 4], [1, 1, 5], [2, 2, 6], [0, 0, 4],
[0, 0, 4]]
ic = [[0, 1, 0], [0, 1, 0], [0, 1, 0], [0, 3, 0], [1, 2, 1], [3, 2, 1],
[4, 5, 4], [4, 5, 4], [4, 5, 4], [4, 7, 4], [5, 6, 5], [7, 6, 5],
[0, 3, 0], [0, 3, 0], [0, 3, 0], [0, 1, 0], [3, 2, 3], [1, 2, 3],
[0, 4, 0], [0, 4, 0], [0, 4, 0], [0, 4, 0],
[1, 5, 3], [1, 5, 3], [1, 5, 3], [3, 7, 1],
[2, 6, 2]]
else:
raise ValueError('unsupported geometry! (%s)' % geometry.name)
conn = nm.array(conn, dtype=nm.int32)
error_edges = aux_conn[:, ir, ic]
return coors, conn, error_edges
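# Usage sketch (assumes sfepy's GeometryElement; the names below are
# illustrative only):
#
#     from sfepy.discrete.fem.geometry_element import GeometryElement
#
#     geo = GeometryElement('2_3')
#     coors, conn, error_edges = refine_reference(geo, level=2)
#
# ``error_edges`` selects, per refined cell group, the vertex triples of
# the edges used for error estimation via the ``ir``/``ic`` tables above.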
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for db.task layer."""
import datetime
import json
import ddt
import jsonschema
import mock
from rally.common import objects
from rally import consts
from rally import exceptions
from tests.unit import test
@ddt.ddt
class TaskTestCase(test.TestCase):
def setUp(self):
super(TaskTestCase, self).setUp()
self.task = {
"uuid": "00ef46a2-c5b8-4aea-a5ca-0f54a10cbca1",
"status": consts.TaskStatus.INIT,
"verification_log": "",
}
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_with_create(self, mock_task_create):
mock_task_create.return_value = self.task
task = objects.Task(status=consts.TaskStatus.FAILED)
mock_task_create.assert_called_once_with({
"status": consts.TaskStatus.FAILED})
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_without_create(self, mock_task_create):
task = objects.Task(task=self.task)
self.assertFalse(mock_task_create.called)
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.uuid.uuid4",
return_value="some_uuid")
@mock.patch("rally.common.objects.task.db.task_create")
def test_init_with_fake_true(self, mock_task_create, mock_uuid4):
task = objects.Task(fake=True)
self.assertFalse(mock_task_create.called)
self.assertTrue(mock_uuid4.called)
self.assertEqual(task["uuid"], mock_uuid4.return_value)
@mock.patch("rally.common.objects.task.db.task_get")
def test_get(self, mock_task_get):
mock_task_get.return_value = self.task
task = objects.Task.get(self.task["uuid"])
mock_task_get.assert_called_once_with(self.task["uuid"])
self.assertEqual(task["uuid"], self.task["uuid"])
@mock.patch("rally.common.objects.task.db.task_get_status")
def test_get_status(self, mock_task_get_status):
task = objects.Task(task=self.task)
status = task.get_status(task["uuid"])
self.assertEqual(status, mock_task_get_status.return_value)
@mock.patch("rally.common.objects.task.db.task_delete")
@mock.patch("rally.common.objects.task.db.task_create")
def test_create_and_delete(self, mock_task_create, mock_task_delete):
mock_task_create.return_value = self.task
task = objects.Task()
task.delete()
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=None)
@mock.patch("rally.common.objects.task.db.task_delete")
@mock.patch("rally.common.objects.task.db.task_create")
def test_create_and_delete_status(self, mock_task_create,
mock_task_delete):
mock_task_create.return_value = self.task
task = objects.Task()
task.delete(status=consts.TaskStatus.FINISHED)
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=consts.TaskStatus.FINISHED)
@mock.patch("rally.common.objects.task.db.task_delete")
def test_delete_by_uuid(self, mock_task_delete):
objects.Task.delete_by_uuid(self.task["uuid"])
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=None)
@mock.patch("rally.common.objects.task.db.task_delete")
def test_delete_by_uuid_status(self, mock_task_delete):
objects.Task.delete_by_uuid(self.task["uuid"],
consts.TaskStatus.FINISHED)
mock_task_delete.assert_called_once_with(
self.task["uuid"], status=consts.TaskStatus.FINISHED)
@mock.patch("rally.common.objects.task.db.task_list",
return_value=[{"uuid": "a",
"created_at": "b",
"status": consts.TaskStatus.FAILED,
"tag": "d",
"deployment_name": "some_name"}])
    def test_list(self, mock_db_task_list):
        tasks = objects.Task.list(status="somestatus")
        mock_db_task_list.assert_called_once_with("somestatus", None)
        self.assertIs(type(tasks), list)
        self.assertIsInstance(tasks[0], objects.Task)
        self.assertEqual(mock_db_task_list.return_value[0]["uuid"],
                         tasks[0]["uuid"])
@mock.patch("rally.common.objects.deploy.db.task_update")
@mock.patch("rally.common.objects.task.db.task_create")
def test_update(self, mock_task_create, mock_task_update):
mock_task_create.return_value = self.task
mock_task_update.return_value = {"opt": "val2"}
deploy = objects.Task(opt="val1")
deploy._update({"opt": "val2"})
mock_task_update.assert_called_once_with(
self.task["uuid"], {"opt": "val2"})
self.assertEqual(deploy["opt"], "val2")
@ddt.data(
{
"status": "some_status", "allowed_statuses": ("s_1", "s_2")
},
{
"status": "some_status", "allowed_statuses": None
}
)
@ddt.unpack
@mock.patch("rally.common.objects.task.db.task_update_status")
@mock.patch("rally.common.objects.task.db.task_update")
def test_update_status(self, mock_task_update, mock_task_update_status,
status, allowed_statuses):
task = objects.Task(task=self.task)
task.update_status(consts.TaskStatus.FINISHED, allowed_statuses)
if allowed_statuses:
self.assertFalse(mock_task_update.called)
mock_task_update_status.assert_called_once_with(
self.task["uuid"],
consts.TaskStatus.FINISHED,
allowed_statuses
)
else:
self.assertFalse(mock_task_update_status.called)
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"status": consts.TaskStatus.FINISHED},
)
@mock.patch("rally.common.objects.task.db.task_update")
def test_update_verification_log(self, mock_task_update):
mock_task_update.return_value = self.task
task = objects.Task(task=self.task)
task.update_verification_log({"a": "fake"})
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"verification_log": json.dumps({"a": "fake"})}
)
def test_extend_results(self):
self.assertRaises(TypeError, objects.Task.extend_results)
now = datetime.datetime.now()
iterations = [
{"timestamp": i + 2, "error": [], "duration": i + 5,
"scenario_output": {"errors": "", "data": {}},
"error": [], "idle_duration": i,
"atomic_actions": {
"keystone.create_user": i + 10}} for i in range(10)]
obsolete = [
{"task_uuid": "foo_uuid", "created_at": now, "updated_at": None,
"id": 11, "key": {"kw": {"foo": 42},
"name": "Foo.bar", "pos": 0},
"data": {"raw": iterations, "sla": [],
"full_duration": 40, "load_duration": 32}}]
expected = [
{"iterations": "foo_iterations", "sla": [],
"key": {"kw": {"foo": 42}, "name": "Foo.bar", "pos": 0},
"info": {
"atomic": {"keystone.create_user": {"max_duration": 19,
"min_duration": 10}},
"iterations_count": 10, "iterations_failed": 0,
"max_duration": 14, "min_duration": 5, "output_names": [],
"tstamp_start": 2, "full_duration": 40, "load_duration": 32}}]
# serializable is default
results = objects.Task.extend_results(obsolete)
self.assertIsInstance(results[0]["iterations"], type(iter([])))
self.assertEqual(list(results[0]["iterations"]), iterations)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
# serializable is False
results = objects.Task.extend_results(obsolete, serializable=False)
self.assertIsInstance(results[0]["iterations"], type(iter([])))
self.assertEqual(list(results[0]["iterations"]), iterations)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
# serializable is True
results = objects.Task.extend_results(obsolete, serializable=True)
self.assertEqual(list(results[0]["iterations"]), iterations)
expected[0]["created_at"] = now.strftime("%Y-%d-%mT%H:%M:%S")
expected[0]["updated_at"] = None
jsonschema.validate(results[0],
objects.task.TASK_EXTENDED_RESULT_SCHEMA)
results[0]["iterations"] = "foo_iterations"
self.assertEqual(results, expected)
@mock.patch("rally.common.objects.task.db.task_result_get_all_by_uuid",
return_value="foo_results")
def test_get_results(self, mock_task_result_get_all_by_uuid):
task = objects.Task(task=self.task)
results = task.get_results()
mock_task_result_get_all_by_uuid.assert_called_once_with(
self.task["uuid"])
self.assertEqual(results, "foo_results")
@mock.patch("rally.common.objects.task.db.task_result_create")
def test_append_results(self, mock_task_result_create):
task = objects.Task(task=self.task)
task.append_results("opt", "val")
mock_task_result_create.assert_called_once_with(
self.task["uuid"], "opt", "val")
@mock.patch("rally.common.objects.task.db.task_update")
def test_set_failed(self, mock_task_update):
mock_task_update.return_value = self.task
task = objects.Task(task=self.task)
task.set_failed()
mock_task_update.assert_called_once_with(
self.task["uuid"],
{"status": consts.TaskStatus.FAILED, "verification_log": "\"\""},
)
@ddt.data(
{
"soft": True, "status": consts.TaskStatus.INIT
},
{
"soft": True, "status": consts.TaskStatus.VERIFYING
},
{
"soft": False, "status": consts.TaskStatus.INIT
},
{
"soft": False, "status": consts.TaskStatus.VERIFYING
}
)
@ddt.unpack
def test_abort_with_init_and_verifying_states(self, soft, status):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(
side_effect=(status, status, "running"))
task._update_status_in_abort = mock.MagicMock()
self.assertRaises(exceptions.RallyException, task.abort, soft)
self.assertEqual(1, task.get_status.call_count)
self.assertFalse(task._update_status_in_abort.called)
@ddt.data(
{
"soft": True, "status": consts.TaskStatus.ABORTED
},
{
"soft": True, "status": consts.TaskStatus.FINISHED
},
{
"soft": True, "status": consts.TaskStatus.FAILED
},
{
"soft": False, "status": consts.TaskStatus.ABORTED
},
{
"soft": False, "status": consts.TaskStatus.FINISHED
},
{
"soft": False, "status": consts.TaskStatus.FAILED
}
)
@ddt.unpack
def test_abort_with_finished_states(self, soft, status):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(return_value=status)
task.update_status = mock.MagicMock()
self.assertRaises(exceptions.RallyException, task.abort, soft)
self.assertEqual(1, task.get_status.call_count)
self.assertFalse(task.update_status.called)
@ddt.data(True, False)
def test_abort_with_running_state(self, soft):
task = objects.Task(mock.MagicMock(), fake=True)
task.get_status = mock.MagicMock(return_value="running")
task.update_status = mock.MagicMock()
task.abort(soft)
if soft:
status = consts.TaskStatus.SOFT_ABORTING
else:
status = consts.TaskStatus.ABORTING
task.update_status.assert_called_once_with(
status,
allowed_statuses=(consts.TaskStatus.RUNNING,
consts.TaskStatus.SOFT_ABORTING)
)
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
from operator import attrgetter
from telemetry.web_perf.metrics import rendering_frame
# These are LatencyInfo component names indicating the various components
# that the input event has travelled through.
# This is when the input event first reaches chrome.
UI_COMP_NAME = 'INPUT_EVENT_LATENCY_UI_COMPONENT'
# This is when the input event was originally created by OS.
ORIGINAL_COMP_NAME = 'INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT'
# This is when the input event was sent from browser to renderer.
BEGIN_COMP_NAME = 'INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT'
# This is when an input event is turned into a scroll update.
BEGIN_SCROLL_UPDATE_COMP_NAME = (
'LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT')
# This is when a scroll update is forwarded to the main thread.
FORWARD_SCROLL_UPDATE_COMP_NAME = (
'INPUT_EVENT_LATENCY_FORWARD_SCROLL_UPDATE_TO_MAIN_COMPONENT')
# This is when the input event has reached swap buffer.
END_COMP_NAME = 'INPUT_EVENT_GPU_SWAP_BUFFER_COMPONENT'
# Name for a main thread scroll update latency event.
SCROLL_UPDATE_EVENT_NAME = 'Latency::ScrollUpdate'
# Name for a gesture scroll update latency event.
GESTURE_SCROLL_UPDATE_EVENT_NAME = 'InputLatency::GestureScrollUpdate'
# These are keys used in the 'data' field dictionary located in
# BenchmarkInstrumentation::ImplThreadRenderingStats.
VISIBLE_CONTENT_DATA = 'visible_content_area'
APPROXIMATED_VISIBLE_CONTENT_DATA = 'approximated_visible_content_area'
CHECKERBOARDED_VISIBLE_CONTENT_DATA = 'checkerboarded_visible_content_area'
# These are keys used in the 'errors' field dictionary located in
# RenderingStats in this file.
APPROXIMATED_PIXEL_ERROR = 'approximated_pixel_percentages'
CHECKERBOARDED_PIXEL_ERROR = 'checkerboarded_pixel_percentages'
def GetLatencyEvents(process, timeline_range):
"""Get LatencyInfo trace events from the process's trace buffer that are
within the timeline_range.
  Input events dump their LatencyInfo into the trace buffer as async trace
  events whose names start with "InputLatency"; non-input latency events use
  names starting with "Latency". Each trace event has a member 'data'
  containing its latency history.
"""
latency_events = []
if not process:
return latency_events
for event in itertools.chain(
process.IterAllAsyncSlicesStartsWithName('InputLatency'),
process.IterAllAsyncSlicesStartsWithName('Latency')):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
for ss in event.sub_slices:
if 'data' in ss.args:
latency_events.append(ss)
return latency_events
def ComputeEventLatencies(input_events):
""" Compute input event latencies.
  Input event latency is the time from when the input event is created to
  when the resulting page is swap-buffered.
  Input events on different platforms use different LatencyInfo components to
record its creation timestamp. We go through the following component list
to find the creation timestamp:
1. INPUT_EVENT_LATENCY_ORIGINAL_COMPONENT -- when event is created in OS
2. INPUT_EVENT_LATENCY_UI_COMPONENT -- when event reaches Chrome
3. INPUT_EVENT_LATENCY_BEGIN_RWH_COMPONENT -- when event reaches RenderWidget
If the latency starts with a
  LATENCY_BEGIN_SCROLL_LISTENER_UPDATE_MAIN_COMPONENT component, then it is
classified as a scroll update instead of a normal input latency measure.
Returns:
A list sorted by increasing start time of latencies which are tuples of
(input_event_name, latency_in_ms).
"""
input_event_latencies = []
for event in input_events:
data = event.args['data']
if END_COMP_NAME in data:
end_time = data[END_COMP_NAME]['time']
if ORIGINAL_COMP_NAME in data:
start_time = data[ORIGINAL_COMP_NAME]['time']
elif UI_COMP_NAME in data:
start_time = data[UI_COMP_NAME]['time']
elif BEGIN_COMP_NAME in data:
start_time = data[BEGIN_COMP_NAME]['time']
elif BEGIN_SCROLL_UPDATE_COMP_NAME in data:
start_time = data[BEGIN_SCROLL_UPDATE_COMP_NAME]['time']
else:
raise ValueError('LatencyInfo has no begin component')
latency = (end_time - start_time) / 1000.0
input_event_latencies.append((start_time, event.name, latency))
input_event_latencies.sort()
return [(name, latency) for _, name, latency in input_event_latencies]
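# A minimal sketch of the event shape ComputeEventLatencies expects
# (hypothetical values): args['data'] maps LatencyInfo component names to
# dicts with a 'time' timestamp; the difference is divided by 1000.0 to
# yield milliseconds. With ORIGINAL at 1000 and swap at 6000, latency is
# 5.0 ms:
#
#   event.name == 'InputLatency::GestureScrollUpdate'
#   event.args['data'] == {
#       ORIGINAL_COMP_NAME: {'time': 1000},
#       END_COMP_NAME: {'time': 6000},
#   }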
def HasRenderingStats(process):
""" Returns True if the process contains at least one
BenchmarkInstrumentation::*RenderingStats event with a frame.
"""
if not process:
return False
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::DisplayRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
for event in process.IterAllSlicesOfName(
'BenchmarkInstrumentation::ImplThreadRenderingStats'):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return True
return False
def GetTimestampEventName(process):
""" Returns the name of the events used to count frame timestamps. """
if process.name == 'SurfaceFlinger':
return 'vsync_before'
event_name = 'BenchmarkInstrumentation::DisplayRenderingStats'
for event in process.IterAllSlicesOfName(event_name):
if 'data' in event.args and event.args['data']['frame_count'] == 1:
return event_name
return 'BenchmarkInstrumentation::ImplThreadRenderingStats'
class RenderingStats(object):
def __init__(self, renderer_process, browser_process, surface_flinger_process,
timeline_ranges):
"""
Utility class for extracting rendering statistics from the timeline (or
    other logging facilities), and providing them in a common format to classes
that compute benchmark metrics from this data.
Stats are lists of lists of numbers. The outer list stores one list per
timeline range.
All *_time values are measured in milliseconds.
"""
assert len(timeline_ranges) > 0
self.refresh_period = None
# Find the top level process with rendering stats (browser or renderer).
if surface_flinger_process:
timestamp_process = surface_flinger_process
self._GetRefreshPeriodFromSurfaceFlingerProcess(surface_flinger_process)
elif HasRenderingStats(browser_process):
timestamp_process = browser_process
else:
timestamp_process = renderer_process
timestamp_event_name = GetTimestampEventName(timestamp_process)
# A lookup from list names below to any errors or exceptions encountered
# in attempting to generate that list.
self.errors = {}
self.frame_timestamps = []
self.frame_times = []
self.approximated_pixel_percentages = []
self.checkerboarded_pixel_percentages = []
    # End-to-end latency for an input event - from when the input event is
    # generated to when the resulting page is swap-buffered.
self.input_event_latency = []
self.frame_queueing_durations = []
# Latency from when a scroll update is sent to the main thread until the
# resulting frame is swapped.
self.scroll_update_latency = []
# Latency for a GestureScrollUpdate input event.
self.gesture_scroll_update_latency = []
for timeline_range in timeline_ranges:
self.frame_timestamps.append([])
self.frame_times.append([])
self.approximated_pixel_percentages.append([])
self.checkerboarded_pixel_percentages.append([])
self.input_event_latency.append([])
self.scroll_update_latency.append([])
self.gesture_scroll_update_latency.append([])
if timeline_range.is_empty:
continue
self._InitFrameTimestampsFromTimeline(
timestamp_process, timestamp_event_name, timeline_range)
self._InitImplThreadRenderingStatsFromTimeline(
renderer_process, timeline_range)
self._InitInputLatencyStatsFromTimeline(
browser_process, renderer_process, timeline_range)
self._InitFrameQueueingDurationsFromTimeline(
renderer_process, timeline_range)
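  # Shape sketch (hypothetical numbers): with two timeline ranges,
  #   self.frame_times == [[16.7, 16.6], [33.2]]
  # i.e. the outer list has one entry per timeline range and each inner
  # list holds that range's per-frame values in milliseconds.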
def _GetRefreshPeriodFromSurfaceFlingerProcess(self, surface_flinger_process):
for event in surface_flinger_process.IterAllEventsOfName('vsync_before'):
self.refresh_period = event.args['data']['refresh_period']
return
def _InitInputLatencyStatsFromTimeline(
self, browser_process, renderer_process, timeline_range):
latency_events = GetLatencyEvents(browser_process, timeline_range)
# Plugin input event's latency slice is generated in renderer process.
latency_events.extend(GetLatencyEvents(renderer_process, timeline_range))
event_latencies = ComputeEventLatencies(latency_events)
# Don't include scroll updates in the overall input latency measurement,
# because scroll updates can take much more time to process than other
# input events and would therefore add noise to overall latency numbers.
self.input_event_latency[-1] = [
latency for name, latency in event_latencies
if name != SCROLL_UPDATE_EVENT_NAME]
self.scroll_update_latency[-1] = [
latency for name, latency in event_latencies
if name == SCROLL_UPDATE_EVENT_NAME]
self.gesture_scroll_update_latency[-1] = [
latency for name, latency in event_latencies
if name == GESTURE_SCROLL_UPDATE_EVENT_NAME]
def _GatherEvents(self, event_name, process, timeline_range):
events = []
for event in process.IterAllSlicesOfName(event_name):
if event.start >= timeline_range.min and event.end <= timeline_range.max:
if 'data' not in event.args:
continue
events.append(event)
events.sort(key=attrgetter('start'))
return events
def _AddFrameTimestamp(self, event):
frame_count = event.args['data']['frame_count']
if frame_count > 1:
raise ValueError('trace contains multi-frame render stats')
if frame_count == 1:
self.frame_timestamps[-1].append(
event.start)
if len(self.frame_timestamps[-1]) >= 2:
self.frame_times[-1].append(
self.frame_timestamps[-1][-1] - self.frame_timestamps[-1][-2])
def _InitFrameTimestampsFromTimeline(
self, process, timestamp_event_name, timeline_range):
for event in self._GatherEvents(
timestamp_event_name, process, timeline_range):
self._AddFrameTimestamp(event)
def _InitImplThreadRenderingStatsFromTimeline(self, process, timeline_range):
event_name = 'BenchmarkInstrumentation::ImplThreadRenderingStats'
for event in self._GatherEvents(event_name, process, timeline_range):
data = event.args['data']
if VISIBLE_CONTENT_DATA not in data:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'Calculating approximated_pixel_percentages not possible because '
'visible_content_area was missing.')
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'Calculating checkerboarded_pixel_percentages not possible because '
'visible_content_area was missing.')
return
visible_content_area = data[VISIBLE_CONTENT_DATA]
if visible_content_area == 0:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'Calculating approximated_pixel_percentages would have caused '
'a divide-by-zero')
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'Calculating checkerboarded_pixel_percentages would have caused '
'a divide-by-zero')
return
if APPROXIMATED_VISIBLE_CONTENT_DATA in data:
self.approximated_pixel_percentages[-1].append(
round(float(data[APPROXIMATED_VISIBLE_CONTENT_DATA]) /
float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
else:
self.errors[APPROXIMATED_PIXEL_ERROR] = (
'approximated_pixel_percentages was not recorded')
if CHECKERBOARDED_VISIBLE_CONTENT_DATA in data:
self.checkerboarded_pixel_percentages[-1].append(
round(float(data[CHECKERBOARDED_VISIBLE_CONTENT_DATA]) /
float(data[VISIBLE_CONTENT_DATA]) * 100.0, 3))
else:
self.errors[CHECKERBOARDED_PIXEL_ERROR] = (
'checkerboarded_pixel_percentages was not recorded')
def _InitFrameQueueingDurationsFromTimeline(self, process, timeline_range):
try:
events = rendering_frame.GetFrameEventsInsideRange(process,
timeline_range)
new_frame_queueing_durations = [e.queueing_duration for e in events]
self.frame_queueing_durations.append(new_frame_queueing_durations)
except rendering_frame.NoBeginFrameIdException:
self.errors['frame_queueing_durations'] = (
'Current chrome version does not support the queueing delay metric.')
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Data Validation CLI tool is intended to help to build and execute
data validation runs with ease.
The Data Validator can be called either using:
data-validation -h
python -m data_validation -h
ex.
Step 1) Store Connection to be used in validation
data-validation connections add -c my_bq_conn BigQuery --project-id pso-kokoro-resources
Step 2) Run Validation using supplied connections
data-validation validate column -sc my_bq_conn -tc my_bq_conn \
-tbls bigquery-public-data.new_york_citibike.citibike_trips,bigquery-public-data.new_york_citibike.citibike_stations \
--sum '*' --count '*'
python -m data_validation validate column -sc my_bq_conn -tc my_bq_conn \
-tbls bigquery-public-data.new_york_citibike.citibike_trips \
--grouped-columns starttime \
--sum tripduration --count tripduration
data-validation validate column \
-sc my_bq_conn -tc my_bq_conn \
-tbls bigquery-public-data.new_york_citibike.citibike_trips,bigquery-public-data.new_york_citibike.citibike_stations \
--sum tripduration,start_station_name --count tripduration,start_station_name \
-bqrh pso-kokoro-resources.pso_data_validator.results \
-c ex_yaml.yaml
data-validation run-config -c ex_yaml.yaml
"""
import argparse
import csv
import json
import sys
import uuid
from data_validation import consts
from data_validation import state_manager
CONNECTION_SOURCE_FIELDS = {
"BigQuery": [
["project_id", "GCP Project to use for BigQuery"],
["google_service_account_key_path", "(Optional) GCP SA Key Path"],
],
"Teradata": [
["host", "Desired Teradata host"],
["port", "Teradata port to connect on"],
["user_name", "User used to connect"],
["password", "Password for supplied user"],
],
"Oracle": [
["host", "Desired Oracle host"],
["port", "Oracle port to connect on"],
["user", "User used to connect"],
["password", "Password for supplied user"],
["database", "Database to connect to"],
],
"MSSQL": [
["host", "Desired SQL Server host (default localhost)"],
["port", "SQL Server port to connect on (default 1433)"],
["user", "User used to connect"],
["password", "Password for supplied user"],
["database", "Database to connect to (default master)"],
],
"MySQL": [
["host", "Desired MySQL host (default localhost)"],
["port", "MySQL port to connect on (default 3306)"],
["user", "User used to connect"],
["password", "Password for supplied user"],
["database", "Database to connect to (default master)"],
],
"Snowflake": [
["user", "Username to connect to"],
["password", "Password for authentication of user"],
["account", "Snowflake account to connect to"],
["database", "Database in snowflake to connect to"],
["schema", "Schema in the database to connect to"],
],
"Postgres": [
["host", "Desired Postgres host."],
["port", "Postgres port to connect on (ie. 5432)"],
["user", "Username to connect to"],
["password", "Password for authentication of user"],
["database", "Database in postgres to connect to (default postgres)"],
],
"Redshift": [
["host", "Desired Postgres host."],
["port", "Postgres port to connect on (ie. 5439)"],
["user", "Username to connect to"],
["password", "Password for authentication of user"],
["database", "Database in postgres to connect to (default postgres)"],
],
"Spanner": [
["project_id", "GCP Project to use for Spanner"],
["instance_id", "ID of Spanner instance to connect to"],
["database_id", "ID of Spanner database (schema) to connect to"],
["google_service_account_key_path", "(Optional) GCP SA Key Path"],
],
"FileSystem": [
["table_name", "Table name to use as reference for file data"],
["file_path", "The local, s3, or GCS file path to the data"],
["file_type", "The file type of the file.'csv' or 'json'"],
],
"Impala": [
["host", "Desired Impala host"],
["port", "Desired Imapala port (10000 if not provided)"],
["database", "Desired Impala database (default if not provided)"],
["auth_mechanism", "Desired Impala auth mechanism (PLAIN if not provided)"],
[
"kerberos_service_name",
"Desired Kerberos service name ('impala' if not provided)",
],
],
}
def get_parsed_args():
"""Return ArgParser with configured CLI arguments."""
parser = configure_arg_parser()
return parser.parse_args()
def configure_arg_parser():
"""Extract Args for Run."""
parser = argparse.ArgumentParser(
usage=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging")
subparsers = parser.add_subparsers(dest="command")
_configure_validate_parser(subparsers)
_configure_run_config_parser(subparsers)
_configure_validation_config_parser(subparsers)
_configure_connection_parser(subparsers)
_configure_find_tables(subparsers)
_configure_raw_query(subparsers)
_configure_run_parser(subparsers)
_configure_beta_parser(subparsers)
return parser
def _configure_beta_parser(subparsers):
"""Configure beta commands for the parser."""
connection_parser = subparsers.add_parser(
"beta", help="Run a Beta command for new utilities and features."
)
beta_subparsers = connection_parser.add_subparsers(dest="beta_cmd")
_configure_run_parser(beta_subparsers)
_configure_validate_parser(beta_subparsers)
_configure_deploy(beta_subparsers)
def _configure_deploy(subparsers):
"""Configure arguments for deploying as a service."""
subparsers.add_parser(
"deploy", help="Deploy Data Validation as a Service (w/ Flask)"
)
def _configure_find_tables(subparsers):
"""Configure arguments for text search table matching."""
find_tables_parser = subparsers.add_parser(
"find-tables", help="Build tables list using approx string matching"
)
find_tables_parser.add_argument(
"--source-conn", "-sc", help="Source connection name"
)
find_tables_parser.add_argument(
"--target-conn", "-tc", help="Target connection name"
)
find_tables_parser.add_argument(
"--allowed-schemas", "-as", help="List of source schemas to match."
)
find_tables_parser.add_argument(
"--score-cutoff",
"-score",
type=float,
help="The minimum distance score allowed to match tables (0 to 1).",
)
def _configure_raw_query(subparsers):
"""Configure arguments for text search table matching."""
query_parser = subparsers.add_parser(
"query", help="Run an adhoc query against the supplied connection"
)
query_parser.add_argument("--conn", "-c", help="Connection name to query")
query_parser.add_argument("--query", "-q", help="Raw query to execute")
def _configure_run_config_parser(subparsers):
"""Configure arguments to run a data validation YAML config using the legacy run-config command."""
run_config_parser = subparsers.add_parser(
"run-config",
help="Run validations stored in a YAML config file. Note: the 'configs run' command is now the recommended approach",
)
run_config_parser.add_argument(
"--config-file",
"-c",
help="YAML Config File Path to be used for building or running validations.",
)
def _configure_validation_config_parser(subparsers):
"""Configure arguments to run a data validation YAML config."""
validation_config_parser = subparsers.add_parser(
"configs", help="Run validations stored in a YAML config file"
)
configs_subparsers = validation_config_parser.add_subparsers(
dest="validation_config_cmd"
)
_ = configs_subparsers.add_parser("list", help="List your validation configs")
run_parser = configs_subparsers.add_parser(
"run", help="Run your validation configs"
)
run_parser.add_argument(
"--config-file",
"-c",
help="YAML Config File Path to be used for building or running validations.",
)
get_parser = configs_subparsers.add_parser(
"get", help="Get and print a validation config"
)
get_parser.add_argument(
"--config-file",
"-c",
help="YAML Config File Path to be used for building or running validations.",
)
def _configure_run_parser(subparsers):
"""Configure arguments to run a data validation."""
# subparsers = parser.add_subparsers(dest="command")
run_parser = subparsers.add_parser(
"run", help="Run a validation and optionally store to config (deprecated)"
)
run_parser.add_argument(
"--type",
"-t",
help="Type of Data Validation (Column, GroupedColumn, Row, Schema)",
)
run_parser.add_argument("--source-conn", "-sc", help="Source connection name")
run_parser.add_argument("--target-conn", "-tc", help="Target connection name")
run_parser.add_argument(
"--tables-list",
"-tbls",
help="Comma separated tables list in the form 'schema.table=target_schema.target_table'",
)
run_parser.add_argument(
"--result-handler-config", "-rc", help="Result handler config details"
)
run_parser.add_argument(
"--bq-result-handler", "-bqrh", help="BigQuery result handler config details"
)
run_parser.add_argument(
"--config-file",
"-c",
help="Store the validation in the YAML Config File Path specified",
)
run_parser.add_argument(
"--labels", "-l", help="Key value pair labels for validation run",
)
run_parser.add_argument(
"--hash",
"-hash",
help="Comma separated list of columns for hash 'col_a,col_b' or * for all columns",
)
run_parser.add_argument(
"--service-account",
"-sa",
help="Path to SA key file for result handler output",
)
run_parser.add_argument(
"--threshold",
"-th",
type=threshold_float,
help="Float max threshold for percent difference",
)
run_parser.add_argument(
"--filters",
"-filters",
help="Filters in the format source_filter:target_filter",
)
run_parser.add_argument(
"--format",
"-fmt",
default="table",
help="Set the format for printing command output, Supported formats are (text, csv, json, table). It defaults "
"to table",
)
run_parser.add_argument(
"--use-random-row",
"-rr",
action="store_true",
help="Finds a set of random rows of the first primary key supplied.",
)
run_parser.add_argument(
"--random-row-batch-size",
"-rbs",
help="Row batch size used for random row filters (default 10,000).",
)
def _configure_connection_parser(subparsers):
"""Configure the Parser for Connection Management."""
connection_parser = subparsers.add_parser(
"connections", help="Manage & Store connections to your Databases"
)
connect_subparsers = connection_parser.add_subparsers(dest="connect_cmd")
_ = connect_subparsers.add_parser("list", help="List your connections")
add_parser = connect_subparsers.add_parser("add", help="Store a new connection")
add_parser.add_argument(
"--connection-name", "-c", help="Name of connection used as reference"
)
_configure_database_specific_parsers(add_parser)
def _configure_database_specific_parsers(parser):
"""Configure a separate subparser for each supported DB."""
subparsers = parser.add_subparsers(dest="connect_type")
raw_parser = subparsers.add_parser(
"Raw", help="Supply Raw JSON config for a connection"
)
raw_parser.add_argument("--json", "-j", help="Json string config")
for database in CONNECTION_SOURCE_FIELDS:
db_parser = subparsers.add_parser(
database, help=f"Store a {database} connection"
)
for field_obj in CONNECTION_SOURCE_FIELDS[database]:
arg_field = "--" + field_obj[0].replace("_", "-")
help_txt = field_obj[1]
db_parser.add_argument(arg_field, help=help_txt)
def _configure_validate_parser(subparsers):
"""Configure arguments to run validations."""
validate_parser = subparsers.add_parser(
"validate", help="Run a validation and optionally store to config"
)
# Keep these in order to support data-validation run command for backwards-compatibility
validate_parser.add_argument("--type", "-t", help="Type of Data Validation")
validate_parser.add_argument(
"--result-handler-config", "-rc", help="Result handler config details"
)
validate_subparsers = validate_parser.add_subparsers(dest="validate_cmd")
column_parser = validate_subparsers.add_parser(
"column", help="Run a column validation"
)
_configure_column_parser(column_parser)
row_parser = validate_subparsers.add_parser("row", help="Run a row validation")
_configure_row_parser(row_parser)
schema_parser = validate_subparsers.add_parser(
"schema", help="Run a schema validation"
)
_configure_schema_parser(schema_parser)
def _configure_row_parser(row_parser):
"""Configure arguments to run row level validations."""
_add_common_arguments(row_parser)
row_parser.add_argument(
"--hash",
"-hash",
help="Comma separated list of columns for hash 'col_a,col_b' or * for all columns",
)
row_parser.add_argument(
"--comparison-fields",
"-comp-fields",
help="Individual columns to compare. If comparing a calculated field use the column alias.",
)
row_parser.add_argument(
"--calculated-fields",
"-calc-fields",
help="list of calculated fields to generate.",
)
row_parser.add_argument(
"--primary-keys",
"-pk",
help="Comma separated list of primary key columns 'col_a,col_b'",
)
row_parser.add_argument(
"--labels", "-l", help="Key value pair labels for validation run"
)
row_parser.add_argument(
"--threshold",
"-th",
type=threshold_float,
help="Float max threshold for percent difference",
)
row_parser.add_argument(
"--grouped-columns",
"-gc",
help="Comma separated list of columns to use in GroupBy 'col_a,col_b'",
)
row_parser.add_argument(
"--filters",
"-filters",
help="Filters in the format source_filter:target_filter",
)
row_parser.add_argument(
"--use-random-row",
"-rr",
action="store_true",
help="Finds a set of random rows of the first primary key supplied.",
)
row_parser.add_argument(
"--random-row-batch-size",
"-rbs",
help="Row batch size used for random row filters (default 10,000).",
)
def _configure_column_parser(column_parser):
"""Configure arguments to run column level validations."""
_add_common_arguments(column_parser)
column_parser.add_argument(
"--count",
"-count",
help="Comma separated list of columns for count 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--sum",
"-sum",
help="Comma separated list of columns for sum 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--avg",
"-avg",
help="Comma separated list of columns for avg 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--min",
"-min",
help="Comma separated list of columns for min 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--max",
"-max",
help="Comma separated list of columns for max 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--hash",
"-hash",
help="Comma separated list of columns for hashing a concatenate 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--bit_xor",
"-bit_xor",
help="Comma separated list of columns for hashing a concatenate 'col_a,col_b' or * for all columns",
)
column_parser.add_argument(
"--comparison-fields",
"-comp-fields",
help="list of fields to perform exact comparisons to. Use column aliases if this is calculated.",
)
column_parser.add_argument(
"--calculated-fields",
"-calc-fields",
help="list of calculated fields to generate.",
)
column_parser.add_argument(
"--grouped-columns",
"-gc",
help="Comma separated list of columns to use in GroupBy 'col_a,col_b'",
)
column_parser.add_argument(
"--primary-keys",
"-pk",
help="Comma separated list of primary key columns 'col_a,col_b'",
)
column_parser.add_argument(
"--labels", "-l", help="Key value pair labels for validation run"
)
column_parser.add_argument(
"--threshold",
"-th",
type=threshold_float,
help="Float max threshold for percent difference",
)
column_parser.add_argument(
"--filters",
"-filters",
help="Filters in the format source_filter:target_filter",
)
column_parser.add_argument(
"--use-random-row",
"-rr",
action="store_true",
help="Finds a set of random rows of the first primary key supplied.",
)
column_parser.add_argument(
"--random-row-batch-size",
"-rbs",
help="Row batch size used for random row filters (default 10,000).",
)
def _configure_schema_parser(schema_parser):
"""Configure arguments to run column level validations."""
_add_common_arguments(schema_parser)
def _add_common_arguments(parser):
parser.add_argument("--source-conn", "-sc", help="Source connection name")
parser.add_argument("--target-conn", "-tc", help="Target connection name")
parser.add_argument(
"--tables-list",
"-tbls",
help="Comma separated tables list in the form 'schema.table=target_schema.target_table'",
)
parser.add_argument(
"--bq-result-handler", "-bqrh", help="BigQuery result handler config details"
)
parser.add_argument(
"--service-account",
"-sa",
help="Path to SA key file for result handler output",
)
parser.add_argument(
"--config-file",
"-c",
help="Store the validation in the YAML Config File Path specified",
)
parser.add_argument(
"--format",
"-fmt",
default="table",
help="Set the format for printing command output, Supported formats are (text, csv, json, table). Defaults "
"to table",
)
def get_connection_config_from_args(args):
"""Return dict with connection config supplied."""
config = {consts.SOURCE_TYPE: args.connect_type}
if args.connect_type == "Raw":
return json.loads(args.json)
for field_obj in CONNECTION_SOURCE_FIELDS[args.connect_type]:
field = field_obj[0]
config[field] = getattr(args, field)
return config
def threshold_float(x):
"""Restrict threshold arg to be a positive float."""
try:
x = float(x)
except ValueError:
raise argparse.ArgumentTypeError("%r not a floating-point literal" % (x,))
    if x != x:  # NaN is the only float that is not equal to itself
        raise argparse.ArgumentTypeError("%r must be a number" % (x,))
    elif x < 0.0 or x > sys.float_info.max:
        raise argparse.ArgumentTypeError(
            "%r must be non-negative and below the max float value" % (x,)
        )
return x
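# Illustrative behavior of threshold_float (a sketch, not exercised by the CLI):
#
#     >>> threshold_float("0.25")
#     0.25
#     >>> threshold_float("-1")   # raises argparse.ArgumentTypeError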
def _generate_random_name(conn):
name = f"{conn[consts.SOURCE_TYPE]}_{str(uuid.uuid4())}"
return name
def store_connection(connection_name, conn):
"""Store the connection config under the given name."""
mgr = state_manager.StateManager()
mgr.create_connection(connection_name, conn)
def list_connections():
"""List all saved connections."""
mgr = state_manager.StateManager()
connections = mgr.list_connections()
for conn_name in connections:
print(f"Connection Name: {conn_name}")
def get_connection(connection_name):
"""Return dict connection details for a specific connection."""
mgr = state_manager.StateManager()
return mgr.get_connection_config(connection_name)
# with open(file_path, "r") as file:
# conn_str = file.read()
# return json.loads(conn_str)
def store_validation(validation_file_name, yaml_config):
"""Store the validation YAML config under the given name."""
mgr = state_manager.StateManager()
mgr.create_validation_yaml(validation_file_name, yaml_config)
def get_validation(validation_name):
"""Return validation YAML for a specific connection."""
mgr = state_manager.StateManager()
return mgr.get_validation_config(validation_name)
def list_validations():
"""List all saved validation YAMLs."""
mgr = state_manager.StateManager()
validations = mgr.list_validations()
print("Validation YAMLs found:")
for validation_name in validations:
print(f"{validation_name}.yaml")
def get_labels(arg_labels):
"""Return list of tuples representing key-value label pairs."""
labels = []
if arg_labels:
pairs = arg_labels.split(",")
for pair in pairs:
kv = pair.split("=")
if len(kv) == 2:
labels.append((kv[0], kv[1]))
else:
raise ValueError("Labels must be comma-separated key-value pairs.")
return labels
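# Illustrative behavior of get_labels (a sketch; label names are placeholders):
#
#     >>> get_labels("env=prod,team=data")
#     [('env', 'prod'), ('team', 'data')]
#     >>> get_labels(None)
#     []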
def get_filters(filter_value):
"""Returns parsed JSON from filter file. Backwards compatible for JSON input.
filter_value (str): Filter argument specified.
"""
try:
filter_config = json.loads(filter_value)
except json.decoder.JSONDecodeError:
filter_config = []
filter_vals = filter_value.split(":")
if len(filter_vals) == 1:
filter_dict = {
"type": "custom",
"source": filter_vals[0],
"target": filter_vals[0],
}
elif len(filter_vals) == 2:
if not filter_vals[1]:
raise ValueError("Please provide valid target filter.")
filter_dict = {
"type": "custom",
"source": filter_vals[0],
"target": filter_vals[1],
}
else:
raise ValueError("Unable to parse filter arguments.")
filter_config.append(filter_dict)
return filter_config
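# Illustrative behavior of get_filters (a sketch; the filter clauses are placeholders):
#
#     >>> get_filters("id>100")
#     [{'type': 'custom', 'source': 'id>100', 'target': 'id>100'}]
#     >>> get_filters("src_id>100:targ_id>100")
#     [{'type': 'custom', 'source': 'src_id>100', 'target': 'targ_id>100'}]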
def get_result_handler(rc_value, sa_file=None):
"""Returns dict of result handler config. Backwards compatible for JSON input.
rc_value (str): Result config argument specified.
sa_file (str): SA path argument specified.
"""
try:
result_handler = json.loads(rc_value)
except json.decoder.JSONDecodeError:
config = rc_value.split(".", 1)
if len(config) == 2:
result_handler = {
"type": "BigQuery",
"project_id": config[0],
"table_id": config[1],
}
else:
raise ValueError(f"Unable to parse result handler config: `{rc_value}`")
if sa_file:
result_handler["google_service_account_key_path"] = sa_file
return result_handler
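# Illustrative behavior of get_result_handler (a sketch; the project, dataset
# and table names are placeholders):
#
#     >>> get_result_handler("my-project.dataset.results")
#     {'type': 'BigQuery', 'project_id': 'my-project', 'table_id': 'dataset.results'}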
def get_arg_list(arg_value, default_value=None):
"""Returns list of values from argument provided. Backwards compatible for JSON input.
arg_value (str): Argument supplied
default_value (Any): A default value to supply when arg_value is empty.
"""
if not arg_value:
return default_value
try:
if isinstance(arg_value, list):
arg_value = str(arg_value)
# arg_value = "hash_all"
arg_list = json.loads(arg_value)
except json.decoder.JSONDecodeError:
arg_list = arg_value.split(",")
return arg_list
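# Illustrative behavior of get_arg_list (a sketch):
#
#     >>> get_arg_list("col_a,col_b")
#     ['col_a', 'col_b']
#     >>> get_arg_list('["col_a", "col_b"]')   # legacy JSON input still works
#     ['col_a', 'col_b']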
def get_tables_list(arg_tables, default_value=None, is_filesystem=False):
"""Returns dictionary of tables. Backwards compatible for JSON input.
    arg_tables (str): tables_list argument specified.
default_value (Any): A default value to supply when arg_value is empty.
is_filesystem (boolean): Boolean indicating whether source connection is a FileSystem. In this case, a schema is not required.
"""
if not arg_tables:
return default_value
try:
# Backwards compatibility for JSON input
tables_list = json.loads(arg_tables)
except json.decoder.JSONDecodeError:
tables_list = []
tables_mapping = list(csv.reader([arg_tables]))[0]
    source_schema_required = not is_filesystem
for mapping in tables_mapping:
tables_map = mapping.split("=")
if len(tables_map) == 1:
schema, table = split_table(
tables_map, schema_required=source_schema_required
)
table_dict = {
"schema_name": schema,
"table_name": table,
}
elif len(tables_map) == 2:
src_schema, src_table = split_table(
[tables_map[0]], schema_required=source_schema_required
)
table_dict = {
"schema_name": src_schema,
"table_name": src_table,
}
targ_schema, targ_table = split_table(
[tables_map[1]], schema_required=False
)
if targ_schema:
table_dict["target_schema_name"] = targ_schema
table_dict["target_table_name"] = targ_table
else:
raise ValueError(
"Unable to parse tables list. Please provide valid mapping."
)
tables_list.append(table_dict)
return tables_list
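# Illustrative behavior of get_tables_list (a sketch; schema and table names
# are placeholders):
#
#     >>> get_tables_list("my_schema.my_table=targ_schema.targ_table")
#     [{'schema_name': 'my_schema', 'table_name': 'my_table',
#       'target_schema_name': 'targ_schema', 'target_table_name': 'targ_table'}]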
def split_table(table_ref, schema_required=True):
"""Returns schema and table name given list of input values.
    table_ref (List): Table reference, e.g. ['my.schema.my_table']
schema_required (boolean): Indicates whether schema is required. A source
table reference requires schema. A target table reference does not.
"""
table_ref_list = list(csv.reader(table_ref, delimiter=".", quotechar='"'))[0]
if len(table_ref_list) == 1 and schema_required:
raise ValueError("Please provide schema in tables list.")
elif len(table_ref_list) == 1:
return None, table_ref_list[0].strip()
table = table_ref_list.pop()
schema = ".".join(table_ref_list)
return schema.strip(), table.strip()
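# Illustrative behavior of split_table (a sketch):
#
#     >>> split_table(["my_db.my_schema.my_table"])
#     ('my_db.my_schema', 'my_table')
#     >>> split_table(["my_table"], schema_required=False)
#     (None, 'my_table')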
|
|
# Time-stamp: <2009-05-08 11:38:09 Tao Liu>
"""Module to read the motif scan data file which is in binary format.
Copyright (c) 2007 Tao Liu <taoliu@jimmy.harvard.edu>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file COPYING included with
the distribution).
@status: experimental
@version: $Revision$
@author: Tao Liu
@contact: taoliu@jimmy.harvard.edu
"""
# ------------------------------------
# python modules
# ------------------------------------
import re
import taolib
from taolib.CoreLib.FeatIO import FWTrackI
import sys
from struct import unpack as upk
# ------------------------------------
# constants
# ------------------------------------
__version__ = "MR $Revision$"
__author__ = "Tao Liu <taoliu@jimmy.harvard.edu>"
__doc__ = "Calculate Relationship of Motifs"
LOG = False
GENOME_SIZE = {"mm8":2644077689L,
"hg18":3080419480L}
# ------------------------------------
# Misc functions
# ------------------------------------
def mlen(mfhd):
"""Return the motif length from motif matrix data file.
mfhd : the file object for motif matrix file
"""
mfhd.seek(0)
return len(mfhd.readlines())-1
def mconsensus(mfhd):
"""Return the motif consensus for a motif matrix data file.
mfhd : the file object for motif matrix file
"""
mfhd.seek(0)
consensus_seq=""
headline = mfhd.readline().rstrip()
consensus_field_num = headline.split("\t").index("Consensus")
for l in mfhd.readlines():
l = l.rstrip()
consensus_seq+=l.split("\t")[consensus_field_num]
return consensus_seq
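# Illustrative, tab-separated matrix file layout assumed by mlen() and
# mconsensus() above (a sketch; all column names except "Consensus" are
# placeholders):
#
#     Pos    A    C    G    T    Consensus
#     1      9    1    0    0    A
#     2      0    8    1    1    C
#
# For such a file, mlen() returns 2 and mconsensus() returns "AC".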
def read_motif_total_num(motif_fhd, species):
"""Only read the header of binary file, return the total number of
motif scan hits regardless of cutoff.
"""
if species == "hg18":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
"chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
}
elif species == "mm8":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
}
else:
raise Exception("Only hg18/mm8 supported!")
chromosomes = chromosomes_fp.keys()
motif_fhd.seek(0)
# unpack the start pos
for chromosome in chromosomes:
chromosomes_fp[chromosome][0] = upk("<i",motif_fhd.read(4))[0]
motif_fhd.seek(124,1)
motif_fhd.seek(0,2)
# calculate number of hits
total_motif_hits = 0
for i in range(len(chromosomes)-1):
mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
chromosomes_fp[chromosomes[i]][1] = mh
total_motif_hits += mh
# last one
mh = (motif_fhd.tell()-chromosomes_fp[chromosomes[-1]][0])/8
chromosomes_fp[chromosomes[-1]][1]=mh
total_motif_hits += mh
return total_motif_hits
# def read_motif (motif_fhd,species,cutoff=0):
# """Read motif scan result, and return a TabIO.FWTrackI object
# containing the motif locations.
# motif_fhd : a file handler for binary motif scan result
# species : must be "mm8" for mouse or "hg18" for human
# cutoff : cutoff for the motif scan score
# """
# motif_range_list = FWTrackI(fw=0)
# if species == "hg18":
# chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
# "chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
# "chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
# "chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
# "chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
# "chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
# "chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
# "chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
# "chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
# }
# elif species == "mm8":
# chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
# "chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
# "chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
# "chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
# "chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
# "chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
# "chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
# "chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
# }
# else:
# raise Exception("Only hg18/mm8 supported!")
# chromosomes = chromosomes_fp.keys()
# motif_fhd.seek(0)
# # unpack the start pos
# for chromosome in chromosomes:
# chromosomes_fp[chromosome][0] = upk("<i",motif_fhd.read(4))[0]
# motif_fhd.seek(124,1)
# motif_fhd.seek(0,2)
# # calculate number of hits
# total_motif_hits = 0
# for i in range(len(chromosomes)-1):
# mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
# chromosomes_fp[chromosomes[i]][1] = mh
# total_motif_hits += mh
# # last one
# mh = (motif_fhd.tell()-chromosomes_fp[chromosomes[-1]][0])/8
# chromosomes_fp[chromosomes[-1]][1]=mh
# total_motif_hits += mh
# # read and write
# read_motif_hits = 0
# portion = 0
# for chromosome in chromosomes:
# motif_fhd.seek(chromosomes_fp[chromosome][0],0)
# for i in range(chromosomes_fp[chromosome][1]):
# read_motif_hits += 1
# portion = float(read_motif_hits)/total_motif_hits
# if LOG:
# sys.stdout.write("\r%.1f%% %s" % (portion*100,"#"*int(portion*50)))
# sys.stdout.flush()
# loc = upk("<i",motif_fhd.read(4))[0]
# score = upk("<f",motif_fhd.read(4))[0]
# motif_fhd.read(4)
# if score < 0:
# strand = -1
# score = score*-1
# else:
# strand = 1
# #ofhd.write("%s\t%d\t%d\t%s_%s_%d\t%.2f\t%s\n" % (chromosome,loc-1,loc+motif_len-1,motif,chromosome,i,score,strand))
# if score > cutoff:
# #print score,cutoff
# motif_range_list.add_range(chromosome,RangeI(start=loc-1,end=loc,strand=strand))
# #print loc-1
# #sys.stdout.write("\n")
# motif_range_list.merge_overlap()
# return motif_range_list
def read_motif2(motif_fhd, species, cutoff=0):
"""Read motif scan result, and return a WigTrackI object
containing the motif locations.
* If the motif scan data file is not big, use this function to
load the whole file into memory. It may be faster than
read_motif().
motif_fhd : a file handler for binary motif scan result
species : must be "mm8" for mouse or "hg18" for human
cutoff : cutoff for the motif scan score
"""
motif_range_list = FWTrackI(fw=0)
if species == "hg18":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chr20":[0,0],"chr21":[0,0],
"chr22":[0,0],"chrX":[0,0],"chrY":[0,0]
}
chromosomes = ["chr1","chr2","chr3","chr4","chr5","chr6",
"chr7","chr8","chr9","chr10","chr11","chr12",
"chr13","chr14","chr15","chr16","chr17","chr18",
"chr19","chr20","chr21","chr22","chrX","chrY"]
elif species == "mm8":
chromosomes_fp = { # store start and number of file-pos for every chromosome in bin file
"chr1":[0,0],"chr2":[0,0],"chr3":[0,0],
"chr4":[0,0],"chr5":[0,0],"chr6":[0,0],
"chr7":[0,0],"chr8":[0,0],"chr9":[0,0],
"chr10":[0,0],"chr11":[0,0],"chr12":[0,0],
"chr13":[0,0],"chr14":[0,0],"chr15":[0,0],
"chr16":[0,0],"chr17":[0,0],"chr18":[0,0],
"chr19":[0,0],"chrX":[0,0],"chrY":[0,0]
}
chromosomes = ["chr1","chr2","chr3","chr4","chr5","chr6",
"chr7","chr8","chr9","chr10","chr11","chr12",
"chr13","chr14","chr15","chr16","chr17","chr18",
"chr19","chrX","chrY"]
else:
raise Exception("Only hg18/mm8 supported!")
motif_fhd.seek(0)
data = motif_fhd.read()
# unpack the start pos
p = 0
for chromosome in chromosomes:
chromosomes_fp[chromosome][0] = upk("<i",data[p:p+4])[0]
p += 128
# calculate number of hits
total_motif_hits = 0
for i in range(len(chromosomes)-1):
mh = (chromosomes_fp[chromosomes[i+1]][0]-chromosomes_fp[chromosomes[i]][0])/8
chromosomes_fp[chromosomes[i]][1] = mh
total_motif_hits += mh
# last one
mh = (len(data)-chromosomes_fp[chromosomes[-1]][0])/8
chromosomes_fp[chromosomes[-1]][1]=mh
total_motif_hits += mh
# read and write
read_motif_hits = 0
portion = 0
p = 0
n=0
for chromosome in chromosomes:
p = chromosomes_fp[chromosome][0]
for i in range(chromosomes_fp[chromosome][1]):
read_motif_hits += 1
portion = float(read_motif_hits)/total_motif_hits
if LOG:
sys.stdout.write("\r %.1f%% %s" % (portion*100,"#"*int(portion*50)))
sys.stdout.flush()
loc = upk("<i",data[p:p+4])[0]
score = upk("<f",data[p+4:p+8])[0]
p += 8
if score < 0:
strand = 1
score = score*-1
else:
strand = 0
#ofhd.write("%s\t%d\t%d\t%s_%s_%d\t%.2f\t%s\n" % (chromosome,loc-1,loc+motif_len-1,motif,chromosome,i,score,strand))
if score > cutoff:
#print score,cutoff
n+=1
motif_range_list.add_loc(chromosome,loc-1,strand)
#print loc-1
if LOG : sys.stdout.write("\n")
data = None
motif_range_list.merge_overlap()
return motif_range_list
def motif_count (track, motif_track):
"""Count how many motif discovered in a given track.
"""
return track.include(motif_track)
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay import transform
from tvm.relay.testing import run_opt_pass
def test_fuse_simple():
"""Simple testcase."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
return relay.Function([x], w)
def expected():
x = relay.var("p", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.exp(y)
w = relay.squeeze(z)
f1 = relay.Function([x], w)
x = relay.var("x", shape=(10, 20))
y = relay.Call(f1, [x])
return relay.Function([x], y)
z = before()
    zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
    zz = run_opt_pass(z, transform.FuseOps())  # default opt level; this result is compared below
after = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_conv2d_fuse():
"""Test fusion case of conv2d"""
def before(dshape):
x = relay.var("x", shape=dshape)
x = relay.add(x, relay.const(1, "float32"))
y = relay.nn.conv2d(x, relay.var("w1"),
kernel_size=(3, 3),
padding=(1, 1),
channels=16)
# this is the next dominator.
y1 = relay.add(relay.const(1, "float32"), y)
y = relay.add(y, y1)
# second path
z2 = relay.nn.conv2d(y, relay.var("w2"),
kernel_size=(1, 1),
padding=(0,0),
channels=16)
z3 = relay.nn.conv2d(y, relay.var("w3"),
kernel_size=(3, 3),
padding=(1,1),
channels=16)
        # the add below can only be fused into one conv branch (the 1x1 conv)
z = relay.add(z2, z3)
return relay.Function(relay.analysis.free_vars(z), z)
def expected(dshape):
# segment 0
x = relay.var("p0", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
f0 = relay.Function([x], y)
# segment 1
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
y = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1, 1),
channels=16)
y1 = relay.add(relay.const(1, "float32"), y)
y = relay.add(y, y1)
f1 = relay.Function([x, w], y)
# segment 2
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
z2 = relay.nn.conv2d(x, w,
kernel_size=(3, 3),
padding=(1,1),
channels=16)
f2 = relay.Function([x, w], z2)
# segment 3
x = relay.var("p0", shape=dshape)
w = relay.var("p1")
offset = relay.var("p2", shape=dshape)
z3 = relay.nn.conv2d(x, w,
kernel_size=(1, 1),
padding=(0, 0),
channels=16)
z3 = relay.add(z3, offset)
f3 = relay.Function([x, w, offset], z3)
# compose
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
y = relay.Call(f1, [y, relay.var("w1")])
z2 = relay.Call(f2, [y, relay.var("w3")])
z3 = relay.Call(f3, [y, relay.var("w2"), z2])
z = z3
return relay.Function(relay.analysis.free_vars(z), z)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_concatenate():
"""Test fusion case involving concat op and Tuple node"""
def before(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
upsampled = relay.nn.upsampling(pooled, scale=2, layout="NCHW")
concat = relay.concatenate((upsampled, x), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
f0 = relay.Function([x], pooled)
p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2]//2, dshape[3]//2))
p1 = relay.var("p1", shape=dshape)
upsampled = relay.nn.upsampling(p0, scale=2, layout="NCHW")
concat = relay.concatenate((upsampled, p1), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
f1 = relay.Function([p0, p1], out)
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y, x])
return relay.Function([x], z)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_tuple_root():
"""Test fusion case where Tuple node is the root in its group"""
def before(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
upsampled = relay.nn.upsampling(pooled, scale=2, layout="NCHW")
out = relay.Tuple((upsampled, x))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(dshape):
x = relay.var("x", shape=dshape)
pooled = relay.nn.max_pool2d(x, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
f0 = relay.Function([x], pooled)
p0 = relay.var("p0", shape=(dshape[0], dshape[1], dshape[2]//2, dshape[3]//2))
upsampled = relay.nn.upsampling(p0, scale=2, layout="NCHW")
f1 = relay.Function([p0], upsampled)
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y])
tup = relay.Tuple((z, x))
return relay.Function([x], tup)
dshape = (1, 16, 64, 64)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_stop_fusion():
def before(dshape):
x = relay.var("x", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
y = relay.annotation.stop_fusion(y)
z = relay.exp(y)
return relay.Function([x], z)
def expected(dshape):
x = relay.var("p0", shape=dshape)
y = relay.add(x, relay.const(1, "float32"))
f1 = relay.Function([x], y)
x = relay.var("p01", shape=dshape)
y = relay.exp(x)
f2 = relay.Function([x], y)
x = relay.var("x", shape=dshape)
y = relay.Call(f1, [x])
z = relay.Call(f2, [y])
return relay.Function([x], z)
dshape = (10, 20)
z = before(dshape)
zz = run_opt_pass(z, transform.FuseOps())
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_fuse_myia_regression():
def before(dshape, dtype):
x = relay.var('x', shape=dshape, dtype=dtype)
y = relay.var('y', shape=dshape, dtype=dtype)
sb = relay.ScopeBuilder()
with sb.if_scope(relay.op.greater(x, y)):
sb.ret(relay.Function([], x))
with sb.else_scope():
sb.ret(relay.Function([], y))
return relay.Function([x, y],
relay.Call(sb.get(), []))
def expected(dshape, dtype):
x = relay.var('x', shape=dshape, dtype=dtype)
y = relay.var('y', shape=dshape, dtype=dtype)
sb = relay.ScopeBuilder()
p1 = relay.var('p1', shape=dshape, dtype=dtype)
p2 = relay.var('p2', shape=dshape, dtype=dtype)
fused_gt = relay.Function([p1, p2],
relay.op.greater(p1, p2))
with sb.if_scope(fused_gt(x, y)):
sb.ret(relay.Function([], x))
with sb.else_scope():
sb.ret(relay.Function([], y))
return relay.Function([x, y],
relay.Call(sb.get(), []))
dshape = ()
dtype = 'int64'
f = before(dshape, dtype)
zz = run_opt_pass(f, transform.FuseOps())
after = run_opt_pass(expected(dshape, dtype), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_fuse_tuple_get_elemwise():
def before(dim):
X = relay.var("X", shape=(1, dim))
W = relay.var("W", shape=(3 * dim, dim))
matmul = relay.nn.dense(X, W)
splitted = relay.split(matmul, indices_or_sections=3, axis=1)
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
return relay.Function([X, W], out)
def expected(dim):
p0 = relay.var("p0", shape=(1, dim))
p1 = relay.var("p1", shape=(3 * dim, dim))
matmul = relay.nn.dense(p0, p1)
f0 = relay.Function([p0, p1], matmul)
p01 = relay.var("p01", shape=(1, 3 * dim))
splitted = relay.split(p01, indices_or_sections=3, axis=1)
out = relay.sigmoid(splitted[0]) + relay.tanh(splitted[1]) * relay.exp(splitted[2])
f1 = relay.Function([p01], out)
X = relay.var("X", shape=(1, dim))
W = relay.var("W", shape=(3 * dim, dim))
y = relay.Call(f0, [X, W])
z = relay.Call(f1, [y])
return relay.Function([X, W], z)
dim = 10
z = before(dim)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dim), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
def test_tuple_get_root():
def before(dim):
X = relay.var("X", shape=(1, 3 * dim))
W = relay.var("W", shape=(dim, dim))
splitted = relay.split(X, indices_or_sections=3, axis=1)
out = relay.nn.dense(splitted[0], W)
return relay.Function([X, W], out)
def expected(dim):
p0 = relay.var("p0", shape=(1, 3 * dim))
splitted = relay.split(p0, indices_or_sections=3, axis=1)
out = splitted[0]
f0 = relay.Function([p0], out)
p01 = relay.var("p01", shape=(1, dim))
p1 = relay.var("p1", shape=(dim, dim))
out = relay.nn.dense(p01, p1)
f1 = relay.Function([p01, p1], out)
X = relay.var("X", shape=(1, 3 * dim))
W = relay.var("W", shape=(dim, dim))
y = relay.Call(f0, [X])
z = relay.Call(f1, [y, W])
return relay.Function([X, W], z)
dim = 10
z = before(dim)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(dim), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
fuse0 = relay.transform.FuseOps(fuse_opt_level=0)
fuse2 = relay.transform.FuseOps(fuse_opt_level=2)
def test_tuple_intermediate():
def before(x):
inj = relay.squeeze(x)
y1 = relay.add(inj, relay.const(1, "float32"))
tmp = relay.squeeze(inj)
tmp = relay.add(tmp, relay.const(1, "float32"))
y2 = relay.add(tmp, relay.const(1, "float32"))
y3 = relay.add(inj, relay.const(1, "float32"))
concat = relay.concatenate((y1, y2, y3), axis=1)
out_inj = relay.squeeze(concat)
out = relay.add(out_inj, relay.const(1, "float32"))
return relay.Function(relay.analysis.free_vars(out), out)
def expected(p0):
f0 = before(p0)
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
return relay.Function([x], y)
dshape = (1, 16, 64, 64)
x = relay.var("x", shape=dshape)
orig = before(x)
fuse0(relay.Module.from_expr(orig))
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(x), transform.InferType())
assert relay.analysis.alpha_equal(m["main"], after)
def test_tuple_consecutive():
def gen_intermediate_tuple(x):
y1 = relay.add(x, relay.const(1, "float32"))
y2 = relay.add(x, relay.const(1, "float32"))
y3 = relay.add(x, relay.const(1, "float32"))
concat = relay.concatenate((y1, y2, y3), axis=1)
out = relay.add(concat, relay.const(1, "float32"))
return out
def gen_consecutive_tuple(x):
y1 = gen_intermediate_tuple(x)
y2 = gen_intermediate_tuple(x)
y3 = gen_intermediate_tuple(x)
concat = relay.concatenate((y1, y2, y3), axis=1)
return concat
def before(x):
concat = gen_consecutive_tuple(x)
pooled = relay.nn.max_pool2d(concat, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
out = relay.add(pooled, relay.const(1, "float32"))
out2 = relay.add(out, relay.const(1, "float32"))
out_tup = relay.Tuple((out, out2))
return relay.Function(relay.analysis.free_vars(out_tup), out_tup)
def expected(dshape):
p0 = relay.var("p0", shape=dshape)
concat = gen_consecutive_tuple(p0)
f0 = relay.Function([p0], concat)
p01 = relay.var("p01", shape=(1, dshape[1]*9, dshape[2], dshape[3]))
pooled = relay.nn.max_pool2d(p01, pool_size=(2, 2), strides=(2, 2), padding=(0, 0))
out = relay.add(pooled, relay.const(1, "float32"))
f1 = relay.Function([p01], out)
p02 = relay.var("p02", shape=(1, dshape[1]*9, dshape[2]//2, dshape[3]//2))
out = relay.add(p02, relay.const(1, "float32"))
f2 = relay.Function([p02], out)
x = relay.var("x", shape=dshape)
y = relay.Call(f0, [x])
z = relay.Call(f1, [y])
z2 = relay.Call(f2, [z])
return relay.Function([x], relay.Tuple((z, z2)))
dshape = (1, 16, 64, 64)
x = relay.var("x", shape=dshape)
orig = before(x)
fuse0(relay.Module.from_expr(orig))
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(m["main"], after)
def test_inception_like():
def conv(data):
y = relay.nn.conv2d(data, relay.var("w"),
kernel_size=(3, 3),
padding=(1, 1),
channels=16)
return relay.nn.relu(data=y)
def inception_like(data):
c0 = conv(data)
c1 = conv(data)
return relay.concatenate((c0, c1), axis=1)
def before(dshape):
x = relay.var("x", shape=dshape)
in1 = inception_like(x)
in2 = inception_like(in1)
return relay.Function(relay.analysis.free_vars(in2), in2)
def expected(dshape):
p0 = relay.var("p0", shape=dshape)
c = conv(p0)
f0 = relay.Function(relay.analysis.free_vars(c), c)
p01 = relay.var("p01", shape=dshape)
c = conv(p01)
f1 = relay.Function(relay.analysis.free_vars(c), c)
p02 = relay.var("p02", shape=dshape)
p12 = relay.var("p12", shape=dshape)
concat1 = relay.concatenate((p02, p12), axis=1)
f_concat1 = relay.Function([p02, p12], concat1)
dshape2 = (dshape[0], dshape[1]*2, dshape[2], dshape[3])
p03 = relay.var("p03", shape=dshape2)
c = conv(p03)
f2 = relay.Function(relay.analysis.free_vars(c), c)
p04 = relay.var("p04", shape=dshape2)
c = conv(p04)
f3 = relay.Function(relay.analysis.free_vars(c), c)
p05 = relay.var("p05", shape=dshape)
p15 = relay.var("p15", shape=dshape)
concat2 = relay.concatenate((p05, p15), axis=1)
f_concat2 = relay.Function([p05, p15], concat2)
x = relay.var("x", shape=dshape)
c1 = relay.Call(f0, [x, relay.var("w1")])
c2 = relay.Call(f1, [x, relay.var("w2")])
concat = relay.Call(f_concat1, [c1, c2])
c3 = relay.Call(f2, [concat, relay.var("w3")])
c4 = relay.Call(f3, [concat, relay.var("w4")])
out = relay.Call(f_concat2, [c3, c4])
return relay.Function(relay.analysis.free_vars(out), out)
dshape = (1, 16, 64, 64)
orig = before(dshape)
fuse0(relay.Module.from_expr(orig))
m = fuse2(relay.Module.from_expr(orig))
relay.build(m, 'llvm')
after = run_opt_pass(expected(dshape), transform.InferType())
assert relay.analysis.alpha_equal(m["main"], after)
def test_fuse_parallel_injective():
"""Test fusing parallel injective ops to an elemwise op."""
def before():
x = relay.var("x", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.squeeze(y)
u = relay.transpose(y, axes=[0, 1])
w = relay.left_shift(z, u)
return relay.Function([x], w)
def expected():
x = relay.var("p", shape=(10, 20))
y = relay.add(x, relay.const(1, "float32"))
z = relay.squeeze(y)
u = relay.transpose(y, axes=[0, 1])
w = relay.left_shift(z, u)
f1 = relay.Function([x], w)
x = relay.var("x", shape=(10, 20))
y = relay.Call(f1, [x])
return relay.Function([x], y)
z = before()
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=0))
assert not relay.analysis.free_vars(zz)
zz = run_opt_pass(z, transform.FuseOps(fuse_opt_level=2))
assert not relay.analysis.free_vars(zz)
after = run_opt_pass(expected(), transform.InferType())
assert relay.analysis.alpha_equal(zz, after)
if __name__ == "__main__":
test_fuse_simple()
test_conv2d_fuse()
test_concatenate()
test_tuple_root()
test_stop_fusion()
test_fuse_myia_regression()
test_fuse_tuple_get_elemwise()
test_tuple_get_root()
test_tuple_intermediate()
test_tuple_consecutive()
test_inception_like()
test_fuse_parallel_injective()
|
|
# -*- coding: utf-8 -*-
"""
checkout
~~~~~~~~~~~~
Python wrapper for the Checkout Finland API.
Copyright (c) 2014 by Tuomas Blomqvist.
Copyright (c) 2013 by Janne Vanhala.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import hashlib
import hmac
import xml.etree.ElementTree as ET
import requests
__version__ = '0.2.0'
try:
text_type = unicode # This is Py2
except NameError: # Must be Py3
text_type = str
def join_as_bytes(joiner, bits, encoding="ascii"):
joined_unicode = text_type(joiner).join(text_type(bit) for bit in bits)
return joined_unicode.encode(encoding)
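# e.g. join_as_bytes("+", ["0001", 1000, "EUR"]) == b'0001+1000+EUR'
# (an illustrative sketch; the MAC calculations below build their base
# strings this way)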
class CheckoutException(Exception):
"""This exception is raised when the request made to the Checkout API
is invalid, or some other error occurs in the usage of the API."""
def __init__(self, message):
#: An error description of the error in chosen localization. This error
#: description is not meant to be displayed to the end-user.
self.message = message
class Contact(object):
"""This class represents the payer of a payment.
Contact details are optional """
def __init__(self, **options):
#: Payer's first name.
self.first_name = options.get('first_name', '')[0:40]
#: Payer's surname.
self.last_name = options.get('last_name', '')[0:40]
#: Payer's email address.
self.email = options.get('email', '')[0:200]
#: Payer's telephone number.
self.phone = options.get('phone', '')[0:30]
#: Payer's street address.
self.address = options.get('address', '')[0:40]
#: Payer's postal code.
self.postcode = options.get('postcode', '')[0:14]
#: Payer's post office.
self.postoffice = options.get('postoffice', '')[0:18]
#: Payer's country. 3-letter ISO code.
self.country = options.get('country', '')[0:3]
@property
def dict(self):
"""Dict of this contact in fields specified by Checkout API and clipped accordingly."""
return {
'PHONE': self.phone,
'EMAIL': self.email,
'FIRSTNAME': self.first_name,
'FAMILYNAME': self.last_name,
'ADDRESS': self.address,
'POSTCODE': self.postcode,
'POSTOFFICE': self.postoffice,
'COUNTRY': self.country
}
class Payment(object):
def __init__(self, order_number, reference_number, amount, delivery_date, return_url, cancel_url, **options):
#: Order number is a string of characters identifying the customer's
#: purchase and the used webshop software creates it. Mandatory.
if len(order_number) > 20:
raise CheckoutException("order_number over maximum allowed 20 characters")
else:
self.order_number = order_number
#: Reference number is sent to bank by default and is automatically
#: created. In those payment methods that are used as an interface,
#: this field can contain own reference number, which is sent to the
#: bank service instead of the automatically generated reference
#: number. Mandatory.
if len(reference_number) > 20:
raise CheckoutException("reference_number over maximum allowed 20 characters")
else:
self.reference_number = reference_number
#: Order amount in cents. Mandatory.
if len(amount) > 8:
raise CheckoutException("amount over maximum allowed 8 characters")
else:
self.amount = amount
#: Delivery date of order in format YYYYMMDD. Mandatory
if len(delivery_date) > 8:
raise CheckoutException("delivery_date over maximum allowed 8 characters")
else:
self.delivery_date = delivery_date
#: Any data about the order in text format can be sent to the payment
#: system. They are shown in the Merchant's Panel in payment details. Optional.
self.message = options.get('message', '')[0:1000]
        #: Payment currency. Value must be EUR for the Finnish banks, otherwise
#: the payment will not be accepted. Mandatory, defaults to 'EUR'.
self.currency = options.get('currency', 'EUR')
#: Language defines default language for the payment method
#: selection page. Optional, 2-letter ISO code.
self.language = options.get('language', 'FI')
#: Contact object for the Payment. Optional, if supplied with None blank contact is used.
self.contact = options.get('contact', Contact())
#: Payment content. "1" for normal content and "10" for adult content. Mandatory, default 1.
self.content = options.get('content', '1')[0:2]
#: URL to which user is redirected after a successful payment. Mandatory.
if len(return_url) > 300:
raise CheckoutException("return_url over maximum allowed 300 characters")
else:
self.return_url = return_url
#: URL to which user is redirected after a cancelled or failed payment. Mandatory.
if len(cancel_url) > 300:
raise CheckoutException("cancel_url over maximum allowed 300 characters")
else:
self.cancel_url = cancel_url
#: URL to which user is directed, if the payment is pending.
#: After the actual payment, the payment acknowledged as received by Checkout
#: with fetching this URL along with same parameters as used in normal return_url.
#: Optional.
self.delayed_url = options.get('delayed_url', '')
if len(self.delayed_url) > 300:
raise CheckoutException("delayed_url over maximum allowed 300 characters")
#: URL requested when the payment is marked as rejected. The URL is
#: requested with the same GET parameters as return address when the
#: payment is made. Optional.
self.reject_url = options.get('reject_url', '')
if len(self.reject_url) > 300:
raise CheckoutException("reject_url over maximum allowed 300 characters")
@property
def currency(self):
return self._currency
@currency.setter
def currency(self, value):
if value != 'EUR':
raise CheckoutException("Currently EUR is the only supported currency.")
self._currency = value
@property
def language(self):
return self._language
@language.setter
def language(self, value):
if value not in ('FI', 'SE', 'EN'):
raise CheckoutException("Given language is not supported: %r" % value)
self._language = value
@property
def dict(self):
returndict = {
'VERSION': "0001", #: Version of the API.
'STAMP': self.order_number,
'AMOUNT': self.amount,
'REFERENCE': self.reference_number,
'MESSAGE': self.message,
'LANGUAGE': self.language,
'RETURN': self.return_url,
'CANCEL': self.cancel_url,
'REJECT': self.reject_url,
'DELAYED': self.delayed_url,
'CURRENCY': self.currency,
'CONTENT': self.content,
'TYPE': "0", #: Static field.
'ALGORITHM': "3", #: Return AUTHCODE algorithm, "3" for HMAC-SHA256.
'DELIVERY_DATE': self.delivery_date
}
#: Merge with Contact values
returndict.update(self.contact.dict)
return returndict
class Checkout(object):
SERVICE_URL = "https://payment.checkout.fi/"
def __init__(self, merchant_id='375917',
merchant_secret='SAIPPUAKAUPPIAS'):
"""
Initialize Checkout with your own merchant id and merchant secret.
        :param merchant_id: Merchant ID is given to you by Checkout
when you make the contract. Default is the test merchant_id.
:param merchant_secret: Merchant secret is given to you by Checkout.
Default is the test merchant_secret.
"""
self.merchant_id = merchant_id
self.merchant_secret = merchant_secret
self.session = requests.Session()
def get_onsite_button_data(self, payment):
"""
Creates a new payment and returns a `list` with the following data for each payment method:
        { 'bank': bankname, 'url': posturl, 'icon': iconurl, 'fields': {...} }
:param payment: a `Payment` object
"""
postdict = payment.dict
postdict['MERCHANT'] = self.merchant_id
postdict['DEVICE'] = "10" #: "10" to get XML data for payment methods back
postdict['MAC'] = self._calculate_payment_md5(postdict, self.merchant_secret)
response = self.session.post(self.SERVICE_URL, data=postdict)
return self.parse_xml_response(response.content)
def get_offsite_button_data(self, payment):
"""
Returns form fields for off-page payment where user is sent to checkout.fi and shown
all the payment options there instead of showing them onsite.
:param payment: a `Payment` object
"""
paymentdict = payment.dict
paymentdict['MERCHANT'] = self.merchant_id
paymentdict['DEVICE'] = "1" #: "1" to get payment method selection form from Checkout.fi
paymentdict['MAC'] = self._calculate_payment_md5(paymentdict, self.merchant_secret)
return paymentdict
def parse_xml_response(self, xmlraw):
"""
Parses XML-response for onsite payment method
:param xmlraw: Raw XML data returned by checkout.fi
"""
payment_list = []
XML = ET.fromstring(xmlraw)
banks = XML.findall(".//payment/banks/*")
for bank in banks:
bankdict = dict(bank.items())
fielddict = {}
for fieldname in bank:
fielddict[fieldname.tag] = fieldname.text
bankdict["fields"] = fielddict
payment_list.append(bankdict)
return payment_list
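    # Roughly the XML response shape parse_xml_response() expects (a sketch;
    # the attribute and field names shown here are illustrative, not the
    # exact Checkout API):
    #
    #   <trade>
    #     <payment>
    #       <banks>
    #         <bank name="Demo Bank" url="https://..." icon="https://...">
    #           <SOME_FIELD>value</SOME_FIELD>
    #         </bank>
    #       </banks>
    #     </payment>
    #   </trade>
    #
    # Each <bank> element's attributes become one dict in the returned list,
    # and its child elements become that dict's nested "fields" mapping.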
def _calculate_payment_md5(self, params, merchant_secret):
fields = [params["VERSION"], params["STAMP"], params["AMOUNT"], params["REFERENCE"],
params["MESSAGE"], params["LANGUAGE"], params["MERCHANT"], params["RETURN"],
params["CANCEL"], params["REJECT"], params["DELAYED"], params["COUNTRY"],
params["CURRENCY"], params["DEVICE"], params["CONTENT"], params["TYPE"],
params["ALGORITHM"], params["DELIVERY_DATE"], params["FIRSTNAME"], params["FAMILYNAME"],
params["ADDRESS"], params["POSTCODE"], params["POSTOFFICE"], merchant_secret]
base = join_as_bytes("+", fields)
return hashlib.md5(base).hexdigest().upper()
def validate_payment_return(self, mac, version, order_number, order_reference, payment, status, algorithm):
"""
Validates parameters sent by Checkout Finland to the success/cancel URL or
delayed/reject URL after a payment. The parameters must be validated
in order to avoid hacking attempts to confirm payment. Returns `True`
when the parameters are valid, and `False` otherwise.
:param mac: A hash value calculated by payment system and sent to return url.
:param version: Payment version number. GET parameter 'VERSION'.
:param order_number: The same order number that was previously sent to
the payment system. GET parameter 'STAMP'.
:param order_reference: The same order reference that was previously sent
to the payment system. GET parameter 'REFERENCE'.
        :param payment: A payment identifier produced by Checkout Finland, used
for calculating the hash.
:param status: Payment status code, which is part of payment
confirmation. '2'/'5'/'6'/'8'/'9'/'10' = success. '3' = delayed. '-1' = cancelled.
'7' = manual activation required. GET parameter 'STATUS'.
Specified here: http://checkout.fi/uploads/sopimukset/Checkout_1_4_rajapinta_api-v1.7.pdf
        :param algorithm: Payment return algorithm version. This library uses version 3.
GET parameter 'ALGORITHM'.
"""
fields = [version, order_number, order_reference, payment, status, algorithm]
base = join_as_bytes("&", fields)
key = text_type(self.merchant_secret).encode("ascii")
return mac == hmac.new(key, base, hashlib.sha256).hexdigest().upper()
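# Illustrative end-to-end usage (a sketch; the URLs and order data are
# placeholders, and the built-in test credentials are used):
#
#     checkout = Checkout()
#     payment = Payment(
#         order_number="12345",
#         reference_number="12344",
#         amount="1000",            # order total in cents (10.00 EUR)
#         delivery_date="20140606",
#         return_url="https://example.com/return",
#         cancel_url="https://example.com/cancel",
#     )
#     form_fields = checkout.get_offsite_button_data(payment)
#     # Render form_fields as hidden inputs in a form POSTing to
#     # Checkout.SERVICE_URL, then check the redirect back with
#     # checkout.validate_payment_return(...).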
|
|
import asyncio
from collections import namedtuple
import contextlib
import os
import subprocess
import tempfile
import textwrap
import yaml
from .async import create_subprocess_with_handle
from . import cache
from . import compat
from .compat import makedirs
from .error import PrintableError
DEFAULT_PARALLEL_FETCH_LIMIT = 10
DEBUG_PARALLEL_COUNT = 0
DEBUG_PARALLEL_MAX = 0
PluginDefinition = namedtuple(
'PluginDefinition',
['type', 'sync_exe', 'reup_exe', 'fields', 'required_fields',
'optional_fields', 'cache_fields'])
PluginContext = namedtuple(
'PluginContext',
['cwd', 'plugin_cache_root', 'parallelism_semaphore', 'plugin_cache_locks',
'tmp_root'])
@asyncio.coroutine
def plugin_fetch(plugin_context, module_type, module_fields, dest,
display_handle):
env = {'PERU_SYNC_DEST': dest}
yield from _plugin_job(plugin_context, module_type, module_fields, 'sync',
env, display_handle)
@asyncio.coroutine
def plugin_get_reup_fields(plugin_context, module_type, module_fields,
display_handle):
with tmp_dir(plugin_context) as output_file_dir:
output_path = os.path.join(output_file_dir, 'reup_output')
env = {'PERU_REUP_OUTPUT': output_path}
yield from _plugin_job(
plugin_context, module_type, module_fields, 'reup', env,
display_handle)
with open(output_path) as output_file:
fields = yaml.safe_load(output_file) or {}
for key, value in fields.items():
if not isinstance(key, str):
raise PluginModuleFieldError(
'reup field name must be a string: {}'.format(key))
if not isinstance(value, str):
raise PluginModuleFieldError(
'reup field value must be a string: {}'.format(value))
return fields
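# Illustrative reup output file a plugin might write to PERU_REUP_OUTPUT
# (a sketch; the field names are plugin-specific, not fixed by peru):
#
#     rev: "6a21f3c"
#     timestamp: "2015-01-01T00:00:00Z"
#
# plugin_get_reup_fields() would return
# {'rev': '6a21f3c', 'timestamp': '2015-01-01T00:00:00Z'} for this file.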
@asyncio.coroutine
def _plugin_job(plugin_context, module_type, module_fields, command, env,
display_handle):
# We take several locks and other context managers in here. Using an
# ExitStack saves us from indentation hell.
with contextlib.ExitStack() as stack:
definition = _get_plugin_definition(module_type, module_fields,
command)
exe = _get_plugin_exe(definition, command)
# For Windows to run scripts with the right interpreter, we need to run
# as a shell command, rather than exec.
shell_command_line = subprocess.list2cmdline([exe])
complete_env = _plugin_env(
plugin_context, definition, module_fields, command, stack)
complete_env.update(env)
# Use a lock to protect the plugin cache. It would be unsafe for two
# jobs to read/write to the same plugin cache dir at the same time. The
# lock (and the cache dir) are both keyed off the module's "cache
# fields" as defined by plugin.yaml. For plugins that don't define
# cacheable fields, there is no cache dir (it's set to /dev/null) and
# the cache lock is a no-op.
stack.enter_context((yield from _plugin_cache_lock(
plugin_context, definition, module_fields)))
# Use a semaphore to limit the number of jobs that can run in parallel.
# Most plugin fetches hit the network, and for performance reasons we
# don't want to fire off too many network requests at once. See
# DEFAULT_PARALLEL_FETCH_LIMIT. This also lets the user control
# parallelism with the --jobs flag. It's important that this is the
# last lock taken before starting a job, otherwise we might waste a job
# slot just waiting on other locks.
stack.enter_context((yield from plugin_context.parallelism_semaphore))
# We use this debug counter for our parallelism tests. It's important
# that it comes after all locks have been taken (so the job it's
# counting is actually running).
stack.enter_context(debug_parallel_count_context())
try:
yield from create_subprocess_with_handle(
shell_command_line, display_handle, cwd=plugin_context.cwd,
env=complete_env, shell=True)
except subprocess.CalledProcessError as e:
raise PluginRuntimeError(
module_type, module_fields, e.returncode, e.output)
def _get_plugin_exe(definition, command):
    if command == 'sync':
        exe = definition.sync_exe
    elif command == 'reup':
        exe = definition.reup_exe
    else:
        raise RuntimeError('Unrecognized command name: ' + repr(command))
    if exe is None:
        # Plugins may omit 'reup exe' in plugin.yaml (see
        # _get_plugin_definition), so guard against a missing exe before
        # touching the filesystem.
        raise PluginCommandCandidateError(
            '{} plugin does not support {}'.format(definition.type, command))
    if not os.path.exists(exe):
        raise PluginPermissionsError('Plugin exe does not exist: ' + exe)
    if not os.access(exe, os.X_OK):
        raise PluginPermissionsError('Plugin exe is not executable: ' + exe)
    return exe
def _format_module_fields(module_fields):
return {'PERU_MODULE_{}'.format(name.upper()): value for
name, value in module_fields.items()}
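# A doctest-style sketch of the naming scheme (field names and values are
# hypothetical):
#
#     >>> env = _format_module_fields({'url': 'https://example.test',
#     ...                              'rev': 'main'})
#     >>> env == {'PERU_MODULE_URL': 'https://example.test',
#     ...         'PERU_MODULE_REV': 'main'}
#     True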
def _validate_plugin_definition(definition, module_fields):
field_names_not_strings = [name for name in definition.fields
if not isinstance(name, str)]
if field_names_not_strings:
raise PluginModuleFieldError(
'Metadata field names must be strings: ' +
', '.join(repr(name) for name in field_names_not_strings))
missing_module_fields = definition.required_fields - module_fields.keys()
if missing_module_fields:
raise PluginModuleFieldError(
'Required module field missing: ' +
', '.join(missing_module_fields))
unknown_module_fields = module_fields.keys() - definition.fields
if unknown_module_fields:
raise PluginModuleFieldError(
'Unknown module fields: ' + ', '.join(unknown_module_fields))
def _plugin_env(plugin_context, plugin_definition, module_fields, command,
exit_stack):
env = os.environ.copy()
# First, blank out all module field vars. This prevents the calling
# environment from leaking in when optional fields are undefined.
blank_module_vars = {field: '' for field in plugin_definition.fields}
env.update(_format_module_fields(blank_module_vars))
# Then add in the fields that are actually defined.
env.update(_format_module_fields(module_fields))
# Disable buffering by default in Python subprocesses. Without this,
# plugins would usually need to do something like
# print(..., flush=True)
# or else all their progress output would get held up in the stdout buffer
# until the plugin finally exited. Plugins in other languages will need to
# be careful about this.
env['PYTHONUNBUFFERED'] = 'true'
# For plugins that use the same exe for sync and reup, make the command
# name available in the environment.
env['PERU_PLUGIN_COMMAND'] = command
# Create a directory for plugins' temporary files.
env['PERU_PLUGIN_TMP'] = exit_stack.enter_context(tmp_dir(plugin_context))
# Create a persistent cache dir for saved files, like repo clones.
env['PERU_PLUGIN_CACHE'] = _plugin_cache_path(
plugin_context, plugin_definition, module_fields)
return env
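# Putting the pieces together, a sync job for a module with fields
# {'url': ..., 'rev': ...} would roughly see, on top of the inherited
# environment:
#
#     PERU_MODULE_URL=<url>        PERU_PLUGIN_COMMAND=sync
#     PERU_MODULE_REV=<rev>        PERU_PLUGIN_TMP=<fresh temp dir>
#     PYTHONUNBUFFERED=true        PERU_PLUGIN_CACHE=<cache dir or os.devnull>
#
# plus PERU_SYNC_DEST, which plugin_fetch adds separately above.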
@asyncio.coroutine
def _noop_lock():
return contextlib.ExitStack() # a no-op context manager
def _plugin_cache_lock(plugin_context, definition, module_fields):
if not definition.cache_fields:
# This plugin is not cacheable.
return _noop_lock()
key = _plugin_cache_key(definition, module_fields)
return plugin_context.plugin_cache_locks[key]
def _plugin_cache_path(plugin_context, definition, module_fields):
if not definition.cache_fields:
# This plugin is not cacheable.
return os.devnull
key = _plugin_cache_key(definition, module_fields)
plugin_cache = os.path.join(
plugin_context.plugin_cache_root, definition.type, key)
makedirs(plugin_cache)
return plugin_cache
def _plugin_cache_key(definition, module_fields):
assert definition.cache_fields, "Can't compute key for uncacheable type."
return cache.compute_key({
'type': definition.type,
'cacheable_fields': {field: module_fields.get(field, None)
for field in definition.cache_fields},
})
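# For a hypothetical plugin whose plugin.yaml declares `cache fields: [url]`,
# two modules that share a url hash to the same key, and therefore share a
# cache dir and a cache lock:
#
#     cache.compute_key({
#         'type': 'git',
#         'cacheable_fields': {'url': 'https://example.test/repo'},
#     })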
def _get_plugin_definition(module_type, module_fields, command):
root = _find_plugin_dir(module_type)
metadata_path = os.path.join(root, 'plugin.yaml')
if not os.path.isfile(metadata_path):
raise PluginMetadataMissingError(
'No metadata file found for plugin at path: {}'.format(root))
# Read the metadata document.
with open(metadata_path) as metafile:
metadoc = yaml.safe_load(metafile) or {}
sync_exe = os.path.join(root, metadoc.pop('sync exe'))
reup_exe = (None if 'reup exe' not in metadoc
else os.path.join(root, metadoc.pop('reup exe')))
required_fields = frozenset(metadoc.pop('required fields'))
optional_fields = frozenset(metadoc.pop('optional fields', []))
cache_fields = frozenset(metadoc.pop('cache fields', []))
fields = required_fields | optional_fields
# TODO: All of these checks need to be tested.
if metadoc:
raise RuntimeError('Unknown metadata in {} plugin: {}'.format(
module_type, metadoc))
overlap = required_fields & optional_fields
if overlap:
raise RuntimeError('Fields in {} are both required and optional: {}'
.format(module_type, overlap))
invalid = cache_fields - fields
if invalid:
raise RuntimeError(
'"cache fields" must also be either required or optional: ' +
str(invalid))
definition = PluginDefinition(
module_type, sync_exe, reup_exe, fields, required_fields,
optional_fields, cache_fields)
_validate_plugin_definition(definition, module_fields)
return definition
def _find_plugin_dir(module_type):
'''Find the directory containing the plugin definition for the given type.
Do this by searching all the paths where plugins can live for a dir that
matches the type name.'''
for install_dir in _get_plugin_install_dirs():
candidate = os.path.join(install_dir, module_type)
if os.path.isdir(candidate):
return candidate
else:
raise PluginCandidateError(
'No plugin found for `{}` module in paths:\n{}'.format(
module_type,
'\n'.join(_get_plugin_install_dirs())))
def _get_plugin_install_dirs():
'''Return all the places on the filesystem where we should look for plugin
definitions. Order is significant here: user-installed plugins should be
searched first, followed by system-installed plugins, and last of all peru
builtins.'''
builtins_dir = os.path.join(compat.MODULE_ROOT, 'resources', 'plugins')
if os.name == 'nt':
# Windows
local_data_dir = os.path.expandvars('%LOCALAPPDATA%')
program_files_dir = os.path.expandvars('%PROGRAMFILES%')
return (
os.path.join(local_data_dir, 'peru', 'plugins'),
os.path.join(program_files_dir, 'peru', 'plugins'),
builtins_dir,
)
else:
# non-Windows
default_config_dir = os.path.expanduser('~/.config')
config_dir = os.environ.get('XDG_CONFIG_HOME', default_config_dir)
return (
os.path.join(config_dir, 'peru', 'plugins'),
'/usr/local/lib/peru/plugins',
'/usr/lib/peru/plugins',
builtins_dir,
)
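# On a typical Linux machine with XDG_CONFIG_HOME unset, a `git` module would
# therefore be looked up against, in order:
#
#     ~/.config/peru/plugins/git
#     /usr/local/lib/peru/plugins/git
#     /usr/lib/peru/plugins/git
#     <MODULE_ROOT>/resources/plugins/git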
def debug_assert_clean_parallel_count():
assert DEBUG_PARALLEL_COUNT == 0, \
"parallel count should be 0 but it's " + str(DEBUG_PARALLEL_COUNT)
@contextlib.contextmanager
def debug_parallel_count_context():
global DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX
DEBUG_PARALLEL_COUNT += 1
DEBUG_PARALLEL_MAX = max(DEBUG_PARALLEL_COUNT, DEBUG_PARALLEL_MAX)
try:
yield
finally:
DEBUG_PARALLEL_COUNT -= 1
def tmp_dir(context):
return tempfile.TemporaryDirectory(dir=context.tmp_root)
class PluginCandidateError(PrintableError):
pass
class PluginCommandCandidateError(PrintableError):
pass
class PluginModuleFieldError(PrintableError):
pass
class PluginMetadataMissingError(PrintableError):
pass
class PluginPermissionsError(PrintableError):
pass
class PluginRuntimeError(PrintableError):
def __init__(self, type, fields, errorcode, output):
formatted_fields = '\n'.join(' {}: {}'.format(name, val)
for name, val in fields.items())
super().__init__(textwrap.dedent('''\
{} plugin exited with error code {}.
Fields:
{}
Output:
{}''').format(type, errorcode, formatted_fields, output))
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo.config import cfg
from six import moves
import sqlalchemy as sa
from neutron.common import constants as q_const
from neutron.common import exceptions as exc
from neutron.common import utils
from neutron.db import api as db_api
from neutron.db import model_base
from neutron.openstack.common import log
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import helpers
LOG = log.getLogger(__name__)
vlan_opts = [
cfg.ListOpt('network_vlan_ranges',
default=[],
help=_("List of <physical_network>:<vlan_min>:<vlan_max> or "
"<physical_network> specifying physical_network names "
"usable for VLAN provider and tenant networks, as "
"well as ranges of VLAN tags on each available for "
"allocation to tenant networks."))
]
cfg.CONF.register_opts(vlan_opts, "ml2_type_vlan")
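# A minimal ml2_conf.ini snippet exercising this option might look like:
#
#     [ml2_type_vlan]
#     network_vlan_ranges = physnet1:1000:2999,physnet2
#
# which makes VLANs 1000-2999 on physnet1 allocatable to tenant networks and
# makes physnet2 usable for provider networks with any valid VLAN tag.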
class VlanAllocation(model_base.BASEV2):
"""Represent allocation state of a vlan_id on a physical network.
If allocated is False, the vlan_id on the physical_network is
available for allocation to a tenant network. If allocated is
True, the vlan_id on the physical_network is in use, either as a
tenant or provider network.
When an allocation is released, if the vlan_id for the
physical_network is inside the pool described by
VlanTypeDriver.network_vlan_ranges, then allocated is set to
False. If it is outside the pool, the record is deleted.
"""
__tablename__ = 'ml2_vlan_allocations'
physical_network = sa.Column(sa.String(64), nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False)
class VlanTypeDriver(helpers.TypeDriverHelper):
"""Manage state for VLAN networks with ML2.
The VlanTypeDriver implements the 'vlan' network_type. VLAN
network segments provide connectivity between VMs and other
devices using any connected IEEE 802.1Q conformant
physical_network segmented into virtual networks via IEEE 802.1Q
headers. Up to 4094 VLAN network segments can exist on each
available physical_network.
"""
def __init__(self):
super(VlanTypeDriver, self).__init__(VlanAllocation)
self._parse_network_vlan_ranges()
def _parse_network_vlan_ranges(self):
try:
self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
cfg.CONF.ml2_type_vlan.network_vlan_ranges)
# TODO(rkukura): Validate that each physical_network name
# is neither empty nor too long.
except Exception:
LOG.exception(_("Failed to parse network_vlan_ranges. "
"Service terminated!"))
sys.exit(1)
LOG.info(_("Network VLAN ranges: %s"), self.network_vlan_ranges)
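    # With the sample configuration above, parse_network_vlan_ranges yields a
    # dict mapping each physical network to its (vlan_min, vlan_max) tuples,
    # e.g. {'physnet1': [(1000, 2999)], 'physnet2': []};
    # _sync_vlan_allocations below expands those ranges into individual
    # VlanAllocation rows.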
def _sync_vlan_allocations(self):
session = db_api.get_session()
with session.begin(subtransactions=True):
# get existing allocations for all physical networks
allocations = dict()
allocs = (session.query(VlanAllocation).
with_lockmode('update'))
for alloc in allocs:
if alloc.physical_network not in allocations:
allocations[alloc.physical_network] = set()
allocations[alloc.physical_network].add(alloc)
# process vlan ranges for each configured physical network
for (physical_network,
vlan_ranges) in self.network_vlan_ranges.items():
# determine current configured allocatable vlans for
# this physical network
vlan_ids = set()
for vlan_min, vlan_max in vlan_ranges:
vlan_ids |= set(moves.xrange(vlan_min, vlan_max + 1))
# remove from table unallocated vlans not currently
# allocatable
if physical_network in allocations:
for alloc in allocations[physical_network]:
try:
# see if vlan is allocatable
vlan_ids.remove(alloc.vlan_id)
except KeyError:
                            # it's not allocatable, so check if it's allocated
if not alloc.allocated:
# it's not, so remove it from table
LOG.debug(_("Removing vlan %(vlan_id)s on "
"physical network "
"%(physical_network)s from pool"),
{'vlan_id': alloc.vlan_id,
'physical_network':
physical_network})
session.delete(alloc)
del allocations[physical_network]
# add missing allocatable vlans to table
for vlan_id in sorted(vlan_ids):
alloc = VlanAllocation(physical_network=physical_network,
vlan_id=vlan_id,
allocated=False)
session.add(alloc)
# remove from table unallocated vlans for any unconfigured
# physical networks
            for allocs in allocations.values():
for alloc in allocs:
if not alloc.allocated:
LOG.debug(_("Removing vlan %(vlan_id)s on physical "
"network %(physical_network)s from pool"),
{'vlan_id': alloc.vlan_id,
'physical_network':
alloc.physical_network})
session.delete(alloc)
def get_type(self):
return p_const.TYPE_VLAN
def initialize(self):
self._sync_vlan_allocations()
LOG.info(_("VlanTypeDriver initialization complete"))
def is_partial_segment(self, segment):
return segment.get(api.SEGMENTATION_ID) is None
def validate_provider_segment(self, segment):
physical_network = segment.get(api.PHYSICAL_NETWORK)
segmentation_id = segment.get(api.SEGMENTATION_ID)
if physical_network:
if physical_network not in self.network_vlan_ranges:
msg = (_("physical_network '%s' unknown "
" for VLAN provider network") % physical_network)
raise exc.InvalidInput(error_message=msg)
if segmentation_id:
if not utils.is_valid_vlan_tag(segmentation_id):
msg = (_("segmentation_id out of range (%(min)s through "
"%(max)s)") %
{'min': q_const.MIN_VLAN_TAG,
'max': q_const.MAX_VLAN_TAG})
raise exc.InvalidInput(error_message=msg)
elif segmentation_id:
msg = _("segmentation_id requires physical_network for VLAN "
"provider network")
raise exc.InvalidInput(error_message=msg)
for key, value in segment.items():
if value and key not in [api.NETWORK_TYPE,
api.PHYSICAL_NETWORK,
api.SEGMENTATION_ID]:
msg = _("%s prohibited for VLAN provider network") % key
raise exc.InvalidInput(error_message=msg)
def reserve_provider_segment(self, session, segment):
filters = {}
physical_network = segment.get(api.PHYSICAL_NETWORK)
if physical_network is not None:
filters['physical_network'] = physical_network
vlan_id = segment.get(api.SEGMENTATION_ID)
if vlan_id is not None:
filters['vlan_id'] = vlan_id
if self.is_partial_segment(segment):
alloc = self.allocate_partially_specified_segment(
session, **filters)
if not alloc:
raise exc.NoNetworkAvailable
else:
alloc = self.allocate_fully_specified_segment(
session, **filters)
if not alloc:
raise exc.VlanIdInUse(**filters)
return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: alloc.physical_network,
api.SEGMENTATION_ID: alloc.vlan_id}
def allocate_tenant_segment(self, session):
alloc = self.allocate_partially_specified_segment(session)
if not alloc:
return
return {api.NETWORK_TYPE: p_const.TYPE_VLAN,
api.PHYSICAL_NETWORK: alloc.physical_network,
api.SEGMENTATION_ID: alloc.vlan_id}
def release_segment(self, session, segment):
physical_network = segment[api.PHYSICAL_NETWORK]
vlan_id = segment[api.SEGMENTATION_ID]
ranges = self.network_vlan_ranges.get(physical_network, [])
inside = any(lo <= vlan_id <= hi for lo, hi in ranges)
with session.begin(subtransactions=True):
query = (session.query(VlanAllocation).
filter_by(physical_network=physical_network,
vlan_id=vlan_id))
if inside:
count = query.update({"allocated": False})
if count:
LOG.debug("Releasing vlan %(vlan_id)s on physical "
"network %(physical_network)s to pool",
{'vlan_id': vlan_id,
'physical_network': physical_network})
else:
count = query.delete()
if count:
LOG.debug("Releasing vlan %(vlan_id)s on physical "
"network %(physical_network)s outside pool",
{'vlan_id': vlan_id,
'physical_network': physical_network})
if not count:
LOG.warning(_("No vlan_id %(vlan_id)s found on physical "
"network %(physical_network)s"),
{'vlan_id': vlan_id,
'physical_network': physical_network})
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for testing `LinearOperator` and sub-classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import itertools
import numpy as np
import six
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.platform import test
class OperatorShapesInfo(object):
"""Object encoding expected shape for a test.
Encodes the expected shape of a matrix for a test. Also
allows additional metadata for the test harness.
"""
def __init__(self, shape, **kwargs):
self.shape = shape
self.__dict__.update(kwargs)
class CheckTapeSafeSkipOptions(object):
# Skip checking this particular method.
DETERMINANT = "determinant"
DIAG_PART = "diag_part"
LOG_ABS_DETERMINANT = "log_abs_determinant"
TRACE = "trace"
@six.add_metaclass(abc.ABCMeta) # pylint: disable=no-init
class LinearOperatorDerivedClassTest(test.TestCase):
"""Tests for derived classes.
Subclasses should implement every abstractmethod, and this will enable all
test methods to work.
"""
# Absolute/relative tolerance for tests.
_atol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
_rtol = {
dtypes.float16: 1e-3,
dtypes.float32: 1e-6,
dtypes.float64: 1e-12,
dtypes.complex64: 1e-6,
dtypes.complex128: 1e-12
}
def assertAC(self, x, y):
"""Derived classes can set _atol, _rtol to get different tolerance."""
dtype = dtypes.as_dtype(x.dtype)
atol = self._atol[dtype]
rtol = self._rtol[dtype]
self.assertAllClose(x, y, atol=atol, rtol=rtol)
@staticmethod
def adjoint_options():
return [False, True]
@staticmethod
def adjoint_arg_options():
return [False, True]
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@staticmethod
def use_placeholder_options():
return [False, True]
@staticmethod
def operator_shapes_infos():
"""Returns list of OperatorShapesInfo, encapsulating the shape to test."""
raise NotImplementedError("operator_shapes_infos has not been implemented.")
@abc.abstractmethod
def operator_and_matrix(
self, shapes_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
"""Build a batch matrix and an Operator that should have similar behavior.
Every operator acts like a (batch) matrix. This method returns both
together, and is used by tests.
Args:
shapes_info: `OperatorShapesInfo`, encoding shape information about the
operator.
dtype: Numpy dtype. Data type of returned array/operator.
use_placeholder: Python bool. If True, initialize the operator with a
placeholder of undefined shape and correct dtype.
ensure_self_adjoint_and_pd: If `True`,
construct this operator to be Hermitian Positive Definite, as well
as ensuring the hints `is_positive_definite` and `is_self_adjoint`
are set.
This is useful for testing methods such as `cholesky`.
Returns:
operator: `LinearOperator` subclass instance.
mat: `Tensor` representing operator.
"""
# Create a matrix as a numpy array with desired shape/dtype.
# Create a LinearOperator that should have the same behavior as the matrix.
raise NotImplementedError("Not implemented yet.")
@abc.abstractmethod
def make_rhs(self, operator, adjoint, with_batch=True):
"""Make a rhs appropriate for calling operator.solve(rhs).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making a 'rhs' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `rhs` with the same batch
shape as operator, and otherwise create a matrix without any batch
shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_rhs is not defined.")
@abc.abstractmethod
def make_x(self, operator, adjoint, with_batch=True):
"""Make an 'x' appropriate for calling operator.matmul(x).
Args:
operator: A `LinearOperator`
adjoint: Python `bool`. If `True`, we are making an 'x' value for the
adjoint operator.
with_batch: Python `bool`. If `True`, create `x` with the same batch shape
as operator, and otherwise create a matrix without any batch shape.
Returns:
A `Tensor`
"""
raise NotImplementedError("make_x is not defined.")
@staticmethod
def skip_these_tests():
"""List of test names to skip."""
# Subclasses should over-ride if they want to skip some tests.
# To skip "test_foo", add "foo" to this list.
return []
def assertRaisesError(self, msg):
"""assertRaisesRegexp or OpError, depending on context.executing_eagerly."""
if context.executing_eagerly():
return self.assertRaisesRegexp(Exception, msg)
return self.assertRaisesOpError(msg)
def check_tape_safe(self, operator, skip_options=None):
"""Check gradients are not None w.r.t. operator.variables.
Meant to be called from the derived class.
    This ensures grads are not None w.r.t. every variable in
    operator.variables. If more fine-grained testing is needed, a custom test
    should be written.
Args:
operator: LinearOperator. Exact checks done will depend on hints.
skip_options: Optional list of CheckTapeSafeSkipOptions.
Makes this test skip particular checks.
"""
skip_options = skip_options or []
if not operator.variables:
raise AssertionError("`operator.variables` was empty")
def _assert_not_none(iterable):
for item in iterable:
self.assertIsNotNone(item)
# Tape tests that can be run on every operator below.
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.adjoint().to_dense(), operator.variables))
x = math_ops.cast(
array_ops.ones(shape=operator.H.shape_tensor()[:-1]), operator.dtype)
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.matvec(x), operator.variables))
# Tests for square, but possibly non-singular operators below.
if not operator.is_square:
return
for option in [
CheckTapeSafeSkipOptions.DETERMINANT,
CheckTapeSafeSkipOptions.LOG_ABS_DETERMINANT,
CheckTapeSafeSkipOptions.DIAG_PART,
CheckTapeSafeSkipOptions.TRACE,
]:
with backprop.GradientTape() as tape:
if option not in skip_options:
_assert_not_none(
tape.gradient(getattr(operator, option)(), operator.variables))
# Tests for non-singular operators below.
if operator.is_non_singular is False: # pylint: disable=g-bool-id-comparison
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.inverse().to_dense(), operator.variables))
with backprop.GradientTape() as tape:
_assert_not_none(tape.gradient(operator.solvevec(x), operator.variables))
# Tests for SPD operators below.
if not (operator.is_self_adjoint and operator.is_positive_definite):
return
with backprop.GradientTape() as tape:
_assert_not_none(
tape.gradient(operator.cholesky().to_dense(), operator.variables))
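  # A derived class would typically call this from a test method, e.g. (a
  # sketch with a hypothetical variable-backed operator):
  #
  #     def test_tape_safe(self):
  #       diag = tf.Variable([1., 2.])
  #       operator = tf.linalg.LinearOperatorDiag(diag)
  #       self.check_tape_safe(operator)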
# pylint:disable=missing-docstring
def _test_to_dense(use_placeholder, shapes_info, dtype):
def test_to_dense(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_dense = operator.to_dense()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_dense.shape)
op_dense_v, mat_v = sess.run([op_dense, mat])
self.assertAC(op_dense_v, mat_v)
return test_to_dense
def _test_det(use_placeholder, shapes_info, dtype):
def test_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_det = operator.determinant()
if not use_placeholder:
self.assertAllEqual(shapes_info.shape[:-2], op_det.shape)
op_det_v, mat_det_v = sess.run(
[op_det, linalg_ops.matrix_determinant(mat)])
self.assertAC(op_det_v, mat_det_v)
return test_det
def _test_log_abs_det(use_placeholder, shapes_info, dtype):
def test_log_abs_det(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_log_abs_det = operator.log_abs_determinant()
_, mat_log_abs_det = linalg.slogdet(mat)
if not use_placeholder:
self.assertAllEqual(
shapes_info.shape[:-2], op_log_abs_det.shape)
op_log_abs_det_v, mat_log_abs_det_v = sess.run(
[op_log_abs_det, mat_log_abs_det])
self.assertAC(op_log_abs_det_v, mat_log_abs_det_v)
return test_log_abs_det
def _test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
x = self.make_x(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, compute A X^H^H = A X.
if adjoint_arg:
op_matmul = operator.matmul(
linalg.adjoint(x),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_matmul = operator.matmul(x, adjoint=adjoint)
mat_matmul = math_ops.matmul(mat, x, adjoint_a=adjoint)
if not use_placeholder:
self.assertAllEqual(op_matmul.shape,
mat_matmul.shape)
op_matmul_v, mat_matmul_v = sess.run(
[op_matmul, mat_matmul])
self.assertAC(op_matmul_v, mat_matmul_v)
def _test_matmul(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg):
def test_matmul(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=True)
return test_matmul
def _test_matmul_with_broadcast(
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg):
def test_matmul_with_broadcast(self):
_test_matmul_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
        with_batch=False)
return test_matmul_with_broadcast
def _test_adjoint(use_placeholder, shapes_info, dtype):
def test_adjoint(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_adjoint = operator.adjoint().to_dense()
op_adjoint_h = operator.H.to_dense()
mat_adjoint = linalg.adjoint(mat)
op_adjoint_v, op_adjoint_h_v, mat_adjoint_v = sess.run(
[op_adjoint, op_adjoint_h, mat_adjoint])
self.assertAC(mat_adjoint_v, op_adjoint_v)
self.assertAC(mat_adjoint_v, op_adjoint_h_v)
return test_adjoint
def _test_cholesky(use_placeholder, shapes_info, dtype):
def test_cholesky(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
op_chol = operator.cholesky().to_dense()
mat_chol = linalg_ops.cholesky(mat)
op_chol_v, mat_chol_v = sess.run([op_chol, mat_chol])
self.assertAC(mat_chol_v, op_chol_v)
return test_cholesky
def _test_eigvalsh(use_placeholder, shapes_info, dtype):
def test_eigvalsh(self):
with self.test_session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder,
ensure_self_adjoint_and_pd=True)
# Eigenvalues are real, so we'll cast these to float64 and sort
# for comparison.
op_eigvals = sort_ops.sort(
math_ops.cast(operator.eigvals(), dtype=dtypes.float64), axis=-1)
mat_eigvals = sort_ops.sort(
math_ops.cast(
linalg_ops.self_adjoint_eigvals(mat), dtype=dtypes.float64),
axis=-1)
op_eigvals_v, mat_eigvals_v = sess.run([op_eigvals, mat_eigvals])
atol = self._atol[dtype] # pylint: disable=protected-access
rtol = self._rtol[dtype] # pylint: disable=protected-access
if dtype == dtypes.float32 or dtype == dtypes.complex64:
atol = 1e-4
rtol = 1e-4
self.assertAllClose(op_eigvals_v, mat_eigvals_v, atol=atol, rtol=rtol)
return test_eigvalsh
def _test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch):
# If batch dimensions are omitted, but there are
# no batch dimensions for the linear operator, then
# skip the test case. This is already checked with
# with_batch=True.
if not with_batch and len(shapes_info.shape) <= 2:
return
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
rhs = self.make_rhs(
operator, adjoint=adjoint, with_batch=with_batch)
# If adjoint_arg, solve A X = (rhs^H)^H = rhs.
if adjoint_arg:
op_solve = operator.solve(
linalg.adjoint(rhs),
adjoint=adjoint,
adjoint_arg=adjoint_arg)
else:
op_solve = operator.solve(
rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)
mat_solve = linear_operator_util.matrix_solve_with_broadcast(
mat, rhs, adjoint=adjoint)
if not use_placeholder:
self.assertAllEqual(op_solve.shape,
mat_solve.shape)
op_solve_v, mat_solve_v = sess.run([op_solve, mat_solve])
self.assertAC(op_solve_v, mat_solve_v)
def _test_solve(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg):
def test_solve(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=True)
return test_solve
def _test_solve_with_broadcast(
use_placeholder, shapes_info, dtype, adjoint, adjoint_arg):
def test_solve_with_broadcast(self):
_test_solve_base(
self,
use_placeholder,
shapes_info,
dtype,
adjoint,
adjoint_arg,
with_batch=False)
return test_solve_with_broadcast
def _test_inverse(use_placeholder, shapes_info, dtype):
def test_inverse(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_inverse_v, mat_inverse_v = sess.run([
operator.inverse().to_dense(), linalg.inv(mat)])
self.assertAC(op_inverse_v, mat_inverse_v)
return test_inverse
def _test_trace(use_placeholder, shapes_info, dtype):
def test_trace(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_trace = operator.trace()
mat_trace = math_ops.trace(mat)
if not use_placeholder:
self.assertAllEqual(op_trace.shape, mat_trace.shape)
op_trace_v, mat_trace_v = sess.run([op_trace, mat_trace])
self.assertAC(op_trace_v, mat_trace_v)
return test_trace
def _test_add_to_tensor(use_placeholder, shapes_info, dtype):
def test_add_to_tensor(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_plus_2mat = operator.add_to_tensor(2 * mat)
if not use_placeholder:
self.assertAllEqual(shapes_info.shape, op_plus_2mat.shape)
op_plus_2mat_v, mat_v = sess.run([op_plus_2mat, mat])
self.assertAC(op_plus_2mat_v, 3 * mat_v)
return test_add_to_tensor
def _test_diag_part(use_placeholder, shapes_info, dtype):
def test_diag_part(self):
with self.session(graph=ops.Graph()) as sess:
sess.graph.seed = random_seed.DEFAULT_GRAPH_SEED
operator, mat = self.operator_and_matrix(
shapes_info, dtype, use_placeholder=use_placeholder)
op_diag_part = operator.diag_part()
mat_diag_part = array_ops.matrix_diag_part(mat)
if not use_placeholder:
self.assertAllEqual(mat_diag_part.shape,
op_diag_part.shape)
op_diag_part_, mat_diag_part_ = sess.run(
[op_diag_part, mat_diag_part])
self.assertAC(op_diag_part_, mat_diag_part_)
return test_diag_part
# pylint:enable=missing-docstring
def add_tests(test_cls):
"""Add tests for LinearOperator methods."""
test_name_dict = {
"add_to_tensor": _test_add_to_tensor,
"cholesky": _test_cholesky,
"det": _test_det,
"diag_part": _test_diag_part,
"eigvalsh": _test_eigvalsh,
"inverse": _test_inverse,
"log_abs_det": _test_log_abs_det,
"matmul": _test_matmul,
"matmul_with_broadcast": _test_matmul_with_broadcast,
"solve": _test_solve,
"solve_with_broadcast": _test_solve_with_broadcast,
"to_dense": _test_to_dense,
"trace": _test_trace,
}
tests_with_adjoint_args = [
"matmul",
"matmul_with_broadcast",
"solve",
"solve_with_broadcast",
]
for name, test_template_fn in test_name_dict.items():
if name in test_cls.skip_these_tests():
continue
for dtype, use_placeholder, shape_info in itertools.product(
test_cls.dtypes_to_test(),
test_cls.use_placeholder_options(),
test_cls.operator_shapes_infos()):
base_test_name = "_".join([
"test", name, "_shape={},dtype={},use_placeholder={}".format(
shape_info.shape, dtype, use_placeholder)])
if name in tests_with_adjoint_args:
for adjoint in test_cls.adjoint_options():
for adjoint_arg in test_cls.adjoint_arg_options():
test_name = base_test_name + ",adjoint={},adjoint_arg={}".format(
adjoint, adjoint_arg)
if hasattr(test_cls, test_name):
raise RuntimeError("Test %s defined more than once" % test_name)
setattr(
test_cls,
test_name,
test_util.run_deprecated_v1(test_template_fn(
use_placeholder,
shape_info,
dtype,
adjoint,
adjoint_arg)))
else:
if hasattr(test_cls, base_test_name):
raise RuntimeError("Test %s defined more than once" % base_test_name)
setattr(
test_cls,
base_test_name,
test_util.run_deprecated_v1(test_template_fn(
use_placeholder, shape_info, dtype)))
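# Typical usage (a sketch): a concrete test file subclasses one of the base
# classes below, fills in the abstract hooks, and calls add_tests at import
# time so the generated methods are visible to the test runner:
#
#     class LinearOperatorMyOpTest(SquareLinearOperatorDerivedClassTest):
#       def operator_and_matrix(self, shapes_info, dtype, use_placeholder,
#                               ensure_self_adjoint_and_pd=False):
#         ...
#
#     add_tests(LinearOperatorMyOpTest)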
@six.add_metaclass(abc.ABCMeta)
class SquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for square operators.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((0, 0)),
shapes_info((1, 1)),
shapes_info((1, 3, 3)),
shapes_info((3, 4, 4)),
shapes_info((2, 1, 4, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# This operator is square, so rhs and x will have same shape.
# adjoint value makes no difference because the operator shape doesn't
# change since it is square, but be pedantic.
return self.make_x(operator, adjoint=not adjoint, with_batch=with_batch)
def make_x(self, operator, adjoint, with_batch=True):
# Value of adjoint makes no difference because the operator is square.
# Return the number of systems to solve, R, equal to 1 or 2.
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, N, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
@six.add_metaclass(abc.ABCMeta)
class NonSquareLinearOperatorDerivedClassTest(LinearOperatorDerivedClassTest):
"""Base test class appropriate for generic rectangular operators.
  Square shapes are never tested by this class, so if you want to test your
  operator with a square shape, create two test classes, the other subclassing
  SquareLinearOperatorDerivedClassTest.
Sub-classes must still define all abstractmethods from
LinearOperatorDerivedClassTest that are not defined here.
"""
@staticmethod
def skip_these_tests():
"""List of test names to skip."""
return [
"cholesky",
"eigvalsh",
"inverse",
"solve",
"solve_with_broadcast",
"det",
"log_abs_det"
]
@staticmethod
def operator_shapes_infos():
shapes_info = OperatorShapesInfo
# non-batch operators (n, n) and batch operators.
return [
shapes_info((2, 1)),
shapes_info((1, 2)),
shapes_info((1, 3, 2)),
shapes_info((3, 3, 4)),
shapes_info((2, 1, 2, 4))]
def make_rhs(self, operator, adjoint, with_batch=True):
# TODO(langmore) Add once we're testing solve_ls.
raise NotImplementedError(
"make_rhs not implemented because we don't test solve")
def make_x(self, operator, adjoint, with_batch=True):
# Return the number of systems for the argument 'x' for .matmul(x)
r = self._get_num_systems(operator)
# If operator.shape = [B1,...,Bb, M, N] this returns a random matrix of
# shape [B1,...,Bb, N, R], R = 1 or 2.
if operator.shape.is_fully_defined():
batch_shape = operator.batch_shape.as_list()
if adjoint:
n = operator.range_dimension.value
else:
n = operator.domain_dimension.value
if with_batch:
x_shape = batch_shape + [n, r]
else:
x_shape = [n, r]
else:
batch_shape = operator.batch_shape_tensor()
if adjoint:
n = operator.range_dimension_tensor()
else:
n = operator.domain_dimension_tensor()
if with_batch:
x_shape = array_ops.concat((batch_shape, [n, r]), 0)
else:
x_shape = [n, r]
return random_normal(x_shape, dtype=operator.dtype)
def _get_num_systems(self, operator):
"""Get some number, either 1 or 2, depending on operator."""
if operator.tensor_rank is None or operator.tensor_rank % 2:
return 1
else:
return 2
def random_positive_definite_matrix(shape, dtype, force_well_conditioned=False):
"""[batch] positive definite matrix.
Args:
shape: `TensorShape` or Python list. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype.
force_well_conditioned: Python bool. If `True`, returned matrix has
eigenvalues with modulus in `(1, 4)`. Otherwise, eigenvalues are
chi-squared random variables.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
if not tensor_util.is_tensor(shape):
shape = tensor_shape.TensorShape(shape)
# Matrix must be square.
shape.dims[-1].assert_is_compatible_with(shape.dims[-2])
with ops.name_scope("random_positive_definite_matrix"):
tril = random_tril_matrix(
shape, dtype, force_well_conditioned=force_well_conditioned)
return math_ops.matmul(tril, tril, adjoint_b=True)
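# Example: a batch of two well-conditioned 3x3 SPD matrices for a test
# (shape and dtype are arbitrary choices):
#
#     matrix = random_positive_definite_matrix(
#         shape=[2, 3, 3], dtype=dtypes.float64, force_well_conditioned=True)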
def random_tril_matrix(shape,
dtype,
force_well_conditioned=False,
remove_upper=True):
"""[batch] lower triangular matrix.
Args:
shape: `TensorShape` or Python `list`. Shape of the returned matrix.
dtype: `TensorFlow` `dtype` or Python dtype
force_well_conditioned: Python `bool`. If `True`, returned matrix will have
eigenvalues with modulus in `(1, 2)`. Otherwise, eigenvalues are unit
normal random variables.
remove_upper: Python `bool`.
If `True`, zero out the strictly upper triangle.
If `False`, the lower triangle of returned matrix will have desired
properties, but will not have the strictly upper triangle zero'd out.
Returns:
`Tensor` with desired shape and dtype.
"""
with ops.name_scope("random_tril_matrix"):
# Totally random matrix. Has no nice properties.
tril = random_normal(shape, dtype=dtype)
if remove_upper:
tril = array_ops.matrix_band_part(tril, -1, 0)
# Create a diagonal with entries having modulus in [1, 2].
if force_well_conditioned:
maxval = ops.convert_to_tensor(np.sqrt(2.), dtype=dtype.real_dtype)
diag = random_sign_uniform(
shape[:-1], dtype=dtype, minval=1., maxval=maxval)
tril = array_ops.matrix_set_diag(tril, diag)
return tril
def random_normal(shape, mean=0.0, stddev=1.0, dtype=dtypes.float32, seed=None):
"""Tensor with (possibly complex) Gaussian entries.
Samples are distributed like
```
N(mean, stddev^2), if dtype is real,
X + iY, where X, Y ~ N(mean, stddev^2) if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_normal"):
samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 1234
more_samples = random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) Uniform entries.
Samples are distributed like
```
Uniform[minval, maxval], if dtype is real,
X + iY, where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_uniform"):
samples = random_ops.random_uniform(
shape, dtype=dtype.real_dtype, minval=minval, maxval=maxval, seed=seed)
if dtype.is_complex:
if seed is not None:
seed += 12345
more_samples = random_ops.random_uniform(
shape,
dtype=dtype.real_dtype,
minval=minval,
maxval=maxval,
seed=seed)
samples = math_ops.complex(samples, more_samples)
return samples
def random_sign_uniform(shape,
minval=None,
maxval=None,
dtype=dtypes.float32,
seed=None):
"""Tensor with (possibly complex) random entries from a "sign Uniform".
Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
Samples from this `Op` are distributed like
```
Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
Z * (X + iY), where X, Y ~ Uniform[minval, maxval], if dtype is complex.
```
Args:
shape: `TensorShape` or Python list. Shape of the returned tensor.
minval: `0-D` `Tensor` giving the minimum values.
maxval: `0-D` `Tensor` giving the maximum values.
dtype: `TensorFlow` `dtype` or Python dtype
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
"""
dtype = dtypes.as_dtype(dtype)
with ops.name_scope("random_sign_uniform"):
unsigned_samples = random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
if seed is not None:
seed += 12
signs = math_ops.sign(
random_ops.random_uniform(shape, minval=-1., maxval=1., seed=seed))
return unsigned_samples * math_ops.cast(signs, unsigned_samples.dtype)
def random_normal_correlated_columns(shape,
mean=0.0,
stddev=1.0,
dtype=dtypes.float32,
eps=1e-4,
seed=None):
"""Batch matrix with (possibly complex) Gaussian entries and correlated cols.
Returns random batch matrix `A` with specified element-wise `mean`, `stddev`,
living close to an embedded hyperplane.
Suppose `shape[-2:] = (M, N)`.
If `M < N`, `A` is a random `M x N` [batch] matrix with iid Gaussian entries.
  If `M >= N`, then the columns of `A` will be made almost dependent as follows:
```
L = random normal N x N-1 matrix, mean = 0, stddev = 1 / sqrt(N - 1)
B = random normal M x N-1 matrix, mean = 0, stddev = stddev.
G = (L B^H)^H, a random normal M x N matrix, living on N-1 dim hyperplane
E = a random normal M x N matrix, mean = 0, stddev = eps
mu = a constant M x N matrix, equal to the argument "mean"
A = G + E + mu
```
Args:
shape: Python list of integers.
Shape of the returned tensor. Must be at least length two.
mean: `Tensor` giving mean of normal to sample from.
stddev: `Tensor` giving stdev of normal to sample from.
dtype: `TensorFlow` `dtype` or numpy dtype
eps: Distance each column is perturbed from the low-dimensional subspace.
seed: Python integer seed for the RNG.
Returns:
`Tensor` with desired shape and dtype.
Raises:
ValueError: If `shape` is not at least length 2.
"""
dtype = dtypes.as_dtype(dtype)
if len(shape) < 2:
raise ValueError(
"Argument shape must be at least length 2. Found: %s" % shape)
# Shape is the final shape, e.g. [..., M, N]
shape = list(shape)
batch_shape = shape[:-2]
m, n = shape[-2:]
  # If there is only one column, or more columns than rows (M < N), the
  # columns are already linearly dependent, so plain iid entries suffice.
  if n < 2 or m < n:
return random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
# Shape of the matrix with only n - 1 columns that we will embed in higher
# dimensional space.
smaller_shape = batch_shape + [m, n - 1]
# Shape of the embedding matrix, mapping batch matrices
# from [..., N-1, M] to [..., N, M]
embedding_mat_shape = batch_shape + [n, n - 1]
# This stddev for the embedding_mat ensures final result has correct stddev.
stddev_mat = 1 / np.sqrt(n - 1)
with ops.name_scope("random_normal_correlated_columns"):
smaller_mat = random_normal(
smaller_shape, mean=0.0, stddev=stddev_mat, dtype=dtype, seed=seed)
if seed is not None:
seed += 1287
embedding_mat = random_normal(embedding_mat_shape, dtype=dtype, seed=seed)
embedded_t = math_ops.matmul(embedding_mat, smaller_mat, transpose_b=True)
embedded = array_ops.matrix_transpose(embedded_t)
mean_mat = array_ops.ones_like(embedded) * mean
return embedded + random_normal(shape, stddev=eps, dtype=dtype) + mean_mat
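# Worked shape example: for shape [4, 3] (M=4 >= N=3), the intermediates are
# L: [3, 2] and B: [4, 2], so G = (L B^H)^H is [4, 3] with rank at most 2,
# i.e. its three columns lie on a 2-dimensional subspace before the eps-sized
# perturbation E is added.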