gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
"""
Utilities for interpreting CSS from Stylers for formatting non-HTML outputs.
"""
import re
from typing import Dict, Optional
import warnings
class CSSWarning(UserWarning):
    """
    This CSS syntax cannot currently be parsed.
    """
    # Raised (not an error) whenever a declaration or size value is
    # unhandled; the offending property is then skipped or defaulted.
def _side_expander(prop_fmt: str):
def expand(self, prop, value: str):
tokens = value.split()
try:
mapping = self.SIDE_SHORTHANDS[len(tokens)]
except KeyError:
warnings.warn(f'Could not expand "{prop}: {value}"', CSSWarning)
return
for key, idx in zip(self.SIDES, mapping):
yield prop_fmt.format(key), tokens[idx]
return expand
class CSSResolver:
    """
    A callable for parsing and resolving CSS to atomic properties.
    """

    # Size-unit conversion table: unit -> (next unit, multiplier).
    # ``size_to_pt`` walks this chain until it reaches "pt".
    # "!!default" is the fallback used for unparseable sizes.
    UNIT_RATIOS = {
        "rem": ("pt", 12),
        "ex": ("em", 0.5),
        # 'ch':
        "px": ("pt", 0.75),
        "pc": ("pt", 12),
        "in": ("pt", 72),
        "cm": ("in", 1 / 2.54),
        "mm": ("in", 1 / 25.4),
        "q": ("mm", 0.25),
        "!!default": ("em", 0),
    }

    # font-size additionally accepts percentages and the CSS keyword
    # sizes; its default resolves to 1em (inherit the parent size).
    FONT_SIZE_RATIOS = UNIT_RATIOS.copy()
    FONT_SIZE_RATIOS.update(
        {
            "%": ("em", 0.01),
            "xx-small": ("rem", 0.5),
            "x-small": ("rem", 0.625),
            "small": ("rem", 0.8),
            "medium": ("rem", 1),
            "large": ("rem", 1.125),
            "x-large": ("rem", 1.5),
            "xx-large": ("rem", 2),
            "smaller": ("em", 1 / 1.2),
            "larger": ("em", 1.2),
            "!!default": ("em", 1),
        }
    )

    # margins/paddings also accept "none" (treated as 0pt)
    MARGIN_RATIOS = UNIT_RATIOS.copy()
    MARGIN_RATIOS.update({"none": ("pt", 0)})

    # border widths accept "none" plus the keyword widths thin/medium/thick
    BORDER_WIDTH_RATIOS = UNIT_RATIOS.copy()
    BORDER_WIDTH_RATIOS.update(
        {
            "none": ("pt", 0),
            "thick": ("px", 4),
            "medium": ("px", 2),
            "thin": ("px", 1),
            # Default: medium only if solid
        }
    )

    # token count of a side shorthand -> indices into the tokens, in
    # (top, right, bottom, left) order, per the CSS shorthand rules
    SIDE_SHORTHANDS = {
        1: [0, 0, 0, 0],
        2: [0, 1, 0, 1],
        3: [0, 1, 2, 1],
        4: [0, 1, 2, 3],
    }
    SIDES = ("top", "right", "bottom", "left")

    def __call__(
        self,
        declarations_str: str,
        inherited: Optional[Dict[str, str]] = None,
    ) -> Dict[str, str]:
        """
        Resolve the given declarations to atomic properties.

        Parameters
        ----------
        declarations_str : str
            A list of CSS declarations
        inherited : dict, optional
            Atomic properties indicating the inherited style context in which
            declarations_str is to be resolved. ``inherited`` should already
            be resolved, i.e. valid output of this method.

        Returns
        -------
        dict
            Atomic CSS 2.2 properties.

        Examples
        --------
        >>> resolve = CSSResolver()
        >>> inherited = {'font-family': 'serif', 'font-weight': 'bold'}
        >>> out = resolve('''
        ...     border-color: BLUE RED;
        ...     font-size: 1em;
        ...     font-size: 2em;
        ...     font-weight: normal;
        ...     font-weight: inherit;
        ...     ''', inherited)
        >>> sorted(out.items())  # doctest: +NORMALIZE_WHITESPACE
        [('border-bottom-color', 'blue'),
         ('border-left-color', 'red'),
         ('border-right-color', 'red'),
         ('border-top-color', 'blue'),
         ('font-family', 'serif'),
         ('font-size', '24pt'),
         ('font-weight', 'bold')]
        """
        props = dict(self.atomize(self.parse(declarations_str)))
        if inherited is None:
            inherited = {}
        # Order matters: resolve inherit/initial first, then font-size
        # (other units may be relative to it), then the remaining sizes.
        props = self._update_initial(props, inherited)
        props = self._update_font_size(props, inherited)
        return self._update_other_units(props)

    def _update_initial(
        self,
        props: Dict[str, str],
        inherited: Dict[str, str],
    ) -> Dict[str, str]:
        # 1. resolve inherited, initial
        for prop, val in inherited.items():
            # inherited values only fill gaps, never override declarations
            if prop not in props:
                props[prop] = val

        new_props = props.copy()
        for prop, val in props.items():
            if val == "inherit":
                val = inherited.get(prop, "initial")

            if val in ("initial", None):
                # we do not define a complete initial stylesheet
                del new_props[prop]
            else:
                new_props[prop] = val
        return new_props

    def _update_font_size(
        self,
        props: Dict[str, str],
        inherited: Dict[str, str],
    ) -> Dict[str, str]:
        # 2. resolve relative font size
        if props.get("font-size"):
            props["font-size"] = self.size_to_pt(
                props["font-size"],
                self._get_font_size(inherited),
                conversions=self.FONT_SIZE_RATIOS,
            )
        return props

    def _get_font_size(self, props: Dict[str, str]) -> Optional[float]:
        # Return the resolved font-size in points, or None when absent.
        if props.get("font-size"):
            font_size_string = props["font-size"]
            return self._get_float_font_size_from_pt(font_size_string)
        return None

    def _get_float_font_size_from_pt(self, font_size_string: str) -> float:
        # Expects an already-resolved size such as "12pt" (see size_to_pt).
        assert font_size_string.endswith("pt")
        return float(font_size_string.rstrip("pt"))

    def _update_other_units(self, props: Dict[str, str]) -> Dict[str, str]:
        # Resolve border widths, margins and paddings to pt, using the
        # (already resolved) font-size as the em reference.
        font_size = self._get_font_size(props)
        # 3. TODO: resolve other font-relative units
        for side in self.SIDES:
            prop = f"border-{side}-width"
            if prop in props:
                props[prop] = self.size_to_pt(
                    props[prop],
                    em_pt=font_size,
                    conversions=self.BORDER_WIDTH_RATIOS,
                )

            for prop in [f"margin-{side}", f"padding-{side}"]:
                if prop in props:
                    # TODO: support %
                    props[prop] = self.size_to_pt(
                        props[prop],
                        em_pt=font_size,
                        conversions=self.MARGIN_RATIOS,
                    )
        return props

    def size_to_pt(self, in_val, em_pt=None, conversions=UNIT_RATIOS):
        """
        Convert a CSS size value (e.g. "2em", "thin", "1.5cm") to a string
        in points ("...pt"), warning and using the "!!default" fallback of
        ``conversions`` for anything unparseable.
        """

        def _error():
            warnings.warn(f"Unhandled size: {repr(in_val)}", CSSWarning)
            return self.size_to_pt("1!!default", conversions=conversions)

        # split the numeric part from the unit/keyword part
        match = re.match(r"^(\S*?)([a-zA-Z%!].*)", in_val)
        if match is None:
            return _error()

        val, unit = match.groups()
        if val == "":
            # hack for 'large' etc.
            val = 1
        else:
            try:
                val = float(val)
            except ValueError:
                return _error()

        # repeatedly rewrite (val, unit) through the conversion table
        # until the unit is pt
        while unit != "pt":
            if unit == "em":
                if em_pt is None:
                    # no reference font size: treat em as rem
                    unit = "rem"
                else:
                    val *= em_pt
                    unit = "pt"
                continue

            try:
                unit, mul = conversions[unit]
            except KeyError:
                return _error()
            val *= mul

        # format with at most 5 decimals, dropping a trailing ".0"
        val = round(val, 5)
        if int(val) == val:
            size_fmt = f"{int(val):d}pt"
        else:
            size_fmt = f"{val:f}pt"
        return size_fmt

    def atomize(self, declarations):
        # Expand shorthand declarations via the expand_* methods below;
        # anything without an expander passes through unchanged.
        for prop, value in declarations:
            attr = "expand_" + prop.replace("-", "_")
            try:
                expand = getattr(self, attr)
            except AttributeError:
                yield prop, value
            else:
                for prop, value in expand(prop, value):
                    yield prop, value

    expand_border_color = _side_expander("border-{:s}-color")
    expand_border_style = _side_expander("border-{:s}-style")
    expand_border_width = _side_expander("border-{:s}-width")
    expand_margin = _side_expander("margin-{:s}")
    expand_padding = _side_expander("padding-{:s}")

    def parse(self, declarations_str: str):
        """
        Generates (prop, value) pairs from declarations.

        In a future version may generate parsed tokens from tinycss/tinycss2

        Parameters
        ----------
        declarations_str : str
        """
        for decl in declarations_str.split(";"):
            if not decl.strip():
                continue
            prop, sep, val = decl.partition(":")
            prop = prop.strip().lower()
            # TODO: don't lowercase case sensitive parts of values (strings)
            val = val.strip().lower()
            if sep:
                yield prop, val
            else:
                warnings.warn(
                    f"Ill-formatted attribute: expected a colon in {repr(decl)}",
                    CSSWarning,
                )
|
|
# -*- coding: utf-8 -*-
"""Unit testing for django-generic-confirmation."""
import time
from django import VERSION
from django import forms
from django.utils import timezone
from django.test import TestCase, override_settings
from django.test.client import Client
from django.contrib.auth.models import User, Group
from django.db import models
from django.core import mail
from django.core.urlresolvers import reverse
from django.conf import settings
from django.template import Template, Context, TemplateDoesNotExist
from django.http.request import QueryDict
from generic_confirmation.fields import PickledObjectField
from generic_confirmation.forms import DeferredForm, ConfirmationForm
from generic_confirmation.models import DeferredAction
from generic_confirmation.main import LONG, SHORT, SHORT_UPPER
from generic_confirmation import signals
# Django >= 1.9 returns relative redirect URLs from the test client;
# older versions prefix them with the test server origin.
if VERSION < (1, 9):
    TEST_SERVER_PREFIX = "http://testserver"
else:
    TEST_SERVER_PREFIX = ""

# Python 2/3 compatibility: ``unicode`` only exists on Python 2.
try:
    _u = unicode
except NameError:
    _u = str
class TokenTestForm(DeferredForm):
    # ('a', 1): a one-character alphabet of length 1, so exactly one token
    # value is possible -- used below to force a token collision.
    token_format = ('a', 1)

    class Meta:
        model = User
        fields = ('email',)
class EmailChangeForm(DeferredForm):
    # defers a change of ``User.email`` until the token is confirmed
    class Meta:
        model = User
        fields = ('email',)
class GroupNameChangeForm(DeferredForm):
    # uses the SHORT token format instead of the default LONG one
    token_format = SHORT

    class Meta:
        model = Group
        fields = ('name',)
class UserCreateForm(DeferredForm):
    # uses the SHORT_UPPER token format; defers creation of a new User
    token_format = SHORT_UPPER

    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class GroupChangeForm(DeferredForm):
    # NOTE: despite the name this edits ``User.groups`` (a m2m field on
    # User), not the Group model itself
    class Meta:
        model = User
        fields = ('groups',)
class EmailChangeWithMailForm(DeferredForm):
    """Email change form that mails the confirmation token to the new address."""

    def send_notification(self, user=None, instance=None):
        # ``instance`` is the saved DeferredAction carrying the token
        mail.send_mail("please confirm your new address", "Please confirm %s" % instance.token,
            settings.DEFAULT_FROM_EMAIL, [self.cleaned_data['email'],])

    class Meta:
        model = User
        fields = ('email',)
class TokenGeneratorTestCase(TestCase):
    """Token generation: exhausting the token space must raise."""

    def setUp(self):
        self.user1 = User.objects.create_user('userX', 'userX@example.com', '123456')
        self.user2 = User.objects.create_user('userY', 'userY@example.com', '123456')

    def testCollision(self):
        """Generating a token when none are left is a fatal error."""
        form1 = TokenTestForm({'email': 'xxx@example.com'}, instance=self.user1)
        self.assertTrue(form1.is_valid())
        form1.save()  # consumes the only possible token of TokenTestForm
        form2 = TokenTestForm({'email': 'yyy@example.com'}, instance=self.user2)
        self.assertTrue(form2.is_valid())
        # the token format only allows one possible token, so the second
        # attempt to generate one must fail because it's a not recoverable
        # error for us
        self.assertRaises(Exception, form2.save)
class DeferFormTestCase(TestCase):
    """Core DeferredForm behaviour: save() defers the change until the
    returned token is confirmed."""

    def setUp(self):
        self.user = User.objects.create_user('user1', 'user1@example.com', '123456')

    def testEmailChange(self):
        """An email change is only applied after the token is confirmed."""
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), LONG[1])
        # refetch the user object from the db before checking the email,
        # because django objects don't reflect db changes done elsewhere
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(user_obj.email, 'user1@example.com')
        # ========================
        # in practice this is the boundary where code execution is deferred
        # ========================
        DeferredAction.objects.confirm(token=defered)
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(user_obj.email, 'xxx@example.com')

    def testUserCreation(self):
        """A deferred User creation must not hit the db before confirm."""
        form = UserCreateForm({'username': 'user2', 'email': 'user2@example.com', 'password': '123456'})
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), SHORT_UPPER[1])
        # at this point the object must not exist
        self.assertRaises(User.DoesNotExist, User.objects.get, username='user2')
        # confirmation executes the deferred save
        DeferredAction.objects.confirm(token=defered)
        user_obj = User.objects.get(username='user2')
        self.assertEqual(user_obj.email, 'user2@example.com')

    def testConfirmViaForm(self):
        """Confirming through ConfirmationForm applies the deferred change."""
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), LONG[1])
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(user_obj.email, 'user1@example.com')
        confirm_form = ConfirmationForm({'token': defered})
        self.assertTrue(confirm_form.is_valid())
        obj = confirm_form.save()
        # save() returns False for failure, something truthy on success
        self.assertNotEqual(obj, False)
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(user_obj.email, 'xxx@example.com')

    def testConfirmBogus(self):
        """Confirming an unknown token returns False."""
        result = DeferredAction.objects.confirm('some-bogus-token-1')
        self.assertEqual(result, False)

    def testConfirmBogusViaForm(self):
        """ConfirmationForm treats unknown tokens as invalid input."""
        confirm_form = ConfirmationForm({'token': 'some-bogus-token-2'})
        self.assertFalse(confirm_form.is_valid())

    def testCustomValidUntil(self):
        """A custom valid_until passed to save() is stored on the action."""
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        valid_date = timezone.now() + timezone.timedelta(hours=1)
        defered = form.save(valid_until=valid_date)
        obj = DeferredAction.objects.get(token=defered)
        # the token must be valid until ``valid_date``
        self.assertEqual(obj.valid_until, valid_date)

    def testConfirmExpired(self):
        """Confirming a token past its valid_until returns False."""
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        valid_date = timezone.now() - timezone.timedelta(hours=1)
        defered = form.save(valid_until=valid_date)
        result = DeferredAction.objects.confirm(defered)
        self.assertEqual(result, False)

    def testSaveDescription(self):
        """A description passed to save() is stored on the action."""
        test_description = "This is a Test Description %s" % 'user2@example.com'
        form = UserCreateForm({'username': 'user2', 'email': 'user2@example.com', 'password': '123456'})
        self.assertTrue(form.is_valid())
        defered = form.save(description=test_description)
        # at this point the object must not exist
        self.assertRaises(User.DoesNotExist, User.objects.get, username='user2')
        action = DeferredAction.objects.get(token=defered)
        self.assertEqual(action.description, test_description)

    def testSaveUser(self):
        """A user passed to save() is stored on the action."""
        form = UserCreateForm({'username': 'user2', 'email': 'user2@example.com', 'password': '123456'})
        self.assertTrue(form.is_valid())
        defered = form.save(user=self.user)
        # at this point the object must not exist
        self.assertRaises(User.DoesNotExist, User.objects.get, username='user2')
        action = DeferredAction.objects.get(token=defered)
        self.assertEqual(action.user, self.user)
class ManyToManyTestCase(TestCase):
    """Deferred changes to many-to-many fields."""

    def setUp(self):
        self.user = User.objects.create_user('user3', 'user3@example.com', '123456')
        self.group1 = Group.objects.create(name='first_test_group')
        self.group2 = Group.objects.create(name='second_test_group')

    def testGroupChange(self):
        """Group membership changes are only applied after confirmation."""
        form = GroupChangeForm({'groups': [self.group1.pk, self.group2.pk]}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), LONG[1])
        # refetch the user object from the db before checking the groups,
        # because django objects don't reflect db changes done elsewhere
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(list(user_obj.groups.all()), [])
        # ========================
        # in practice this is the boundary where code execution is deferred
        # ========================
        DeferredAction.objects.confirm(token=defered)
        user_obj = User.objects.get(username=self.user.username)
        self.assertEqual(list(user_obj.groups.all()), [self.group1, self.group2])
class SignalTestCase(TestCase):
    """Signals emitted when confirmations are requested and confirmed."""

    def setUp(self):
        self.user = User.objects.create_user('user4', 'user4@example.com', '123456')
        self.group = Group.objects.create(name='test_group_one')

    def testCatchAllListener(self):
        """A listener without a sender receives requests for every model."""
        def dummy_listener(sender, instance, testcase=self, **kwargs):
            # assertions run inside the signal dispatch
            testcase.assertEqual(instance.__class__, DeferredAction)
        signals.confirmation_required.connect(dummy_listener)
        # trigger confirmation requests for two different models
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), LONG[1])
        form = GroupNameChangeForm({'name': 'new_name'}, instance=self.group)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), SHORT[1])

    def testClassOnlyListener(self):
        """A listener bound to sender=User only sees User requests."""
        def dummy_listener(sender, instance, testcase=self, **kwargs):
            testcase.assertEqual(instance.__class__, DeferredAction)
            testcase.assertEqual(sender, User)
        signals.confirmation_required.connect(dummy_listener, sender=User)
        # trigger confirmation requests for two different models
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), LONG[1])
        form = GroupNameChangeForm({'name': 'new_name'}, instance=self.group)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), SHORT[1])

    def testUserPassing(self):
        """The user passed to save() is forwarded to the signal."""
        def dummy_listener(sender, instance, user, testcase=self, **kwargs):
            testcase.assertEqual(instance.__class__, DeferredAction)
            testcase.assertEqual(user, self.user)
        signals.confirmation_required.connect(dummy_listener)
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save(self.user)
        self.assertEqual(len(defered), LONG[1])

    def testNoUser(self):
        """Without a user argument the signal receives user=None."""
        def dummy_listener(sender, instance, user, testcase=self, **kwargs):
            testcase.assertEqual(instance.__class__, DeferredAction)
            testcase.assertEqual(user, None)
        signals.confirmation_required.connect(dummy_listener)
        form = GroupNameChangeForm({'name': 'new_name'}, instance=self.group)
        self.assertTrue(form.is_valid())
        defered = form.save()
        self.assertEqual(len(defered), SHORT[1])

    def testChangeConfirmedSignal(self):
        """change_confirmed fires with the changed model class as sender."""
        def dummy_listener(sender, instance, testcase=self, **kwargs):
            testcase.assertEqual(instance.__class__, DeferredAction)
            testcase.assertEqual(sender, User)
        signals.change_confirmed.connect(dummy_listener)
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        defered = form.save(self.user)
        self.assertEqual(len(defered), LONG[1])
        self.assertTrue(bool(DeferredAction.objects.confirm(defered)))
class NotificationTestCase(TestCase):
    """Notification hooks on DeferredForm."""

    def setUp(self):
        self.user = User.objects.create_user('user4', 'user4@example.com', '123456')

    def testMailNotification(self):
        """send_notification() mails the token when the form is saved."""
        form = EmailChangeWithMailForm({'email': 'new@example.com'}, instance=self.user)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)
        token = form.save(self.user)
        self.assertEqual(len(mail.outbox), 1)
        # make sure the right token is in the body of the message
        self.assertIn(token, mail.outbox[0].body)
class TemplatetagTestCase(TestCase):
    """The ``pending_confirmations`` template tag."""

    def setUp(self):
        self.user5 = User.objects.create_user('user5', 'user5@example.com', '123456')
        self.user6 = User.objects.create_user('user6', 'user6@example.com', '123456')
        self.user7 = User.objects.create_user('user7', 'user7@example.com', '123456')

    def testHasPendingTokens(self):
        """An object with one pending confirmation renders as "1"."""
        # generate a token
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user5)
        self.assertTrue(form.is_valid())
        form.save()
        t = Template("""{% load generic_confirmation_tags %}{% pending_confirmations object %}""")
        html = t.render(Context({'object': self.user5}))
        self.assertEqual(html, "1")

    def testHasNoPendingTokens(self):
        """An object without confirmations renders as "0"."""
        t = Template("""{% load generic_confirmation_tags %}{% pending_confirmations object %}""")
        html = t.render(Context({'object': self.user6}))
        self.assertEqual(html, "0")

    def testHasAlreadyExpiredToken(self):
        """Expired confirmations do not count as pending."""
        # generate an already-expired token
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=self.user7)
        self.assertTrue(form.is_valid())
        valid_date = timezone.now() - timezone.timedelta(hours=1)
        form.save(valid_until=valid_date)
        t = Template("""{% load generic_confirmation_tags %}{% pending_confirmations object %}""")
        html = t.render(Context({'object': self.user7}))
        self.assertEqual(html, "0")
@override_settings(ROOT_URLCONF="generic_confirmation.tests.urls")
class ViewTestCase(TestCase):
    """
    Without bundled templates this does not make too much sense, but at
    least we get better test coverage by running the views and catching
    the expected exceptions.
    """

    def setUp(self):
        self.client = Client()
        self.user8 = User.objects.create_user('user8', 'user8@example.com', '123456')
        self.user9 = User.objects.create_user('user9', 'user9@example.com', '123456')

    def _make_token(self, user):
        """Create a pending email change for ``user`` and return its token."""
        form = EmailChangeForm({'email': 'xxx@example.com'}, instance=user)
        self.assertTrue(form.is_valid())
        return form.save()

    def testBogusConfirmByGet(self):
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.get,
                          reverse('generic_confirmation_by_get', kwargs={'token': 'somebogustoken3'}))

    def testValidConfirmByGet(self):
        defered = self._make_token(self.user8)
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.get,
                          reverse('generic_confirmation_by_get', kwargs={'token': defered}))

    def testValidConfirmByGetWithCustomSuccessMessage(self):
        defered = self._make_token(self.user8)
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.get,
                          reverse('generic_confirmation_by_get_with_message', kwargs={'token': defered}))

    def testValidConfirmByGetWithCustomSuccessUrl(self):
        defered = self._make_token(self.user8)
        response = self.client.get(reverse('generic_confirmation_by_get_with_url', kwargs={'token': defered}))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], TEST_SERVER_PREFIX + '/success/')

    def testValidConfirmByGetWithCustomSuccessUrlAndMessage(self):
        defered = self._make_token(self.user8)
        response = self.client.get(reverse('generic_confirmation_by_get_with_url_and_message', kwargs={'token': defered}))
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], TEST_SERVER_PREFIX + '/success/')

    def testConfirmByFormGET(self):
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.get,
                          reverse('generic_confirmation_by_form'))

    def testBogusConfirmByFormPOST(self):
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.post,
                          reverse('generic_confirmation_by_form'), {'token': 'some-bogus-token-4'})

    def testValidConfirmByFormPOST(self):
        defered = self._make_token(self.user9)
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.post,
                          reverse('generic_confirmation_by_form'), {'token': defered})

    def testValidConfirmByFormPOSTWithCustomSuccessMessage(self):
        defered = self._make_token(self.user9)
        # currently there is no bundled template;
        # should be fixed in a future version
        self.assertRaises(TemplateDoesNotExist, self.client.post,
                          reverse('generic_confirmation_by_form_with_message'), {'token': defered})

    def testValidConfirmByFormPOSTWithCustomSuccessUrl(self):
        defered = self._make_token(self.user9)
        response = self.client.post(reverse('generic_confirmation_by_form_with_url'), {'token': defered})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], TEST_SERVER_PREFIX + '/success/')

    def testValidConfirmByFormPOSTWithCustomSuccessUrlAndMessage(self):
        defered = self._make_token(self.user9)
        response = self.client.post(reverse('generic_confirmation_by_form_with_url_and_message'), {'token': defered})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], TEST_SERVER_PREFIX + '/success/')
# taken (but modified) from djangosnippets.org/snippets/513 by obeattie
class TestingModel(models.Model):
    # arbitrary pickled payload exercised by PickledObjectFieldTests below
    pickle_field = PickledObjectField()
class TestCustomDataType(str):
    # str subclass used to check that custom types survive pickling
    pass
class PickledObjectFieldTests(TestCase):
    """Round-trips and query lookups on PickledObjectField."""

    def setUp(self):
        # a representative mix of picklable values
        self.testing_data = (
            {1: 1, 2: 4, 3: 6, 4: 8, 5: 10},
            'Hello World',
            (1, 2, 3, 4, 5),
            [1, 2, 3, 4, 5],
            TestCustomDataType('Hello World'),
            _u(u"\xf3"),  # regression test for non-latin1 encodings in pickled data
            QueryDict("email=test@example.com&test=1&test=2"),  # mostly used datatype
        )
        return super(PickledObjectFieldTests, self).setUp()

    def testDataIntegrity(self):
        """Tests that data remains the same when saved to and fetched from the database."""
        for value in self.testing_data:
            model_test = TestingModel(pickle_field=value)
            model_test.save()
            model_test = TestingModel.objects.get(id__exact=model_test.id)
            self.assertEqual(value, model_test.pickle_field)
            model_test.delete()

    def testExactLookups(self):
        """Tests that exact lookups can be performed on data once stored in the database."""
        for value in self.testing_data:
            model_test = TestingModel(pickle_field=value)
            model_test.save()
            self.assertEqual(value, TestingModel.objects.get(pickle_field__exact=value).pickle_field)
            model_test.delete()

    def testInLookups(self):
        """Tests that ``__in`` lookups can be performed on data once stored in the database."""
        for value in self.testing_data:
            model_test = TestingModel(pickle_field=value)
            model_test.save()
            self.assertEqual(value, TestingModel.objects.filter(pickle_field__in=[value])[0].pickle_field)
            model_test.delete()

    def testBogusLookup(self):
        """Tests that bogus lookups raise a TypeError."""
        for value in self.testing_data:
            model_test = TestingModel(pickle_field=value)
            model_test.save()
            self.assertRaises(TypeError, TestingModel.objects.filter, pickle_field__contains=[value])
            model_test.delete()
class TestingModelForm(DeferredForm):
    # deferred form over TestingModel, used by the prefix tests below
    class Meta:
        model = TestingModel
        exclude = ()
class FormPrefixTests(TestCase):
    def testFormPrefix(self):
        """
        Testing the situation when a deferred form has a prefix.  Prefixes
        also have to be saved, otherwise the form input will not be accepted
        when the save operation is resumed.
        """
        PREFIX = 'test_prefix'
        model = TestingModel.objects.create(pickle_field='none')
        model_form = TestingModelForm(instance=model, prefix=PREFIX)
        # build a POST dict keyed by the *prefixed* field names
        data = {f.html_name: str(f.value() + '-changed') for f in model_form}
        # without the prefix the data must not validate ...
        unprefixed_form = TestingModelForm(data, instance=model)
        self.assertFalse(unprefixed_form.is_valid())
        # ... but with the prefix it does
        model_form = TestingModelForm(data, instance=model, prefix=PREFIX)
        self.assertTrue(model_form.is_valid())
        self.assertTrue(model_form.has_changed())
        token = model_form.save()
        form = ConfirmationForm({'token': token})
        self.assertTrue(form.is_valid())
        action = DeferredAction.objects.get(token=token)
        resume_form = action.get_resume_form()
        self.assertEqual(
            resume_form.prefix, model_form.prefix,
            msg="Resume form has to have the same prefix as initial form")
        # the change is still deferred at this point
        model = TestingModel.objects.get(pk=model.pk)
        self.assertEqual(model.pickle_field, 'none')
        # confirming applies it
        form.save()
        model = TestingModel.objects.get(pk=model.pk)
        self.assertEqual(model.pickle_field, 'none-changed')
|
|
from rpython.memory.gctransform.transform import GCTransformer, mallocHelpers
from rpython.memory.gctransform.support import find_gc_ptrs_in_type, \
get_rtti, _static_deallocator_body_for_type, LLTransformerOp, ll_call_destructor
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.translator.backendopt.support import var_needsgc
from rpython.rtyper import rmodel
from rpython.memory.gcheader import GCHeaderBuilder
from rpython.rlib.rarithmetic import ovfcheck
from rpython.rtyper.rbuiltin import gen_cast
import sys
# NOTE(review): module-level scratch dict; not populated in this chunk
counts = {}

# signature of the deallocator helpers: take one Address, return nothing
ADDRESS_VOID_FUNC = lltype.FuncType([llmemory.Address], lltype.Void)
class RefcountingGCTransformer(GCTransformer):
    # the malloc helpers below raw_memclear the allocated memory,
    # i.e. it comes back zero-filled
    malloc_zero_filled = True

    # header prepended to every GC object: a single signed refcount field
    HDR = lltype.Struct("header", ("refcount", lltype.Signed))
def __init__(self, translator):
    """Build the low-level refcounting helpers (incref/decref, mallocs,
    identityhash) and register them as init-time helper graphs."""
    super(RefcountingGCTransformer, self).__init__(translator, inline=True)
    self.gcheaderbuilder = GCHeaderBuilder(self.HDR)
    gc_header_offset = self.gcheaderbuilder.size_gc_header
    self.deallocator_graphs_needing_transforming = []
    # create incref, etc graph
    memoryError = MemoryError()  # NOTE(review): appears unused below
    HDRPTR = lltype.Ptr(self.HDR)

    def ll_incref(adr):
        # bump the refcount; NULL addresses are ignored
        if adr:
            gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
            gcheader.refcount = gcheader.refcount + 1

    def ll_decref(adr, dealloc):
        # drop one reference and call ``dealloc`` when it reaches zero
        if adr:
            gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
            refcount = gcheader.refcount - 1
            gcheader.refcount = refcount
            if refcount == 0:
                dealloc(adr)

    def ll_decref_simple(adr):
        # decref for objects that need no special deallocator: free directly
        if adr:
            gcheader = llmemory.cast_adr_to_ptr(adr - gc_header_offset, HDRPTR)
            refcount = gcheader.refcount - 1
            if refcount == 0:
                llop.gc_free(lltype.Void, adr)
            else:
                gcheader.refcount = refcount

    def ll_no_pointer_dealloc(adr):
        # deallocator used when the type holds no gc pointers
        llop.gc_free(lltype.Void, adr)

    mh = mallocHelpers()
    mh.allocate = llmemory.raw_malloc

    def ll_malloc_fixedsize(size):
        # allocate header + payload, zero it, return the address past
        # the header
        size = gc_header_offset + size
        result = mh._ll_malloc_fixedsize(size)
        llmemory.raw_memclear(result, size)
        result += gc_header_offset
        return result

    def ll_malloc_varsize_no_length(length, size, itemsize):
        # total size is computed with overflow checks; overflow is
        # reported as MemoryError
        try:
            fixsize = gc_header_offset + size
            varsize = ovfcheck(itemsize * length)
            tot_size = ovfcheck(fixsize + varsize)
        except OverflowError:
            raise MemoryError()
        result = mh._ll_malloc_fixedsize(tot_size)
        llmemory.raw_memclear(result, tot_size)
        result += gc_header_offset
        return result
    mh.ll_malloc_varsize_no_length = ll_malloc_varsize_no_length
    ll_malloc_varsize = mh.ll_malloc_varsize

    def ll_identityhash(addr):
        # the identity hash is simply the address cast to an integer
        h = llmemory.cast_adr_to_int(addr)
        return h

    if self.translator:
        # register each helper as a graph callable from transformed code
        self.increfptr = self.inittime_helper(
            ll_incref, [llmemory.Address], lltype.Void)
        self.decref_ptr = self.inittime_helper(
            ll_decref, [llmemory.Address, lltype.Ptr(ADDRESS_VOID_FUNC)],
            lltype.Void)
        self.decref_simple_ptr = self.inittime_helper(
            ll_decref_simple, [llmemory.Address], lltype.Void)
        self.no_pointer_dealloc_ptr = self.inittime_helper(
            ll_no_pointer_dealloc, [llmemory.Address], lltype.Void)
        self.malloc_fixedsize_ptr = self.inittime_helper(
            ll_malloc_fixedsize, [lltype.Signed], llmemory.Address)
        self.malloc_varsize_no_length_ptr = self.inittime_helper(
            ll_malloc_varsize_no_length, [lltype.Signed]*3, llmemory.Address)
        self.malloc_varsize_ptr = self.inittime_helper(
            ll_malloc_varsize, [lltype.Signed]*4, llmemory.Address)
        self.identityhash_ptr = self.inittime_helper(
            ll_identityhash, [llmemory.Address], lltype.Signed,
            inline=False)
        self.mixlevelannotator.finish()
        self.mixlevelannotator.backend_optimize()
    # cache graphs:
    self.decref_funcptrs = {}
    self.static_deallocator_funcptrs = {}
    self.dynamic_deallocator_funcptrs = {}
    self.queryptr2dynamic_deallocator_funcptr = {}
def finish_helpers(self, **kwds):
    # after the base class finishes, run the malloc-removal optimization
    # over every distinct static deallocator graph generated so far
    GCTransformer.finish_helpers(self, **kwds)
    from rpython.translator.backendopt.malloc import remove_mallocs
    seen = {}
    graphs = []
    for fptr in self.static_deallocator_funcptrs.itervalues():
        graph = fptr._obj.graph
        if graph in seen:
            continue
        seen[graph] = True
        graphs.append(graph)
    remove_mallocs(self.translator, graphs)
def var_needs_set_transform(self, var):
    # only variables that hold gc-managed pointers need the transform
    return var_needsgc(var)
def push_alive(self, var, llops):
    # emit a direct call to the incref helper for ``var``
    v_adr = gen_cast(llops, llmemory.Address, var)
    llops.genop("direct_call", [self.increfptr, v_adr])
def pop_alive(self, var, llops):
    # emit a decref; use the cheap no-deallocator helper when possible,
    # otherwise pass the type's dynamic deallocator along
    PTRTYPE = var.concretetype
    v_adr = gen_cast(llops, llmemory.Address, var)
    dealloc_fptr = self.dynamic_deallocation_funcptr_for_type(PTRTYPE.TO)
    if dealloc_fptr is self.no_pointer_dealloc_ptr.value:
        # simple case
        llops.genop("direct_call", [self.decref_simple_ptr, v_adr])
    else:
        cdealloc_fptr = rmodel.inputconst(
            lltype.typeOf(dealloc_fptr), dealloc_fptr)
        llops.genop("direct_call", [self.decref_ptr, v_adr, cdealloc_fptr])
    def gct_fv_gc_malloc(self, hop, flags, TYPE, c_size):
        # Fixed-size allocation: call the malloc helper built at init
        # time; the result is the raw address of the new object.
        v_raw = hop.genop("direct_call", [self.malloc_fixedsize_ptr, c_size],
                          resulttype=llmemory.Address)
        return v_raw
def gct_fv_gc_malloc_varsize(self, hop, flags, TYPE, v_length, c_const_size, c_item_size,
c_offset_to_length):
if c_offset_to_length is None:
v_raw = hop.genop("direct_call",
[self.malloc_varsize_no_length_ptr, v_length,
c_const_size, c_item_size],
resulttype=llmemory.Address)
else:
v_raw = hop.genop("direct_call",
[self.malloc_varsize_ptr, v_length,
c_const_size, c_item_size, c_offset_to_length],
resulttype=llmemory.Address)
return v_raw
    def gct_gc_deallocate(self, hop):
        # Lower the 'gc_deallocate' operation: look up (building it if
        # needed) the dynamic deallocator for the static type and call it
        # on the object's address.
        TYPE = hop.spaceop.args[0].value
        v_addr = hop.spaceop.args[1]
        dealloc_fptr = self.dynamic_deallocation_funcptr_for_type(TYPE)
        cdealloc_fptr = rmodel.inputconst(
            lltype.typeOf(dealloc_fptr), dealloc_fptr)
        hop.genop("direct_call", [cdealloc_fptr, v_addr])
    def consider_constant(self, TYPE, value):
        # Attach a GC header to every prebuilt (top-level) GC object and
        # give it a huge refcount so it is effectively immortal: decrefs
        # can never bring it to zero.
        if value is not lltype.top_container(value):
            return
        if isinstance(TYPE, (lltype.GcStruct, lltype.GcArray)):
            p = value._as_ptr()
            if not self.gcheaderbuilder.get_header(p):
                hdr = self.gcheaderbuilder.new_header(p)
                hdr.refcount = sys.maxint // 2
    def static_deallocation_funcptr_for_type(self, TYPE):
        """Return (building and caching it if needed) the deallocator for an
        object statically known to be of exactly TYPE: runs the RPython-level
        destructor, if the rtti declares one, then frees the object."""
        if TYPE in self.static_deallocator_funcptrs:
            return self.static_deallocator_funcptrs[TYPE]
        #print_call_chain(self)

        rtti = get_rtti(TYPE)
        if rtti is not None and hasattr(rtti._obj, 'destructor_funcptr'):
            destrptr = rtti._obj.destructor_funcptr
            DESTR_ARG = lltype.typeOf(destrptr).TO.ARGS[0]
        else:
            destrptr = None
            DESTR_ARG = None

        # No destructor and no embedded GC pointers: the shared no-op
        # deallocator is enough.
        if destrptr is None and not find_gc_ptrs_in_type(TYPE):
            p = self.no_pointer_dealloc_ptr.value
            self.static_deallocator_funcptrs[TYPE] = p
            return p
        if destrptr is not None:
            # With a destructor: save/restore the current exception around
            # the call, temporarily bump the refcount to 1 so the destructor
            # can safely manipulate the object, and only free if it did not
            # resurrect the object.
            body = '\n'.join(_static_deallocator_body_for_type('v', TYPE, 3))
            src = """
def ll_deallocator(addr):
    exc_instance = llop.gc_fetch_exception(EXC_INSTANCE_TYPE)
    try:
        v = cast_adr_to_ptr(addr, PTR_TYPE)
        gcheader = cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
        # refcount is at zero, temporarily bump it to 1:
        gcheader.refcount = 1
        destr_v = cast_pointer(DESTR_ARG, v)
        ll_call_destructor(destrptr, destr_v, %r)
        refcount = gcheader.refcount - 1
        gcheader.refcount = refcount
        if refcount == 0:
%s
            llop.%s_free(lltype.Void, addr)
    except:
        pass
    llop.gc_restore_exception(lltype.Void, exc_instance)
    pop_alive(exc_instance)
    # XXX layering of exceptiontransform versus gcpolicy
""" % (TYPE.__name__, body, TYPE._gckind)
        else:
            # No destructor: just decref the embedded GC pointers and free.
            call_del = None
            body = '\n'.join(_static_deallocator_body_for_type('v', TYPE))
            src = ('def ll_deallocator(addr):\n    v = cast_adr_to_ptr(addr, PTR_TYPE)\n' +
                   body + '\n    llop.%s_free(lltype.Void, addr)\n' % (TYPE._gckind,))
        # Namespace for exec'ing the generated source.
        d = {'pop_alive': LLTransformerOp(self.pop_alive),
             'llop': llop,
             'lltype': lltype,
             'destrptr': destrptr,
             'gc_header_offset': self.gcheaderbuilder.size_gc_header,
             'cast_adr_to_ptr': llmemory.cast_adr_to_ptr,
             'cast_pointer': lltype.cast_pointer,
             'PTR_TYPE': lltype.Ptr(TYPE),
             'DESTR_ARG': DESTR_ARG,
             'EXC_INSTANCE_TYPE': self.translator.rtyper.exceptiondata.lltype_of_exception_value,
             'll_call_destructor': ll_call_destructor,
             'HDRPTR':lltype.Ptr(self.HDR)}
        exec src in d
        this = d['ll_deallocator']
        fptr = self.annotate_finalizer(this, [llmemory.Address], lltype.Void)
        self.static_deallocator_funcptrs[TYPE] = fptr
        # Make sure deallocators for all reachable GC types exist too.
        for p in find_gc_ptrs_in_type(TYPE):
            self.static_deallocation_funcptr_for_type(p.TO)
        return fptr
    def dynamic_deallocation_funcptr_for_type(self, TYPE):
        """Return (building and caching it if needed) a deallocator that
        dispatches on the object's *runtime* type via the rtti query
        function, for types that may have subclasses."""
        if TYPE in self.dynamic_deallocator_funcptrs:
            return self.dynamic_deallocator_funcptrs[TYPE]
        #print_call_chain(self)

        rtti = get_rtti(TYPE)
        if rtti is None:
            # No runtime type info: static and dynamic deallocation agree.
            p = self.static_deallocation_funcptr_for_type(TYPE)
            self.dynamic_deallocator_funcptrs[TYPE] = p
            return p

        queryptr = rtti._obj.query_funcptr
        if queryptr._obj in self.queryptr2dynamic_deallocator_funcptr:
            # Types sharing a query function share one deallocator.
            return self.queryptr2dynamic_deallocator_funcptr[queryptr._obj]
        RTTI_PTR = lltype.Ptr(lltype.RuntimeTypeInfo)
        QUERY_ARG_TYPE = lltype.typeOf(queryptr).TO.ARGS[0]
        gc_header_offset = self.gcheaderbuilder.size_gc_header
        HDRPTR = lltype.Ptr(self.HDR)
        def ll_dealloc(addr):
            # bump refcount to 1
            # (so the query/destructor machinery can touch the object)
            gcheader = llmemory.cast_adr_to_ptr(addr - gc_header_offset, HDRPTR)
            gcheader.refcount = 1
            v = llmemory.cast_adr_to_ptr(addr, QUERY_ARG_TYPE)
            rtti = queryptr(v)
            gcheader.refcount = 0
            llop.gc_call_rtti_destructor(lltype.Void, rtti, addr)
        fptr = self.annotate_helper(ll_dealloc, [llmemory.Address], lltype.Void)
        self.dynamic_deallocator_funcptrs[TYPE] = fptr
        self.queryptr2dynamic_deallocator_funcptr[queryptr._obj] = fptr
        return fptr
    def gct_gc_identityhash(self, hop):
        # Lower 'gc_identityhash': cast the object to a raw address and
        # call the identityhash helper, writing into the op's result var.
        v_obj = hop.spaceop.args[0]
        v_adr = hop.genop("cast_ptr_to_adr", [v_obj],
                          resulttype=llmemory.Address)
        hop.genop("direct_call", [self.identityhash_ptr, v_adr],
                  resultvar=hop.spaceop.result)
    def gcheader_initdata(self, obj):
        # Initial header contents for a prebuilt object: the header that
        # consider_constant() attached to its top-level container.
        top = lltype.top_container(obj)
        return self.gcheaderbuilder.header_of_object(top)._obj
|
|
import base64
import json
import logging
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from consts.event_type import EventType
from controllers.api.api_status_controller import ApiStatusController
from datafeeds.datafeed_base import DatafeedBase
from models.event_team import EventTeam
from models.sitevar import Sitevar
from parsers.fms_api.fms_api_awards_parser import FMSAPIAwardsParser
from parsers.fms_api.fms_api_event_alliances_parser import FMSAPIEventAlliancesParser
from parsers.fms_api.fms_api_event_list_parser import FMSAPIEventListParser
from parsers.fms_api.fms_api_event_rankings_parser import FMSAPIEventRankingsParser
from parsers.fms_api.fms_api_match_parser import FMSAPIHybridScheduleParser, FMSAPIMatchDetailsParser
from parsers.fms_api.fms_api_team_details_parser import FMSAPITeamDetailsParser
class DatafeedFMSAPI(object):
    """Authenticated client for the FIRST FMS API (v1.0 or v2.0).

    Fetches JSON from frc-api.firstinspires.org with HTTP Basic auth, hands
    each payload to the matching FMSAPI*Parser, and keeps the
    'apistatus.fmsapi_down' sitevar in sync with the upstream API's health.
    """

    # FMS championship-subdivision codes -> full division names used by TBA.
    EVENT_SHORT_EXCEPTIONS = {
        'arc': 'archimedes',
        'cars': 'carson',
        'carv': 'carver',
        'cur': 'curie',
        'gal': 'galileo',
        'hop': 'hopper',
        'new': 'newton',
        'tes': 'tesla',
    }

    # 2015+ subdivision code -> the division event whose award list holds
    # that subdivision's awards (8 subdivisions share 4 award divisions).
    SUBDIV_TO_DIV = {
        'arc': 'cmp-arte',
        'cars': 'cmp-gaca',
        'carv': 'cmp-cuca',
        'cur': 'cmp-cuca',
        'gal': 'cmp-gaca',
        'hop': 'cmp-neho',
        'new': 'cmp-neho',
        'tes': 'cmp-arte',
    }

    def __init__(self, version):
        """Sets up auth and the URL patterns for the requested API version.

        Args:
            version: 'v1.0' or 'v2.0'.

        Raises:
            Exception: if the fmsapi.secrets sitevar is missing or the
                version string is unknown.
        """
        fms_api_secrets = Sitevar.get_by_id('fmsapi.secrets')
        if fms_api_secrets is None:
            raise Exception("Missing sitevar: fmsapi.secrets. Can't access FMS API.")

        fms_api_username = fms_api_secrets.contents['username']
        fms_api_authkey = fms_api_secrets.contents['authkey']
        # HTTP Basic auth token: base64("username:authkey")
        self._fms_api_authtoken = base64.b64encode('{}:{}'.format(fms_api_username, fms_api_authkey))

        self._is_down_sitevar = Sitevar.get_by_id('apistatus.fmsapi_down')
        if not self._is_down_sitevar:
            self._is_down_sitevar = Sitevar(id="apistatus.fmsapi_down", description="Is FMSAPI down?")

        if version == 'v1.0':
            FMS_API_URL_BASE = 'https://frc-api.firstinspires.org/api/v1.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/awards/%s/%s'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/qual/hybrid'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/schedule/%s/%s/playoff/hybrid'  # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/rankings/%s/%s'  # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/alliances/%s/%s'  # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/teams/%s/?teamNumber=%s'  # (year, teamNumber)
            # NOTE(review): '/events/season=%s' looks like it should be
            # '/events?season=%s' -- confirm against the v1.0 API docs.
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/events/season=%s'
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/teams/?season=%s&eventCode=%s&page=%s'  # (year, eventCode, page)
        elif version == 'v2.0':
            FMS_API_URL_BASE = 'https://frc-api.firstinspires.org/v2.0'
            self.FMS_API_AWARDS_URL_PATTERN = FMS_API_URL_BASE + '/%s/awards/%s'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/qual/hybrid'  # (year, event_short)
            self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/schedule/%s/playoff/hybrid'  # (year, event_short)
            self.FMS_API_MATCH_DETAILS_QUAL_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/qual'  # (year, event_short)
            self.FMS_API_MATCH_DETAILS_PLAYOFF_URL_PATTERN = FMS_API_URL_BASE + '/%s/scores/%s/playoff'  # (year, event_short)
            self.FMS_API_EVENT_RANKINGS_URL_PATTERN = FMS_API_URL_BASE + '/%s/rankings/%s'  # (year, event_short)
            self.FMS_API_EVENT_ALLIANCES_URL_PATTERN = FMS_API_URL_BASE + '/%s/alliances/%s'  # (year, event_short)
            self.FMS_API_TEAM_DETAILS_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?teamNumber=%s'  # (year, teamNumber)
            self.FMS_API_EVENT_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/events'  # year
            self.FMS_API_EVENTTEAM_LIST_URL_PATTERN = FMS_API_URL_BASE + '/%s/teams/?eventCode=%s&page=%s'  # (year, eventCode, page)
        else:
            raise Exception("Unknown FMS API version: {}".format(version))

    def _get_event_short(self, event_short):
        """Maps a TBA event short to the code FMS expects (divisions differ)."""
        return self.EVENT_SHORT_EXCEPTIONS.get(event_short, event_short)

    @ndb.tasklet
    def _parse_async(self, url, parser):
        """Fetches `url` asynchronously and returns parser.parse(json).

        Also flips the 'apistatus.fmsapi_down' sitevar on 5XX responses and
        clears it on success.  Returns None on any failure.
        """
        headers = {
            'Authorization': 'Basic {}'.format(self._fms_api_authtoken),
            'Cache-Control': 'no-cache, max-age=10',
            'Pragma': 'no-cache',
        }
        try:
            rpc = urlfetch.create_rpc(deadline=10)
            result = yield urlfetch.make_fetch_call(rpc, url, headers=headers)
        except Exception as e:
            logging.error("URLFetch failed for: {}".format(url))
            logging.info(e)
            raise ndb.Return(None)

        old_status = self._is_down_sitevar.contents
        if result.status_code == 200:
            self._is_down_sitevar.contents = False
            self._is_down_sitevar.put()
            ApiStatusController.clear_cache_if_needed(old_status, self._is_down_sitevar.contents)
            raise ndb.Return(parser.parse(json.loads(result.content)))
        elif result.status_code // 100 == 5:
            # 5XX error - something is wrong with the server.
            # (The old check `status_code % 100 == 5` compared the last two
            # digits and so matched 505 but never 500/502/503.)
            logging.warning('URLFetch for %s failed; Error code %s' % (url, result.status_code))
            self._is_down_sitevar.contents = True
            self._is_down_sitevar.put()
            ApiStatusController.clear_cache_if_needed(old_status, self._is_down_sitevar.contents)
            raise ndb.Return(None)
        else:
            logging.warning('URLFetch for %s failed; Error code %s' % (url, result.status_code))
            raise ndb.Return(None)

    @ndb.toplevel
    def _parse(self, url, parser):
        """Synchronous wrapper around _parse_async."""
        result = yield self._parse_async(url, parser)
        raise ndb.Return(result)

    def getAwards(self, event):
        """Returns the awards for an event.

        For 2015+ championship subdivisions, the awards live under the
        parent division, filtered down to teams actually at the subdivision.
        """
        awards = []
        if event.event_type_enum == EventType.CMP_DIVISION and event.year >= 2015:  # 8 subdivisions from 2015+ have awards listed under 4 divisions
            event_team_keys = EventTeam.query(EventTeam.event == event.key).fetch(keys_only=True)
            valid_team_nums = set([int(etk.id().split('_')[1][3:]) for etk in event_team_keys])
            awards += self._parse(self.FMS_API_AWARDS_URL_PATTERN % (event.year, self._get_event_short(self.SUBDIV_TO_DIV[event.event_short])), FMSAPIAwardsParser(event, valid_team_nums))
        awards += self._parse(self.FMS_API_AWARDS_URL_PATTERN % (event.year, self._get_event_short(event.event_short)), FMSAPIAwardsParser(event))
        return awards

    def getEventAlliances(self, event_key):
        """Returns the playoff alliances for an event key like '2016casj'."""
        year = int(event_key[:4])
        event_short = event_key[4:]
        alliances = self._parse(self.FMS_API_EVENT_ALLIANCES_URL_PATTERN % (year, self._get_event_short(event_short)), FMSAPIEventAlliancesParser())
        return alliances

    def getMatches(self, event_key):
        """Returns the merged qual+playoff matches for an event, with score
        breakdowns attached where a schedule match exists.

        NOTE(review): the MATCH_DETAILS URL patterns are only defined for
        v2.0; calling this on a v1.0 instance would raise AttributeError --
        confirm this method is v2.0-only.
        """
        year = int(event_key[:4])
        event_short = event_key[4:]

        hs_parser = FMSAPIHybridScheduleParser(year, event_short)
        detail_parser = FMSAPIMatchDetailsParser(year, event_short)
        # Kick off all four fetches in parallel, then join below.
        qual_matches_future = self._parse_async(self.FMS_API_HYBRID_SCHEDULE_QUAL_URL_PATTERN % (year, self._get_event_short(event_short)), hs_parser)
        playoff_matches_future = self._parse_async(self.FMS_API_HYBRID_SCHEDULE_PLAYOFF_URL_PATTERN % (year, self._get_event_short(event_short)), hs_parser)
        qual_details_future = self._parse_async(self.FMS_API_MATCH_DETAILS_QUAL_URL_PATTERN % (year, self._get_event_short(event_short)), detail_parser)
        playoff_details_future = self._parse_async(self.FMS_API_MATCH_DETAILS_PLAYOFF_URL_PATTERN % (year, self._get_event_short(event_short)), detail_parser)

        matches_by_key = {}
        qual_matches = qual_matches_future.get_result()
        if qual_matches is not None:
            for match in qual_matches:
                matches_by_key[match.key.id()] = match
        playoff_matches = playoff_matches_future.get_result()
        if playoff_matches is not None:
            for match in playoff_matches:
                matches_by_key[match.key.id()] = match

        qual_details = qual_details_future.get_result()
        qual_details_items = qual_details.items() if qual_details is not None else []
        playoff_details = playoff_details_future.get_result()
        playoff_details_items = playoff_details.items() if playoff_details is not None else []
        # Attach score breakdowns only to matches present in the schedule.
        for match_key, match_details in qual_details_items + playoff_details_items:
            if match_key in matches_by_key:
                matches_by_key[match_key].score_breakdown_json = json.dumps(match_details)

        return matches_by_key.values()

    def getEventRankings(self, event_key):
        """Returns the rankings for an event key like '2016casj'."""
        year = int(event_key[:4])
        event_short = event_key[4:]

        rankings = self._parse(self.FMS_API_EVENT_RANKINGS_URL_PATTERN % (year, self._get_event_short(event_short)), FMSAPIEventRankingsParser(year))
        return rankings

    def getTeamDetails(self, year, team_key):
        """Returns one (team, districtteam, robot) tuple, or None."""
        team_number = team_key[3:]  # everything after 'frc'

        result = self._parse(self.FMS_API_TEAM_DETAILS_URL_PATTERN % (year, team_number), FMSAPITeamDetailsParser(year))
        if result:
            return result[0]  # (team, districtteam, robot)
        else:
            return None

    def getEventList(self, year):
        """Returns all events for a season."""
        events = self._parse(self.FMS_API_EVENT_LIST_URL_PATTERN % (year), FMSAPIEventListParser(year))
        return events

    # Returns list of tuples (team, districtteam, robot)
    def getEventTeams(self, event_key):
        """Pages through the event's team list until exhausted."""
        year = int(event_key[:4])
        event_code = self._get_event_short(event_key[4:])

        parser = FMSAPITeamDetailsParser(year)
        models = []  # will be list of tuples (team, districtteam, robot) model
        for page in range(1, 9):  # Ensure this won't loop forever. 8 pages should be more than enough
            url = self.FMS_API_EVENTTEAM_LIST_URL_PATTERN % (year, event_code, page)
            result = self._parse(url, parser)
            if result is None:
                break
            partial_models, more_pages = result
            models.extend(partial_models)
            if not more_pages:
                break
        return models
|
|
"""File Utils."""
__author__ = 'pramodg@room77.com (Pramod Gupta)'
__copyright__ = 'Copyright 2012 Room77, Inc.'
from contextlib import contextmanager
from datetime import datetime
import distutils.dir_util as ddu
import os
import re
import stat
import sys
from pylib.base.term_color import TermColor
class FileUtils:
  """File utility class: path resolution, directory walking and small
  file-manipulation helpers shared by the build tooling."""

  @classmethod
  @contextmanager
  def PushDir(cls, dir):
    """Temporarily change directory while executing a `with` block.

    Args:
      dir: string: The directory to switch to temporarily.

    Usage:
      print os.getcwd() # "path/to/old"
      with FileUtils.PushDir('path/to/new'):
        print os.getcwd() # "path/to/new"
      print os.getcwd() # "path/to/old"
    """
    previous_dir = os.getcwd()
    os.chdir(dir)
    try:
      yield
    finally:
      # Restore the original directory even if the body raised; the old
      # code left the process stranded in `dir` on an exception.
      os.chdir(previous_dir)

  @classmethod
  def MakeDirs(cls, dir):
    """Creates a dir and all intermediate dirs if necessary.

    Args:
      dir: string: The directory to build.
    """
    if not os.path.exists(dir):
      os.makedirs(dir)

  @classmethod
  def CopyDirTree(cls, src, dst):
    """Copies the entire directory tree.

    Args:
      src: string: The source directory.
      dst: string: The destination directory.

    Return:
      boolean: Returns True on success and False otherwise.
    """
    # Local import brings the exception class into scope; the old code
    # referenced DistutilsFileError without importing it, so any copy
    # failure raised NameError instead of being reported.
    from distutils.errors import DistutilsFileError
    try:
      ddu.copy_tree(src, dst)
      return True
    except (DistutilsFileError, OSError) as e:
      TermColor.Error('Cannot copy %s to %s. %s: %s' % (src, dst, type(e), e))
      return False

  @classmethod
  def CreateLink(cls, linkdir, dir):
    """Creates `dir` and points the symlink `linkdir` at it, replacing any
    existing link that is missing or points elsewhere.

    Args:
      linkdir: string: the location to create the link
      dir: string: the location where the buildfiles or template are stored
    """
    cls.MakeDirs(dir)
    if not os.path.exists(linkdir) or os.path.realpath(linkdir) != os.path.abspath(dir):
      if os.path.lexists(linkdir):
        os.remove(linkdir)
      os.symlink(dir, linkdir)

  @classmethod
  def GetSrcRoot(cls):
    """Returns the src root.

    Prefers the R77_SRC_ROOT env var (set by flash when building);
    otherwise walks up from the cwd to the first dir containing '.git'.
    """
    try:
      return os.environ['R77_SRC_ROOT']
    except KeyError:
      pass
    dir = os.getcwd()
    while (dir and dir != '/' and os.path.isdir(dir) and not
           os.path.exists(os.path.join(dir, '.git'))):
      dir = os.path.dirname(dir)
    return dir

  @classmethod
  def GetGenDir(cls):
    """Returns the 'gen' dir under the src root."""
    return os.path.join(cls.GetSrcRoot(), 'gen')

  @classmethod
  def GetOutRoot(cls):
    """Returns the root dir where all the build output is generated."""
    # TODO: add the ability to override this with a config
    return '/localdisk'

  @classmethod
  def GetOutDir(cls, subpath):
    """Returns the output dir for the subpath."""
    src_dir = cls.GetSrcRoot()
    # Prefer to output to the out root (localdisk) if it exists
    if os.path.exists(cls.GetOutRoot()):
      src_dir = cls.GetOutRoot() + src_dir
    return os.path.join(src_dir, subpath)

  @classmethod
  def GetBinDir(cls):
    """Returns the bin dir where all the build output is generated."""
    return cls.GetOutDir('e')

  @classmethod
  def GetEDir(cls):
    """Returns the 'e' dir which is a soft link to the bin dir.

    Note: This is only there as a user friendly directory and none of the
    flash code should depend on it.
    """
    return os.path.join(cls.GetSrcRoot(), 'e')

  @classmethod
  def GetPipelineDir(cls):
    """Returns the pipeline dir where all the pipeline output is generated."""
    return cls.GetOutDir(os.path.join('pipeline', 'out'))

  @classmethod
  def GetPipelineLinkDir(cls):
    """Returns the 'pipeline' dir which is a soft link to the pipeline dir.

    Note: This is only there as a user friendly directory and none of the
    code should depend on it.
    """
    return os.path.join(cls.GetSrcRoot(), 'pipeline', 'out')

  @classmethod
  def FromAbsoluteToRepoRootPath(cls, abspath):
    """Converts an absolute path to a path relative to the repo root.

    For instance, /home/r77/src/walle/foo/bar/baz is converted to
    foo/bar/baz
    """
    return os.path.relpath(abspath, cls.GetSrcRoot())

  @classmethod
  def GetWebTestHtmlDir(cls):
    """Returns the dir under the bin dir holding the web-test html files."""
    return os.path.join(cls.GetBinDir(), 'html%s' % cls.GetWebTestHtmlUrlPath())

  @classmethod
  def GetWebTestHtmlLink(cls):
    """Returns the dir which is a soft link to the web test html directory."""
    return os.path.join(cls.GetSrcRoot(), 'html%s' % cls.GetWebTestHtmlUrlPath())

  @classmethod
  def GetWebTestHtmlUrlPath(cls):
    """Returns the web test path to access these files from a browser url."""
    return '/ngtest'

  @classmethod
  def GetBinPathForFile(cls, filename):
    """Returns the bin path for the file (src root replaced by bin dir).

    Args:
      filename: string: The file for which the bin path is needed.

    Return:
      string: The bin path for the file.
    """
    if not filename:
      return None
    return filename.replace(cls.GetSrcRoot(), cls.GetBinDir(), 1)

  @classmethod
  def GetAbsPathForFile(cls, filename):
    """Returns the absolute path for the filename.

    Args:
      filename: string: The file for which the abs path is needed.

    Return:
      string: The abs path for the file if it exists, None otherwise.
    """
    if not filename:
      return None
    if os.path.exists(filename):
      return os.path.normpath(os.path.abspath(filename))
    # Fall back to interpreting the name relative to the src root.
    if filename[0] == '/':
      filename = filename[1:]
    abs_path = os.path.normpath(os.path.join(cls.GetSrcRoot(), filename))
    if os.path.exists(abs_path):
      return abs_path
    return None

  @classmethod
  def CreateFileWithData(cls, filename, data='\n'):
    """Resets a file with the input data.

    Args:
      filename: string: The name of the file to reset.
      data: string: The default data to write to the file.
    """
    # `with` guarantees the handle is closed even if the write fails.
    with open(filename, 'w') as f:
      f.write(data)

  @classmethod
  def UnixBasename(cls, path):
    """Basename with unix semantics.

    In unix, basename of /foo/bar/ is 'bar', but python's
    os.path.basename returns an empty string; act like unix instead.
    """
    # remove the trailing slash
    if path[-1:] == "/":
      path = path[:-1]
    return os.path.basename(path)

  @classmethod
  def GetSubDirsInDir(cls, dir, recurse=True, ignore_list=None):
    """Given a directory, returns all the subdirectories.

    Args:
      dir: string: The directory to walk.
      recurse: boolean: If we should recurse the directory tree.
      ignore_list: list: List of strings to ignore.

    Return:
      list: List of subdirs.
    """
    ignore_list = ignore_list or []  # avoid a shared mutable default
    out_dirs = []
    if not os.path.isdir(dir):
      TermColor.Warning('Not a directory: %s' % dir)
      return out_dirs
    for (root, subdirs, files) in os.walk(dir):
      ignore = cls.IgnorePath(root, ignore_list)
      if ignore:
        TermColor.Info('Ignored dirs in %s as anything with [%s] is ignored' % (root, ignore))
        continue
      out_dirs += [os.path.join(root, x) for x in subdirs]
      # Check if we should continue the walk.
      if not recurse:
        break
    return out_dirs

  @classmethod
  def GetFilesInDir(cls, dir, recurse=True, ignore_list=None):
    """Given a directory, returns all the files in it and sub directories.

    Args:
      dir: string: The directory to walk.
      recurse: boolean: If we should recurse the directory tree.
      ignore_list: list: List of strings to ignore.

    Return:
      list: List of files.
    """
    ignore_list = ignore_list or []  # avoid a shared mutable default
    out_files = []
    if not os.path.isdir(dir):
      TermColor.Warning('Not a directory: %s' % dir)
      return out_files
    for (root, subdirs, files) in os.walk(dir):
      ignore = cls.IgnorePath(root, ignore_list)
      if ignore:
        TermColor.Info('Ignored dirs in %s as anything with [%s] is ignored' % (root, ignore))
        continue
      out_files += [os.path.join(root, x) for x in files]
      # Check if we should continue the walk.
      if not recurse:
        break
    return out_files

  @classmethod
  def IgnorePath(cls, path, ignore_list):
    """Check if a given path can be ignored.

    Args:
      path: string: The path to check.
      ignore_list: list: List of strings to ignore.

    Return:
      string: The string because of which the path is ignored, None
        otherwise.
    """
    return next((x for x in ignore_list if path.find(x) != -1), None)

  @classmethod
  def GetAllItemsSortedByDate(cls, dir, pattern='.*', filter=stat.S_ISREG):
    """Returns all entries in a directory sorted by date, newest first.

    Args:
      dir: string: The directory to list.
      pattern: string: The pattern the item name must match.
      filter: bool method(stat mode): Filter that returns true or false for
          a stat mode.  Only modes for which the filter returns true are
          kept.

    Return:
      list: List of entries sorted by change time, newest first.
    """
    # get all entries in the directory w/ stats
    entries = (os.path.join(dir, x) for x in os.listdir(dir) if re.match(pattern, x))
    entries = ((os.stat(path), path) for path in entries)
    # leave only filtered entries, keyed by change time
    entries = ((s[stat.ST_CTIME], path)
               for s, path in entries if filter(s[stat.ST_MODE]))
    return [path for (cdate, path) in sorted(entries, reverse=True)]

  @classmethod
  def GetAllFilesSortedByDate(cls, dir, pattern='.*'):
    """Returns all the files in a directory sorted by date, newest first.

    Args:
      dir: string: The directory to list.
      pattern: string: The pattern the file name must match.

    Return:
      list: List of files sorted by change time, newest first.
    """
    return cls.GetAllItemsSortedByDate(dir, pattern, filter=stat.S_ISREG)

  @classmethod
  def GetAllDirsSortedByDate(cls, dir, pattern='.*'):
    """Returns all the subdirs in a directory sorted by date, newest first.

    Args:
      dir: string: The directory to list.
      pattern: string: The pattern the subdir name must match.

    Return:
      list: List of subdirs sorted by change time, newest first.
    """
    return cls.GetAllItemsSortedByDate(dir, pattern, filter=stat.S_ISDIR)

  @classmethod
  def GetLatestFile(cls, dir):
    """Returns the latest file in a folder.

    Args:
      dir: string: The dir.

    Return:
      string: The latest file if one exists and None otherwise.
    """
    res = cls.GetAllFilesSortedByDate(dir)
    if not res:
      return None
    return res[0]

  @classmethod
  def GetLatestDir(cls, dir):
    """Returns the latest directory in a folder.

    Args:
      dir: string: The dir.

    Return:
      string: The latest subdir if one exists and None otherwise.
    """
    res = cls.GetAllDirsSortedByDate(dir)
    if not res:
      return None
    return res[0]

  @classmethod
  def GetPreviousDir(cls, path, pattern='.*'):
    """Given a path, returns the sibling directory that was created before it.

    Args:
      path: string: The path for the directory.
      pattern: string: The pattern the sibling dirnames must match.

    Return:
      string: Returns the previous dir if one exists and None otherwise.
    """
    (parent, dir_name) = os.path.split(path)
    take_next = False
    # Siblings come back newest-first, so the entry right after `path`
    # is the one created immediately before it.
    for dir in cls.GetAllDirsSortedByDate(parent, pattern):
      if take_next:
        return dir
      if os.path.basename(dir) == dir_name:
        take_next = True
    return None

  @classmethod
  def GetPreviousDatedDir(cls, path):
    """Given a path, returns the sibling dir created with a date before it.

    Args:
      path: string: The path.

    Return:
      string: Returns the previous dir if one exists and None otherwise.
    """
    # Use the pattern to match names containing only 8 digits, e.g. 20140107
    return cls.GetPreviousDir(path, r'^\d{8}$')

  @classmethod
  def RemoveFiles(cls, files):
    """Removes the files, silently skipping ones that are already gone.

    Args:
      files: list[string]: A list of files to remove.
    """
    for i in files:
      try:
        os.remove(i)
      except OSError:
        pass

  @classmethod
  def FileContents(cls, file):
    """Reads the contents of a file if present.

    Args:
      file: string: The file to read.

    Returns:
      string: The contents of the file. None if the file is not present.
    """
    if not os.path.isfile(file):
      return None
    with open(file, 'r') as fp:
      return fp.read()

  @classmethod
  def IsSameDevice(cls, path_a, path_b):
    """Determines if the two paths are on the same device. The two paths
    are expected to exist.

    Args:
      path_a: string: The path to compare.
      path_b: string: The path being compared.

    Returns:
      boolean: Returns True if the two paths are on the same device and
        False otherwise.
    """
    return os.stat(path_a)[stat.ST_DEV] == os.stat(path_b)[stat.ST_DEV]
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for eager execution using XLA."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import convolutional
from tensorflow.python.layers import pooling
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
from tensorflow.python.training import adam
class EagerTest(xla_test.XLATestCase):
def testBasic(self):
with self.test_scope():
three = constant_op.constant(3)
five = constant_op.constant(5)
product = three * five
self.assertAllEqual(15, product)
  def testGradientTape(self):
    """Persistent GradientTape gives both partials of a = x + y + x*y."""
    with self.test_scope():
      x = constant_op.constant(1.0)
      y = constant_op.constant(10.0)
      with backprop.GradientTape(persistent=True) as tape:
        tape.watch(x)
        tape.watch(y)
        a = x + y + x * y
      da_dx = tape.gradient(a, x)
      da_dy = tape.gradient(a, y)
      self.assertEqual(11.0, da_dx.numpy())  # da/dx = 1 + y
      self.assertEqual(2.0, da_dy.numpy())   # da/dy = 1 + x
def testExecuteListOutputLen0(self):
with self.test_scope():
empty = constant_op.constant([], dtype=dtypes.float32)
result = array_ops.unstack(empty, 0)
self.assertTrue(isinstance(result, list))
self.assertEqual(0, len(result))
def testExecuteListOutputLen1(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 1, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(1, len(result))
self.assertAllEqual([[0, 1, 2], [3, 4, 5]], result[0])
def testExecuteListOutputLen3(self):
with self.test_scope():
split_dim = constant_op.constant(1)
value = constant_op.constant([[0., 1., 2.], [3., 4., 5.]])
result = array_ops.split(value, 3, axis=split_dim)
self.assertTrue(isinstance(result, list))
self.assertEqual(3, len(result))
self.assertAllEqual([[0], [3]], result[0])
self.assertAllEqual([[1], [4]], result[1])
self.assertAllEqual([[2], [5]], result[2])
  def testBasicGraph(self):
    """The same multiply works both eagerly and in graph mode."""
    # Run some ops eagerly
    with self.test_scope():
      three = constant_op.constant(3)
      five = constant_op.constant(5)
      product = three * five
      self.assertAllEqual(15, product)
    # Run some ops graphly
    with context.graph_mode(), self.test_session() as sess:
      with self.test_scope():
        three = constant_op.constant(3)
        five = constant_op.constant(5)
        product = three * five
        self.assertAllEqual(15, sess.run(product))
  def testDegenerateSlices(self):
    """Empty/degenerate strided slices match numpy's results."""
    with self.test_scope():
      npt = np.arange(1, 19, dtype=np.float32).reshape(3, 2, 3)
      t = constant_op.constant(npt)
      # degenerate by offering a forward interval with a negative stride
      self.assertAllEqual(npt[0:-1:-1, :, :], t[0:-1:-1, :, :])
      # degenerate with a reverse interval with a positive stride
      self.assertAllEqual(npt[-1:0, :, :], t[-1:0, :, :])
      # empty interval in every dimension
      self.assertAllEqual(npt[-1:0, 2:2, 2:3:-1], t[-1:0, 2:2, 2:3:-1])
  def testIdentity(self):
    """identity on a plain Python scalar returns an equal value."""
    with self.test_scope():
      self.assertAllEqual(2, array_ops.identity(2))
def testIdentityOnVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(True)
i = array_ops.identity(v)
self.assertAllEqual(True, i.numpy())
def testAssignAddVariable(self):
with self.test_scope():
v = resource_variable_ops.ResourceVariable(1.0)
v.assign_add(2.0)
self.assertEqual(3.0, v.numpy())
  def testReadAssignRead(self):
    """read_value snapshots: a read taken before assign_add keeps the
    old value while a later read sees the update."""
    with self.test_scope():
      v = resource_variable_ops.ResourceVariable(1.0)
      val1 = v.read_value()
      v.assign_add(2.0)
      val2 = v.read_value()
      self.assertEqual(1.0, val1.numpy())
      self.assertEqual(3.0, val2.numpy())
  def testGradient(self):
    """gradients_function propagates the seed `dy` through identity."""
    def f(x):
      return x
    with self.test_scope():
      grad_fn = backprop.gradients_function(f)
      self.assertAllEqual(2., grad_fn(1., dy=2.)[0])
  def testVariableGradient(self):
    """implicit_grad differentiates f = v0*v0 w.r.t. v0: d/dv0 = 2*v0."""
    with self.test_scope():
      v0 = resource_variable_ops.ResourceVariable(1.0)
      def f():
        x = v0 * v0
        return x
      grads = backprop.implicit_grad(f)()
    self.assertEqual(2., grads[0][0].numpy())
  def testMultipleVariableReads(self):
    """Repeated reads of a large variable must not copy its storage."""
    # This test makes sure consecutive variable reads don't copy
    # the underlying memory.
    with self.test_scope():
      # Create 128MiB variables
      var = resource_variable_ops.ResourceVariable(
          array_ops.ones([32, 1024, 1024]))
      # Read the same variable 100 times. If the underlying tensor
      # is not copied, this is a trivial operation. If it is copied,
      # this will eat over 13GB and OOM.
      values = []
      for _ in range(100):
        values.append(var.value())
  # The shape, shape_n, size, and rank are tested here because their
  # execution kernels (as opposed to compilation only tf2xla kernels)
  # are distincts from tf2xla kernels.

  def testShape(self):
    """shape is correct for both directly-built and device-produced tensors."""
    def const(value):
      return array_ops.shape(
          constant_op.constant(value)).numpy()

    def ones(value):
      return array_ops.shape(
          array_ops.ones(value)).numpy()

    with self.test_scope():
      # Shapes of directly constructed tensors
      self.assertAllEqual([], const(3))
      self.assertAllEqual([3], const([1.0, 2.0, 3.0]))
      self.assertAllEqual([2, 2], const([[1.0, 2.0], [3.0, 4.0]]))
      self.assertAllEqual([2, 1, 2], const([[[1.0, 2.0]], [[3.0, 4.0]]]))

      # Shapes of tensors created by op running on device
      # We make this distinction because directly constructed tensors
      # are treated differently in a few places that can influence shape:
      #  - they always have on_host_tensor
      #  - they and their shapes can be cached
      #  - they end up on device via a copy, instead of as program output
      self.assertAllEqual([], ones([]))
      self.assertAllEqual([3], ones([3]))
      self.assertAllEqual([2, 2], ones([2, 2]))
      self.assertAllEqual([2, 1, 2], ones([2, 1, 2]))
def testShapeN(self):
  """shape_n() returns the shapes of every tensor in a batch."""
  with self.test_scope():
    expected = [[], [3], [2, 2]]
    # Shapes of directly constructed tensors.
    direct = [
        constant_op.constant(1.0),
        constant_op.constant([1.0, 2.0, 3.0]),
        constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
    ]
    self.assertAllEqual(
        expected,
        [s.numpy().tolist() for s in array_ops.shape_n(direct)])
    # Shapes of tensors produced by an op running on the device.
    produced = [
        array_ops.ones([]),
        array_ops.ones([3]),
        array_ops.ones([2, 2])
    ]
    self.assertAllEqual(
        expected,
        [s.numpy().tolist() for s in array_ops.shape_n(produced)])
def testSize(self):
  """size() returns the total element count of a constant."""
  with self.test_scope():
    cases = [
        (1, 1.0),
        (3, [1.0, 2.0, 3.0]),
        (4, [[1.0, 2.0], [3.0, 4.0]]),
    ]
    for expected, value in cases:
      self.assertEqual(
          expected, array_ops.size(constant_op.constant(value)).numpy())
def testRank(self):
  """rank() returns the number of dimensions of a constant."""
  with self.test_scope():
    cases = [
        (0, 1.0),
        (1, [1.0, 2.0, 3.0]),
        (2, [[1.0, 2.0], [3.0, 4.0]]),
    ]
    for expected, value in cases:
      self.assertEqual(
          expected, array_ops.rank(constant_op.constant(value)).numpy())
def testAdam(self):
  """One Adam step on a dense variable moves it by roughly the lr."""
  with self.test_scope():
    opt = adam.AdamOptimizer(0.1)
    var = resource_variable_ops.ResourceVariable(10.0)
    with backprop.GradientTape() as tape:
      loss = var * var
    grad = tape.gradient(loss, var)
    opt.apply_gradients([(grad, var)])
    # The first Adam update steps the variable by ~lr: 10.0 -> 9.9.
    self.assertAlmostEqual(9.9, var.numpy(), places=3)
def testAdamSparse(self):
  """Applies a sparse (IndexedSlices) Adam update to a CPU-hosted variable."""
  with ops.device('/cpu:0'):
    # Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
    # are not implemented on TPU.
    embedding_matrix = resource_variable_ops.ResourceVariable(
        array_ops.ones([3, 2]))

  with self.test_scope():
    with backprop.GradientTape() as tape:
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
      y = math_ops.reduce_sum(embedding)
    dy_dx = tape.gradient(y, embedding_matrix)
    # An embedding_lookup gradient is sparse: only the looked-up row (1)
    # carries a gradient, represented as IndexedSlices.
    self.assertIsInstance(dy_dx, ops.IndexedSlices)
    optimizer = adam.AdamOptimizer(0.1)
    # The gradient application operations will run on CPU because optimizer
    # updates are always collocated with the variable.
    optimizer.apply_gradients([(dy_dx, embedding_matrix)])

    # This assign_add will run on CPU because when an input to an
    # operation is a resource, this operation is placed on the resource's
    # device by the eager runtime.
    embedding_matrix.assign_add(array_ops.ones([3, 2]))

    # Row 1 was stepped by ~lr (0.1) before the +1; other rows only got +1.
    self.assertAllClose([[2.0, 2.0],
                         [1.9, 1.9],
                         [2.0, 2.0]], embedding_matrix.numpy())
class EagerFunctionTest(xla_test.XLATestCase):
  """Tests eager execution of ``function.defun``-compiled functions via XLA."""

  def testBasic(self):
    # A defun-wrapped stock op compiles and runs like the eager op.
    with self.test_scope():
      matmul = function.defun(math_ops.matmul)
      t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
      sq = matmul(t, t, transpose_a=True)
      self.assertAllEqual(sq.numpy().reshape(-1), [10, 14, 14, 20])

  def testConv(self):
    # Conv2D + MaxPooling2D layers inside a defun.
    if 'GPU' in self.device:
      # TODO(b/32333178)
      self.skipTest('Current implementation of RandomStandardNormal kernel '
                    'is very slow on GPU, and has been blacklisted.')
    with self.test_scope():
      data_format = 'channels_last'
      conv = convolutional.Conv2D(
          filters=1, kernel_size=2, padding='VALID',
          data_format=data_format, activation=nn_ops.relu,
          kernel_initializer=init_ops.ones_initializer(),
          bias_initializer=init_ops.zeros_initializer())
      pool = pooling.MaxPooling2D(2, 2, data_format=data_format)

      def model(x):
        x = conv(x)
        return pool(x)

      model = function.defun(model)

      x = array_ops.ones([1, 4, 4, 1])
      y = model(x)
      # All-ones input with all-ones 2x2 kernel -> every conv output is 4.
      self.assertAllEqual(y.numpy(), [[[[4.]]]])

  def testReadVariable(self):
    # A defun may read a resource variable captured from the outer scope.
    with self.test_scope():
      v = resource_variable_ops.ResourceVariable(1.0)

      @function.defun
      def f():
        return v.read_value()

      var = f()
      self.assertEqual(1.0, var.numpy())

  def testUpdateVariable(self):
    # A defun may mutate a resource variable passed as an argument.
    with self.test_scope():
      v = resource_variable_ops.ResourceVariable(1.0)

      def f(v):
        v.assign_add(1.0)
        return v

      f = function.defun(f)

      var = f(v)
      self.assertEqual(2.0, var.numpy())

  def testAllArgumentKinds(self):
    """Test a complex function that takes different argument kinds.

    tf2xla machinery that translates, compiles, and runs defuns
    classifies arguments into: compile-time constants, regular tensors,
    and resources. This test creates a function with a mix of all these
    kinds. Moreover, the order of function arguments is intentionally mixed up.

    This also tests the case when the same argument is a compile-time constant
    as well as used in an operation that normally expects its inputs to be
    in device memory - addition in this case.
    """
    with self.test_scope():

      def foo(c1, r1, v1, c2, v2, r2):
        # c1 and c2 are compile-time constants
        # r1 and r2 are regular tensors
        # v1 and v2 are resource variables
        a = c1 + r1
        b = math_ops.cast(c2, dtypes.float32) + v2
        c = array_ops.slice(v1, c1, c2)
        d = r2 * v2
        return a, b, c, d

      foo = function.defun(foo)

      c1 = [0, 0]
      c2 = array_ops.ones([2], dtype=dtypes.int32)
      r1 = array_ops.ones([2])
      r2 = [[2., 2.], [3., 3.]]
      v1 = resource_variable_ops.ResourceVariable([[1., 2.], [3., 4.]])
      v2 = resource_variable_ops.ResourceVariable([[10., 20.], [30., 40.]])

      a, b, c, d = foo(c1, r1, v1, c2, v2, r2)

      self.assertAllEqual([1, 1], a.numpy())
      self.assertAllEqual([[11., 21.], [31., 41.]], b.numpy())
      self.assertAllEqual([[1.]], c.numpy())
      self.assertAllEqual([[20., 40.], [90., 120.]], d.numpy())

  def testDefunInGradientTape(self):
    # Gradients flow through a defun with respect to a captured variable.
    with self.test_scope():
      v0 = resource_variable_ops.ResourceVariable(5.0)

      @function.defun
      def f(x):
        x = v0 * v0 * x
        return x

      x = constant_op.constant(3.0)
      with backprop.GradientTape() as tape:
        y = f(x)
      dy = tape.gradient(y, v0)

      # y = v0^2 * x = 25 * 3 = 75; dy/dv0 = 2 * v0 * x = 30.
      self.assertEqual(75, y.numpy())
      self.assertEqual(30, dy.numpy())

  def testSliceInDefun(self):
    # Strided slicing inside a defun, and its gradient shape.
    with self.test_scope():

      @function.defun
      def f(x, y):
        return x[0::2, y:, ...]

      x = array_ops.ones([2, 3, 4])
      y = array_ops.ones([], dtype=dtypes.int32)

      with backprop.GradientTape() as tape:
        tape.watch(x)
        tape.watch(y)
        z = f(x, y)
      dz = tape.gradient(z, x)

      self.assertAllEqual(np.ones([1, 2, 4]), z.numpy())
      # The gradient has the shape of the sliced input, not the slice.
      self.assertAllEqual((2, 3, 4), dz.shape.as_list())

  def testNestedDefun(self):
    self.skipTest('Nested defuns do not work on TPU at the moment')
    with self.test_scope():

      @function.defun
      def times_two(x):
        return 2 * x

      @function.defun
      def two_x_plus_1(x):
        return times_two(x) + 1

      x = constant_op.constant([2, 3, 4])
      y = two_x_plus_1(x)
      self.assertAllEqual([5, 7, 9], y.numpy())
class ExcessivePaddingTest(xla_test.XLATestCase):
  """Test that eager execution works with TPU flattened tensors.

  Tensors that would normally be excessively padded when written
  to TPU memory are reshaped to 1-D flat tensors.

  This test case verifies that such tensors work with eager execution.
  The flattening currently only happens on TPU, but tests should work
  fine with all backends as flattening is transparent.
  """

  def testFromConstant(self):
    with self.test_scope():
      # Create constant of shape [100, 2, 1]. This tensor would be
      # excessively padded on TPU.
      tensor = constant_op.constant(100 * [[[10.0], [2.0]]])
      # Use reduce_sum since it requires correctly working with
      # a particular dimension.
      reduced = math_ops.reduce_sum(tensor, axis=1)
      self.assertAllEqual(100 * [[12.0]], reduced)

  def testFromOperation(self):
    with self.test_scope():
      tensor = array_ops.ones([3, 100, 2, 2])
      # Reducing over axes 0, 2, 3 sums 3*2*2 = 12 ones per output element.
      reduced = math_ops.reduce_sum(tensor, axis=[0, 2, 3])
      self.assertAllEqual(100 * [12.0], reduced)

  def testAsFunctionInput(self):
    # A potentially-flattened tensor passed into a defun.
    with self.test_scope():

      @function.defun
      def f(x):
        return math_ops.reduce_sum(x, axis=2)

      tensor = constant_op.constant(100 * [[[10.0, 2.0]]])
      reduced = f(tensor)
      self.assertAllEqual(100 * [[12.0]], reduced)

  def testAsFunctionOutput(self):
    # A potentially-flattened tensor returned from a defun.
    with self.test_scope():

      @function.defun
      def f(x):
        return x * constant_op.constant(100 * [[[10.0, 2.0]]])

      y = f(3)
      reduced = math_ops.reduce_sum(y, axis=2)
      self.assertAllEqual(100 * [[36.0]], reduced)
def multiple_tpus():
  """Returns True when the current context exposes more than one TPU device."""
  device_names = context.context().devices()
  tpu_count = sum(1 for name in device_names if 'device:TPU:' in name)
  return tpu_count > 1
class MultiDeviceTest(xla_test.XLATestCase):
  """Test running TPU computation on more than one core."""

  def testBasic(self):
    if not multiple_tpus():
      self.skipTest('MultiDeviceTest requires multiple TPU devices.')

    # Compute 10 on TPU core 0
    with ops.device('device:TPU:0'):
      two = constant_op.constant(2)
      five = constant_op.constant(5)
      ten = two * five
      self.assertAllEqual(10, ten)

    # Compute 6 on TPU core 1
    with ops.device('device:TPU:1'):
      two = constant_op.constant(2)
      three = constant_op.constant(3)
      six = two * three
      self.assertAllEqual(6, six)

    # Copy 10 and 6 to CPU and sum them
    self.assertAllEqual(16, ten + six)
if __name__ == '__main__':
  # Log device placement so test output records where each op actually ran.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(log_device_placement=True))
  googletest.main()
|
|
# Copyright (c) 2015-2022 Vector 35 Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import ctypes
# Binary Ninja components
from . import _binaryninjacore as core
from .enums import SettingsScope
class Settings:
    """
    :class:`Settings` provides a way to define and access settings in a hierarchical fashion. The value of a setting can \
    be defined for each hierarchical level, where each level overrides the preceding level. The backing-store for setting \
    values at each level is also configurable. This allows for ephemeral or platform-independent persistent settings storage \
    for components within Binary Ninja or consumers of the Binary Ninja API.

    Each :class:`Settings` instance has an ``instance_id`` which identifies a schema. The schema defines the settings contents \
    and the way in which settings are retrieved and manipulated. A new :class:`Settings` instance defaults to using a value of *'default'* \
    for the ``instance_id``. The *'default'* settings schema defines all of the settings available for the active Binary Ninja components \
    which include at a minimum, the settings defined by the Binary Ninja core. The *'default'* schema may additionally define settings \
    for the UI and/or installed plugins. Extending existing schemas, or defining new ones is accomplished by calling :func:`register_group` \
    and :func:`register_setting` methods, or by deserializing an existing schema with :func:`deserialize_schema`.

    .. note:: All settings in the *'default'* settings schema are rendered with UI elements in the Settings View of Binary Ninja UI.

    Allowing setting overrides is an important feature and Binary Ninja accomplishes this by allowing one to override a setting at various \
    levels. The levels and their associated storage are shown in the following table. Default setting values are optional, and if specified, \
    saved in the schema itself.

        ================= ========================== ============== ==============================================
        Setting Level     Settings Scope             Preference     Storage
        ================= ========================== ============== ==============================================
        Default           SettingsDefaultScope       Lowest         Settings Schema
        User              SettingsUserScope          -              <User Directory>/settings.json
        Project           SettingsProjectScope       -              <Project Directory>/.binaryninja/settings.json
        Resource          SettingsResourceScope      Highest        Raw BinaryView (Storage in BNDB)
        ================= ========================== ============== ==============================================

    Settings are identified by a key, which is a string in the form of **'<group>.<name>'** or **'<group>.<subGroup>.<name>'**. Groups provide \
    a simple way to categorize settings. Sub-groups are optional and multiple sub-groups are allowed. When defining a settings group, the \
    :func:`register_group` method allows for specifying a UI friendly title for use in the Binary Ninja UI. Defining a new setting requires a \
    unique setting key and a JSON string of property, value pairs. The following table describes the available properties and values.

        ================== ====================================== ================== ======== =======================================================================
        Property           JSON Data Type                         Prerequisite       Optional {Allowed Values} and Notes
        ================== ====================================== ================== ======== =======================================================================
        "title"            string                                 None               No       Concise Setting Title
        "type"             string                                 None               No       {"array", "boolean", "number", "string"}
        "elementType"      string                                 "type" is "array"  No       {"string"}
        "enum"             array : {string}                       "type" is "array"  Yes      Enumeration definitions
        "enumDescriptions" array : {string}                       "type" is "array"  Yes      Enumeration descriptions that match "enum" array
        "minValue"         number                                 "type" is "number" Yes      Specify 0 to infer unsigned (default is signed)
        "maxValue"         number                                 "type" is "number" Yes      Values less than or equal to INT_MAX result in a QSpinBox UI element
        "precision"        number                                 "type" is "number" Yes      Specify precision for a QDoubleSpinBox
        "default"          {array, boolean, number, string, null} None               Yes      Specify optimal default value
        "aliases"          array : {string}                       None               Yes      Array of deprecated setting key(s)
        "description"      string                                 None               No       Detailed setting description
        "ignore"           array : {string}                       None               Yes      {"SettingsUserScope", "SettingsProjectScope", "SettingsResourceScope"}
        "message"          string                                 None               Yes      An optional message with additional emphasis
        "readOnly"         boolean                                None               Yes      Only enforced by UI elements
        "optional"         boolean                                None               Yes      Indicates setting can be null
        "requiresRestart"  boolean                                None               Yes      Enable restart notification in the UI upon change
        ================== ====================================== ================== ======== =======================================================================

    .. note:: In order to facilitate deterministic analysis results, settings from the *'default'* schema that impact analysis are serialized \
    from Default, User, and Project scope into Resource scope during initial BinaryView analysis. This allows an analysis database to be opened \
    at a later time with the same settings, regardless if Default, User, or Project settings have been modified.

    .. note:: Settings that do not impact analysis (e.g. many UI settings) should use the *"ignore"* property to exclude \
    *"SettingsProjectScope"* and *"SettingsResourceScope"* from the applicable scopes for the setting.

    Example analysis plugin setting:

        >>> my_settings = Settings()
        >>> title = "My Pre-Analysis Plugin"
        >>> description = "Enable extra analysis before core analysis."
        >>> properties = f'{{"title" : "{title}", "description" : "{description}", "type" : "boolean", "default" : false}}'
        >>> my_settings.register_group("myPlugin", "My Plugin")
        True
        >>> my_settings.register_setting("myPlugin.enablePreAnalysis", properties)
        True
        >>> my_bv = open_view("/bin/ls", options={'myPlugin.enablePreAnalysis' : True})
        >>> Settings().get_bool("myPlugin.enablePreAnalysis")
        False
        >>> Settings().get_bool("myPlugin.enablePreAnalysis", my_bv)
        True

    Example UI plugin setting:

        >>> my_settings = Settings()
        >>> title = "My UI Plugin"
        >>> description = "Enable My UI Plugin table display."
        >>> properties = f'{{"title" : "{title}", "description" : "{description}", "type" : "boolean", "default" : true, "ignore" : ["SettingsProjectScope", "SettingsResourceScope"]}}'
        >>> my_settings.register_group("myPlugin", "My Plugin")
        True
        >>> my_settings.register_setting("myPlugin.enableTableView", properties)
        True
        >>> my_bv = open_view("/bin/ls", options={'myPlugin.enablePreAnalysis' : True})
        >>> Settings().get_bool("myPlugin.enableTableView")
        True
    """
    # Shared core handle for the 'default' schema; reused by every
    # Settings() constructed with instance_id == "default".
    default_handle = core.BNCreateSettings("default")

    def __init__(self, instance_id: str = "default", handle=None):
        if handle is None:
            if instance_id is None or instance_id == "":
                instance_id = "default"
            if instance_id == "default":
                assert Settings.default_handle is not None
                _handle = Settings.default_handle
            else:
                _handle = core.BNCreateSettings(instance_id)
        else:
            # Wrapping an existing core handle: the schema id cannot be
            # recovered from the handle, so generate a unique one.
            instance_id = core.BNGetUniqueIdentifierString()
            _handle = handle
        assert _handle is not None
        # Bug fix: previously ``_instance_id`` was assigned only on the
        # ``handle is None`` path, so the :attr:`instance_id` property raised
        # AttributeError for instances constructed from an existing handle.
        self._instance_id = instance_id
        self.handle = _handle

    def __del__(self):
        # ``core`` may already be torn down during interpreter shutdown;
        # never free the shared default handle.
        if core is not None and self.handle is not Settings.default_handle:
            core.BNFreeSettings(self.handle)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Two wrappers are equal when they wrap the same core object.
        return ctypes.addressof(self.handle.contents) == ctypes.addressof(other.handle.contents)

    def __ne__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return not (self == other)

    def __hash__(self):
        return hash((self.instance_id, ctypes.addressof(self.handle.contents)))

    @property
    def instance_id(self):
        """Returns the ``instance_id`` for this :class:`Settings` repository (read-only)"""
        return self._instance_id

    def set_resource_id(self, resource_id=None):
        """
        ``set_resource_id`` Sets the resource identifier for this :class:`Settings` instance. When accessing setting values at the \
        ``SettingsResourceScope`` level, the resource identifier is passed along through the backing store interface.

        .. note:: Currently the only available backing store for ``SettingsResourceScope`` is a :class:`BinaryView` object. In the context \
        of a :class:`BinaryView` the resource identifier is the :class:`BinaryViewType` name. All settings for this type of backing store \
        are saved in the *'Raw'* :class:`BinaryViewType`. This enables the configuration of setting values such that they are available \
        during :class:`BinaryView` creation and initialization.

        :param str resource_id: a unique identifier
        :rtype: None
        """
        if resource_id is None:
            resource_id = ""
        core.BNSettingsSetResourceId(self.handle, resource_id)

    def register_group(self, group, title):
        """
        ``register_group`` registers a group in the schema for this :class:`Settings` instance

        :param str group: a unique identifier
        :param str title: a user friendly name appropriate for UI presentation
        :return: True on success, False on failure.
        :rtype: bool
        :Example:

            >>> Settings().register_group("solver", "Solver")
            True
            >>>
        """
        return core.BNSettingsRegisterGroup(self.handle, group, title)

    def register_setting(self, key, properties):
        """
        ``register_setting`` registers a new setting with this :class:`Settings` instance

        :param str key: a unique setting identifier in the form **'<group>.<name>'**
        :param str properties: a JSON string that describes the setting schema
        :return: True on success, False on failure.
        :rtype: bool
        :Example:

            >>> Settings().register_group("solver", "Solver")
            True
            >>> Settings().register_setting("solver.basicBlockSlicing", '{"description" : "Enable the basic block slicing in the solver.", "title" : "Basic Block Slicing", "default" : true, "type" : "boolean"}')
            True
        """
        return core.BNSettingsRegisterSetting(self.handle, key, properties)

    def contains(self, key):
        """
        ``contains`` determine if a setting identifier exists in the active settings schema

        :param str key: the setting identifier
        :return: True if the identifier exists in this active settings schema, False otherwise
        :rtype: bool
        """
        return core.BNSettingsContains(self.handle, key)

    def is_empty(self):
        """
        ``is_empty`` determine if the active settings schema is empty

        :return: True if the active settings schema is empty, False otherwise
        :rtype: bool
        """
        return core.BNSettingsIsEmpty(self.handle)

    def keys(self):
        """
        ``keys`` retrieve the list of setting identifiers in the active settings schema

        :return: list of setting identifiers
        :rtype: list(str)
        """
        length = ctypes.c_ulonglong()
        result = core.BNSettingsKeysList(self.handle, ctypes.byref(length))
        assert result is not None, "core.BNSettingsKeysList returned None"
        out_list = []
        for i in range(length.value):
            out_list.append(result[i].decode('utf8'))
        core.BNFreeStringList(result, length)
        return out_list

    def query_property_string_list(self, key, property_name):
        """Returns the string-list property ``property_name`` of setting ``key``."""
        length = ctypes.c_ulonglong()
        result = core.BNSettingsQueryPropertyStringList(self.handle, key, property_name, ctypes.byref(length))
        assert result is not None, "core.BNSettingsQueryPropertyStringList returned None"
        out_list = []
        for i in range(length.value):
            out_list.append(result[i].decode('utf8'))
        core.BNFreeStringList(result, length)
        return out_list

    def update_property(self, key, setting_property):
        """Updates a property of an existing setting; returns True on success."""
        return core.BNSettingsUpdateProperty(self.handle, key, setting_property)

    def deserialize_schema(self, schema, scope=SettingsScope.SettingsAutoScope, merge=True):
        """Loads a serialized schema into this instance; returns True on success."""
        return core.BNSettingsDeserializeSchema(self.handle, schema, scope, merge)

    def serialize_schema(self):
        """Returns the active settings schema serialized to a string."""
        return core.BNSettingsSerializeSchema(self.handle)

    def deserialize_settings(self, contents, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNDeserializeSettings(self.handle, contents, view, scope)

    def serialize_settings(self, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSerializeSettings(self.handle, view, scope)

    def reset(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        """Resets setting ``key`` to its default within the given scope."""
        if view is not None:
            view = view.handle
        return core.BNSettingsReset(self.handle, key, view, scope)

    def reset_all(self, view=None, scope=SettingsScope.SettingsAutoScope, schema_only=True):
        """Resets all settings within the given scope."""
        if view is not None:
            view = view.handle
        return core.BNSettingsResetAll(self.handle, view, scope, schema_only)

    # --- typed getters; the final ``None`` argument means "do not report
    # --- which scope supplied the value" (see *_with_scope variants below)

    def get_bool(self, key, view=None):
        if view is not None:
            view = view.handle
        return core.BNSettingsGetBool(self.handle, key, view, None)

    def get_double(self, key, view=None):
        if view is not None:
            view = view.handle
        return core.BNSettingsGetDouble(self.handle, key, view, None)

    def get_integer(self, key, view=None):
        if view is not None:
            view = view.handle
        return core.BNSettingsGetUInt64(self.handle, key, view, None)

    def get_string(self, key, view=None):
        if view is not None:
            view = view.handle
        return core.BNSettingsGetString(self.handle, key, view, None)

    def get_string_list(self, key, view=None):
        if view is not None:
            view = view.handle
        length = ctypes.c_ulonglong()
        result = core.BNSettingsGetStringList(self.handle, key, view, None, ctypes.byref(length))
        assert result is not None, "core.BNSettingsGetStringList returned None"
        out_list = []
        for i in range(length.value):
            out_list.append(result[i].decode('utf8'))
        core.BNFreeStringList(result, length)
        return out_list

    def get_json(self, key, view=None):
        if view is not None:
            view = view.handle
        return core.BNSettingsGetJson(self.handle, key, view, None)

    # --- typed getters that also return the SettingsScope the value came from

    def get_bool_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        result = core.BNSettingsGetBool(self.handle, key, view, ctypes.byref(c_scope))
        return (result, SettingsScope(c_scope.value))

    def get_double_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        result = core.BNSettingsGetDouble(self.handle, key, view, ctypes.byref(c_scope))
        return (result, SettingsScope(c_scope.value))

    def get_integer_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        result = core.BNSettingsGetUInt64(self.handle, key, view, ctypes.byref(c_scope))
        return (result, SettingsScope(c_scope.value))

    def get_string_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        result = core.BNSettingsGetString(self.handle, key, view, ctypes.byref(c_scope))
        return (result, SettingsScope(c_scope.value))

    def get_string_list_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        length = ctypes.c_ulonglong()
        result = core.BNSettingsGetStringList(self.handle, key, view, ctypes.byref(c_scope), ctypes.byref(length))
        assert result is not None, "core.BNSettingsGetStringList returned None"
        out_list = []
        for i in range(length.value):
            out_list.append(result[i].decode('utf8'))
        core.BNFreeStringList(result, length)
        return (out_list, SettingsScope(c_scope.value))

    def get_json_with_scope(self, key, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        c_scope = core.SettingsScopeEnum(scope)
        result = core.BNSettingsGetJson(self.handle, key, view, ctypes.byref(c_scope))
        return (result, SettingsScope(c_scope.value))

    # --- typed setters

    def set_bool(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSettingsSetBool(self.handle, view, scope, key, value)

    def set_double(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSettingsSetDouble(self.handle, view, scope, key, value)

    def set_integer(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSettingsSetUInt64(self.handle, view, scope, key, value)

    def set_string(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSettingsSetString(self.handle, view, scope, key, value)

    def set_string_list(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        length = ctypes.c_ulonglong()
        length.value = len(value)
        string_list = (ctypes.c_char_p * len(value))()
        for i in range(len(value)):
            # Bug fix: encode as UTF-8 for symmetry with the UTF-8 decode used
            # by the getters; the previous 'charmap' codec raised for any
            # string containing non-Latin-1 characters.
            string_list[i] = value[i].encode('utf-8')
        return core.BNSettingsSetStringList(self.handle, view, scope, key, string_list, length)

    def set_json(self, key, value, view=None, scope=SettingsScope.SettingsAutoScope):
        if view is not None:
            view = view.handle
        return core.BNSettingsSetJson(self.handle, view, scope, key, value)
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
import itertools
from fabric.api import env, run, runs_once, sudo, task
from fabric.utils import puts, fastprint
import six
from dockermap.utils import expand_path
from . import cli
from .api import docker_fabric
from .utils.net import get_ip4_address, get_ip6_address
from .utils.output import stdout_result
# Column subsets of the Docker API responses rendered by the list_* tasks below.
IMAGE_COLUMNS = ('Id', 'RepoTags', 'ParentId', 'Created', 'VirtualSize', 'Size')
CONTAINER_COLUMNS = ('Id', 'Names', 'Image', 'Command', 'Ports', 'Status', 'Created')
NETWORK_COLUMNS = ('Id', 'Name', 'Driver', 'Scope')
VOLUME_COLUMNS = ('Name', 'Driver')
def _format_output_table(data_dict, columns, full_ids=False, full_cmd=False, short_image=False):
    """
    Prints the given items as an aligned table on the console.

    :param data_dict: Iterable of dictionaries, one per row.
    :param columns: Keys to extract from each item, in display order.
    :param full_ids: Show full ids instead of the first 12 characters.
    :param full_cmd: Show the full command instead of the first 25 characters.
    :param short_image: Strip the repository prefix from image names.
    """
    def _format_port(port_dict):
        if 'PublicPort' in port_dict and 'IP' in port_dict:
            return '{IP}:{PublicPort}->{PrivatePort}/{Type}'.format(**port_dict)
        return '{PrivatePort}/{Type}'.format(**port_dict)

    def _get_column(item, column):
        data = item.get(column, '')
        if isinstance(data, list):
            if column == 'Ports':
                # Materialize as a list: each cell is iterated twice (width
                # computation and printing), so a lazy map() would already be
                # exhausted on the second pass under Python 3.
                return [_format_port(port) for port in data]
            return data
        if column in ('Id', 'ParentId') and not full_ids:
            return data[:12],
        if column == 'Created':
            return datetime.utcfromtimestamp(data).isoformat(),
        if column == 'Command' and not full_cmd:
            return data[:25],
        if column == 'Image' and short_image:
            __, __, i_name = data.rpartition('/')
            return i_name,
        # six.text_type is unicode on Python 2 and str on Python 3; the bare
        # ``unicode`` builtin used previously raised NameError on Python 3.
        return six.text_type(data),

    def _max_len(col_data):
        if col_data:
            return max(map(len, col_data))
        return 0

    puts('')
    rows = [[[c] for c in columns]]
    rows.extend([_get_column(i, col) for col in columns] for i in data_dict)
    # Materialize the widths so they survive multiple uses.
    col_lens = [max(map(_max_len, c)) for c in zip(*rows)]
    row_format = ' '.join('{{{0}:{1}}}'.format(i, l) for i, l in enumerate(col_lens))
    for row in rows:
        # six.moves.zip_longest replaces itertools.izip_longest, which does
        # not exist on Python 3.
        for c in six.moves.zip_longest(*row, fillvalue=''):
            fastprint(row_format.format(*c), end='\n', flush=False)
    fastprint('', flush=True)
@task
def reset_socat(use_sudo=False):
    """
    Finds and closes all processes of `socat`.

    :param use_sudo: Use `sudo` command. As Docker-Fabric does not run `socat` with `sudo`, this is by default set to
      ``False``. Setting it to ``True`` could unintentionally remove instances from other users.
    :type use_sudo: bool
    """
    output = stdout_result('ps -o pid -C socat', quiet=True)
    # The first line of ``ps`` output is the column header; drop it, and
    # discard blank entries so we never invoke a bare ``kill``.
    pids = [pid.strip() for pid in output.split('\n')[1:] if pid.strip()]
    if not pids:
        puts("No socat processes found.")
        return
    puts("Removing process(es) with id(s) {0}.".format(', '.join(pids)))
    which = sudo if use_sudo else run
    which('kill {0}'.format(' '.join(pids)), quiet=True)
@task
def version():
    """
    Shows version information of the remote Docker service, similar to ``docker version``.
    """
    info = docker_fabric().version()
    # Pad every key so the values line up in a single column.
    key_width = max(map(len, info.keys())) + 1
    puts('')
    for key, value in six.iteritems(info):
        label = ''.join((key, ':'))
        fastprint('{0:{1}} {2}'.format(label, key_width, value), end='\n', flush=False)
    fastprint('', flush=True)
@task
def get_ip(interface_name='docker0'):
    """
    Shows the IP4 address of a network interface.

    :param interface_name: Name of the network interface. Default is ``docker0``.
    :type interface_name: unicode
    """
    address = get_ip4_address(interface_name)
    puts(address)
@task
def get_ipv6(interface_name='docker0', expand=False):
    """
    Shows the IP6 address of a network interface.

    :param interface_name: Name of the network interface. Default is ``docker0``.
    :type interface_name: unicode
    :param expand: Expand the abbreviated IP6 address. Default is ``False``.
    :type expand: bool
    """
    address = get_ip6_address(interface_name, expand=expand)
    puts(address)
@task
def list_images(list_all=False, full_ids=False):
    """
    Lists images on the Docker remote host, similar to ``docker images``.

    :param list_all: Lists all images (e.g. dependencies). Default is ``False``, only shows named images.
    :type list_all: bool
    :param full_ids: Shows the full ids. When ``False`` (default) only shows the first 12 characters.
    :type full_ids: bool
    """
    image_list = docker_fabric().images(all=list_all)
    _format_output_table(image_list, IMAGE_COLUMNS, full_ids=full_ids)
@task
def list_containers(list_all=True, short_image=True, full_ids=False, full_cmd=False):
    """
    Lists containers on the Docker remote host, similar to ``docker ps``.

    :param list_all: Shows all containers, including exited ones. Default is ``True``.
    :type list_all: bool
    :param short_image: Hides the repository prefix for preserving space. Default is ``True``.
    :type short_image: bool
    :param full_ids: Shows the full image ids. When ``False`` (default) only shows the first 12 characters.
    :type full_ids: bool
    :param full_cmd: Shows the full container command. When ``False`` (default) only shows the first 25 characters.
    :type full_cmd: bool
    """
    containers = docker_fabric().containers(all=list_all)
    _format_output_table(containers, CONTAINER_COLUMNS, full_ids, full_cmd, short_image)
@task
def list_networks(full_ids=False):
    """
    Lists networks on the Docker remote host, similar to ``docker network ls``.

    :param full_ids: Shows the full network ids. When ``False`` (default) only shows the first 12 characters.
    :type full_ids: bool
    """
    network_list = docker_fabric().networks()
    _format_output_table(network_list, NETWORK_COLUMNS, full_ids=full_ids)
@task
def list_volumes():
    """
    Lists volumes on the Docker remote host, similar to ``docker volume ls``.
    """
    volume_info = docker_fabric().volumes()
    # The API returns ``None`` instead of an empty list when no volumes exist.
    volume_list = volume_info['Volumes'] or ()
    _format_output_table(volume_list, VOLUME_COLUMNS)
@task
def cleanup_containers(**kwargs):
    """
    Removes all containers that have finished running. Similar to the ``prune`` functionality in newer Docker versions.
    """
    affected = docker_fabric().cleanup_containers(**kwargs)
    if not kwargs.get('list_only'):
        return
    puts('Existing containers:')
    for container_id, container_name in affected:
        fastprint('{0} {1}'.format(container_id, container_name), end='\n')
@task
def cleanup_images(remove_old=False, **kwargs):
    """
    Removes all images that have no name, and that are not references as dependency by any other named image. Similar
    to the ``prune`` functionality in newer Docker versions, but supports more filters.

    :param remove_old: Also remove images that do have a name, but no `latest` tag.
    :type remove_old: bool
    """
    # Honor a host-level tag whitelist from the Fabric environment, unless
    # the caller passed one explicitly.
    keep_tags = env.get('docker_keep_tags')
    if keep_tags is not None:
        kwargs.setdefault('keep_tags', keep_tags)
    affected = docker_fabric().cleanup_images(remove_old=remove_old, **kwargs)
    if not kwargs.get('list_only'):
        return
    puts('Unused images:')
    for image_name in affected:
        fastprint(image_name, end='\n')
@task
def remove_all_containers(**kwargs):
    """
    Stops and removes all containers from the remote. Use with caution outside of a development environment!
    """
    result = docker_fabric().remove_all_containers(**kwargs)
    if not kwargs.get('list_only'):
        return
    # result is a pair; the second element holds the container ids.
    puts('Existing containers:')
    for c_id in result[1]:
        fastprint(c_id, end='\n')
@task
@runs_once
def save_image(image, filename=None):
    """
    Saves a Docker image from the remote to a local file. For performance reasons, uses the Docker
    command line client on the host, generates a gzip-tarball and downloads that.

    :param image: Image name or id.
    :type image: unicode
    :param filename: File name to store the local file. If not provided, will use ``<image>.tar.gz``
      in the current working directory.
    :type filename: unicode
    """
    if filename:
        target = filename
    else:
        target = '{0}.tar.gz'.format(image)
    cli.save_image(image, target)
@task
def load_image(filename, timeout=120):
    """
    Uploads an image from a local file to a Docker remote. Note that this temporarily has to extend
    the service timeout period.

    :param filename: Local file name.
    :type filename: unicode
    :param timeout: Timeout in seconds to set temporarily for the upload.
    :type timeout: int
    """
    c = docker_fabric()
    # Open in binary mode: the file is a (gzip) tarball, not text. Text mode
    # would corrupt the stream (and fails outright on Python 3).
    with open(expand_path(filename), 'rb') as f:
        _timeout = c._timeout
        c._timeout = timeout
        try:
            c.load_image(f)
        finally:
            # Always restore the original client timeout, even on upload failure.
            c._timeout = _timeout
|
|
#!/usr/bin/env python
"""Routine to make it easy to read command line arguments in a keyword=value
style format. Also has support for --help, help=, -h.
lists can be given as e.g.: 1-3;5,6 which becomes: [[1,2,3],[5,6]]
File globbing and expansion of ~ is supported for strings
@filename will read filename and put each line as an element in a list
Available variables:
self.name # name of calling program
self.validkeys # list of allowed keywords given as strings
"""
#
# 15-mar-2003 Created as miriad.py PJT
# 16-apr-2003 added run, keyr, keyi, keya
# 05-mar-2004 Added help comments nemo style NLC
# 16-may-2004 Deleted all the code we don't use for map2 NLC
# 19-feb-2008 Changed keya, keyi, and keyr to not use the
# keypresent function. This allows the programmer to
# specify a default keyword value of nothing. However,
# error checking was added to keyini so that if the user
# tries to input an empty keyword on the command line,
# than an error message will still occur. Also, some cleanups
# to remove extraneous comments, combined the badkeyword
# function with keya, and added the keyf function which works
# the same as keyr.
# 07-may-2008 Cleanup and simplifying the code. Also made a blank keyword
# value return None or False. False is returned for keyb, all
# others return None
# 19-nov-2008 More code cleanup. Also added the write_keyval function and
# added spacing so the help text lines up with show_keyval
# 09-dec-2008 Sort the keywords when printing them
# 04-aug-2010 Added checking for required number of arguments in keyl
# 04-oct-2010 Added the check_existance and check_nonexistance. I may
# move these to optional keywords in keya()
# 20-oct-2010 check_existance and check_nonexistance can be called from
# keya() and keyl(). keyl() also has more smarts about ignoring
# comment lines and is more pythonic now.
# 8-may-2011 keyl() now accepts *,?, and [] shell-globbing now for input
# lists of files, and ~ for home directories. You can also
# make mixed lists of name, wildcards, tildes, and @files.
# 15-july-2011 keyl() will return a blank list instead of None.
# 18-july-2011 _at_file() will skip blank lines from input files
# 08-aug-2011 Just added some comments to various docstrings
# 26-sep-2011 Allow ranges to use dashes for keyl(val='i').
# 7-feb-2012 Two improvements. First, keyl() now allows semicolons to
# define nested lists. Second, writekeys() can format the
# output to 80 characters per line.
# 24-april-2012 show_keyval() will sort output by listing required keywords
# first, then remaining keywords
# 26 April 2012 check_existance() and check_nonexistance() will now allow
# input filenames to be a - or . for sys.stdin/sys.stdout and
# /dev/null. Also sort keywords in writekeys(), reorganized
# functions alphabetically, and deleted keyr().
# 16 Aug 2012 Added new arguments to check for min/max allowed values,
# and allowed options for keywords. Also made error(),
# warning(), check_existance(), and check_nonexistance()
# hidden functions. Lastly, commented out dprintf() since
# it shouldn't be part of this module. debug() statement
# might also disappear at a future date.
# 20 Aug 2012 Switch to raising errors rather than _error() function.
# Ditto for _warning(). Both have deprecation warnings
# printed when used.
# 24 Aug 2012 Reimplemented code as readcmd.py. Removed debug and dprintf,
# changed the way keywords specifications are done (see example
# in __main__).
# 6 Sep 2012 Fixed bug in getbool(). I forgot to return the value. Duh.
# 17 Sep 2012 Fixed bug with reading @files. Also fixed typo in naming
# of _checkMinMax()
# 6 Nov 2012 Sort validkeys
# 20 Nov 2012 Allowed values to have spaces, which should have been
# obvious from the start.
# 18 Dec 2012 Changed so format keyword in getkeys() is a number for line
# length, not just true/false
# 01 Apr 2013 Added ignorecase option to getstr() and getliststr() methods.
# 03 Apr 2013 I think it is fixed now so that 1e4 can be interpreted as
# an integer
import glob,os,re,sys
class ReadCmd(object):
    """Read and validate keyword=value style command line arguments.

    A spec (multi-line string, list of lines, or filename) defines the valid
    keywords, their default values, and per-keyword help text.  A default
    value of ??? marks a keyword as required.  -h, --help, or help=h on the
    command line prints usage and exits.

    Available attributes:
        self.name      # name of calling program
        self.validkeys # sorted list of allowed keywords given as strings
    """
    def __init__(self,spec,head=None):
        """Create ReadCmd object to read commandline with validation

           spec = must be a multi-line string, list of strings, or a single
                  string specifying a filename.
           head = pass header as a string here. Any string here will be
                  prepended to header parsed from spec."""
        pattern0 = r"#(.*)"          # full line comment
        pattern1 = r"(\w+)\s*=(.+)"  # key=value
        pattern2 = r"(\w+)=(.+)"     # key=value on command line
        helpFlag = False             # set to true if -h, --help, help=h on cmd line

        if isinstance(spec,str):
            if os.path.isfile(spec): # read spec from a file
                fp = open(spec,'r')
                speclist = fp.readlines()
                fp.close()
            else:
                speclist = spec.split('\n')
        elif isinstance(spec,list) or isinstance(spec,tuple):
            speclist = list(spec)
        else:
            self._error("TypeError: spec must be string, list, or tuple")

        self.name = os.path.split(sys.argv[0])[1]
        self.args = {}  # maps keyword -> [value, help comment]
        if head is None:
            self.head = [] # will hold head comments on usage
        else:
            self.head = [head]
        for line in speclist: # first read spec file for defaults and help
            if line.strip() == '': # skip blank lines
                continue
            m = re.match(pattern0,line.strip()) # look for comment lines
            if m is not None:
                self.head.append(m.group(1).strip())
            else:
                m = re.match(pattern1,line.strip())
                if m is None: # didn't match
                    self._error("SyntaxError: Cannot read '%s' from spec" %line)
                else:
                    key = m.group(1).strip()
                    junk = m.group(2).strip()
                    idx = junk.find('#')
                    if idx == -1: # no comment
                        value = junk
                        comment = ""
                    else:
                        n = junk.count('#')
                        if n == 1:
                            tmp = junk[:idx].strip()
                            if len(tmp) == 0: # no default value given
                                self._error("SyntaxError: Cannot read '%s' from spec" %line)
                            value = tmp
                            comment = junk[idx+1:].strip()
                        else: # n > 1
                            tmp = junk[:idx].strip()
                            if len(tmp) == 0: # first # sign is the value
                                value = '#'
                            else:
                                value = tmp
                            comment = junk[idx+1:].strip()
                    # was: self.args.has_key(key) -- removed in Python 3
                    if key in self.args:
                        self._error("KeyError: Duplicate keyword '%s' in spec" %key)
                    self.args[key] = [value,comment]
        # was: keys() followed by .sort(); dict views have no sort() in Python 3
        self.validkeys = sorted(self.args.keys()) # valid keywords

        if len(sys.argv) > 1: # stuff given on command line
            junk = {} # will hold all keys read from command line
            # now loop over command line and parse key=value pairs
            for tmp in sys.argv[1:]:
                if tmp in ['-h','--help','help=h']: # user wants help
                    helpFlag = True
                else:
                    m = re.match(pattern2,tmp)
                    if m is None:
                        self._error("SyntaxError: Cannot read '%s'" %tmp)
                    key = m.group(1)
                    value = m.group(2)
                    if key in junk:
                        self._error("KeyError: Duplicate keyword '%s'" %key)
                    junk[key] = value
            # now substitute command line keywords for defaults
            for key in junk:
                if key not in self.validkeys:
                    self._error("KeyError: Unknown keyword %s" %key)
                self.args[key][0] = junk[key] # replace value, but not comment
        if helpFlag:
            print(self)  # was a Python 2 print statement
            sys.exit()
        self._checkRequired()

    def getbool(self,key):
        """Return keyword value as a boolean True/False. A value of None returns
           None.

           Can understand True,False,1,0,yes,no, and None. Any capitalization
           accepted (except for None).

           key = keyword given as a string"""
        self._checkKey(key)
        temp = self.args[key][0]
        try:
            value = self._convertBool(temp)
            return value
        except ValueError:
            self._error("ValueError: %s is not a valid boolean for keyword %s" %(temp,key))

    def getfloat(self,key,min=None,max=None,option=None):
        """Return keyword value as a float. A value of None returns None.

           key    = keyword given as a string
           min    = check for minimum value
           max    = check for maximum value
           option = list/tuple of allowed values"""
        self._checkKey(key)
        value = self.args[key][0]
        if value == 'None':
            return None
        else:
            try:
                tmp = float(value)
                if min is not None or max is not None:
                    self._checkMinMax(key,tmp,min,max)
                if option is not None:
                    self._checkOption(key,tmp,option)
                return tmp
            except ValueError:
                self._error("ValueError: %s is not a valid float for keyword %s" %(value,key))

    def getint(self,key,min=None,max=None,option=None):
        """Return keyword value as integer. A value of None returns None.

           key    = keyword given as a string
           min    = check for minimum value
           max    = check for maximum value
           option = list/tuple of allowed values"""
        self._checkKey(key)
        value = self.args[key][0]
        if value == 'None':
            return None
        else:
            try:
                # go through float() first so that e.g. 1e4 parses as an integer
                tmp = float(value)
                if tmp%1 != 0:
                    raise ValueError
                else:
                    tmp = int(tmp)
                    if min is not None or max is not None:
                        self._checkMinMax(key,tmp,min,max)
                    if option is not None:
                        self._checkOption(key,tmp,option)
                    return tmp
            except ValueError:
                self._error("ValueError: %s is not a valid integer for keyword %s" %(value,key))

    def getkeys(self,comment='#',format=None):
        """Make a short string of all keyword=values.

           Can format for 80 chars per line and also add a comment symbol
           at the beginning of each line

           comment = comment character for each line (can be None)
           format  = Can set to a number to limit line length to that no. of
                     chars"""
        keys = self.validkeys
        outstr = ""
        if comment is not None and comment != '':
            outstr += "%s " %(str(comment))
        outstr += "%s " %self.name
        if format is not None:
            maxlen = format
        else:
            maxlen = 1e6  # effectively no line-length limit
        n = len(outstr)
        for k in keys:
            tmp = '%s=%s ' %(k,self.args[k][0])
            n += len(tmp)
            if format is not None and n > maxlen:
                outstr += "\n"
                if comment is not None:
                    outstr += "%s " %(str(comment))
                n = len(tmp) + 2
            outstr += tmp
        outstr += "\n"
        return outstr

    def getlistbool(self,key,length=None):
        """Return keyword value as a list of booleans. A value of None returns
           an empty list.

           key    = keyword given as a string
           length = int/list/tuple of allowed number of elements in list"""
        out = self._getlistbase(key,type=bool,length=length)
        return out

    def getlistfloat(self,key,length=None,min=None,max=None,option=None):
        """Return keyword value as a list of floats. A value of None returns an
           empty list.

           key    = keyword given as a string
           length = int/list/tuple of allowed number of elements in list
           min    = check for minimum value
           max    = check for maximum value
           option = list/tuple of allowed values"""
        out = self._getlistbase(key,type=float,length=length,min=min,max=max,
                                option=option)
        return out

    def getlistint(self,key,length=None,min=None,max=None,option=None):
        """Return keyword value as a list of integers. A value of None returns
           an empty list.

           key    = keyword given as a string
           length = int/list/tuple of allowed number of elements in list
           min    = check for minimum value
           max    = check for maximum value
           option = list/tuple of allowed values"""
        out = self._getlistbase(key,type=int,length=length,min=min,max=max,
                                option=option)
        return out

    def getliststr(self,key,comment='#',exist=None,length=None,option=None,
                   ignorecase=False):
        """Return keyword value as a list of strings. A value of None returns
           an empty list.

           key     = keyword given as a string
           comment = String character for comment lines to ignore in an @file
           exist   = Can check to make sure all input files exist. Default is
                     to not check. Note, if you give an @file, then the @file
                     will always be checked for existance no matter what.
           length  = int/list/tuple of allowed number of elements in list
           option  = list/tuple of allowed values (for each element)
           ignorecase = boolean on whether to ignore differences between
                        upper/lower case when checking options"""
        out = self._getlistbase(key,type=str,comment=comment,exist=exist,
                                length=length,option=option,ignorecase=ignorecase)
        return out

    def getstr(self,key,exist=None,option=None,ignorecase=False):
        """Return keyword value as a string. A value of None returns None.

           key    = keyword given as a string
           exist  = Assume keyword references a filename, check for existance
                    or not (boolean)
           option = str/list/tuple of allowed values.
           ignorecase = boolean on whether to ignore differences between
                        upper/lower case when checking options"""
        self._checkKey(key)
        value = self.args[key][0]
        if value == 'None':
            return None
        if exist is True: # filename must exist
            self._checkExist(value)
        elif exist is False: # filename must NOT exist
            self._checkNotExist(value)
        if option is not None:
            self._checkOption(key,value,option,ignorecase)
        return value

    def __str__(self):
        """Print out the current keywords, their values and a help message, if
           one is present."""
        key1 = [] # keys with missing required arguments
        key2 = [] # keys with optional/default arguments
        maxlength = 0
        # was: iteritems() -- removed in Python 3
        for k,v in self.args.items():
            n = len(k+v[0])
            if n > maxlength:
                maxlength = n
            if v[0] == '???':
                key1.append(k)
            else:
                key2.append(k)
        key1.sort()
        key2.sort()
        output = ""
        for line in self.head:
            output += "%s\n" %line
        output += "------------------------------------------------------------\n"
        for k in key1: # loop over required keywords
            junk = 3 + maxlength - len(k) - len(self.args[k][0])
            space = ' '*junk
            output += "%s=%s%s%s\n" %(k,self.args[k][0],space,self.args[k][1])
        for k in key2: # loop over remaining keywords
            junk = 3 + maxlength - len(k) - len(self.args[k][0])
            space = ' '*junk
            output += "%s=%s%s%s\n" %(k,self.args[k][0],space,self.args[k][1])
        output += "------------------------------------------------------------"
        return output

    def _atFile(self,filename,comment):
        """Tries to read an at-file, a file that contains keyword values.
           Specified by key=@filename. It converts the file to a
           string of comma separated values. Blank lines are skipped.

           filename - string name of file. Assumes @ has been stripped off
           comment  - character to use on lines that should be ignored as comments"""
        self._checkExist(filename)
        fp = open(filename,'r')
        tmp = [line.partition(comment)[0].strip() for line in fp] # no comments
        data = [a for a in tmp if len(a) > 0] # skip blank lines
        fp.close()
        return data

    def _checkExist(self,*files):
        """Given an input file name as a string or a list of filenames will check to
           make sure each file exists. If not, a fatal error will occur using
           error()

           If filename is a dash or a period, does not check for existance. This
           is because I allow dashes to be sys.stdin/sys.stdout, and period to
           be /dev/null"""
        if len(files) == 0:
            self._error("IndexError: You must pass at least one argument to _checkExist()")
        for f in files:
            if isinstance(f,str): # check a single filename
                if f == '-': # give a pass since it is sys.stdin
                    pass
                elif not os.path.exists(f):
                    t = os.path.split(f)[1]
                    self._error("IOError: Required file %s is missing" %t)
            elif isinstance(f,(list,tuple)): # a list or tuple
                for a in f:
                    if a == '-': # give a pass since it is sys.stdin
                        pass
                    elif not os.path.exists(a):
                        t = os.path.split(a)[1]
                        self._error("IOError: Required file %s is missing" %t)
            else:
                self._error("TypeError: _checkExist() can only check types str,list, and tuple")

    def _checkKey(self,key):
        """Check to see if key is part of self.validkeys."""
        if key in self.validkeys:
            pass
        else:
            self._error("KeyError: '%s' is not a valid keyword" %key)

    def _checkMinMax(self,key,value,minval=None,maxval=None):
        """Check to see if value is within bounds set by minval and maxval"""
        if minval is not None:
            if value < minval:
                self._error("ValueError: %s is < minimum value of %f" %(key,minval))
        if maxval is not None:
            if value > maxval:
                self._error("ValueError: %s is > maximum value of %f" %(key,maxval))

    def _checkNotExist(self,*files):
        """Given an input file list, will check to make sure each files does NOT
           exist. If any one of the files exists, a fatal error will occur
           using error()

           If filename is a dash or a period, does not check for existance. This
           is because I allow dashes to be sys.stdin/sys.stdout, and period to
           be /dev/null"""
        if len(files) == 0:
            self._error("IndexError: You must pass at least one argument to _checkNotExist()")
        for f in files:
            if isinstance(f,str): # check a single filename
                if f in ('.','-'): # give these a pass as described in docstring
                    pass
                elif os.path.exists(f):
                    t = os.path.split(f)[1]
                    self._error("IOError: File %s already exists" %t)
            elif isinstance(f,(list,tuple)): # a list
                for a in f:
                    if a in ('.','-'): # give these a pass as described in docstring
                        pass
                    elif os.path.exists(a):
                        t = os.path.split(a)[1]
                        self._error("IOError: File %s already exists" %t)
            else:
                self._error("TypeError: _checkNotExist can only check types str,list, and tuple")

    def _checkOption(self,key,value,option,ignorecase=False):
        """Check whether a value is among valid options"""
        if ignorecase is True:
            temp = [a.lower() for a in option]
            if value.lower() in temp:
                pass
            else:
                self._error("IndexError: Allowed options for key %s are %s" %(key,str(option)))
        else:
            if value in option:
                pass
            else:
                self._error("IndexError: Allowed options for key %s are %s" %(key,str(option)))

    def _checkRequired(self):
        """Checks to see that no blank values exist"""
        usage = "Usage: %s " %self.name
        missing = 0 # number of missing keywords
        extraFlag = False # set to true if >= 1 keyword is not missing
        for k in self.validkeys:
            if self.args[k][0] == '???': # ??? means a required value
                usage = usage + "%s=??? " %k
                missing += 1
            else:
                extraFlag = True
        if missing > 0:
            if extraFlag is True:
                usage = usage + "..."
            self._error("KeyError: Missing Keywords: %s" %usage)

    def _convertBool(self,value):
        """Convert value to a Boolean. Accepts True, False, 1,0, yes, no, and
           None. A value of None returns None."""
        if value == 'None':
            return None
        if value.lower() in ('1','yes','true','y'):
            return True
        elif value.lower() in ('0','no','false','n'):
            return False
        else:
            self._error("ValueError: '%s' is not a valid boolean" %value)

    def _convertValues(self,value,outlist,type,exist,min,max):
        """Helper function for getlist() to convert values in list to boolean,
           string, integer, or float"""
        itemlist = []
        if type is int:
            for s in outlist:
                try:
                    # list() forces the ValueError here under Python 3's lazy map
                    temp = list(map(int,s.split('-'))) # parse 1-4 as 1,2,3,4
                except ValueError:
                    self._error("ValueError: %s is not a valid range of integers" %s)
                start = temp[0]
                stop = temp[-1]
                if start > stop:
                    self._error("ValueError: range minimum (%d) > maximum (%d)" %(start,stop))
                # range() is lazy in Python 3; make a list before concatenating
                itemlist = itemlist + list(range(start,stop+1))
        elif type is float:
            try:
                itemlist = list(map(float,outlist))
            except ValueError:
                self._error("ValueError: %s is not a valid list of floats" %value)
        elif type is str:
            itemlist = outlist
            if exist is True: # make sure files exist in the list
                self._checkExist(outlist)
            elif exist is False: # make sure files don't exist in the list
                self._checkNotExist(outlist)
        elif type is bool:
            try:
                itemlist = list(map(self._convertBool,outlist))
            except ValueError:
                self._error("ValueError: %s is not a valid list of booleans" %value)
        else:
            self._error("TypeError: type for getlist() must be str,int, or float")
        if min is not None or max is not None:
            for tmp in itemlist:
                self._checkMinMax(value,tmp,min,max)
        return itemlist

    def _error(self,msg):
        """Print out an error message to screen and quit"""
        sys.stderr.write("### %s\n" %msg)
        sys.exit()

    def _getlistbase(self,key,type=str,comment='#',min=None,max=None,
                     option=None,length=None,exist=None,ignorecase=False):
        """Return keyword value as a list. A value of None returns an empty list.

           key     = keyword given as a string
           type    = Can be either bool, float, int, or str (for boolean,
                     float, integer, or string)
           comment = String character for comment lines to ignore in an @file
           min     = check for minimum value (for each element)
           max     = check for maximum value (for each element)
           option  = list/tuple of allowed values (for each element)
           length  = int/list/tuple of allowed number of elements in list
           exist   = Can check to make sure all input files exist. Default is
                     to not check. Note, if you give an @file, then the @file
                     will always be checked for existance no matter what."""
        self._checkKey(key)
        value = self.args[key][0]
        outlist = []
        if value == 'None':
            return outlist
        for alist in value.split(';'): # split by semicolon for nested lists
            blah = []
            for junk in alist.split(','): # loop over comma-separated list
                if junk[0] == '@': # read from atfile
                    temp = self._atFile(junk[1:],comment)
                    blah = blah + temp
                else: # try file globbing
                    temp = glob.glob(junk)
                    if len(temp) == 0: # try to expand ~
                        temp = os.path.expanduser(junk)
                        if temp == junk: # no match so add exact input
                            blah.append(junk)
                        else:
                            # bug fix: temp is a single string here, so it must be
                            # appended; list + str concatenation raised TypeError
                            blah.append(temp)
                    else:
                        blah = blah + temp
            blah = self._convertValues(value,blah,type,exist,min,max)
            if option is not None:
                for value in blah:
                    self._checkOption(key,value,option,ignorecase)
            outlist.append(blah)
        if len(outlist) == 1: # only one nested list, so removing nesting
            outlist = outlist[0]
        if length is not None:
            nval = len(outlist)
            if isinstance(length,int):
                if nval != length:
                    if nval == 1:
                        self._error("IndexError: key %s should have 1 element" %(key))
                    else:
                        self._error("IndexError: key %s should have %d elements" %(key,length))
            elif isinstance(length,list) or isinstance(length,tuple):
                if nval not in length:
                    self._error("IndexError: key %s should have %s elements" %(key,str(length)))
            else:
                self._error("IndexError: length parameter for key %s should be int/list/tuple" %key)
        return outlist
if __name__ == "__main__":
    # Demo / self-test: parse a small spec and echo the parsed keywords.
    spec = """
    # Compute polarization vectors from a sharp_combine map
    in = ??? # Input combine.fits
    out = ??? # Output file of vectors
    sigma = 3 # p/dp cutoff for vectors
    skip = 4 # Only plot every ith pixel (since we oversample 4x)
    offset = 0,0 # Offset in x,y for start of vectors
    debias = False # Debias Polarizations (Ricean correction)"""
    bob = ReadCmd(spec)
    inFile = bob.getstr("in")
    outFile = bob.getstr("out")
    sigma = bob.getfloat("sigma")
    skip = bob.getint("skip")
    offset = bob.getlistint("offset",length=2)
    debias = bob.getbool("debias")
    # was Python 2 print statements; use print() and write() so this runs on Python 3
    print(bob)
    sys.stdout.write(bob.getkeys(format=80))
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common credentials classes and constructors."""
from __future__ import print_function
import contextlib
import datetime
import json
import os
import threading
import warnings
import httplib2
import oauth2client
import oauth2client.client
from oauth2client import service_account
from oauth2client import tools # for gflags declarations
import six
from six.moves import http_client
from six.moves import urllib
from apitools.base.py import exceptions
from apitools.base.py import util
# App Engine does not support ctypes which are required for the
# monotonic time used in fasteners. Conversely, App Engine does
# not support colocated concurrent processes, so process locks
# are not needed.
try:
import fasteners
_FASTENERS_AVAILABLE = True
except ImportError as import_error:
server_env = os.environ.get('SERVER_SOFTWARE', '')
if not (server_env.startswith('Development') or
server_env.startswith('Google App Engine')):
raise import_error
_FASTENERS_AVAILABLE = False
# Note: we try the oauth2client imports two ways, to accommodate layout
# changes in oauth2client 2.0+. We can remove these once we no longer
# support oauth2client < 2.0.
#
# pylint: disable=wrong-import-order,ungrouped-imports
try:
from oauth2client.contrib import gce
except ImportError:
from oauth2client import gce
try:
from oauth2client.contrib import multiprocess_file_storage
_NEW_FILESTORE = True
except ImportError:
_NEW_FILESTORE = False
try:
from oauth2client.contrib import multistore_file
except ImportError:
from oauth2client import multistore_file
try:
import gflags
FLAGS = gflags.FLAGS
except ImportError:
FLAGS = None
# Public API of this module.
__all__ = [
    'CredentialsFromFile',
    'GaeAssertionCredentials',
    'GceAssertionCredentials',
    'GetCredentials',
    'GetUserinfo',
    'ServiceAccountCredentialsFromFile',
]

# Lock when accessing the cache file to avoid resource contention.
cache_file_lock = threading.Lock()
def SetCredentialsCacheFileLock(lock):
    """Replace the module-level credentials cache-file lock with ``lock``."""
    global cache_file_lock  # pylint: disable=global-statement
    cache_file_lock = lock
# List of additional methods we use when attempting to construct
# credentials. Users can register their own methods here, which we try
# before the defaults. Each entry is a callable with the signature
# (client_info, **kwds) -> Credentials or None.
_CREDENTIALS_METHODS = []
def _RegisterCredentialsMethod(method, position=None):
    """Register a new method for fetching credentials.

    This new method should be a function with signature:
      client_info, **kwds -> Credentials or None
    This method can be used as a decorator, unless position needs to
    be supplied.

    Note that method must *always* accept arbitrary keyword arguments.

    Args:
      method: New credential-fetching method.
      position: (default: None) Where in the list of methods to
          add this; if None, we append. In all but rare cases,
          this should be either 0 or None.

    Returns:
      method, for use as a decorator.
    """
    if position is None:
        # Appending is equivalent to inserting at the current end.
        _CREDENTIALS_METHODS.append(method)
    else:
        # Clamp to the list bounds so out-of-range positions append.
        _CREDENTIALS_METHODS.insert(min(position, len(_CREDENTIALS_METHODS)),
                                    method)
    return method
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
                   credentials_filename=None,
                   api_key=None,  # pylint: disable=unused-argument
                   client=None,  # pylint: disable=unused-argument
                   oauth2client_args=None,
                   **kwds):
    """Attempt to get credentials, using an oauth dance as the last resort."""
    normalized = util.NormalizeScopes(scopes)
    client_info = {
        'client_id': client_id,
        'client_secret': client_secret,
        'scope': ' '.join(sorted(normalized)),
        'user_agent': user_agent or '%s-generated/0.1' % package_name,
    }
    # User-registered methods run first, in registration order.
    for method in _CREDENTIALS_METHODS:
        found = method(client_info, **kwds)
        if found is not None:
            return found
    # Fall back to the on-disk token cache / oauth dance.
    credentials_filename = credentials_filename or os.path.expanduser(
        '~/.apitools.token')
    found = CredentialsFromFile(credentials_filename, client_info,
                                oauth2client_args=oauth2client_args)
    if found is not None:
        return found
    raise exceptions.CredentialsError('Could not create valid credentials')
def ServiceAccountCredentialsFromFile(filename, scopes, user_agent=None):
    """Use the credentials in filename to create a token for scopes.

    Args:
      filename: Path to a JSON service-account keyfile (~ is expanded).
      scopes: Scopes to request for the returned credentials.
      user_agent: Optional user agent to attach to the credentials
          (oauth2client >= 2.0.0 path only).

    Returns:
      A service-account credentials object.

    Raises:
      exceptions.CredentialsError: If the keyfile is not a service-account
          keyfile (oauth2client < 2.0.0 path only).
    """
    filename = os.path.expanduser(filename)
    # We have two options, based on our version of oauth2client.
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare. It distinguishes the 1.x / 2.x split correctly, but
    # would misorder hypothetical versions such as '1.10.0' -- confirm this
    # stays acceptable if oauth2client pinning changes.
    if oauth2client.__version__ > '1.5.2':
        # oauth2client >= 2.0.0
        credentials = (
            service_account.ServiceAccountCredentials.from_json_keyfile_name(
                filename, scopes=scopes))
        if credentials is not None:
            if user_agent is not None:
                credentials.user_agent = user_agent
        return credentials
    else:
        # oauth2client < 2.0.0
        with open(filename) as keyfile:
            service_account_info = json.load(keyfile)
        account_type = service_account_info.get('type')
        if account_type != oauth2client.client.SERVICE_ACCOUNT:
            raise exceptions.CredentialsError(
                'Invalid service account credentials: %s' % (filename,))
        # pylint: disable=protected-access
        credentials = service_account._ServiceAccountCredentials(
            service_account_id=service_account_info['client_id'],
            service_account_email=service_account_info['client_email'],
            private_key_id=service_account_info['private_key_id'],
            private_key_pkcs8_text=service_account_info['private_key'],
            scopes=scopes, user_agent=user_agent)
        # pylint: enable=protected-access
        return credentials
def ServiceAccountCredentialsFromP12File(
        service_account_name, private_key_filename, scopes, user_agent):
    """Create a new credential from the named .p12 keyfile.

    Args:
      service_account_name: Service account identifier to sign as.
      private_key_filename: Path to the .p12 private key (~ is expanded).
      scopes: Scopes to request for the returned credentials.
      user_agent: User agent to attach to the credentials.

    Returns:
      A service-account (oauth2client >= 2.0.0) or signed-JWT-assertion
      (oauth2client < 2.0.0) credentials object.
    """
    private_key_filename = os.path.expanduser(private_key_filename)
    scopes = util.NormalizeScopes(scopes)
    # NOTE(review): lexicographic string comparison, not a semantic version
    # compare; correct for the 1.x / 2.x split, fragile for hypothetical
    # versions such as '1.10.0'.
    if oauth2client.__version__ > '1.5.2':
        # oauth2client >= 2.0.0
        credentials = (
            service_account.ServiceAccountCredentials.from_p12_keyfile(
                service_account_name, private_key_filename, scopes=scopes))
        if credentials is not None:
            credentials.user_agent = user_agent
        return credentials
    else:
        # oauth2client < 2.0.0
        with open(private_key_filename, 'rb') as key_file:
            return oauth2client.client.SignedJwtAssertionCredentials(
                service_account_name, key_file.read(), scopes,
                user_agent=user_agent)
def _GceMetadataRequest(relative_url, use_metadata_ip=False):
    """Request the given url from the GCE metadata service."""
    if use_metadata_ip:
        base_url = os.environ.get('GCE_METADATA_IP', '169.254.169.254')
    else:
        base_url = os.environ.get(
            'GCE_METADATA_ROOT', 'metadata.google.internal')
    url = 'http://' + base_url + '/computeMetadata/v1/' + relative_url
    # Extra header requirement can be found here:
    # https://developers.google.com/compute/docs/metadata
    headers = {'Metadata-Flavor': 'Google'}
    # Build an opener with an empty ProxyHandler so configured proxies are
    # bypassed for the metadata endpoint.
    opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
    try:
        response = opener.open(urllib.request.Request(url, headers=headers))
    except urllib.error.URLError as e:
        raise exceptions.CommunicationError(
            'Could not reach metadata service: %s' % e.reason)
    return response
class GceAssertionCredentials(gce.AppAssertionCredentials):
"""Assertion credentials for GCE instances."""
    def __init__(self, scopes=None, service_account_name='default', **kwds):
        """Initializes the credentials instance.

        Args:
            scopes: The scopes to get. If None, whatever scopes that are
                available to the instance are used.
            service_account_name: The service account to retrieve the scopes
                from.
            **kwds: Additional keyword args. ``cache_filename``, if present,
                enables the on-disk scope cache.
        """
        # If there is a connectivity issue with the metadata server,
        # detection calls may fail even if we've already successfully
        # identified these scopes in the same execution. However, the
        # available scopes don't change once an instance is created,
        # so there is no reason to perform more than one query.
        self.__service_account_name = service_account_name
        cached_scopes = None
        cache_filename = kwds.get('cache_filename')
        if cache_filename:
            cached_scopes = self._CheckCacheFileForMatch(
                cache_filename, scopes)
        # Only hit the metadata server on a cache miss.
        scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)
        if cache_filename and not cached_scopes:
            self._WriteCacheFile(cache_filename, scopes)
        # We check the scopes above, but don't need them again after
        # this point. Newer versions of oauth2client let us drop them
        # here, but since we support older versions as well, we just
        # catch and squelch the warning.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            super(GceAssertionCredentials, self).__init__(scope=scopes, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
def _CheckCacheFileForMatch(self, cache_filename, scopes):
"""Checks the cache file to see if it matches the given credentials.
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None.
"""
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)) if scopes else None,
'svc_acct_name': self.__service_account_name,
}
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cached_creds_str = cache_file.LockedRead()
if not cached_creds_str:
return None
cached_creds = json.loads(cached_creds_str)
if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
if creds['scopes'] in (None, cached_creds['scopes']):
return cached_creds['scopes']
except KeyboardInterrupt:
raise
except: # pylint: disable=bare-except
# Treat exceptions as a cache miss.
pass
def _WriteCacheFile(self, cache_filename, scopes):
"""Writes the credential metadata to the cache file.
This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
"""
# Credentials metadata dict.
creds = {'scopes': sorted(list(scopes)),
'svc_acct_name': self.__service_account_name}
creds_str = json.dumps(creds)
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cache_file.LockedWrite(creds_str)
except KeyboardInterrupt:
raise
except: # pylint: disable=bare-except
# Treat exceptions as a cache miss.
pass
def _ScopesFromMetadataServer(self, scopes):
"""Returns instance scopes based on GCE metadata server."""
if not util.DetectGce():
raise exceptions.ResourceUnavailableError(
'GCE credentials requested outside a GCE instance')
if not self.GetServiceAccount(self.__service_account_name):
raise exceptions.ResourceUnavailableError(
'GCE credentials requested but service account '
'%s does not exist.' % self.__service_account_name)
if scopes:
scope_ls = util.NormalizeScopes(scopes)
instance_scopes = self.GetInstanceScopes()
if scope_ls > instance_scopes:
raise exceptions.CredentialsError(
'Instance did not have access to scopes %s' % (
sorted(list(scope_ls - instance_scopes)),))
else:
scopes = self.GetInstanceScopes()
return scopes
def GetServiceAccount(self, account):
relative_url = 'instance/service-accounts'
response = _GceMetadataRequest(relative_url)
response_lines = [line.rstrip('/\n\r')
for line in response.readlines()]
return account in response_lines
def GetInstanceScopes(self):
relative_url = 'instance/service-accounts/{0}/scopes'.format(
self.__service_account_name)
response = _GceMetadataRequest(relative_url)
return util.NormalizeScopes(scope.strip()
for scope in response.readlines())
# pylint: disable=arguments-differ
def _refresh(self, do_request):
"""Refresh self.access_token.
This function replaces AppAssertionCredentials._refresh, which
does not use the credential store and is therefore poorly
suited for multi-threaded scenarios.
Args:
do_request: A function matching httplib2.Http.request's signature.
"""
# pylint: disable=protected-access
oauth2client.client.OAuth2Credentials._refresh(self, do_request)
# pylint: enable=protected-access
def _do_refresh_request(self, unused_http_request):
"""Refresh self.access_token by querying the metadata server.
If self.store is initialized, store acquired credentials there.
"""
relative_url = 'instance/service-accounts/{0}/token'.format(
self.__service_account_name)
try:
response = _GceMetadataRequest(relative_url)
except exceptions.CommunicationError:
self.invalid = True
if self.store:
self.store.locked_put(self)
raise
content = response.read()
try:
credential_info = json.loads(content)
except ValueError:
raise exceptions.CredentialsError(
'Could not parse response as JSON: %s' % content)
self.access_token = credential_info['access_token']
if 'expires_in' in credential_info:
expires_in = int(credential_info['expires_in'])
self.token_expiry = (
datetime.timedelta(seconds=expires_in) +
datetime.datetime.utcnow())
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
def to_json(self):
# OAuth2Client made gce.AppAssertionCredentials unserializable as of
# v3.0, but we need those credentials to be serializable for use with
# this library, so we use AppAssertionCredentials' parent's to_json
# method.
# pylint: disable=bad-super-call
return super(gce.AppAssertionCredentials, self).to_json()
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
kwargs = {}
if 'cache_filename' in data.get('kwargs', []):
kwargs['cache_filename'] = data['kwargs']['cache_filename']
# Newer versions of GceAssertionCredentials don't have a "scope"
# attribute.
scope_list = None
if 'scope' in data:
scope_list = [data['scope']]
credentials = GceAssertionCredentials(scopes=scope_list, **kwargs)
if 'access_token' in data:
credentials.access_token = data['access_token']
if 'token_expiry' in data:
credentials.token_expiry = datetime.datetime.strptime(
data['token_expiry'], oauth2client.client.EXPIRY_FORMAT)
if 'invalid' in data:
credentials.invalid = data['invalid']
return credentials
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
# TODO(craigcitro): Currently, we can't even *load*
# `oauth2client.appengine` without being on appengine, because of how
# it handles imports. Fix that by splitting that module into
# GAE-specific and GAE-independent bits, and guarding imports.
class GaeAssertionCredentials(oauth2client.client.AssertionCredentials):

    """Assertion credentials for Google App Engine apps."""

    def __init__(self, scopes, **kwds):
        """Initializes GAE assertion credentials.

        Args:
          scopes: The scopes to request.
          **kwds: Additional keyword args for AssertionCredentials.

        Raises:
          exceptions.ResourceUnavailableError: if not on App Engine.
        """
        if not util.DetectGae():
            # Bug fix: this check is for App Engine (DetectGae), but the
            # message previously claimed "GCE" — a copy-paste error from
            # the GCE credentials class.
            raise exceptions.ResourceUnavailableError(
                'GAE credentials requested outside a GAE instance')
        self._scopes = list(util.NormalizeScopes(scopes))
        super(GaeAssertionCredentials, self).__init__(None, **kwds)

    @classmethod
    def Get(cls, *args, **kwds):
        """Constructs credentials; returns None on any apitools Error."""
        try:
            return cls(*args, **kwds)
        except exceptions.Error:
            return None

    @classmethod
    def from_json(cls, json_data):
        """Reconstructs credentials from their JSON serialization."""
        data = json.loads(json_data)
        return GaeAssertionCredentials(data['_scopes'])

    def _refresh(self, _):
        """Refresh self.access_token.

        Args:
          _: (ignored) A function matching httplib2.Http.request's signature.
        """
        # pylint: disable=import-error
        from google.appengine.api import app_identity
        try:
            token, _ = app_identity.get_access_token(self._scopes)
        except app_identity.Error as e:
            raise exceptions.CredentialsError(str(e))
        self.access_token = token

    def sign_blob(self, blob):
        """Cryptographically sign a blob (of bytes).

        This method is provided to support a common interface; signing
        is not implemented for these credentials.

        Args:
          blob: bytes, Message to be signed.

        Raises:
          NotImplementedError, always.
        """
        # Bug fix: the message previously referred to "Compute Engine"
        # inside this App Engine class (copy-paste error).
        raise NotImplementedError(
            'GaeAssertionCredentials cannot sign blobs')
def _GetRunFlowFlags(args=None):
    """Build the flags namespace for oauth2client's run_flow.

    Parses `args` with argparse (via tools.argparser) and then overlays
    any matching gflags values so both flag systems can coexist.
    """
    # gsutil can run without argparse when it brings its own credentials;
    # importing lazily lets that case fail only if this path is reached.
    # TODO(craigcitro): Move this import back to the top when we drop
    # python 2.6 support (eg when gsutil does).
    import argparse

    parser = argparse.ArgumentParser(parents=[tools.argparser])
    flags, _ = parser.parse_known_args(args=args)

    # Overlay `gflags` values on top of the argparse result.
    for attr in ('auth_host_name', 'auth_host_port'):
        if hasattr(FLAGS, attr):
            setattr(flags, attr, getattr(FLAGS, attr))
    if hasattr(FLAGS, 'auth_local_webserver'):
        flags.noauth_local_webserver = not FLAGS.auth_local_webserver
    return flags
# TODO(craigcitro): Switch this from taking a path to taking a stream.
def CredentialsFromFile(path, client_info, oauth2client_args=None):
    """Read credentials from a file.

    Looks up stored credentials keyed by client id, user agent and scopes;
    if none are found (or they are invalid), runs the interactive OAuth
    flow, retrying up to 20 times, and persists the result.

    Args:
      path: Path to the credential storage file.
      client_info: Dict with 'user_agent', 'scope' and 'client_id' entries
          (also passed wholesale to OAuth2WebServerFlow).
      oauth2client_args: Optional argv list for the run_flow flag parser.

    Returns:
      Credentials from the store, or freshly-minted ones; None if every
      flow attempt failed without a communication error.
    """
    user_agent = client_info['user_agent']
    # 'scope' may be a sequence; collapse it into one storage-key string.
    scope_key = client_info['scope']
    if not isinstance(scope_key, six.string_types):
        scope_key = ':'.join(scope_key)
    storage_key = client_info['client_id'] + user_agent + scope_key

    # _NEW_FILESTORE is a module-level switch between the multiprocess-safe
    # store and the legacy multistore_file backend.
    if _NEW_FILESTORE:
        credential_store = multiprocess_file_storage.MultiprocessFileStorage(
            path, storage_key)
    else:
        credential_store = multistore_file.get_credential_storage_custom_string_key(  # noqa
            path, storage_key)
    # NOTE(review): forces the non-webserver flow whenever gflags defines
    # auth_local_webserver — confirm this default is intended.
    if hasattr(FLAGS, 'auth_local_webserver'):
        FLAGS.auth_local_webserver = False

    credentials = credential_store.get()
    if credentials is None or credentials.invalid:
        print('Generating new OAuth credentials ...')
        for _ in range(20):
            # If authorization fails, we want to retry, rather than let this
            # cascade up and get caught elsewhere. If users want out of the
            # retry loop, they can ^C.
            try:
                flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
                flags = _GetRunFlowFlags(args=oauth2client_args)
                credentials = tools.run_flow(flow, credential_store, flags)
                break
            except (oauth2client.client.FlowExchangeError, SystemExit) as e:
                # Here SystemExit is "no credential at all", and the
                # FlowExchangeError is "invalid" -- usually because
                # you reused a token.
                print('Invalid authorization: %s' % (e,))
            except httplib2.HttpLib2Error as e:
                # Communication failures are not retried.
                print('Communication error: %s' % (e,))
                raise exceptions.CredentialsError(
                    'Communication error creating credentials: %s' % e)
    return credentials
class _MultiProcessCacheFile(object):

    """Simple multithreading and multiprocessing safe cache file.

    Notes on behavior:
      * the fasteners.InterProcessLock object cannot reliably prevent threads
        from double-acquiring a lock. A threading lock is used in addition to
        the InterProcessLock. The threading lock is always acquired first and
        released last.
      * The interprocess lock will not deadlock. If a process can not acquire
        the interprocess lock within `_lock_timeout` the call will return as
        a cache miss or unsuccessful cache write.
      * App Engine environments cannot be process locked because (1) the
        runtime does not provide monotonic time and (2) different processes
        may or may not share the same machine. Because of this, process
        locks are disabled and locking is only guaranteed to protect against
        multithreaded access.
    """

    _lock_timeout = 1
    _encoding = 'utf-8'
    # Class-level lock: serializes access across *all* instances (all cache
    # files) within this process, not just per file.
    _thread_lock = threading.Lock()

    def __init__(self, filename):
        self._file = None
        self._filename = filename
        if _FASTENERS_AVAILABLE:
            self._process_lock_getter = self._ProcessLockAcquired
            self._process_lock = fasteners.InterProcessLock(
                '{0}.lock'.format(filename))
        else:
            self._process_lock_getter = self._DummyLockAcquired
            self._process_lock = None

    @contextlib.contextmanager
    def _ProcessLockAcquired(self):
        """Context manager for process locks with timeout."""
        # Bug fix: initialize is_locked before the try block. Previously,
        # if acquire() raised, the finally clause hit a NameError on
        # `is_locked`, masking the original exception.
        is_locked = False
        try:
            is_locked = self._process_lock.acquire(timeout=self._lock_timeout)
            yield is_locked
        finally:
            if is_locked:
                self._process_lock.release()

    @contextlib.contextmanager
    def _DummyLockAcquired(self):
        """Lock context manager for environments without process locks."""
        yield True

    def LockedRead(self):
        """Acquire an interprocess lock and dump cache contents.

        This method safely acquires the locks then reads a string
        from the cache file. If the file does not exist and cannot
        be created, it will return None. If the locks cannot be
        acquired, this will also return None.

        Returns:
          cache data - string if present, None on failure.
        """
        file_contents = None
        with self._thread_lock:
            if not self._EnsureFileExists():
                return None
            with self._process_lock_getter() as acquired_plock:
                if not acquired_plock:
                    return None
                with open(self._filename, 'rb') as f:
                    file_contents = f.read().decode(encoding=self._encoding)
        return file_contents

    def LockedWrite(self, cache_data):
        """Acquire an interprocess lock and write a string.

        This method safely acquires the locks then writes a string
        to the cache file. If the string is written successfully
        the function will return True, if the write fails for any
        reason it will return False.

        Args:
          cache_data: string or bytes to write.

        Returns:
          bool: success
        """
        if isinstance(cache_data, six.text_type):
            cache_data = cache_data.encode(encoding=self._encoding)

        with self._thread_lock:
            if not self._EnsureFileExists():
                return False
            with self._process_lock_getter() as acquired_plock:
                if not acquired_plock:
                    return False
                with open(self._filename, 'wb') as f:
                    f.write(cache_data)
                return True

    def _EnsureFileExists(self):
        """Touches a file; returns False on error, True on success."""
        if not os.path.exists(self._filename):
            # Create the file with owner-only permissions (umask 0o177).
            old_umask = os.umask(0o177)
            try:
                open(self._filename, 'a+b').close()
            except OSError:
                return False
            finally:
                os.umask(old_umask)
        return True
# TODO(craigcitro): Push this into oauth2client.
def GetUserinfo(credentials, http=None):  # pylint: disable=invalid-name
    """Get the userinfo associated with the given credentials.

    This is dependent on the token having either the userinfo.email or
    userinfo.profile scope for the given token.

    Args:
      credentials: (oauth2client.client.Credentials) incoming credentials
      http: (httplib2.Http, optional) http instance to use

    Returns:
      The email address for this token, or None if the required scopes
      aren't available.
    """
    http = http or httplib2.Http()
    # We ignore communication woes here (i.e. SSL errors, socket
    # timeout), as handling these should be done in a common location.
    response, content = http.request(_GetUserinfoUrl(credentials))
    if response.status == http_client.BAD_REQUEST:
        # Token likely expired; refresh once and retry.
        credentials.refresh(http)
        response, content = http.request(_GetUserinfoUrl(credentials))
    return json.loads(content or '{}')  # Save ourselves from an empty reply.
def _GetUserinfoUrl(credentials):
url_root = 'https://oauth2.googleapis.com/tokeninfo'
query_args = {'access_token': credentials.access_token}
return '?'.join((url_root, urllib.parse.urlencode(query_args)))
@_RegisterCredentialsMethod
def _GetServiceAccountCredentials(
        client_info, service_account_name=None, service_account_keyfile=None,
        service_account_json_keyfile=None, **unused_kwds):
    """Returns ServiceAccountCredentials from the given file(s)."""
    # The p12 path requires both the account name and its keyfile.
    if bool(service_account_name) != bool(service_account_keyfile):
        raise exceptions.CredentialsError(
            'Service account name or keyfile provided without the other')
    scopes = client_info['scope'].split()
    user_agent = client_info['user_agent']
    # Use the .json credentials, if provided.
    if service_account_json_keyfile:
        return ServiceAccountCredentialsFromFile(
            service_account_json_keyfile, scopes, user_agent=user_agent)
    # Fall back to .p12 if there's no .json credentials.
    if service_account_name is not None:
        return ServiceAccountCredentialsFromP12File(
            service_account_name, service_account_keyfile, scopes, user_agent)
    return None
@_RegisterCredentialsMethod
def _GetGaeServiceAccount(client_info, **unused_kwds):
    """Fetch App Engine service-account credentials, or None on failure."""
    return GaeAssertionCredentials.Get(scopes=client_info['scope'].split(' '))
@_RegisterCredentialsMethod
def _GetGceServiceAccount(client_info, **unused_kwds):
    """Fetch Compute Engine service-account credentials, or None on failure."""
    return GceAssertionCredentials.Get(scopes=client_info['scope'].split(' '))
@_RegisterCredentialsMethod
def _GetApplicationDefaultCredentials(
        client_info, skip_application_default_credentials=False,
        **unused_kwds):
    """Returns ADC with right scopes.

    Args:
      client_info: Dict with a space-delimited 'scope' entry.
      skip_application_default_credentials: If True, do not attempt ADC.
      **unused_kwds: Ignored.

    Returns:
      Scoped application-default credentials, or None when unavailable or
      unsuitable for the requested scopes.
    """
    scopes = client_info['scope'].split()
    if skip_application_default_credentials:
        return None
    gc = oauth2client.client.GoogleCredentials
    # cache_file_lock is a module-level lock guarding the ADC file read.
    with cache_file_lock:
        try:
            # pylint: disable=protected-access
            # We've already done our own check for GAE/GCE
            # credentials, we don't want to pay for checking again.
            credentials = gc._implicit_credentials_from_files()
        except oauth2client.client.ApplicationDefaultCredentialsError:
            return None
    # If we got back a non-service account credential, we need to use
    # a heuristic to decide whether or not the application default
    # credential will work for us. We assume that if we're requesting
    # cloud-platform, our scopes are a subset of cloud scopes, and the
    # ADC will work.
    cp = 'https://www.googleapis.com/auth/cloud-platform'
    if credentials is None:
        return None
    if not isinstance(credentials, gc) or cp in scopes:
        return credentials.create_scoped(scopes)
    return None
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from conary.lib import util
from conary.repository import calllog, changeset, filecontents, netclient
from conary.repository.netrepos import netserver
import gzip
import os
import tempfile
import time
# this returns the same server for any server name or label
# requested; because a shim can only refer to one server.
class FakeServerCache(netclient.ServerCache):

    """ServerCache that short-circuits lookups for the in-process server."""

    def __init__(self, server, repMap, userMap, conaryProxies, systemId=None):
        # `server` is the ShimServerProxy wrapping the local repository.
        self._server = server
        netclient.ServerCache.__init__(self, repMap, userMap,
                proxies=conaryProxies, systemId=systemId)

    def __getitem__(self, item):
        serverName = self._getServerName(item)
        # return the proxy object for anything that matches the
        # serverNames on this repository
        if serverName in self._server._server.serverNameList:
            return self._server
        # otherwise get a real repository client
        return netclient.ServerCache.__getitem__(self, item)
class NetworkRepositoryServer(netserver.NetworkRepositoryServer):

    """Repository server variant used by the shim client.

    The base class stages results under tmpPath and returns URLs; these
    overrides resolve those URLs to local filesystem paths so the shim
    client can read contents without any HTTP round trip.
    """

    @netserver.accessReadOnly
    def getFileContents(self, *args, **kwargs):
        """Return the local paths of the requested file contents."""
        location = netserver.NetworkRepositoryServer.getFileContents(self,
            *args, **kwargs)[0]
        # The base class returns "<url>?<basename>"; "<basename>-out" in
        # tmpPath is a manifest with one "<path> <size>" entry per line.
        path = os.path.join(self.tmpPath, location.split('?')[1] + '-out')
        paths = open(path).readlines()
        os.unlink(path)
        return [ x.split(" ")[0] for x in paths ]

    @netserver.accessReadOnly
    def getFileContentsFromTrove(self, *args, **kwargs):
        """Return the local paths of contents selected by trove/path."""
        location, sizes = netserver.NetworkRepositoryServer.getFileContentsFromTrove(
            self, *args, **kwargs)
        path = os.path.join(self.tmpPath, location.split('?')[1] + '-out')
        paths = open(path).readlines()
        os.unlink(path)
        return [ x.split(" ")[0] for x in paths ]

    def getChangeSet(self, authToken, clientVersion, chgSetList, recurse,
                     withFiles, withFileContents, excludeAutoSource):
        """Create a changeset on disk and return its local path and sizes."""
        # NOTE(review): `paths` (and `newVer` below) appear unused here.
        paths = []
        csList = []

        def _cvtTroveList(l):
            # Convert (name, (oldV, oldF), (newV, newF), absolute) tuples to
            # the frozen wire form; 0 stands in for a missing version/flavor.
            new = []
            for (name, (oldV, oldF), (newV, newF), absolute) in l:
                if oldV:
                    oldV = self.fromVersion(oldV)
                    oldF = self.fromFlavor(oldF)
                else:
                    oldV = 0
                    oldF = 0

                if newV:
                    newV = self.fromVersion(newV)
                    newF = self.fromFlavor(newF)
                else:
                    # this happens when a distributed group has a trove
                    # on a remote repository disappear
                    newV = 0
                    newF = 0

                new.append((name, (oldV, oldF), (newV, newF), absolute))

            return new

        # Thaw the frozen job list into version/flavor objects.
        for (name, (old, oldFlavor), (new, newFlavor), absolute) in chgSetList:
            newVer = self.toVersion(new)

            if old == 0:
                l = (name, (None, None),
                     (self.toVersion(new), self.toFlavor(newFlavor)),
                     absolute)
            else:
                l = (name, (self.toVersion(old), self.toFlavor(oldFlavor)),
                     (self.toVersion(new), self.toFlavor(newFlavor)),
                     absolute)
            csList.append(l)

        ret = self.repos.createChangeSet(csList,
                recurse = recurse,
                withFiles = withFiles,
                withFileContents = withFileContents,
                excludeAutoSource = excludeAutoSource)
        (cs, trovesNeeded, filesNeeded, removedTroveList) = ret
        assert(not filesNeeded)
        assert(not removedTroveList)

        # FIXME: we need a way to remove these temporary
        # files when we're done with them.
        fd, tmpFile = tempfile.mkstemp(suffix = '.ccs')
        os.close(fd)
        cs.writeToFile(tmpFile)
        size = os.stat(tmpFile).st_size

        return (tmpFile, [size], _cvtTroveList(trovesNeeded), [], [])
class ShimNetClient(netclient.NetworkRepositoryClient):

    """
    A subclass of NetworkRepositoryClient which can take a
    shimclient.NetworkRepositoryServer instance (plus a few other
    pieces of information) and expose the netclient interface without
    the overhead of XMLRPC.

    If 'server' is a regular netserver.NetworkRepositoryServer
    instance, the shim won't be able to return changesets. If 'server'
    is a shimclient.NetworkRepositoryServer, it will.

    NOTE: Conary proxies are only used for "real" netclients
    outside this repository's serverNameList.
    """

    def __init__(self, server, protocol, port, authToken, repMap, userMap,
                 conaryProxies=None, systemId=None):
        # Normalize authToken to the modern four-element form:
        # (user, password, [(entClass, entKey), ...], None).
        if type(authToken[2]) is not list:
            # old-style [single entitlement] authToken
            authToken = (authToken[0], authToken[1],
                         [(authToken[2], authToken[3])], None)
        elif len(authToken) == 3:
            authToken = authToken + (None,)

        netclient.NetworkRepositoryClient.__init__(self, repMap, userMap,
                proxy=conaryProxies, systemId=systemId)

        proxy = ShimServerProxy(server, protocol, port, authToken, systemId)
        self.c = FakeServerCache(proxy, repMap, userMap, conaryProxies,
                                 systemId=systemId)

    @staticmethod
    def _wrapFileContents(filePaths, compressed):
        # Helper: wrap repository-local contents paths in filecontents
        # objects. On-disk contents are gzipped, so uncompressed requests
        # go through a GzipFile wrapper.
        fileObjList = []
        for path in filePaths:
            if compressed:
                fileObjList.append(
                    filecontents.FromFilesystem(path, compressed=True))
            else:
                fileObjList.append(
                    filecontents.FromFile(gzip.GzipFile(path, "r")))
        return fileObjList

    def getFileContentsObjects(self, server, fileList, callback, outF,
                               compressed):
        """Fetch contents, short-circuiting for the in-process server."""
        if not isinstance(self.c[server], ShimServerProxy):
            return netclient.NetworkRepositoryClient.getFileContentsObjects(
                self, server, fileList, callback, outF, compressed)
        filePaths = self.c[server].getFileContents(fileList)
        return self._wrapFileContents(filePaths, compressed)

    def getFileContentsFromTrove(self, n, v, f, pathList,
                                 callback=None, compressed=False):
        """Fetch contents by trove, short-circuiting for the shim server."""
        server = v.trailingLabel().getHost()
        if not isinstance(self.c[server], ShimServerProxy):
            return netclient.NetworkRepositoryClient.getFileContentsFromTrove(
                self, n, v, f, pathList, callback=callback,
                compressed=compressed)
        # Freeze the arguments for the server-side call.
        pathList = [self.fromPath(x) for x in pathList]
        v = self.fromVersion(v)
        f = self.fromFlavor(f)
        filePaths = self.c[server].getFileContentsFromTrove(n, v, f,
                                                            pathList)
        return self._wrapFileContents(filePaths, compressed)

    def commitChangeSet(self, chgSet, callback=None, mirror=False,
                        hidden=False):
        """Commit a changeset, writing directly into the shim server's
        tmpPath when the target repository is served in-process."""
        trvCs = chgSet.iterNewTroveList().next()
        newLabel = trvCs.getNewVersion().trailingLabel()
        if not isinstance(self.c[newLabel], ShimServerProxy):
            # Bug fix: forward the caller's mirror/hidden flags instead of
            # hard-coding False on the non-shim path.
            return netclient.NetworkRepositoryClient.commitChangeSet(
                self, chgSet, callback=callback, mirror=mirror, hidden=hidden)

        (fd, path) = tempfile.mkstemp(dir=self.c[newLabel]._server.tmpPath,
                                      suffix='.ccs-in')
        os.close(fd)
        chgSet.writeToFile(path)

        # The server expects "<basicUrl>?<basename>" with '-in' stripped.
        base = os.path.basename(path)[:-3]
        url = util.normurl(self.c[newLabel]._server.basicUrl) + "?" + base
        self.c[newLabel].commitChangeSet(url, mirror=mirror,
                                         hidden=hidden)

    def commitChangeSetFile(self, fName, mirror=False, callback=None,
                            hidden=False):
        # this could be more efficient. it rewrites the trove every time,
        # but it doesn't seem to be heavily used
        cs = changeset.ChangeSetFromFile(fName)
        self.commitChangeSet(cs, callback=callback, mirror=mirror,
                             hidden=hidden)
class ShimServerProxy(netclient.ServerProxy):

    """ServerProxy that invokes the in-process repository server directly
    instead of marshaling calls over XMLRPC."""

    def __init__(self, server, protocol, port, authToken, systemId=None):
        self._authToken = authToken
        self._server = server
        self._protocol = protocol
        self._port = port
        self._systemId = systemId
        # Always speak the newest protocol version the client supports.
        self._protocolVersion = netclient.CLIENT_VERSIONS[-1]

        if 'CONARY_CLIENT_LOG' in os.environ:
            self._callLog = calllog.ClientCallLogger(
                os.environ['CONARY_CLIENT_LOG'])
        else:
            self._callLog = None

    def __repr__(self):
        return '<ShimServerProxy for %r>' % (self._server,)

    def setAbortCheck(self, *args):
        # Abort checks are meaningless for in-process calls; ignore them.
        pass

    def getChangeSetObj(self, *args):
        # Bypass the wire protocol entirely for changeset objects.
        return self._server._getChangeSetObj(self._authToken, *args)

    def usedProxy(self, *args):
        # Calls never go through an HTTP proxy.
        return False

    def _request(self, method, args, kwargs):
        # Prepend the protocol version, mirroring the XMLRPC wire format.
        args = [self._protocolVersion] + list(args)
        start = time.time()
        result = self._server.callWrapper(self._protocol, self._port, method,
                self._authToken, args, kwargs, systemId=self._systemId)
        if self._callLog:
            self._callLog.log("shim-" + self._server.repos.serverNameList[0],
                    [], method, result, args,
                    latency = time.time() - start)
        return result
|
|
from __future__ import division
from itertools import product
import warnings
import numpy as np
from numpy.linalg import norm
from numpy.testing import (run_module_suite, assert_, assert_allclose,
assert_raises, assert_equal)
from scipy.sparse import issparse, lil_matrix
from scipy.sparse.linalg import aslinearoperator
from scipy.optimize import least_squares
from scipy.optimize._lsq.least_squares import IMPLEMENTED_LOSSES
from scipy.optimize._lsq.common import EPS, make_strictly_feasible
def fun_trivial(x, a=0):
    """Scalar residual (x - a)**2 + 5; minimum value 5.0 at x == a."""
    shifted = x - a
    return shifted ** 2 + 5.0
def jac_trivial(x, a=0.0):
    """Derivative of fun_trivial with respect to x."""
    return (x - a) * 2
def fun_2d_trivial(x):
    """Identity residual on the first two components of x."""
    first, second = x[0], x[1]
    return np.array([first, second])
def jac_2d_trivial(x):
    """Jacobian of fun_2d_trivial: the 2x2 identity matrix."""
    return np.eye(2)
def fun_rosenbrock(x):
    """Residual vector of the classic 2-D Rosenbrock problem."""
    first = 10 * (x[1] - x[0]**2)
    second = 1 - x[0]
    return np.array([first, second])
def jac_rosenbrock(x):
    """Analytic Jacobian of fun_rosenbrock."""
    top = [-20 * x[0], 10]
    bottom = [-1, 0]
    return np.array([top, bottom])
def jac_rosenbrock_bad_dim(x):
    """Rosenbrock Jacobian with a deliberately wrong row count (3 not 2)."""
    rows = [
        [-20 * x[0], 10],
        [-1, 0],
        [0.0, 0.0],
    ]
    return np.array(rows)
def fun_rosenbrock_cropped(x):
    """First Rosenbrock residual only, yielding an underdetermined system."""
    residuals = fun_rosenbrock(x)
    return residuals[0]
def jac_rosenbrock_cropped(x):
    """First row of the Rosenbrock Jacobian."""
    full_jac = jac_rosenbrock(x)
    return full_jac[0]
# When x is 1-d array, return is 2-d array.
def fun_wrong_dimensions(x):
    """Deliberately return a 2-D array for 1-D x to exercise validation."""
    squared = x**2
    cubed = x**3
    return np.array([x, squared, cubed])
def jac_wrong_dimensions(x, a=0.0):
    """Deliberately return a 3-D 'Jacobian' to exercise validation."""
    flat_jac = jac_trivial(x, a=a)
    return np.atleast_3d(flat_jac)
def fun_bvp(x):
    """Residuals of a discrete boundary value problem on an n-by-n grid.

    x holds the flattened n*n interior values; the grid is padded with a
    zero boundary and the 5-point Laplacian stencil plus a cubic term is
    evaluated at each interior node.
    """
    n = int(np.sqrt(x.shape[0]))
    interior = x.reshape((n, n))
    padded = np.zeros((n + 2, n + 2))
    padded[1:-1, 1:-1] = interior
    residual = (padded[:-2, 1:-1] + padded[2:, 1:-1] +
                padded[1:-1, :-2] + padded[1:-1, 2:] -
                4 * interior + interior**3)
    return residual.ravel()
class BroydenTridiagonal(object):
    """Broyden tridiagonal test problem with box bounds.

    `mode` selects how the Jacobian is exposed: 'sparse' (lil_matrix plus
    a sparsity pattern), 'operator' (LinearOperator), or 'dense' (ndarray).
    """

    def __init__(self, n=100, mode='sparse'):
        np.random.seed(0)
        self.n = n

        self.x0 = -np.ones(n)
        self.lb = np.linspace(-2, -1.5, n)
        self.ub = np.linspace(-0.8, 0.0, n)

        # The three randn() calls below consume the seeded stream in this
        # exact order; reordering them would change the fixture.
        self.lb += 0.1 * np.random.randn(n)
        self.ub += 0.1 * np.random.randn(n)

        self.x0 += 0.1 * np.random.randn(n)
        self.x0 = make_strictly_feasible(self.x0, self.lb, self.ub)

        if mode == 'sparse':
            # Tridiagonal pattern: main, sub- and super-diagonals.
            self.sparsity = lil_matrix((n, n), dtype=int)
            i = np.arange(n)
            self.sparsity[i, i] = 1
            i = np.arange(1, n)
            self.sparsity[i, i - 1] = 1
            i = np.arange(n - 1)
            self.sparsity[i, i + 1] = 1

            self.jac = self._jac
        elif mode == 'operator':
            self.jac = lambda x: aslinearoperator(self._jac(x))
        elif mode == 'dense':
            self.sparsity = None
            self.jac = lambda x: self._jac(x).toarray()
        else:
            assert_(False)

    def fun(self, x):
        # Residuals: (3 - x_i) x_i + 1 - x_{i-1} - 2 x_{i+1}.
        f = (3 - x) * x + 1
        f[1:] -= x[:-1]
        f[:-1] -= 2 * x[1:]
        return f

    def _jac(self, x):
        # Analytic tridiagonal Jacobian as a lil_matrix.
        J = lil_matrix((self.n, self.n))
        i = np.arange(self.n)
        J[i, i] = 3 - 2 * x
        i = np.arange(1, self.n)
        J[i, i - 1] = -1
        i = np.arange(self.n - 1)
        J[i, i + 1] = -2
        return J
class ExponentialFittingProblem(object):
    """Provide data and function for exponential fitting in the form
    y = a + exp(b * x) + noise."""

    def __init__(self, a, b, noise, n_outliers=1, x_range=(-1, 1),
                 n_points=11, random_seed=None):
        np.random.seed(random_seed)
        self.m = n_points
        self.n = 2

        self.p0 = np.zeros(2)
        self.x = np.linspace(x_range[0], x_range[1], n_points)

        # Random calls happen in a fixed order (noise first, then outlier
        # selection) so results are reproducible for a given seed.
        self.y = a + np.exp(b * self.x)
        self.y += noise * np.random.randn(self.m)

        # Inflate a few responses to create outliers for robust-loss tests.
        outliers = np.random.randint(0, self.m, n_outliers)
        self.y[outliers] += 50 * noise * np.random.rand(n_outliers)

        # True parameter vector (a, b) for checking recovered fits.
        self.p_opt = np.array([a, b])

    def fun(self, p):
        # Residuals of the model a + exp(b * x) against the noisy data.
        return p[0] + np.exp(p[1] * self.x) - self.y

    def jac(self, p):
        J = np.empty((self.m, self.n))
        J[:, 0] = 1
        J[:, 1] = self.x * np.exp(p[1] * self.x)
        return J
def cubic_soft_l1(z):
    """Callable loss: rho(z) = 3*((1 + z)**(1/3) - 1) with derivatives.

    Returns a (3, z.size) array holding rho, rho' and rho'' row-wise, the
    layout least_squares expects from a callable loss.
    """
    shifted = 1 + z
    result = np.empty((3, z.size))
    result[0] = 3 * (shifted**(1/3) - 1)
    result[1] = shifted**(-2/3)
    result[2] = -2/3 * shifted**(-5/3)
    return result
# Exercise every built-in loss plus one callable loss (the callable path).
LOSSES = list(IMPLEMENTED_LOSSES.keys()) + [cubic_soft_l1]
class BaseMixin(object):
def test_basic(self):
# Test that the basic calling sequence works.
res = least_squares(fun_trivial, 2., method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.fun, fun_trivial(res.x))
def test_args_kwargs(self):
# Test that args and kwargs are passed correctly to the functions.
a = 3.0
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
res = least_squares(fun_trivial, 2.0, jac, args=(a,),
method=self.method)
assert_allclose(res.x, a, rtol=1e-4)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
args=(3, 4,), method=self.method)
res = least_squares(fun_trivial, 2.0, jac, kwargs={'a': a},
method=self.method)
assert_allclose(res.x, a, rtol=1e-4)
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
kwargs={'kaboom': 3}, method=self.method)
def test_jac_options(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
for jac in ['2-point', '3-point', 'cs', jac_trivial]:
res = least_squares(fun_trivial, 2.0, jac, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_raises(ValueError, least_squares, fun_trivial, 2.0, jac='oops',
method=self.method)
def test_nfev_options(self):
for max_nfev in [None, 20]:
res = least_squares(fun_trivial, 2.0, max_nfev=max_nfev,
method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
def test_x_scale_options(self):
for x_scale in [1.0, np.array([0.5]), 'jac']:
res = least_squares(fun_trivial, 2.0, x_scale=x_scale)
assert_allclose(res.x, 0)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale='auto', method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=-1.0, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=None, method=self.method)
assert_raises(ValueError, least_squares, fun_trivial,
2.0, x_scale=1.0+2.0j, method=self.method)
def test_diff_step(self):
# res1 and res2 should be equivalent.
# res2 and res3 should be different.
res1 = least_squares(fun_trivial, 2.0, diff_step=1e-1,
method=self.method)
res2 = least_squares(fun_trivial, 2.0, diff_step=-1e-1,
method=self.method)
res3 = least_squares(fun_trivial, 2.0,
diff_step=None, method=self.method)
assert_allclose(res1.x, 0, atol=1e-4)
assert_allclose(res2.x, 0, atol=1e-4)
assert_allclose(res3.x, 0, atol=1e-4)
assert_equal(res1.x, res2.x)
assert_equal(res1.nfev, res2.nfev)
assert_(res2.nfev != res3.nfev)
def test_incorrect_options_usage(self):
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'no_such_option': 100})
assert_raises(TypeError, least_squares, fun_trivial, 2.0,
method=self.method, options={'max_nfev': 100})
def test_full_result(self):
# MINPACK doesn't work very well with factor=100 on this problem,
# thus using low 'atol'.
res = least_squares(fun_trivial, 2.0, method=self.method)
assert_allclose(res.x, 0, atol=1e-4)
assert_allclose(res.cost, 12.5)
assert_allclose(res.fun, 5)
assert_allclose(res.jac, 0, atol=1e-4)
assert_allclose(res.grad, 0, atol=1e-2)
assert_allclose(res.optimality, 0, atol=1e-2)
assert_equal(res.active_mask, 0)
if self.method == 'lm':
assert_(res.nfev < 30)
assert_(res.njev is None)
else:
assert_(res.nfev < 10)
assert_(res.njev < 10)
assert_(res.status > 0)
assert_(res.success)
def test_full_result_single_fev(self):
# MINPACK checks the number of nfev after the iteration,
# so it's hard to tell what he is going to compute.
if self.method == 'lm':
return
res = least_squares(fun_trivial, 2.0, method=self.method,
max_nfev=1)
assert_equal(res.x, np.array([2]))
assert_equal(res.cost, 40.5)
assert_equal(res.fun, np.array([9]))
assert_equal(res.jac, np.array([[4]]))
assert_equal(res.grad, np.array([36]))
assert_equal(res.optimality, 36)
assert_equal(res.active_mask, np.array([0]))
assert_equal(res.nfev, 1)
assert_equal(res.njev, 1)
assert_equal(res.status, 0)
assert_equal(res.success, 0)
def test_rosenbrock(self):
x0 = [-2, 1]
x_opt = [1, 1]
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
for jac, x_scale, tr_solver in product(
['2-point', '3-point', 'cs', jac_rosenbrock],
[1.0, np.array([1.0, 0.2]), 'jac'],
['exact', 'lsmr']):
res = least_squares(fun_rosenbrock, x0, jac, x_scale=x_scale,
tr_solver=tr_solver, method=self.method)
assert_allclose(res.x, x_opt)
def test_rosenbrock_cropped(self):
    """Under-determined residual (m < n) works for all methods but 'lm'."""
    start = [-2, 1]
    if self.method == 'lm':
        # MINPACK requires m >= n, so this problem must be rejected.
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                      start, method='lm')
        return
    for jac_option, x_scale, tr_solver in product(
            ['2-point', '3-point', 'cs', jac_rosenbrock_cropped],
            [1.0, np.array([1.0, 0.2]), 'jac'],
            ['exact', 'lsmr']):
        result = least_squares(
            fun_rosenbrock_cropped, start, jac_option, x_scale=x_scale,
            tr_solver=tr_solver, method=self.method)
        assert_allclose(result.cost, 0, atol=1e-14)
def test_fun_wrong_dimensions(self):
    """Residual functions returning non-1-D output must be rejected."""
    assert_raises(ValueError, least_squares,
                  fun_wrong_dimensions, 2.0, method=self.method)
def test_jac_wrong_dimensions(self):
    """Jacobian callables returning wrongly shaped output must be rejected."""
    assert_raises(ValueError, least_squares,
                  fun_trivial, 2.0, jac_wrong_dimensions, method=self.method)
def test_fun_and_jac_inconsistent_dimensions(self):
    """fun and jac with mismatched output shapes must be rejected."""
    start = [1, 2]
    assert_raises(ValueError, least_squares, fun_rosenbrock,
                  start, jac_rosenbrock_bad_dim, method=self.method)
def test_x0_multidimensional(self):
    """x0 with more than one dimension must be rejected."""
    start = np.ones(4).reshape(2, 2)
    assert_raises(ValueError, least_squares,
                  fun_trivial, start, method=self.method)
def test_x0_complex_scalar(self):
    """A complex scalar x0 must be rejected."""
    start = 2.0 + 0.0*1j
    assert_raises(ValueError, least_squares,
                  fun_trivial, start, method=self.method)
def test_x0_complex_array(self):
    """An x0 containing any complex entry must be rejected."""
    start = [1.0, 2.0 + 0.0*1j]
    assert_raises(ValueError, least_squares,
                  fun_trivial, start, method=self.method)
def test_bvp(self):
    """Regression test for gh-5556.

    The dogbox solver had a bug in its trust-region radius update that
    could block progress and loop forever; this discrete boundary value
    problem is the one which triggers it.
    """
    n = 10
    start = np.ones(n**2)
    # 'lm' estimates the Jacobian numerically, so allow many more evals.
    budget = 5000 if self.method == 'lm' else 100
    result = least_squares(fun_bvp, start, ftol=1e-2, method=self.method,
                           max_nfev=budget)
    assert_(result.nfev < budget)
    assert_(result.cost < 0.5)
class BoundsMixin(object):
    """Shared tests for solvers that support bound constraints."""

    def test_inconsistent(self):
        # Lower bound above the upper bound must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(10.0, 0.0), method=self.method)

    def test_infeasible(self):
        # x0 lying outside the feasible region must be rejected.
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(3., 4), method=self.method)

    def test_wrong_number(self):
        # bounds must be a 2-tuple.
        assert_raises(ValueError, least_squares, fun_trivial, 2.,
                      bounds=(1., 2, 3), method=self.method)

    def test_inconsistent_shape(self):
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      bounds=(1.0, [2.0, 3.0]), method=self.method)
        # 1-D array won't be broadcasted
        assert_raises(ValueError, least_squares, fun_rosenbrock, [1.0, 2.0],
                      bounds=([0.0], [3.0, 4.0]), method=self.method)

    def test_in_bounds(self):
        """Solutions and active masks with loose and binding bounds."""
        for jac_option in ['2-point', '3-point', 'cs', jac_trivial]:
            unbounded = least_squares(fun_trivial, 2.0, jac=jac_option,
                                      bounds=(-1.0, 3.0), method=self.method)
            assert_allclose(unbounded.x, 0.0, atol=1e-4)
            assert_equal(unbounded.active_mask, [0])
            assert_(-1 <= unbounded.x <= 3)
            clipped = least_squares(fun_trivial, 2.0, jac=jac_option,
                                    bounds=(0.5, 3.0), method=self.method)
            assert_allclose(clipped.x, 0.5, atol=1e-4)
            assert_equal(clipped.active_mask, [-1])
            assert_(0.5 <= clipped.x <= 3)

    def test_bounds_shape(self):
        """Scalar and array bounds must broadcast correctly."""
        for jac_option in ['2-point', '3-point', 'cs', jac_2d_trivial]:
            start = [1.0, 1.0]
            res = least_squares(fun_2d_trivial, start, jac=jac_option)
            assert_allclose(res.x, [0.0, 0.0])
            res = least_squares(fun_2d_trivial, start, jac=jac_option,
                                bounds=(0.5, [2.0, 2.0]), method=self.method)
            assert_allclose(res.x, [0.5, 0.5])
            res = least_squares(fun_2d_trivial, start, jac=jac_option,
                                bounds=([0.3, 0.2], 3.0), method=self.method)
            assert_allclose(res.x, [0.3, 0.2])
            res = least_squares(
                fun_2d_trivial, start, jac=jac_option,
                bounds=([-1, 0.5], [1.0, 3.0]), method=self.method)
            assert_allclose(res.x, [0.0, 0.5], atol=1e-5)

    def test_rosenbrock_bounds(self):
        """Bounded Rosenbrock runs must satisfy first-order optimality."""
        problems = [
            (np.array([-2.0, 1.0]), ([-np.inf, -1.5], np.inf)),
            (np.array([2.0, 2.0]), ([-np.inf, 1.5], np.inf)),
            (np.array([-2.0, 2.0]), ([-np.inf, 1.5], np.inf)),
            (np.array([0.0, 2.0]), ([-np.inf, 1.5], [1.0, np.inf])),
            (np.array([2.0, 2.0]), ([1.0, 1.5], [3.0, 3.0])),
            (np.array([-1.2, 1.0]), ([-50.0, 0.0], [0.5, 100])),
        ]
        for x0, bounds in problems:
            for jac_option, x_scale, tr_solver in product(
                    ['2-point', '3-point', 'cs', jac_rosenbrock],
                    [1.0, [1.0, 0.5], 'jac'],
                    ['exact', 'lsmr']):
                res = least_squares(fun_rosenbrock, x0, jac_option, bounds,
                                    x_scale=x_scale, tr_solver=tr_solver,
                                    method=self.method)
                assert_allclose(res.optimality, 0.0, atol=1e-5)
class SparseMixin(object):
    """Shared tests for solvers that support sparse Jacobians."""

    def test_exact_tr_solver(self):
        # tr_solver='exact' is incompatible with sparse Jacobians and with
        # a jac_sparsity structure.
        p = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='exact', method=self.method)
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      tr_solver='exact', jac_sparsity=p.sparsity,
                      method=self.method)

    def test_equivalence(self):
        # Sparse and dense runs of the same problem must behave identically.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            res_sparse = least_squares(
                sparse.fun, sparse.x0, jac=sparse.jac,
                method=self.method)
            # NOTE(review): jac=sparse.jac here (not dense.jac) — confirm
            # whether that is intentional.
            res_dense = least_squares(
                dense.fun, dense.x0, jac=sparse.jac,
                method=self.method)
        assert_equal(res_sparse.nfev, res_dense.nfev)
        assert_allclose(res_sparse.x, res_dense.x, atol=1e-20)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)

    def test_tr_options(self):
        # Options must be forwarded to the lsmr trust-region solver.
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            tr_options={'btol': 1e-10})
        assert_allclose(res.cost, 0, atol=1e-20)

    def test_wrong_parameters(self):
        p = BroydenTridiagonal()
        # Unknown tr_solver name.
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='best', method=self.method)
        # 'tol' is not an accepted lsmr option.
        assert_raises(TypeError, least_squares, p.fun, p.x0, p.jac,
                      tr_solver='lsmr', tr_options={'tol': 1e-10})

    def test_solver_selection(self):
        # The returned Jacobian keeps the format it was supplied in.
        sparse = BroydenTridiagonal(mode='sparse')
        dense = BroydenTridiagonal(mode='dense')
        res_sparse = least_squares(sparse.fun, sparse.x0, jac=sparse.jac,
                                   method=self.method)
        res_dense = least_squares(dense.fun, dense.x0, jac=dense.jac,
                                  method=self.method)
        assert_allclose(res_sparse.cost, 0, atol=1e-20)
        assert_allclose(res_dense.cost, 0, atol=1e-20)
        assert_(issparse(res_sparse.jac))
        assert_(isinstance(res_dense.jac, np.ndarray))

    def test_numerical_jac(self):
        # Numerical Jacobians must agree with and without a sparsity pattern.
        p = BroydenTridiagonal()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', UserWarning)
            for jac in ['2-point', '3-point', 'cs']:
                res_dense = least_squares(p.fun, p.x0, jac, method=self.method)
                res_sparse = least_squares(
                    p.fun, p.x0, jac, method=self.method,
                    jac_sparsity=p.sparsity)
                assert_equal(res_dense.nfev, res_sparse.nfev)
                assert_allclose(res_dense.x, res_sparse.x, atol=1e-20)
                assert_allclose(res_dense.cost, 0, atol=1e-20)
                assert_allclose(res_sparse.cost, 0, atol=1e-20)

    def test_with_bounds(self):
        # One-sided and two-sided bounds, all Jacobian options, with and
        # without a sparsity structure.
        p = BroydenTridiagonal()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            for jac, jac_sparsity in product(
                    [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
                res_1 = least_squares(
                    p.fun, p.x0, jac, bounds=(p.lb, np.inf),
                    method=self.method, jac_sparsity=jac_sparsity)
                res_2 = least_squares(
                    p.fun, p.x0, jac, bounds=(-np.inf, p.ub),
                    method=self.method, jac_sparsity=jac_sparsity)
                res_3 = least_squares(
                    p.fun, p.x0, jac, bounds=(p.lb, p.ub),
                    method=self.method, jac_sparsity=jac_sparsity)
                assert_allclose(res_1.optimality, 0, atol=1e-10)
                assert_allclose(res_2.optimality, 0, atol=1e-10)
                assert_allclose(res_3.optimality, 0, atol=1e-10)

    def test_wrong_jac_sparsity(self):
        # A sparsity structure of the wrong shape must be rejected.
        p = BroydenTridiagonal()
        sparsity = p.sparsity[:-1]
        assert_raises(ValueError, least_squares, p.fun, p.x0,
                      jac_sparsity=sparsity, method=self.method)

    def test_linear_operator(self):
        # LinearOperator Jacobians work with lsmr but not with 'exact'.
        p = BroydenTridiagonal(mode='operator')
        res = least_squares(p.fun, p.x0, p.jac, method=self.method)
        assert_allclose(res.cost, 0.0, atol=1e-20)
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, tr_solver='exact')

    def test_x_scale_jac_scale(self):
        p = BroydenTridiagonal()
        res = least_squares(p.fun, p.x0, p.jac, method=self.method,
                            x_scale='jac')
        assert_allclose(res.cost, 0.0, atol=1e-20)
        # x_scale='jac' needs explicit Jacobian entries, so a
        # LinearOperator Jacobian must be rejected.
        p = BroydenTridiagonal(mode='operator')
        assert_raises(ValueError, least_squares, p.fun, p.x0, p.jac,
                      method=self.method, x_scale='jac')
class LossFunctionMixin(object):
    """Shared tests for solvers that support robust loss functions."""

    def test_options(self):
        # Every registered loss must converge; unknown names must raise.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_allclose(res.x, 0, atol=1e-15)
        assert_raises(ValueError, least_squares, fun_trivial, 2.0,
                      loss='hinge', method=self.method)

    def test_fun(self):
        # Test that res.fun is actual residuals, and not modified by loss
        # function stuff.
        for loss in LOSSES:
            res = least_squares(fun_trivial, 2.0, loss=loss,
                                method=self.method)
            assert_equal(res.fun, fun_trivial(res.x))

    def test_grad(self):
        # Test that res.grad is true gradient of loss function at the
        # solution. Use max_nfev = 1, to avoid reaching minimum.
        x = np.array([2.0])  # res.x will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x * (x**2 + 5))

        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.grad, 2 * x)

        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**0.5)

        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2))

        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad, 2 * x * (x**2 + 5) / (1 + (x**2 + 5)**4))

        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1, method=self.method)
        assert_allclose(res.grad,
                        2 * x * (x**2 + 5) / (1 + (x**2 + 5)**2)**(2/3))

    def test_jac(self):
        # Test that res.jac.T.dot(res.jac) gives Gauss-Newton approximation
        # of Hessian. This approximation is computed by doubly differentiating
        # the cost function and dropping the part containing second derivative
        # of f. For a scalar function it is computed as
        # H = (rho' + 2 * rho'' * f**2) * f'**2, if the expression inside the
        # brackets is less than EPS it is replaced by EPS. Here we check
        # against the root of H.
        x = 2.0  # res.x will be this.
        f = x**2 + 5  # res.fun will be this.

        res = least_squares(fun_trivial, x, jac_trivial, loss='linear',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x)

        # For `huber` loss the Jacobian correction is identically zero
        # in outlier region, in such cases it is modified to be equal EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            max_nfev=1, method=self.method)
        assert_equal(res.jac, 2 * x * EPS**0.5)

        # Now let's apply `loss_scale` to turn the residual into an inlier.
        # The loss function becomes linear.
        # NOTE(review): method=self.method is omitted below — confirm whether
        # using the default method here is intentional.
        res = least_squares(fun_trivial, x, jac_trivial, loss='huber',
                            f_scale=10, max_nfev=1)
        assert_equal(res.jac, 2 * x)

        # 'soft_l1' always gives a positive scaling.
        res = least_squares(fun_trivial, x, jac_trivial, loss='soft_l1',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * (1 + f**2)**-0.75)

        # For 'cauchy' the correction term turns out to be negative, and it
        # replaced by EPS**0.5.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Now use scaling to turn the residual to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='cauchy',
                            f_scale=10, max_nfev=1, method=self.method)
        fs = f / 10
        assert_allclose(res.jac, 2 * x * (1 - fs**2)**0.5 / (1 + fs**2))

        # 'arctan' gives an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            max_nfev=1, method=self.method)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss='arctan',
                            f_scale=20.0, max_nfev=1, method=self.method)
        fs = f / 20
        assert_allclose(res.jac, 2 * x * (1 - 3 * fs**4)**0.5 / (1 + fs**4))

        # cubic_soft_l1 will give an outlier.
        res = least_squares(fun_trivial, x, jac_trivial, loss=cubic_soft_l1,
                            max_nfev=1)
        assert_allclose(res.jac, 2 * x * EPS**0.5)

        # Turn to inlier.
        res = least_squares(fun_trivial, x, jac_trivial,
                            loss=cubic_soft_l1, f_scale=6, max_nfev=1)
        fs = f / 6
        assert_allclose(res.jac,
                        2 * x * (1 - fs**2 / 3)**0.5 * (1 + fs**2)**(-5/6))

    def test_robustness(self):
        # Robust losses should beat plain least squares on noisy data.
        for noise in [0.1, 1.0]:
            p = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
            for jac in ['2-point', '3-point', 'cs', p.jac]:
                res_lsq = least_squares(p.fun, p.p0, jac=jac,
                                        method=self.method)
                assert_allclose(res_lsq.optimality, 0, atol=1e-2)
                for loss in LOSSES:
                    if loss == 'linear':
                        continue
                    res_robust = least_squares(
                        p.fun, p.p0, jac=jac, loss=loss, f_scale=noise,
                        method=self.method)
                    assert_allclose(res_robust.optimality, 0, atol=1e-2)
                    assert_(norm(res_robust.x - p.p_opt) <
                            norm(res_lsq.x - p.p_opt))
class TestDogbox(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    # Run the full shared suite with the dogbox algorithm.
    method = 'dogbox'
class TestTRF(BaseMixin, BoundsMixin, SparseMixin, LossFunctionMixin):
    """Full shared suite with the Trust Region Reflective algorithm."""
    method = 'trf'

    def test_lsmr_regularization(self):
        """Both settings of the lsmr 'regularize' option must converge."""
        problem = BroydenTridiagonal()
        for flag in (True, False):
            result = least_squares(problem.fun, problem.x0, problem.jac,
                                   method='trf',
                                   tr_options={'regularize': flag})
            assert_allclose(result.cost, 0, atol=1e-20)
class TestLM(BaseMixin):
    """'lm' method plus checks for features MINPACK does not support."""
    method = 'lm'

    def test_bounds_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, bounds=(-3.0, 3.0), method='lm')

    def test_m_less_n_not_supported(self):
        start = [-2, 1]
        assert_raises(ValueError, least_squares, fun_rosenbrock_cropped,
                      start, method='lm')

    def test_sparse_not_supported(self):
        problem = BroydenTridiagonal()
        assert_raises(ValueError, least_squares, problem.fun, problem.x0,
                      problem.jac, method='lm')

    def test_jac_sparsity_not_supported(self):
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, jac_sparsity=[1], method='lm')

    def test_LinearOperator_not_supported(self):
        problem = BroydenTridiagonal(mode="operator")
        assert_raises(ValueError, least_squares, problem.fun, problem.x0,
                      problem.jac, method='lm')

    def test_loss(self):
        # Only the default 'linear' loss is compatible with 'lm'.
        result = least_squares(fun_trivial, 2.0, loss='linear', method='lm')
        assert_allclose(result.x, 0.0, atol=1e-4)
        assert_raises(ValueError, least_squares, fun_trivial,
                      2.0, method='lm', loss='huber')
def test_basic():
    # The 'method' argument must be optional with a sensible default.
    result = least_squares(fun_trivial, 2.0)
    assert_allclose(result.x, 0, atol=1e-10)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    run_module_suite()
|
|
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Module documentation and usage examples consumed by ansible-doc.
# (YAML inside string literals; runtime string content left unchanged.)
DOCUMENTATION = """
---
module: eos_switchport
short_description: Manage switchport (layer 2) interface resources in EOS
description:
- Provides active state management of switchport (layer 2) interface
configuration in Arista EOS. Logical switchports are mutually exclusive
with M(eos_ipinterface).
version_added: 1.0.0
category: Bridging
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The unique interface identifier name. The interface name must use
the full interface name (no abbreviated names). For example,
interfaces should be specified as Ethernet1 not Et1
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
mode:
description:
- Identifies the mode of operation for the interface. Switchport
interfaces can act as trunk interfaces (carrying multiple VLANs)
or as access interfaces (attached to a single VLAN). The EOS
default value is 'access'
required: false
default: null
choices: ['trunk', 'access']
aliases: []
version_added: 1.0.0
access_vlan:
description:
- Configures the VLAN associated with a switchport that is
configured to use 'access' mode. This parameter only takes
effect if mode is equal to 'access'. Valid values for access
vlan are in the range of 1 to 4094. The EOS default value for
access vlan is 1
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
trunk_native_vlan:
description:
- Configures the native VLAN on a trunk interface for untagged
packets entering the switchport. This parameter only takes
effect if mode is equal to 'trunk'. Valid values for trunk
native vlan are in the range of 1 to 4094. The EOS default value
for trunk native value is 1.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
trunk_allowed_vlans:
description:
- Configures the set of VLANs that are allowed to traverse this
switchport interface. This parameter only takes effect if
the mode is configured to 'trunk'. This parameter accepts a comma
delimited list of VLAN IDs to configure on the trunk port. Each
VLAN ID must be in the valid range of 1 to 4094. The EOS default
value for trunk allowed vlans is 1-4094.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
trunk_groups:
description:
- Configures the list of trunk groups on the switchport. The parameter
accepts a comma separated list of values to be provisioned on the
interface.
required: false
default: null
choices: []
aliases: []
version_added: 1.1.0
"""

EXAMPLES = """
- name: Ensure Ethernet1 is an access port
eos_switchport: name=Ethernet1 mode=access access_vlan=10
- name: Ensure Ethernet12 is a trunk port
eos_switchport: name=Ethernet12 mode=trunk trunk_native_vlan=100
- name: Add the set of allowed vlans to Ethernet2/1
eos_switchport: name=Ethernet2/1 mode=trunk trunk_allowed_vlans=1,10,100
- name: Add trunk group values to an interface
eos_switchport: name=Ethernet5 trunk_groups=foo,bar,baz
"""
from pyeapi.utils import expand_range
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
# pyeapi is optional at import time; availability is verified at runtime by
# EosAnsibleModule.check_pyeapi().
try:
    import pyeapi
    PYEAPI_AVAILABLE = True
except ImportError:
    PYEAPI_AVAILABLE = False

# Default syslog priority used by EosAnsibleModule.log().
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE

# Default pyeapi connection profile name.
DEFAULT_CONNECTION = 'localhost'

# Transports accepted by the eAPI 'transport' module argument.
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosAnsibleModule(AnsibleModule):
    """AnsibleModule wrapper adding eAPI connection handling and stateful
    (present/absent) resource semantics for Arista EOS modules.

    NOTE(review): this class uses Python 2-only APIs (dict.iteritems,
    dict.viewitems, Exception.message) and will not run under Python 3.
    """

    # eAPI/transport arguments merged into every module's argument_spec.
    meta_args = {
        'config': dict(),
        'username': dict(),
        'password': dict(),
        'host': dict(),
        'connection': dict(default=DEFAULT_CONNECTION),
        'transport': dict(choices=TRANSPORTS),
        'port': dict(),
        'debug': dict(type='bool', default='false'),
        'logging': dict(type='bool', default='true')
    }

    # Extra argument added only when the module is stateful.
    stateful_args = {
        'state': dict(default='present', choices=['present', 'absent']),
    }

    def __init__(self, stateful=True, *args, **kwargs):
        kwargs['argument_spec'].update(self.meta_args)
        self._stateful = stateful
        if stateful:
            kwargs['argument_spec'].update(self.stateful_args)

        super(EosAnsibleModule, self).__init__(*args, **kwargs)

        self.result = dict(changed=False, changes=dict())

        self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
        self._logging = kwargs.get('logging') or self.params['logging']

        self.log('DEBUG flag is %s' % self._debug)

        self.debug('pyeapi_version', self.check_pyeapi())
        self.debug('stateful', self._stateful)
        self.debug('params', self.params)

        self._attributes = self.map_argument_spec()
        self.validate()

        self._node = self.connect()

        self._instance = None

        self.desired_state = self.params['state'] if self._stateful else None
        self.exit_after_flush = kwargs.get('exit_after_flush')

    @property
    def instance(self):
        # Lazily resolve the current resource state through the module-level
        # 'instance' hook; cached until refresh() clears it.
        if self._instance:
            return self._instance
        func = self.func('instance')
        if not func:
            self.fail('Module does not support "instance"')
        try:
            self._instance = func(self)
        except Exception as exc:
            # exc.message is Python 2 only.
            self.fail('instance[error]: %s' % exc.message)
        self.log("called instance: %s" % self._instance)
        return self._instance

    @property
    def attributes(self):
        return self._attributes

    @property
    def node(self):
        # Lazily (re)establish the pyeapi node connection.
        if self._node:
            return self._node
        self._node = self.connect()
        return self._node

    def check_pyeapi(self):
        # Fail fast when the pyeapi client library is not importable.
        if not PYEAPI_AVAILABLE:
            self.fail('Unable to import pyeapi, is it installed?')
        return pyeapi.__version__

    def map_argument_spec(self):
        """map_argument_spec maps only the module argument spec to attrs

        This method will map the argumentspec minus the meta_args to attrs
        and return the attrs.  This returns a dict object that includes only
        the original argspec plus the stateful_args (if self._stateful=True)

        Returns:
            dict: Returns a dict object that includes the original
                argument_spec plus stateful_args with values minus meta_args
        """
        keys = set(self.params).difference(self.meta_args)
        attrs = dict()
        attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
        if 'CHECKMODE' in attrs:
            del attrs['CHECKMODE']
        return attrs

    def validate(self):
        # Run any module-level validate_<attr> hooks over the attributes.
        # iteritems() is Python 2 only.
        for key, value in self.attributes.iteritems():
            func = self.func('validate_%s' % key)
            if func:
                self.attributes[key] = func(value)

    def create(self):
        # Delegate to the module-level 'create' hook unless in check mode.
        if not self.check_mode:
            func = self.func('create')
            if not func:
                self.fail('Module must define "create" function')
            return self.invoke(func, self)

    def remove(self):
        # Delegate to the module-level 'remove' hook unless in check mode.
        if not self.check_mode:
            func = self.func('remove')
            if not func:
                # NOTE(review): "most" is a typo for "must" in this runtime
                # message; left unchanged here.
                self.fail('Module most define "remove" function')
            return self.invoke(func, self)

    def flush(self, exit_after_flush=False):
        """Reconcile desired attributes against the device and report."""
        self.exit_after_flush = exit_after_flush

        if self.desired_state == 'present' or not self._stateful:
            if self.instance.get('state') == 'absent':
                changed = self.create()
                self.result['changed'] = changed or True
                self.refresh()

            # Set-difference of viewitems() yields the attrs that differ
            # from the current device state (viewitems is Python 2 only).
            changeset = self.attributes.viewitems() - self.instance.viewitems()

            if self._debug:
                self.debug('desired_state', self.attributes)
                self.debug('current_state', self.instance)

            changes = self.update(changeset)
            if changes:
                self.result['changes'] = changes
                self.result['changed'] = True

            self._attributes.update(changes)

            flush = self.func('flush')
            if flush:
                self.invoke(flush, self)

        elif self.desired_state == 'absent' and self._stateful:
            if self.instance.get('state') == 'present':
                changed = self.remove()
                self.result['changed'] = changed or True

        elif self._stateful:
            if self.desired_state != self.instance.get('state'):
                changed = self.invoke(self.instance.get('state'))
                self.result['changed'] = changed or True

        self.refresh()
        self.result['instance'] = self.instance

        if self.exit_after_flush:
            self.exit()

    def update(self, changeset):
        # Apply the set_<key> hook for every changed, non-None attribute.
        changes = dict()
        for key, value in changeset:
            if value is not None:
                changes[key] = value
                func = self.func('set_%s' % key)
                if func and not self.check_mode:
                    try:
                        self.invoke(func, self)
                    except Exception as exc:
                        self.fail(exc.message)
        return changes

    def connect(self):
        """Build a pyeapi Node from module params and verify connectivity."""
        if self.params['config']:
            pyeapi.load_config(self.params['config'])

        config = dict()

        if self.params['connection']:
            config = pyeapi.config_for(self.params['connection'])
            if not config:
                msg = 'Connection name "%s" not found' % self.params['connection']
                self.fail(msg)

        # Explicit module params override values from the config profile.
        if self.params['username']:
            config['username'] = self.params['username']

        if self.params['password']:
            config['password'] = self.params['password']

        if self.params['transport']:
            config['transport'] = self.params['transport']

        if self.params['port']:
            config['port'] = self.params['port']

        if self.params['host']:
            config['host'] = self.params['host']

        if 'transport' not in config:
            self.fail('Connection must define a transport')

        connection = pyeapi.client.make_connection(**config)
        node = pyeapi.client.Node(connection, **config)

        # Smoke-test the connection before handing the node back.
        try:
            resp = node.enable('show version')
            self.debug('eos_version', resp[0]['result']['version'])
            self.debug('eos_model', resp[0]['result']['modelName'])
        except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
            self.fail('unable to connect to %s' % node)
        else:
            self.log('Connected to node %s' % node)
            self.debug('node', str(node))

        return node

    def config(self, commands):
        # Mark the run changed and push config unless in check mode.
        self.result['changed'] = True
        if not self.check_mode:
            self.node.config(commands)

    def api(self, module):
        return self.node.api(module)

    def func(self, name):
        # Resolve a module-level hook function by name.
        return globals().get(name)

    def invoke(self, func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            self.fail(exc.message)

    def invoke_function(self, name, *args, **kwargs):
        func = self.func(name)
        if func:
            return self.invoke(func, *args, **kwargs)

    def fail(self, msg):
        # Run the optional on_fail hook, log, then exit via fail_json.
        self.invoke_function('on_fail', self)
        self.log('ERROR: %s' % msg, syslog.LOG_ERR)
        self.fail_json(msg=msg)

    def exit(self):
        # Run the optional on_exit hook, log, then exit via exit_json.
        self.invoke_function('on_exit', self)
        self.log('Module completed successfully')
        self.exit_json(**self.result)

    def refresh(self):
        # Drop the cached instance so the next access re-reads the device.
        self._instance = None

    def debug(self, key, value):
        if self._debug:
            if 'debug' not in self.result:
                self.result['debug'] = dict()
            self.result['debug'][key] = value

    def log(self, message, priority=None):
        if self._logging:
            syslog.openlog('ansible-eos')
            priority = priority or DEFAULT_SYSLOG_PRIORITY
            syslog.syslog(priority, str(message))

    @classmethod
    def add_state(cls, name):
        # Extend the allowed 'state' choices (affects all subclasses).
        cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def sort_vlans(arg):
    """Return the comma-separated VLAN ID string *arg* sorted numerically."""
    vlan_ids = sorted(int(vid) for vid in arg.split(','))
    return ','.join(str(vid) for vid in vlan_ids)
def instance(module):
    """ Returns switchport instance object properties
    """
    name = module.attributes['name']
    attrs = dict(name=name, state='absent')
    found = module.node.api('switchports').get(name)
    if found:
        attrs['state'] = 'present'
        attrs['mode'] = found['mode']
        attrs['access_vlan'] = found['access_vlan']
        attrs['trunk_native_vlan'] = found['trunk_native_vlan']
        allowed = ','.join(expand_range(found['trunk_allowed_vlans']))
        attrs['trunk_allowed_vlans'] = sort_vlans(allowed)
        attrs['trunk_groups'] = ','.join(found['trunk_groups'])
    return attrs
def create(module):
    """Creates a new instance of switchport on the node
    """
    intf = module.attributes['name']
    module.log('Invoked create for eos_switchport[%s]' % intf)
    api = module.node.api('switchports')
    api.create(intf)
def remove(module):
    """Removes an existing instance of switchport on the node
    """
    intf = module.attributes['name']
    module.log('Invoked remove for eos_switchport[%s]' % intf)
    api = module.node.api('switchports')
    api.delete(intf)
def set_mode(module):
    """Configures the mode attribute for the switchport
    """
    intf = module.attributes['name']
    mode = module.attributes['mode']
    module.log('Invoked set_mode for eos_switchport[%s] '
               'with value %s' % (intf, mode))
    module.node.api('switchports').set_mode(intf, mode)
def set_access_vlan(module):
    """Configures the access vlan attribute for the switchport
    """
    intf = module.attributes['name']
    vlan = module.attributes['access_vlan']
    module.log('Invoked set_access_vlan for eos_switchport[%s] '
               'with value %s' % (intf, vlan))
    module.node.api('switchports').set_access_vlan(intf, vlan)
def set_trunk_native_vlan(module):
    """Configures the trunk native vlan attribute for the switchport
    """
    intf = module.attributes['name']
    vlan = module.attributes['trunk_native_vlan']
    module.log('Invoked set_trunk_native_vlan for eos_switchport[%s] '
               'with value %s' % (intf, vlan))
    module.node.api('switchports').set_trunk_native_vlan(intf, vlan)
def set_trunk_allowed_vlans(module):
    """Configures the trunk allowed vlans attribute for the switchport
    """
    intf = module.attributes['name']
    vlans = module.attributes['trunk_allowed_vlans']
    module.log('Invoked set_trunk_allowed_vlans for eos_switchport[%s] '
               'with value %s' % (intf, vlans))
    module.node.api('switchports').set_trunk_allowed_vlans(intf, vlans)
def set_trunk_groups(module):
    """Configures the set of trunk groups on the interface
    """
    intf = module.attributes['name']
    groups = module.attributes['trunk_groups'].split(',')
    module.log('Invoked set_trunk_groups for eos_switchport[%s] '
               'with value %s' % (intf, groups))
    module.node.api('switchports').set_trunk_groups(intf, groups)
def validate_trunk_groups(value):
    """Validates the trunk_groups argument

    Normalizes a comma-separated list into sorted, comma-joined form;
    returns None for empty/missing input.
    """
    if not value:
        return None
    return ','.join(sorted(value.split(',')))
def validate_trunk_allowed_vlans(value):
    """Validates the trunk_allowed_vlans argument

    Expands ranges (e.g. '1-3') and returns a numerically sorted,
    comma-joined VLAN list; returns None for empty/missing input.
    """
    if not value:
        return None
    expanded = expand_range(value)
    return sort_vlans(','.join(expanded))
def main():
    """ The main module routine called when the module is run by Ansible
    """
    spec = dict(
        name=dict(required=True),
        mode=dict(choices=['access', 'trunk']),
        access_vlan=dict(),
        trunk_native_vlan=dict(),
        trunk_allowed_vlans=dict(),
        trunk_groups=dict()
    )
    module = EosAnsibleModule(argument_spec=spec,
                              supports_check_mode=True)
    module.flush(True)

main()
|
|
"""
Various utility functions.
-----
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License. See http://scipy.org.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
Author: Pearu Peterson <pearu@cens.ioc.ee>
Created: May 2006
-----
"""
# Public API of this utility module.
# NOTE(review): filter_stmts, get_module_files and module_in_file are defined
# below but not exported here — confirm whether that is intentional.
__all__ = ['split_comma', 'specs_split_comma',
           'ParseError', 'AnalyzeError',
           'get_module_file', 'parse_bind', 'parse_result', 'is_name',
           'parse_array_spec',
           'CHAR_BIT', 'str2stmt',
           'classes']
import re
import os, glob
import sys
import traceback
import six
class ParseError(Exception):
    """Raised when Fortran source cannot be parsed."""
    pass
class AnalyzeError(Exception):
    """Raised when semantic analysis of parsed Fortran code fails."""
    pass
# Matches a complete Fortran identifier (case-insensitive).
is_name = re.compile(r'^[a-z_]\w*$', re.I).match
# Matches an identifier at the start of a string.
name_re = re.compile(r'[a-z_]\w*', re.I).match
# Matches the leading name of an entity declaration.
is_entity_decl = re.compile(r'^[a-z_]\w*', re.I).match
# Matches an integer literal with optional kind suffix (e.g. 123_4).
is_int_literal_constant = re.compile(r'^\d+(_\w+|)$').match

# File extensions scanned when searching for Fortran module files.
module_file_extensions = ['.f', '.f90', '.f95', '.f03', '.f08']

# Parses an integer whose digits may be separated by whitespace.
string_to_signed_int = lambda string: int("".join(string.strip().split()))
def split_comma(line, item=None, comma=',', keep_empty=False):
    """Split *line* on *comma*, stripping whitespace from each piece.

    When *item* is supplied, its source map is applied to every piece
    first. Empty pieces are dropped unless *keep_empty* is true.
    """
    if item is None:
        stripped = (piece.strip() for piece in line.split(comma))
        return [piece for piece in stripped if piece or keep_empty]
    if not line:
        return []
    newitem = item.copy(line, True)
    mapped = (newitem.apply_map(piece).strip()
              for piece in newitem.get_line().split(comma))
    return [piece for piece in mapped if piece or keep_empty]
def parse_array_spec(line, item=None):
    """Parse an array-spec into a list of tuples of dimension bounds.

    Each comma-separated spec is split on ':' with empty bounds kept.
    """
    return [tuple(split_comma(spec, item, comma=':', keep_empty=True))
            for spec in split_comma(line, item)]
def specs_split_comma(line, item=None, upper=False):
    """Split a spec list, normalizing keyword items to 'KW = value'.

    Non-keyword items are upper-cased only when *upper* is true.
    """
    specs = []
    for spec in split_comma(line, item):
        lhs, sep, rhs = spec.partition('=')
        if sep:
            specs.append('%s = %s' % (lhs.strip().upper(), rhs.strip()))
        elif upper:
            specs.append(spec.upper())
        else:
            specs.append(spec)
    return specs
def parse_bind(line, item=None):
    """Parse a BIND(...) prefix from *line*.

    Returns ``(args, rest)`` where *args* is the upper-cased list of
    items inside the parentheses and *rest* is the remainder of the
    line, or ``(None, line)`` when the line does not start with BIND.
    """
    if not line.lower().startswith('bind'):
        return None, line
    if item is not None:
        newitem = item.copy(line, apply_map=True)
        newline = newitem.get_line()
    else:
        newitem = None
        # BUG FIX: the original read `newline` before it was ever assigned
        # on this branch (NameError for item=None); start from *line*.
        newline = line
    newline = newline[4:].lstrip()
    i = newline.find(')')
    assert i != -1, repr(newline)
    args = []
    for a in specs_split_comma(newline[1:i].strip(), newitem, upper=True):
        args.append(a)
    rest = newline[i+1:].lstrip()
    if item is not None:
        rest = newitem.apply_map(rest)
    return args, rest
def parse_result(line, item=None):
    """Parse a Fortran ``RESULT(<name>)`` clause.

    Returns ``(name, rest)`` on success, or ``(None, line)`` when the
    line does not start with ``result``.
    """
    if not line.lower().startswith('result'):
        return None, line
    tail = line[6:].lstrip()
    close = tail.find(')')
    assert close != -1, repr(tail)
    name = tail[1:close].strip()
    assert is_name(name), repr(name)
    return name, tail[close + 1:].lstrip()
def filter_stmts(content, classes):
    """ Pop and return classes instances from content.

    ``content`` is modified in place: every item that is an instance of
    ``classes`` is removed; the removed items are returned in their
    original order.
    """
    matches = [(index, stmt) for index, stmt in enumerate(content)
               if isinstance(stmt, classes)]
    # Delete from the end so earlier indices stay valid.
    for index, _ in reversed(matches):
        del content[index]
    return [stmt for _, stmt in matches]
def get_module_files(directory, _cache={}):
    """Return a mapping of Fortran module name -> defining filename for
    all module files in ``directory``.

    Results are memoized per directory in the shared ``_cache`` default
    (intentional mutable default).  Duplicate module definitions are
    reported and the first definition wins.
    """
    if directory in _cache:
        return _cache[directory]
    module_line = re.compile(r'(\A|^)module\s+(?P<name>\w+)\s*(!.*|)$', re.I | re.M)
    d = {}
    files = []
    for ext in module_file_extensions:
        files += glob.glob(os.path.join(directory, '*' + ext))
    for fn in files:
        # Bug fix: the file handle was previously never closed.
        with open(fn, 'r') as f:
            source = f.read()
        # findall returns 3-tuples (one per group); group index 1 is the name.
        for name in module_line.findall(source):
            name = name[1]
            if name in d:
                print(d[name], 'already defines', name)
                continue
            d[name] = fn
    _cache[directory] = d
    return d
def get_module_file(name, directory, _cache={}):
    """Return the filename in ``directory`` that defines Fortran module
    ``name``, or None if not found.

    Uses the shared ``_cache`` default (intentional mutable default) to
    memoize name -> filename lookups.
    """
    fn = _cache.get(name, None)
    if fn is not None:
        return fn
    if name.endswith('_module'):
        for ext in module_file_extensions:
            f1 = os.path.join(directory, name[:-7] + ext)
            if os.path.isfile(f1):
                # Bug fix: previously cached ``fn`` (which is None here)
                # instead of the file that was actually found.
                _cache[name] = f1
                return f1
    files = []
    for ext in module_file_extensions:
        files += glob.glob(os.path.join(directory, '*' + ext))
    for fn in files:
        if module_in_file(name, fn):
            _cache[name] = fn
            return fn
    return
def module_in_file(name, filename):
    """Return ``filename`` if it contains a ``module <name>`` statement
    (case-insensitive), otherwise None.
    """
    name = name.lower()
    pattern = re.compile(r'\s*module\s+(?P<name>[a-z]\w*)', re.I).match
    # ``with`` guarantees the handle is closed on every exit path
    # (previously the early-return path relied on an explicit close).
    with open(filename, 'r') as f:
        for line in f:
            m = pattern(line)
            if m and m.group('name').lower() == name:
                return filename
    return None
def str2stmt(string, isfree=True, isstrict=False):
    """ Convert Fortran code to Statement tree.

    Parses and analyzes ``string``, then unwraps single-child blocks so
    the innermost meaningful statement/block is returned.
    """
    from .readfortran import Line, FortranStringReader
    from .parsefortran import FortranParser
    parser = FortranParser(FortranStringReader(string, isfree, isstrict))
    parser.parse()
    parser.analyze()
    stmt = parser.block
    # Descend through trivial one-child wrappers.
    while len(stmt.content) == 1:
        stmt = stmt.content[0]
    return stmt
def get_char_bit():
    """Return the number of bits in a byte (normally 8), probed by
    left-shifting a ``numpy.ubyte`` until it wraps to zero.
    """
    import numpy
    one = numpy.ubyte(1)
    two = numpy.ubyte(2)
    value = numpy.ubyte(2)
    bits = 1
    # Each shift doubles ``value``; the fixed-width type wraps to 0
    # once the top bit is shifted out, ending the loop.
    while value >= two:
        value <<= one
        bits += 1
    return bits
# Number of bits per byte on this platform, probed at import time.
CHAR_BIT = get_char_bit()
def show_item_on_failure(func, _exception_depth=[0]):
    """
    Decorator for analyze methods.

    Wraps ``func(self)`` so that analysis failures are reported through
    ``self.error(...)`` instead of silently propagating:

      - ``AnalyzeError``: reported (with traceback printed) and swallowed.
      - ``ParseError``: reported and swallowed.
      - any other exception: reported only at the outermost decorated
        call (tracked by the shared ``_exception_depth`` counter, an
        intentional mutable default) and re-raised.
    """
    import functools

    @functools.wraps(func)  # preserve func's name/docstring for debugging
    def new_func(self):
        try:
            func(self)
        except AnalyzeError as msg:
            clsname = self.__class__.__name__
            self.error('%s.analyze error: %s' % (clsname, msg))
            traceback.print_exc()
        except ParseError as msg:
            self.error('parse error: %s' % (msg))
        except Exception as msg:
            _exception_depth[0] += 1
            if _exception_depth[0] == 1:
                self.error('exception triggered here: %s %s' % (Exception, msg))
            raise
        # Reset the depth counter once a call completes without re-raising.
        _exception_depth[0] = 0
    return new_func
# Registry of classes created via the ``classes`` metaclass, keyed by class name.
_classes_cache = {}
class meta_classes(type):
    """ Meta class for ``classes``.

    Attribute access on the ``classes`` type is resolved against the
    module-level ``_classes_cache`` registry.
    """
    __abstractmethods__ = False

    def __getattr__(self, name):
        # Expose created classes only as attributes to ``classes`` type.
        found = _classes_cache.get(name)
        if found is not None:
            return found
        raise AttributeError('instance does not have attribute %r' % (name))
class classes(six.with_metaclass(meta_classes, type)):
    """Make classes available as attributes of this class.

    To add a class to the attributes list, one must use::

        __metaclass__ = classes

    in the definition of the class.

    In addition, apply the following tasks:

      * decorate analyze methods with show_item_on_failure
    """
    def __new__(metacls, name, bases, dict):
        # Wrap any ``analyze`` method so its failures are reported via
        # ``self.error()`` (see show_item_on_failure above).
        if 'analyze' in dict:
            dict['analyze'] = show_item_on_failure(dict['analyze'])
        cls = type.__new__(metacls, name, bases, dict)
        # Register the new class so meta_classes.__getattr__ can expose
        # it as an attribute of ``classes``.
        _classes_cache[name] = cls
        return cls
|
|
# Natural Language Toolkit: Graphical Representations for Trees
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: tree.py,v 1.2 2005/07/19 04:28:36 adastra Exp $
"""
Graphically display a C{Tree}.
"""
from Tkinter import *
from nltk.tree import Tree
from nltk.token import Token
from nltk.draw import *
import sys
##//////////////////////////////////////////////////////
## Tree Segment
##//////////////////////////////////////////////////////
class TreeSegmentWidget(CanvasWidget):
    """
    A canvas widget that displays a single segment of a hierarchical
    tree.  Each C{TreeSegmentWidget} connects a single X{node widget}
    to a sequence of zero or more X{subtree widgets}.  By default, the
    bottom of the node is connected to the top of each subtree by a
    single line.  However, if the C{roof} attribute is set, then a
    single triangular "roof" will connect the node to all of its
    children.

    Attributes:
      - C{roof}: What sort of connection to draw between the node and
        its subtrees.  If C{roof} is true, draw a single triangular
        "roof" over the subtrees.  If C{roof} is false, draw a line
        between each subtree and the node.  Default value is false.
      - C{xspace}: The amount of horizontal space to leave between
        subtrees when managing this widget.  Default value is 10.
      - C{yspace}: The amount of space to place between the node and
        its children when managing this widget.  Default value is 15.
      - C{color}: The color of the lines connecting the node to its
        subtrees; and of the outline of the triangular roof.  Default
        value is C{'#006060'}.
      - C{fill}: The fill color for the triangular roof.  Default
        value is C{''} (no fill).
      - C{width}: The width of the lines connecting the node to its
        subtrees; and of the outline of the triangular roof.  Default
        value is 1.
      - C{orientation}: Determines whether the tree branches downwards
        or rightwards.  Possible values are C{'horizontal'} and
        C{'vertical'}.  The default value is C{'vertical'} (i.e.,
        branch downwards).
      - C{draggable}: whether the widget can be dragged by the user.

    The following attributes may also be added in the near future:
      - C{lineM{n}_color}: The color of the line connecting the node
        to its C{M{n}}th subtree.
      - C{lineM{n}_width}: The width of the line connecting the node
        to its C{M{n}}th subtree.
      - C{lineM{n}_dash}: The dash pattern of the line connecting the
        node to its C{M{n}}th subtree.
    """
    def __init__(self, canvas, node, subtrees, **attribs):
        """
        @param canvas: The canvas on which this segment is drawn.
        @param node: The canvas widget displayed as this segment's node.
        @type subtrees: C{list} of C{CanvasWidgetI}
        @param subtrees: The widgets connected below the node.
        """
        self._node = node
        self._subtrees = subtrees

        # Attributes
        self._horizontal = 0
        self._roof = 0
        self._xspace = 10
        self._yspace = 15
        self._ordered = False

        # Create canvas objects: one connecting line per subtree, plus a
        # hidden triangular polygon used when ``roof`` is enabled.
        self._lines = [canvas.create_line(0, 0, 0, 0, fill='#006060')
                       for c in subtrees]
        self._polygon = canvas.create_polygon(0, 0, fill='', state='hidden',
                                              outline='#006060')

        # Register child widgets (node + subtrees)
        self._add_child_widget(node)
        for subtree in subtrees:
            self._add_child_widget(subtree)

        # Are we currently managing?  (Suppresses _maintain_order while
        # _manage() repositions children.)
        self._managing = False

        CanvasWidget.__init__(self, canvas, **attribs)

    def __setitem__(self, attr, value):
        canvas = self.canvas()
        # Bug fix: was ``attr is 'roof'`` -- identity comparison against a
        # string literal, which is implementation-dependent.
        if attr == 'roof':
            self._roof = value
            if self._roof:
                for l in self._lines: canvas.itemconfig(l, state='hidden')
                canvas.itemconfig(self._polygon, state='normal')
            else:
                for l in self._lines: canvas.itemconfig(l, state='normal')
                canvas.itemconfig(self._polygon, state='hidden')
        elif attr == 'orientation':
            if value == 'horizontal': self._horizontal = 1
            elif value == 'vertical': self._horizontal = 0
            else:
                raise ValueError('orientation must be horizontal or vertical')
        elif attr == 'color':
            for l in self._lines: canvas.itemconfig(l, fill=value)
            canvas.itemconfig(self._polygon, outline=value)
        elif isinstance(attr, tuple) and attr[0] == 'color':
            # Set the color of an individual line.
            l = self._lines[int(attr[1])]
            canvas.itemconfig(l, fill=value)
        elif attr == 'fill':
            canvas.itemconfig(self._polygon, fill=value)
        elif attr == 'width':
            canvas.itemconfig(self._polygon, {attr:value})
            for l in self._lines: canvas.itemconfig(l, {attr:value})
        elif attr in ('xspace', 'yspace'):
            if attr == 'xspace': self._xspace = value
            elif attr == 'yspace': self._yspace = value
            self.update(self._node)
        elif attr == 'ordered':
            self._ordered = value
        else:
            CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr == 'roof': return self._roof
        elif attr == 'width':
            return self.canvas().itemcget(self._polygon, attr)
        elif attr == 'color':
            return self.canvas().itemcget(self._polygon, 'outline')
        elif isinstance(attr, tuple) and attr[0] == 'color':
            l = self._lines[int(attr[1])]
            return self.canvas().itemcget(l, 'fill')
        elif attr == 'xspace': return self._xspace
        elif attr == 'yspace': return self._yspace
        elif attr == 'orientation':
            if self._horizontal: return 'horizontal'
            else: return 'vertical'
        elif attr == 'ordered':
            return self._ordered
        else:
            return CanvasWidget.__getitem__(self, attr)

    def node(self):
        """Return the node widget."""
        return self._node

    def subtrees(self):
        """Return a copy of the list of subtree widgets."""
        return self._subtrees[:]

    def set_node(self, node):
        """
        Set the node to C{node}.
        """
        self._remove_child_widget(self._node)
        self._add_child_widget(node)
        self._node = node
        self.update(self._node)

    def replace_child(self, oldchild, newchild):
        """
        Replace the child C{oldchild} with C{newchild}.
        """
        index = self._subtrees.index(oldchild)
        self._subtrees[index] = newchild
        self._remove_child_widget(oldchild)
        self._add_child_widget(newchild)
        self.update(newchild)

    def remove_child(self, child):
        """Remove C{child} (and its connecting line) from this segment."""
        index = self._subtrees.index(child)
        del self._subtrees[index]
        self._remove_child_widget(child)
        # Bug fix: delete the line at the child's index (was ``pop()``,
        # which removed the *last* line and left per-line attributes --
        # e.g. ('color', n) -- pointing at the wrong subtree).
        self.canvas().delete(self._lines.pop(index))
        self.update(self._node)

    def insert_child(self, index, child):
        """Insert C{child} (with a new connecting line) at C{index}."""
        self._subtrees.insert(index, child)
        self._add_child_widget(child)
        # Bug fix: ``canvas`` was an unbound name here (NameError); also
        # insert the new line at the same index so self._lines stays
        # aligned with self._subtrees.
        self._lines.insert(index,
                           self.canvas().create_line(0, 0, 0, 0,
                                                     fill='#006060'))
        self.update(self._node)

    def _tags(self):
        # The segment's own canvas items: the roof polygon when roofed,
        # otherwise the connecting lines.
        if self._roof:
            return [self._polygon]
        else:
            return self._lines

    def _subtree_top(self, child):
        # Attachment point on a subtree: the top (or left, when
        # horizontal) midpoint of its node widget or bounding box.
        if isinstance(child, TreeSegmentWidget):
            bbox = child.node().bbox()
        else:
            bbox = child.bbox()
        if self._horizontal:
            return (bbox[0], (bbox[1]+bbox[3])/2.0)
        else:
            return ((bbox[0]+bbox[2])/2.0, bbox[1])

    def _node_bottom(self):
        # Attachment point on the node: its bottom (or right) midpoint.
        bbox = self._node.bbox()
        if self._horizontal:
            return (bbox[2], (bbox[1]+bbox[3])/2.0)
        else:
            return ((bbox[0]+bbox[2])/2.0, bbox[3])

    def _update(self, child):
        if len(self._subtrees) == 0: return
        if self._node.bbox() is None: return # [XX] ???

        # Which lines need to be redrawn?
        if child is self._node: need_update = self._subtrees
        else: need_update = [child]

        if self._ordered and not self._managing:
            need_update = self._maintain_order(child)

        # Update the polygon.
        (nodex, nodey) = self._node_bottom()
        (xmin, ymin, xmax, ymax) = self._subtrees[0].bbox()
        for subtree in self._subtrees[1:]:
            bbox = subtree.bbox()
            xmin = min(xmin, bbox[0])
            ymin = min(ymin, bbox[1])
            xmax = max(xmax, bbox[2])
            ymax = max(ymax, bbox[3])

        if self._horizontal:
            self.canvas().coords(self._polygon, nodex, nodey, xmin,
                                 ymin, xmin, ymax, nodex, nodey)
        else:
            self.canvas().coords(self._polygon, nodex, nodey, xmin,
                                 ymin, xmax, ymin, nodex, nodey)

        # Redraw all lines that need it.
        for subtree in need_update:
            (nodex, nodey) = self._node_bottom()
            line = self._lines[self._subtrees.index(subtree)]
            (subtreex, subtreey) = self._subtree_top(subtree)
            self.canvas().coords(line, nodex, nodey, subtreex, subtreey)

    def _maintain_order(self, child):
        if self._horizontal:
            return self._maintain_order_horizontal(child)
        else:
            return self._maintain_order_vertical(child)

    def _maintain_order_vertical(self, child):
        (left, top, right, bot) = child.bbox()

        if child is self._node:
            # Check all the leaves
            for subtree in self._subtrees:
                (x1, y1, x2, y2) = subtree.bbox()
                if bot+self._yspace > y1:
                    subtree.move(0,bot+self._yspace-y1)
            return self._subtrees
        else:
            moved = [child]
            index = self._subtrees.index(child)

            # Check leaves to our right.
            x = right + self._xspace
            for i in range(index+1, len(self._subtrees)):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if x > x1:
                    self._subtrees[i].move(x-x1, 0)
                    x += x2-x1 + self._xspace
                    moved.append(self._subtrees[i])

            # Check leaves to our left.
            x = left - self._xspace
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if x < x2:
                    self._subtrees[i].move(x-x2, 0)
                    x -= x2-x1 + self._xspace
                    moved.append(self._subtrees[i])

            # Check the node
            (x1, y1, x2, y2) = self._node.bbox()
            if y2 > top-self._yspace:
                self._node.move(0, top-self._yspace-y2)
                moved = self._subtrees

        # Return a list of the nodes we moved
        return moved

    def _maintain_order_horizontal(self, child):
        (left, top, right, bot) = child.bbox()

        if child is self._node:
            # Check all the leaves
            for subtree in self._subtrees:
                (x1, y1, x2, y2) = subtree.bbox()
                if right+self._xspace > x1:
                    # Bug fix: move() takes (dx, dy); the dy argument
                    # was missing, which raised TypeError.
                    subtree.move(right+self._xspace-x1, 0)
            return self._subtrees
        else:
            moved = [child]
            index = self._subtrees.index(child)

            # Check leaves below us.
            y = bot + self._yspace
            for i in range(index+1, len(self._subtrees)):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if y > y1:
                    self._subtrees[i].move(0, y-y1)
                    y += y2-y1 + self._yspace
                    moved.append(self._subtrees[i])

            # Check leaves above us
            y = top - self._yspace
            for i in range(index-1, -1, -1):
                (x1, y1, x2, y2) = self._subtrees[i].bbox()
                if y < y2:
                    self._subtrees[i].move(0, y-y2)
                    y -= y2-y1 + self._yspace
                    moved.append(self._subtrees[i])

            # Check the node
            (x1, y1, x2, y2) = self._node.bbox()
            if x2 > left-self._xspace:
                self._node.move(left-self._xspace-x2, 0)
                moved = self._subtrees

        # Return a list of the nodes we moved
        return moved

    def _manage_horizontal(self):
        (nodex, nodey) = self._node_bottom()

        # Put the subtrees in a line.
        y = 20
        for subtree in self._subtrees:
            subtree_bbox = subtree.bbox()
            dx = nodex - subtree_bbox[0] + self._xspace
            dy = y - subtree_bbox[1]
            subtree.move(dx, dy)
            y += subtree_bbox[3] - subtree_bbox[1] + self._yspace

        # Find the center of their tops.
        center = 0.0
        for subtree in self._subtrees:
            center += self._subtree_top(subtree)[1]
        center /= len(self._subtrees)

        # Center the subtrees with the node.
        for subtree in self._subtrees:
            subtree.move(0, nodey-center)

    def _manage_vertical(self):
        (nodex, nodey) = self._node_bottom()

        # Put the subtrees in a line.
        x = 0
        for subtree in self._subtrees:
            subtree_bbox = subtree.bbox()
            dy = nodey - subtree_bbox[1] + self._yspace
            dx = x - subtree_bbox[0]
            subtree.move(dx, dy)
            x += subtree_bbox[2] - subtree_bbox[0] + self._xspace

        # Find the center of their tops.
        center = 0.0
        for subtree in self._subtrees:
            center += self._subtree_top(subtree)[0]/len(self._subtrees)

        # Center the subtrees with the node.
        for subtree in self._subtrees:
            subtree.move(nodex-center, 0)

    def _manage(self):
        # Bug fix: the early return previously happened *after* setting
        # self._managing = True, leaving the flag stuck on for childless
        # segments (which permanently disabled _maintain_order).
        if len(self._subtrees) == 0: return
        self._managing = True
        if self._horizontal: self._manage_horizontal()
        else: self._manage_vertical()

        # Update lines to subtrees.
        for subtree in self._subtrees:
            self._update(subtree)

        self._managing = False

    def __repr__(self):
        return '[TreeSeg %s: %s]' % (self._node, self._subtrees)
def _tree_to_treeseg(canvas, tree, make_node, make_leaf,
                     tree_attribs, node_attribs,
                     leaf_attribs, loc_attribs,
                     property_names={'LEAF':'TEXT'}):
    """Recursively convert ``tree`` into a C{TreeSegmentWidget} (for
    C{Tree} values) or a leaf widget (for C{Token}s and bare values).
    """
    LEAF = property_names.get('LEAF', 'LEAF')
    if not isinstance(tree, Tree):
        # Leaf: tokens contribute their LEAF property, anything else is
        # passed through as-is.
        if isinstance(tree, Token):
            return make_leaf(canvas, tree[LEAF], **leaf_attribs)
        return make_leaf(canvas, tree, **leaf_attribs)
    node_widget = make_node(canvas, tree.node, **node_attribs)
    child_widgets = [_tree_to_treeseg(canvas, child, make_node, make_leaf,
                                      tree_attribs, node_attribs,
                                      leaf_attribs, loc_attribs)
                     for child in tree]
    return TreeSegmentWidget(canvas, node_widget, child_widgets,
                             **tree_attribs)
def tree_to_treesegment(canvas, tree, make_node=TextWidget,
                        make_leaf=TextWidget, **attribs):
    """
    Convert a C{Tree} into a C{TreeSegmentWidget}.

    @param make_node: A C{CanvasWidget} constructor or a function that
        creates C{CanvasWidgets}.  C{make_node} is used to convert
        the C{Tree}'s nodes into C{CanvasWidgets}.  If no constructor
        is specified, then C{TextWidget} will be used.
    @param make_leaf: A C{CanvasWidget} constructor or a function that
        creates C{CanvasWidgets}.  C{make_leaf} is used to convert
        the C{Tree}'s leafs into C{CanvasWidgets}.  If no constructor
        is specified, then C{TextWidget} will be used.
    @param attribs: Attributes for the canvas widgets that make up the
        returned C{TreeSegmentWidget}.  Any attribute beginning with
        C{'tree_'} will be passed to all C{TreeSegmentWidget}s (with
        the C{'tree_'} prefix removed.  Any attribute beginning with
        C{'node_'} will be passed to all nodes.  Any attribute
        beginning with C{'leaf_'} will be passed to all leaves.  And
        any attribute beginning with C{'loc_'} will be passed to all
        text locations (for C{Tree}s).
    """
    # Sort keyword attributes into per-widget-kind dictionaries by prefix.
    tree_attribs = {}
    node_attribs = {}
    leaf_attribs = {}
    loc_attribs = {}
    routing = (('tree_', tree_attribs), ('node_', node_attribs),
               ('leaf_', leaf_attribs), ('loc_', loc_attribs))
    for (key, value) in attribs.items():
        for (prefix, target) in routing:
            if key.startswith(prefix):
                target[key[len(prefix):]] = value
                break
        else:
            raise ValueError('Bad attribute: %s' % key)
    return _tree_to_treeseg(canvas, tree, make_node, make_leaf,
                            tree_attribs, node_attribs,
                            leaf_attribs, loc_attribs)
##//////////////////////////////////////////////////////
## Tree Widget
##//////////////////////////////////////////////////////
class TreeWidget(CanvasWidget):
    """
    A canvas widget that displays a single C{Tree}.
    C{TreeWidget} manages a group of C{TreeSegmentWidget}s that are
    used to display a C{Tree}.

    Attributes:
      - C{node_M{attr}}: Sets the attribute C{M{attr}} on all of the
        node widgets for this C{TreeWidget}.
      - C{leaf_M{attr}}: Sets the attribute C{M{attr}} on all of the
        leaf widgets for this C{TreeWidget}.
      - C{loc_M{attr}}: Sets the attribute C{M{attr}} on all of the
        location widgets for this C{TreeWidget} (if it was built from
        a C{Tree}).  Note that location widgets are C{TextWidget}s.
      - C{xspace}: The amount of horizontal space to leave between
        subtrees when managing this widget.  Default value is 10.
      - C{yspace}: The amount of space to place between the node and
        its children when managing this widget.  Default value is 15.
      - C{line_color}: The color of the lines connecting each expanded
        node to its subtrees.
      - C{roof_color}: The color of the outline of the triangular roof
        for collapsed trees.
      - C{roof_fill}: The fill color for the triangular roof for
        collapsed trees.
      - C{width}
      - C{orientation}: Determines whether the tree branches downwards
        or rightwards.  Possible values are C{'horizontal'} and
        C{'vertical'}.  The default value is C{'vertical'} (i.e.,
        branch downwards).
      - C{shapeable}: whether the subtrees can be independently
        dragged by the user.  THIS property simply sets the
        C{DRAGGABLE} property on all of the C{TreeWidget}'s tree
        segments.
      - C{draggable}: whether the widget can be dragged by the user.
    """
    def __init__(self, canvas, tree, make_node=TextWidget,
                 make_leaf=TextWidget, property_names={'LEAF':'TEXT'},
                 **attribs):
        # Node & leaf canvas widget constructors
        self._make_node = make_node
        self._make_leaf = make_leaf
        self._tree = tree

        # Property names
        self._property_names = property_names

        # Attributes.
        self._nodeattribs = {}
        self._leafattribs = {}
        self._locattribs = {'color': '#008000'}
        self._line_color = '#008080'
        self._line_width = 1
        self._roof_color = '#008080'
        self._roof_fill = '#c0c0c0'
        self._shapeable = False
        self._xspace = 10
        self._yspace = 10
        self._orientation = 'vertical'
        self._ordered = False

        # Build trees.  For every subtree both an expanded segment and a
        # collapsed (roofed) segment is built; toggle_collapsed() swaps
        # between them.
        self._keys = {} # treeseg -> key
        self._expanded_trees = {}
        self._collapsed_trees = {}
        self._nodes = []
        self._leaves = []
        #self._locs = []
        self._make_collapsed_trees(canvas, tree, ())
        self._treeseg = self._make_expanded_tree(canvas, tree, ())
        self._add_child_widget(self._treeseg)

        CanvasWidget.__init__(self, canvas, **attribs)

    def expanded_tree(self, *path_to_tree):
        """
        Return the C{TreeSegmentWidget} for the specified subtree.

        @param path_to_tree: A list of indices i1, i2, ..., in, where
            the desired widget is the widget corresponding to
            C{tree.children()[i1].children()[i2]....children()[in]}.
            For the root, the path is C{()}.
        """
        return self._expanded_trees[path_to_tree]

    def collapsed_tree(self, *path_to_tree):
        """
        Return the C{TreeSegmentWidget} for the specified subtree.

        @param path_to_tree: A list of indices i1, i2, ..., in, where
            the desired widget is the widget corresponding to
            C{tree.children()[i1].children()[i2]....children()[in]}.
            For the root, the path is C{()}.
        """
        return self._collapsed_trees[path_to_tree]

    def bind_click_trees(self, callback, button=1):
        """
        Add a binding to all tree segments.
        """
        for tseg in self._expanded_trees.values():
            tseg.bind_click(callback, button)
        for tseg in self._collapsed_trees.values():
            tseg.bind_click(callback, button)

    def bind_drag_trees(self, callback, button=1):
        """
        Add a binding to all tree segments.
        """
        for tseg in self._expanded_trees.values():
            tseg.bind_drag(callback, button)
        for tseg in self._collapsed_trees.values():
            tseg.bind_drag(callback, button)

    def bind_click_leaves(self, callback, button=1):
        """
        Add a binding to all leaves.
        """
        # Bug fix: this loop was duplicated, registering every callback
        # twice (so it fired twice per click).
        for leaf in self._leaves: leaf.bind_click(callback, button)

    def bind_drag_leaves(self, callback, button=1):
        """
        Add a binding to all leaves.
        """
        # Bug fix: duplicated loop removed (double registration).
        for leaf in self._leaves: leaf.bind_drag(callback, button)

    def bind_click_nodes(self, callback, button=1):
        """
        Add a binding to all nodes.
        """
        # Bug fix: duplicated loop removed (double registration).
        for node in self._nodes: node.bind_click(callback, button)

    def bind_drag_nodes(self, callback, button=1):
        """
        Add a binding to all nodes.
        """
        # Bug fix: duplicated loop removed (double registration).
        for node in self._nodes: node.bind_drag(callback, button)

    def _make_collapsed_trees(self, canvas, tree, key):
        # Build the roofed (collapsed) segment for ``tree`` and,
        # recursively, for each of its subtrees; all start hidden.
        LEAF = self._property_names.get('LEAF', 'LEAF')
        if not isinstance(tree, Tree): return
        make_node = self._make_node
        make_leaf = self._make_leaf

        node = make_node(canvas, tree.node, **self._nodeattribs)
        self._nodes.append(node)
        leaves = [make_leaf(canvas, l[LEAF], **self._leafattribs)
                  for l in tree.leaves()]
        self._leaves += leaves
        treeseg = TreeSegmentWidget(canvas, node, leaves, roof=1,
                                    color=self._roof_color,
                                    fill=self._roof_fill,
                                    width=self._line_width)

        self._collapsed_trees[key] = treeseg
        self._keys[treeseg] = key
        #self._add_child_widget(treeseg)
        treeseg.hide()

        # Build trees for children.
        for i in range(len(tree)):
            child = tree[i]
            self._make_collapsed_trees(canvas, child, key + (i,))

    def _make_expanded_tree(self, canvas, tree, key):
        # Build the fully-expanded segment for ``tree``; leaves may be
        # Tokens (use their LEAF property) or bare values.
        LEAF = self._property_names.get('LEAF', 'LEAF')
        make_node = self._make_node
        make_leaf = self._make_leaf

        if isinstance(tree, Tree):
            node = make_node(canvas, tree.node, **self._nodeattribs)
            self._nodes.append(node)
            children = tree
            subtrees = [self._make_expanded_tree(canvas, children[i], key+(i,))
                        for i in range(len(children))]
            treeseg = TreeSegmentWidget(canvas, node, subtrees,
                                        color=self._line_color,
                                        width=self._line_width)
            self._expanded_trees[key] = treeseg
            self._keys[treeseg] = key
            return treeseg
        elif isinstance(tree, Token):
            leaf = make_leaf(canvas, tree[LEAF], **self._leafattribs)
            self._leaves.append(leaf)
            return leaf
        else:
            leaf = make_leaf(canvas, tree, **self._leafattribs)
            self._leaves.append(leaf)
            return leaf

    def __setitem__(self, attr, value):
        if attr[:5] == 'node_':
            for node in self._nodes: node[attr[5:]] = value
        elif attr[:5] == 'leaf_':
            for leaf in self._leaves: leaf[attr[5:]] = value
        elif attr == 'line_color':
            self._line_color = value
            for tseg in self._expanded_trees.values(): tseg['color'] = value
        elif attr == 'line_width':
            self._line_width = value
            for tseg in self._expanded_trees.values(): tseg['width'] = value
            for tseg in self._collapsed_trees.values(): tseg['width'] = value
        elif attr == 'roof_color':
            self._roof_color = value
            for tseg in self._collapsed_trees.values(): tseg['color'] = value
        elif attr == 'roof_fill':
            self._roof_fill = value
            for tseg in self._collapsed_trees.values(): tseg['fill'] = value
        elif attr == 'shapeable':
            self._shapeable = value
            for tseg in self._expanded_trees.values():
                tseg['draggable'] = value
            for tseg in self._collapsed_trees.values():
                tseg['draggable'] = value
            for leaf in self._leaves: leaf['draggable'] = value
        elif attr == 'xspace':
            self._xspace = value
            for tseg in self._expanded_trees.values():
                tseg['xspace'] = value
            for tseg in self._collapsed_trees.values():
                tseg['xspace'] = value
            self.manage()
        elif attr == 'yspace':
            self._yspace = value
            for tseg in self._expanded_trees.values():
                tseg['yspace'] = value
            for tseg in self._collapsed_trees.values():
                tseg['yspace'] = value
            self.manage()
        elif attr == 'orientation':
            self._orientation = value
            for tseg in self._expanded_trees.values():
                tseg['orientation'] = value
            for tseg in self._collapsed_trees.values():
                tseg['orientation'] = value
            self.manage()
        elif attr == 'ordered':
            self._ordered = value
            for tseg in self._expanded_trees.values():
                tseg['ordered'] = value
            for tseg in self._collapsed_trees.values():
                tseg['ordered'] = value
        else: CanvasWidget.__setitem__(self, attr, value)

    def __getitem__(self, attr):
        if attr[:5] == 'node_':
            return self._nodeattribs.get(attr[5:], None)
        elif attr[:5] == 'leaf_':
            return self._leafattribs.get(attr[5:], None)
        elif attr[:4] == 'loc_':
            return self._locattribs.get(attr[4:], None)
        elif attr == 'line_color': return self._line_color
        elif attr == 'line_width': return self._line_width
        elif attr == 'roof_color': return self._roof_color
        elif attr == 'roof_fill': return self._roof_fill
        elif attr == 'shapeable': return self._shapeable
        elif attr == 'xspace': return self._xspace
        elif attr == 'yspace': return self._yspace
        elif attr == 'orientation': return self._orientation
        # Consistency fix: 'ordered' was settable via __setitem__ but not
        # readable here (TreeSegmentWidget exposes it in both).
        elif attr == 'ordered': return self._ordered
        else: return CanvasWidget.__getitem__(self, attr)

    def _tags(self): return []

    def _manage(self):
        # Explicit list() makes the dict-view concatenation intention
        # clear (and version-agnostic).
        segs = (list(self._expanded_trees.values()) +
                list(self._collapsed_trees.values()))
        for tseg in segs:
            if tseg.hidden():
                tseg.show()
                tseg.manage()
                tseg.hide()

    def toggle_collapsed(self, treeseg):
        """
        Collapse/expand a tree.
        """
        old_treeseg = treeseg
        if old_treeseg['roof']:
            new_treeseg = self._expanded_trees[self._keys[old_treeseg]]
        else:
            new_treeseg = self._collapsed_trees[self._keys[old_treeseg]]

        # Replace the old tree with the new tree.
        if old_treeseg.parent() is self:
            self._remove_child_widget(old_treeseg)
            self._add_child_widget(new_treeseg)
            self._treeseg = new_treeseg
        else:
            old_treeseg.parent().replace_child(old_treeseg, new_treeseg)

        # Move the new tree to where the old tree was.  Show it first,
        # so we can find its bounding box.
        new_treeseg.show()
        (newx, newy) = new_treeseg.node().bbox()[:2]
        (oldx, oldy) = old_treeseg.node().bbox()[:2]
        new_treeseg.move(oldx-newx, oldy-newy)

        # Hide the old tree
        old_treeseg.hide()

        # We could do parent.manage() here instead, if we wanted.
        new_treeseg.parent().update(new_treeseg)
##//////////////////////////////////////////////////////
## draw_trees
##//////////////////////////////////////////////////////
class TreeView:
    """A Tk window that displays one or more trees, laid out in a grid,
    with a menubar for zooming and Postscript printing and keyboard
    shortcuts for quitting (Ctrl-x/Ctrl-q) and printing (Ctrl-p).
    """
    def __init__(self, *trees):
        from nltk.draw import CanvasFrame
        from math import sqrt, ceil

        self._trees = trees

        # Top-level window and its keyboard shortcuts.
        self._top = Tk()
        self._top.title('NLTK')
        self._top.bind('<Control-x>', self.destroy)
        self._top.bind('<Control-q>', self.destroy)

        cf = self._cframe = CanvasFrame(self._top)
        self._top.bind('<Control-p>', self._cframe.print_to_file)

        # Size is variable.
        self._size = IntVar(self._top)
        self._size.set(12)
        # Negative Tk font sizes are measured in pixels.
        bold = ('helvetica', -self._size.get(), 'bold')
        helv = ('helvetica', -self._size.get())

        # Lay the trees out in a square.
        self._width = int(ceil(sqrt(len(trees))))
        self._widgets = []
        for i in range(len(trees)):
            widget = TreeWidget(cf.canvas(), trees[i], node_font=bold,
                                leaf_color='#008040', node_color='#004080',
                                roof_color='#004040', roof_fill='white',
                                line_color='#004040', draggable=1,
                                leaf_font=helv)
            widget.bind_click_trees(widget.toggle_collapsed)
            self._widgets.append(widget)
            cf.add_widget(widget, 0, 0)

        self._layout()
        self._cframe.pack(expand=1, fill='both')
        self._init_menubar()

    def _layout(self):
        # Position the tree widgets left-to-right, wrapping to a new row
        # every self._width widgets (10px padding between cells).
        i = x = y = ymax = 0
        width = self._width
        for i in range(len(self._widgets)):
            widget = self._widgets[i]
            (oldx, oldy) = widget.bbox()[:2]
            if i % width == 0:
                y = ymax
                x = 0
            widget.move(x-oldx, y-oldy)
            x = widget.bbox()[2] + 10
            ymax = max(ymax, widget.bbox()[3] + 10)

    def _init_menubar(self):
        # File menu (print / exit) and Zoom menu (radio buttons bound to
        # self._size; resize() re-renders at the chosen size).
        menubar = Menu(self._top)

        filemenu = Menu(menubar, tearoff=0)
        filemenu.add_command(label='Print to Postscript', underline=0,
                             command=self._cframe.print_to_file,
                             accelerator='Ctrl-p')
        filemenu.add_command(label='Exit', underline=1,
                             command=self.destroy, accelerator='Ctrl-x')
        menubar.add_cascade(label='File', underline=0, menu=filemenu)

        zoommenu = Menu(menubar, tearoff=0)
        zoommenu.add_radiobutton(label='Tiny', variable=self._size,
                                 underline=0, value=10, command=self.resize)
        zoommenu.add_radiobutton(label='Small', variable=self._size,
                                 underline=0, value=12, command=self.resize)
        zoommenu.add_radiobutton(label='Medium', variable=self._size,
                                 underline=0, value=14, command=self.resize)
        zoommenu.add_radiobutton(label='Large', variable=self._size,
                                 underline=0, value=28, command=self.resize)
        zoommenu.add_radiobutton(label='Huge', variable=self._size,
                                 underline=0, value=50, command=self.resize)
        menubar.add_cascade(label='Zoom', underline=0, menu=zoommenu)

        self._top.config(menu=menubar)

    def resize(self, *e):
        # Rebuild fonts and spacing from the currently selected zoom size.
        bold = ('helvetica', -self._size.get(), 'bold')
        helv = ('helvetica', -self._size.get())
        xspace = self._size.get()
        yspace = self._size.get()
        for widget in self._widgets:
            widget['node_font'] = bold
            widget['leaf_font'] = helv
            widget['xspace'] = xspace
            widget['yspace'] = yspace
            # Thicker connecting lines at larger zoom levels.
            if self._size.get() < 20: widget['line_width'] = 1
            elif self._size.get() < 30: widget['line_width'] = 2
            else: widget['line_width'] = 3
        self._layout()

    def destroy(self, *e):
        # Idempotent: safe to call more than once.
        if self._top is None: return
        self._top.destroy()
        self._top = None

    def mainloop(self, *args, **kwargs):
        """
        Enter the Tkinter mainloop.  This function must be called if
        this demo is created from a non-interactive program (e.g.
        from a script); otherwise, the demo will close as soon as
        the script completes.
        """
        # in_idle() comes from the ``nltk.draw`` star import; inside
        # IDLE a mainloop is already running, so don't start another.
        if in_idle(): return
        self._top.mainloop(*args, **kwargs)
def draw_trees(*trees):
    """
    Open a new window containing a graphical diagram of the given
    trees.

    @rtype: None
    """
    viewer = TreeView(*trees)
    viewer.mainloop()
##//////////////////////////////////////////////////////
## Demo Code
##//////////////////////////////////////////////////////
import random
if __name__ == '__main__':
    # Demo: build several trees with different widget styles and
    # interaction bindings, then run the Tk mainloop.
    def fill(cw):
        # Give the clicked widget a random fill color.
        cw['fill'] = '#%06d' % random.randint(0,999999)

    cf = CanvasFrame(width=550, height=450, closeenough=2)

    # Top-left: a plain TreeWidget with custom fonts/colors.
    tree = Tree.parse('''
    (S (NP the very big cat)
       (VP (Adv sorta) (V saw) (NP (Det the) (N dog))))
    ''', leafparser = lambda t: Token(TEXT=t))
    tc = TreeWidget(cf.canvas(), tree, draggable=1,
                    node_font=('helvetica', -14, 'bold'),
                    leaf_font=('helvetica', -12, 'italic'),
                    roof_fill='white', roof_color='black',
                    leaf_color='green4', node_color='blue2')
    cf.add_widget(tc,10,10)

    # Custom node/leaf widget constructors for the second tree.
    def boxit(canvas, text):
        big = ('helvetica', -16, 'bold')
        return BoxWidget(canvas, TextWidget(canvas, text,
                                            font=big), fill='green')
    def ovalit(canvas, text):
        return OvalWidget(canvas, TextWidget(canvas, text),
                          fill='cyan')

    # Top-right: a shapeable TreeWidget using boxed/oval widgets.
    treetok = Tree.parse('''
    (S (NP this tree) (VP (V is) (AdjP shapeable)))
    ''', leafparser = lambda t: Token(TEXT=t))
    tc2 = TreeWidget(cf.canvas(), treetok, boxit, ovalit, shapeable=1)

    def color(node):
        # Random node color on right-click.
        node['color'] = '#%04d00' % random.randint(0,9999)
    def color2(treeseg):
        # Random fill for a clicked tree segment's node.
        treeseg.node()['fill'] = '#%06d' % random.randint(0,9999)
        treeseg.node().child()['color'] = 'white'

    tc.bind_click_trees(tc.toggle_collapsed)
    tc2.bind_click_trees(tc2.toggle_collapsed)
    tc.bind_click_nodes(color, 3)
    tc2.expanded_tree(1).bind_click(color2, 3)
    tc2.expanded_tree().bind_click(color2, 3)

    paren = ParenWidget(cf.canvas(), tc2)
    cf.add_widget(paren, tc.bbox()[2]+10, 10)

    # Bottom-left: built via tree_to_treesegment instead of TreeWidget.
    tree3 = Tree.parse('''
    (S (NP this tree) (AUX was)
       (VP (V built) (PP (P with) (NP (N tree_to_treesegment)))))
    ''', leafparser = lambda t: Token(TEXT=t))
    tc3 = tree_to_treesegment(cf.canvas(), tree3, tree_color='green4',
                              tree_xspace=2, tree_width=2)
    tc3['draggable'] = 1
    cf.add_widget(tc3, 10, tc.bbox()[3]+10)

    def orientswitch(treewidget):
        # Toggle horizontal/vertical layout, relabelling the leaves
        # that spell out the current orientation.
        if treewidget['orientation'] == 'horizontal':
            treewidget.expanded_tree(1,1).subtrees()[0].set_text('vertical')
            treewidget.collapsed_tree(1,1).subtrees()[0].set_text('vertical')
            treewidget.collapsed_tree(1).subtrees()[1].set_text('vertical')
            treewidget.collapsed_tree().subtrees()[3].set_text('vertical')
            treewidget['orientation'] = 'vertical'
        else:
            treewidget.expanded_tree(1,1).subtrees()[0].set_text('horizontal')
            treewidget.collapsed_tree(1,1).subtrees()[0].set_text('horizontal')
            treewidget.collapsed_tree(1).subtrees()[1].set_text('horizontal')
            treewidget.collapsed_tree().subtrees()[3].set_text('horizontal')
            treewidget['orientation'] = 'horizontal'

    text = """
    Try clicking, right clicking, and dragging
    different elements of each of the trees.
    The top-left tree is a TreeWidget built from
    a Tree. The top-right is a TreeWidget built
    from a Tree, using non-default widget
    constructors for the nodes & leaves (BoxWidget
    and OvalWidget). The bottom-left tree is
    built from tree_to_treesegment."""
    twidget = TextWidget(cf.canvas(), text.strip())
    textbox = BoxWidget(cf.canvas(), twidget, fill='white', draggable=1)
    cf.add_widget(textbox, tc3.bbox()[2]+10, tc2.bbox()[3]+10)

    # Bottom-right: a horizontally-oriented tree; left-click switches
    # its orientation via orientswitch above.
    tree4 = Tree.parse('''
    (S (NP this tree) (VP (V is) (Adj horizontal)))
    ''', leafparser = lambda t: Token(TEXT=t))
    tc4 = TreeWidget(cf.canvas(), tree4, draggable=1,
                     line_color='brown2', roof_color='brown2',
                     node_font=('helvetica', -12, 'bold'),
                     node_color='brown4', orientation='horizontal')
    tc4.manage()
    cf.add_widget(tc4, tc3.bbox()[2]+10, textbox.bbox()[3]+10)
    tc4.bind_click(orientswitch)
    tc4.bind_click_trees(tc4.toggle_collapsed, 3)

    # Run mainloop
    cf.mainloop()
|
|
"""
api/Sets.py
Author: Josh Williams
Date Added: Fri Jan 26 10:18:26 CST 2007
Manages user album sets.
"""
## STD LIBS
import datetime
import time
## OUR LIBS
from AZTKAPI import AZTKAPI
from decorators import stack, zapi
import validation, utils, errors
## 3RD PARTY LIBS
from twisted.internet.defer import Deferred, DeferredList
from twisted.web import xmlrpc
class Sets(AZTKAPI, xmlrpc.XMLRPC):
    """
    API for dealing with user sets
    """
    # Feature flags read by the AZTK framework to decide how this API is
    # exposed (node-internal, plain web, and ZAPI/XML-RPC).
    enable_node = True
    enable_web = True
    enable_zapi = True

    def _start(self):
        """
        Framework start hook: build per-instance whitelists.

        - valid_set_attrs: set columns that create_set()/set_attr() may
          touch.
        - valid_sorts: maps "<field>-<dir>" keys to (ORDER BY expression,
          direction) pairs used by get_list()/get_albums().
        """
        self.valid_set_attrs = ["title", "description", "main_image"]
        self.valid_sorts = {
            'title-asc': ("lower(title)", "ASC"),
            'title-desc': ("lower(title)", "DESC"),
            'updated-asc': ("updated", "ASC"),
            'updated-desc': ("updated", "DESC")
        }

    # The framework invokes start(); alias it to the implementation.
    start = _start

    def return_error(self, error):
        """
        Wrap an error payload in an already-fired Deferred using this
        API's (code, payload) convention, where -1 means failure.
        """
        d = Deferred()
        d.callback((-1, error))
        return d
@stack
def check_set_title(self, owner_userid, title):
"""
Checks to see if a set with a certain title already exists for a particular user.
@param owner_username: User who owns the set.
@type owner_username: String
@param title: Title to check
@type title: String
"""
try:
owner_username = validation.cast_integer(owner_userid, 'owner_userid')
title = validation.string(title)
except errors.ValidationError, ex:
return utils.return_deferred_error(ex.value)
d = self.app.db.query("""
SELECT zoto_user_owns_set_title(%s, %s) AS owns
""", (owner_userid, title), single_row=True)
d.addCallback(lambda result: (0, result['owns']))
d.addErrback(lambda _: (-1, _.getErrorMessage()))
return d
@zapi("Checks to see if a set title is already present in a user's account",
[('owner_username', "User's account to check", basestring),
('title', "Title to check", basestring)],
target_user_index=0)
def xmlrpc_check_set_title(self, info, owner_userid, title):
return self.check_set_title(owner_userid, title)
    @stack
    def create_set(self, owner_userid, meta_info):
        """
        Creates a set within the system.

        @param owner_userid: User who is creating the set.
        @type owner_userid: Integer

        @param meta_info: Information about the set; keys must be in
            self.valid_set_attrs and 'title' is required.
        @type meta_info: Dictionary
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            if not meta_info.has_key('title'):
                raise errors.ValidationError, "Title is required"
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)

        @stack
        def create(result):
            # result comes from check_set_title(): (code, owns_flag).
            if result[1] == True:
                return (-1, "Set [%s] already exists" % meta_info['title'])
            ##
            ## Build the fields/values
            ##
            fields = ['owner_userid']
            values = ["%(owner_userid)s"]
            query_args = {'owner_userid': owner_userid}
            for key, value in meta_info.items():
                if key not in self.valid_set_attrs:
                    return (-1, "Invalid attribute: %s" % key)
                fields.append(key)
                # Named placeholder of the form %(key)s for the insert.
                values.append("%%(%s)s" % key)
                query_args[key] = utils.sql_escape(value)

            @stack
            def insert_txn(txn, field_list, value_list, info):
                # Insert the row and read back the generated set_id in
                # the same transaction.
                txn.execute("""
                    INSERT INTO
                        user_album_sets (
                            %s
                        ) VALUES (
                            %s
                        )
                    """ % (", ".join(field_list), ", ".join(value_list)), info)
                txn.execute("""
                    SELECT CURRVAL('user_album_sets_set_id_seq') AS set_id
                    """)
                id = txn.fetchone()['set_id']
                return (0, id)
            d2 = self.app.db.runInteraction(insert_txn, fields, values, query_args)
            d2.addErrback(lambda _: (-1, _.getErrorMessage()))
            return d2
        ##
        ## Name clash?
        ##
        d = self.check_set_title(owner_userid, meta_info['title'])
        d.addCallback(create)
        return d

    @zapi("Creates an album set",
        [('meta_info', "Information dictionary about the set. Must contain at least title", dict)],
        needs_auth=True)
    def xmlrpc_create_set(self, info, meta_info):
        return self.create_set(info['userid'], meta_info)
@stack
def delete_set(self, owner_userid, set_id):
"""
Deletes a set.
@param owner_username: Set owner
@type owner_username: String
@param set_id: ID of the set to delete
@type set_id: Integer
"""
try:
owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
set_id = validation.integer(set_id, 'set_id')
except errors.ValidationError, ex:
return utils.return_deferred_error(ex.value)
d = self.app.db.query("""
SELECT * FROM zoto_set_delete(%s, %s)
""", (owner_userid, set_id), single_row=True)
d.addCallback(lambda _: (_['code'], _['message']))
d.addErrback(lambda _: (-1, _.getErrorMessage()))
return d
@zapi("Deletes a user set.",
[('set_id', "Set to delete", int)],
needs_auth=True)
def xmlrpc_delete_set(self, info, set_id):
return self.delete_set(info['userid'], set_id)
    @stack
    def set_attr(self, owner_userid, set_id, key, value):
        """
        Changes an attribute on a set.

        @param owner_userid: Set owner
        @type owner_userid: Integer

        @param set_id: Set ID
        @type set_id: Integer

        @param key: Value to be changed (must be in valid_set_attrs)
        @type key: String

        @param value: New value
        @type value: String
        """
        try:
            # NOTE(review): owner_userid is not validated here, unlike
            # every other method in this class -- confirm whether
            # cast_integer should be applied.
            set_id = validation.cast_integer(set_id, 'set_id')
            validation.oneof(key, self.valid_set_attrs, 'key')
            value = validation.string(value)
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)
        # The public attribute name maps to the main_image_id column.
        if key == 'main_image':
            key = 'main_image_id'
        # Interpolating key is safe: it was whitelisted via oneof() above.
        # value/set_id/owner_userid go through driver placeholders.
        d = self.app.db.runOperation("""
            UPDATE
                user_album_sets
            SET
                %s = %%s
            WHERE
                set_id = %%s AND
                owner_userid = %%s
            """ % key, (value, set_id, owner_userid))
        d.addCallback(lambda _: (0, "success"))
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d

    @zapi("Sets an attribute for a set",
        [('set_id', "Set ID", int),
        ('key', "Attribute being altered", basestring),
        ('value', "New attribute value", basestring)],
        needs_auth=True)
    def xmlrpc_set_attr(self, info, set_id, key, value):
        # NOTE(review): this definition is shadowed by the second
        # xmlrpc_set_attr below, so this generic form is unreachable at
        # runtime -- the later definition should probably be renamed
        # (e.g. xmlrpc_set_main_image).
        return self.set_attr(info['userid'], set_id, key, value)

    @zapi("Sets the main image for a set",
        [('set_id', "Set ID", int),
        ('media_id', "ID for the new main image", basestring)],
        needs_auth=True,
        target_media_index=1)
    def xmlrpc_set_attr(self, info, set_id, image_id):
        # Shadows the previous xmlrpc_set_attr definition (see note there).
        return self.set_attr(info['userid'], set_id, 'main_image', image_id)
    @stack
    def add_album(self, owner_userid, set_id, album_id):
        """
        Adds an album to the specified set.

        @param owner_userid: Owner userid
        @type owner_userid: Integer

        @param set_id: Id of the parent set
        @type set_id: Integer

        @param album_id: Album ID being added
        @type album_id: Integer
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            set_id = validation.cast_integer(set_id, 'set_id')
            album_id = validation.cast_integer(album_id, 'album_id')
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)

        @stack
        def check_set(result):
            # The user must own the target set...
            if result['owns']:
                d2 = self.app.db.query("""
                    SELECT zoto_user_owns_album(%s, %s) AS owns
                    """, (owner_userid, album_id), single_row=True)
                d2.addCallback(check_album)
                return d2
            else:
                return (-1, "User doesn't own set: %s" % set_id)

        @stack
        def check_album(result):
            # ...and also own the album being added.
            if result['owns']:
                d3 = self.app.db.query("""
                    SELECT * FROM zoto_set_add_album(%s, %s, %s)
                    """, (owner_userid, set_id, album_id), single_row=True)
                d3.addCallback(lambda result: (result['code'], result['message']))
                return d3
            else:
                return (-1, "User doesn't own album: %s" % album_id)

        d = self.app.db.query("""
            SELECT zoto_user_owns_set(%s, %s) AS owns
            """, (owner_userid, set_id), single_row=True)
        d.addCallback(check_set)
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d
@zapi("Adds an album to a set",
[('set_ids', "Set IDs", (list, tuple)),
('album_ids', "Album IDs", (list, tuple))],
needs_auth=True)
def xmlrpc_add_albums(self, info, set_ids, album_ids):
@stack
def process_set(set_id):
dl2 = []
for album_id in album_ids:
dl2.append(process_album(set_id, album_id))
dList2 = DeferredList(dl2, fireOnOneErrback=True)
return dList2
@stack
def process_album(set_id, album_id):
return self.add_album(info['userid'], set_id, album_id)
dl = []
for set_id in set_ids:
dl.append(process_set(set_id))
dList = DeferredList(dl, fireOnOneErrback=True)
dList.addCallback(lambda _: (0, "success"))
dList.addErrback(lambda _: (-1, _.getErrorMessage))
return dList
    @stack
    def del_album(self, owner_userid, set_id, album_id):
        """
        Removes an album from a set.

        @param owner_userid: User who owns the set
        @type owner_userid: Integer

        @param set_id: Set to remove the album from
        @type set_id: Integer

        @param album_id: Album to be removed
        @type album_id: Integer
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            set_id = validation.cast_integer(set_id, 'set_id')
            album_id = validation.cast_integer(album_id, 'album_id')
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)
        # The stored procedure returns a (code, message) row; presumably
        # it also enforces ownership -- verify against the DB schema.
        d = self.app.db.query("""
            SELECT * FROM zoto_set_del_album(%s, %s, %s)
            """, (owner_userid, set_id, album_id), single_row=True)
        d.addCallback(lambda result: (result['code'], result['message']))
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d

    @zapi("Removes an album from a set.",
        [('set_id', "Set the album should be removed from", int),
        ('album_id', "Album to be removed", int)],
        needs_auth=True)
    def xmlrpc_del_album(self, info, set_id, album_id):
        return self.del_album(info['userid'], set_id, album_id)
@zapi("Removes an album from a set",
[('set_ids', "Set IDs", (list, tuple)),
('album_ids', "Album IDs", (list, tuple))],
needs_auth=True)
def xmlrpc_del_albums(self, info, set_ids, album_ids):
@stack
def process_set(set_id):
dl2 = []
for album_id in album_ids:
dl2.append(process_album(set_id, album_id))
dList2 = DeferredList(dl2, fireOnOneErrback=True)
return dList2
@stack
def process_album(set_id, album_id):
return self.del_album(info['userid'], set_id, album_id)
dl = []
for set_id in set_ids:
dl.append(process_set(set_id))
dList = DeferredList(dl, fireOnOneErrback=True)
dList.addCallback(lambda _: (0, "success"))
dList.addErrback(lambda _: (-1, _.getErrorMessage))
return dList
    @stack
    def get_info(self, owner_userid, browse_userid, set_id):
        """
        Gets information and statistics about a set.

        @param owner_userid: Owner of the set
        @type owner_userid: Integer

        @param browse_userid: User requesting the information (may be
            falsy for anonymous browsing)
        @type browse_userid: Integer

        @param set_id: Set to get information for
        @type set_id: Integer
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            if browse_userid:
                browse_userid = validation.cast_integer(browse_userid, 'browse_userid')
            set_id = validation.cast_integer(set_id, 'set_id')
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)
        d = self.app.db.query("""
            SELECT
                set_id,
                title,
                description,
                zoto_get_latest_id(main_image_id) AS main_image,
                main_image_id,
                t2.username AS owner_username,
                owner_userid,
                updated
            FROM
                user_album_sets t1
                JOIN users t2 ON (t1.owner_userid = t2.userid)
            WHERE
                set_id = %s
            """, (set_id, ), single_row=True)

        @stack
        def get_stats(result):
            # No row -> unknown set; report success with an empty dict.
            if result:
                d2 = self.get_albums(owner_userid, browse_userid, {'set_id': set_id, 'count_only': True}, 0, 0)
                d2.addCallback(add_stats, result)
                return d2
            else:
                return (0, {})

        @stack
        def add_stats(result, set_info):
            # Merge the album/image counts (from get_albums count_only
            # mode) into the base set row when the stats query succeeded.
            if result and result[0] == 0:
                set_info['total_albums'] = result[1]['total_albums']
                set_info['total_images'] = result[1]['total_images']
            return (0, set_info)

        d.addCallback(get_stats)
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d

    @zapi("Gets information about a set",
        [('owner_username', "Owner username", basestring),
        ('set_id', "Set ID", int)],
        target_user_index=0)
    def xmlrpc_get_info(self, info, owner_userid, set_id):
        return self.get_info(owner_userid, info['userid'], set_id)
    @stack
    def get_list(self, owner_userid, browse_userid, glob, limit, offset):
        """
        Gets a user's list of sets.

        @param owner_userid: User to get sets for.
        @type owner_userid: Integer

        @param browse_userid: Viewing user (used for permission checks)
        @type browse_userid: Integer

        @param glob: Dictionary of query options; keys used here:
            'album_id', 'count_only', 'order_by', 'order_dir'.
        @type glob: Dictionary

        @param limit: Maximum number of sets to return (0 = no limit)
        @type limit: Integer

        @param offset: Starting point for query
        @type offset: Integer
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            limit = validation.cast_integer(limit, 'limit')
            offset = validation.cast_integer(offset, 'offset')
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)
        # Build the query piecewise; free-form values go through
        # query_args placeholders, while limit/offset are validated ints.
        select = []
        joins = [
            'user_album_sets t1',
            'LEFT JOIN user_album_set_xref_albums t2 USING (set_id)',
        ]
        where = [
            "t1.owner_userid = %(owner_userid)s",
            "zoto_user_can_view_album(t1.owner_userid, t2.album_id, %(browse_userid)s)"
        ]
        group_by = []
        order_by = ""
        order_dir = ""
        limit_sql = ""
        offset_sql = ""
        order_by_sql = ""
        single = False
        query_args = {
            'owner_userid': owner_userid,
            'browse_userid': browse_userid
        }
        # Optionally restrict to sets containing a specific album.
        if glob.has_key('album_id') and glob['album_id'] != -1:
            where.append("t2.album_id = %(album_id)s")
            query_args['album_id'] = glob['album_id']
        if glob.has_key('count_only') and glob['count_only']:
            # Aggregate counts only -> single-row result.
            joins.append('LEFT JOIN user_album_xref_user_images t3 USING (album_id)')
            select.append("""
                (
                SELECT
                    count(*)
                FROM
                    user_albums t5
                WHERE
                    owner_userid = %(owner_userid)s AND
                    zoto_user_can_view_album(t5.owner_userid, t5.album_id, %(browse_userid)s)
                ) AS total_albums
                """)
            select.append('count(distinct t1.set_id) AS total_sets')
            select.append('count(distinct t1.set_id) AS count')
            select.append('count(t3.image_id) AS total_images')
            single = True
        else:
            # Full rows: per-set fields plus viewable album/image counts.
            select.append("""
                (
                SELECT
                    count(*)
                FROM
                    user_album_set_xref_albums t5
                    JOIN user_album_sets t7 USING (set_id)
                WHERE
                    set_id = t1.set_id AND
                    zoto_user_can_view_album(t7.owner_userid, t5.album_id, %(browse_userid)s)
                ) AS total_albums
                """)
            extra_fields = ['t1.set_id', 't1.title', 't1.description', 'zoto_get_latest_id(t1.main_image_id) AS main_image', 't1.main_image_id', 't1.updated']
            group_by = ['t1.set_id', 't1.title', 't1.description', 't1.main_image_id', 't1.updated']
            select += extra_fields
            select.append("""
                (
                SELECT
                    count(*)
                FROM
                    user_album_xref_user_images t3
                    LEFT JOIN user_album_set_xref_albums t4 USING (album_id)
                    JOIN user_album_sets t6 USING (set_id)
                WHERE
                    t4.set_id = t1.set_id AND
                    zoto_user_can_view_album(t6.owner_userid, t3.album_id, %(browse_userid)s)
                ) AS total_images
                """)
            #group_by += extra_fields
        if limit:
            limit_sql = "LIMIT %s" % limit
        if offset:
            offset_sql = "OFFSET %s" % offset
        order_by = 'set_id'
        order_dir = 'desc'
        if glob.has_key('order_by'):
            order_by = glob['order_by']
        if glob.has_key('order_dir'):
            order_dir = glob['order_dir']
        # Sorting is restricted to the valid_sorts whitelist.
        sort = "%s-%s" % (order_by, order_dir)
        if self.valid_sorts.has_key(sort):
            sort_item = self.valid_sorts[sort]
        else:
            # NOTE(review): the default 'set_id-desc' is not in
            # valid_sorts, so callers that pass no sort options always
            # hit this warning and get an arbitrary whitelist entry --
            # confirm intent.
            self.log.warning("Invalid sort specified: %s" % sort)
            sort_item = self.valid_sorts[self.valid_sorts.keys()[0]]
        order_by_sql = "ORDER BY %s %s" % (sort_item[0], sort_item[1])
        group_by_sql = ""
        if len(group_by):
            group_by_sql = "GROUP BY %s" % ", ".join(group_by)
        query = """
            SELECT
                %s
            FROM
                %s
            WHERE
                %s
            %s -- group_by
            %s -- order_by
            %s -- limit
            %s -- offset
            """ % (", ".join(select), " ".join(joins), " AND ".join(where), group_by_sql, order_by_sql, limit_sql, offset_sql)
        self.log.debug("sets.get_list() query:\n%s" % query)
        d = self.app.db.query(query, query_args, single_row=single)
        d.addCallback(lambda _: (0, _))
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d

    @zapi("Gets a list of sets",
        [('owner_username', "User to get sets for", basestring),
        ('glob', "Dictionary of query arguments", dict),
        ('limit', "Maximum records to return", int),
        ('offset', "Offset to begin retrieving records", int)],
        target_user_index=0)
    def xmlrpc_get_list(self, info, owner_userid, glob, limit, offset):
        return self.get_list(owner_userid, info['userid'], glob, limit, offset)
    @stack
    def get_albums(self, owner_userid, auth_userid, glob, limit, offset):
        """
        Gets a list of a user's albums within a set.

        @param owner_userid: User to get list for
        @type owner_userid: Integer

        @param auth_userid: Viewing user (may be falsy when anonymous)
        @type auth_userid: Integer

        @param glob: dict to hold options; keys used here: 'set_id',
            'count_only', 'order_by', 'order_dir'
        @type glob: Dictionary

        @param limit: Number of albums to get (0 = no limit)
        @type limit: Integer

        @param offset: Offset within the user's albums to start at
        @type offset: Integer
        """
        try:
            owner_userid = validation.cast_integer(owner_userid, 'owner_userid')
            if auth_userid:
                auth_userid = validation.cast_integer(auth_userid, 'auth_userid')
            limit = validation.cast_integer(limit, 'limit')
            offset = validation.cast_integer(offset, 'offset')
        except errors.ValidationError, ex:
            return utils.return_deferred_error(ex.value)
        # Build the query piecewise; free-form values go through
        # query_args placeholders, while limit/offset are validated ints.
        select = [
        ]
        joins = [
            'user_albums t1',
            'JOIN zoto_album_permissions_view t4 USING (album_id)',
        ]
        where = [
            "t1.owner_userid = %(owner_userid)s",
            "zoto_user_can_view_album(t1.owner_userid, t1.album_id, %(auth_userid)s)"
        ]
        query_args = {
            'owner_userid': owner_userid,
            'auth_userid': auth_userid
        }
        group_by = []
        order_by = ""
        order_dir = ""
        limit_sql = ""
        offset_sql = ""
        order_by_sql = ""
        group_by_sql = ""
        # Optionally restrict to albums belonging to one set.
        if glob.has_key('set_id') and glob['set_id'] and glob['set_id'] != -1:
            joins.append('LEFT JOIN user_album_set_xref_albums t2 USING (album_id)')
            where.append('set_id = %(set_id)s')
            query_args['set_id'] = glob['set_id']
        single = False
        if glob.has_key('count_only') and glob['count_only']:
            # Aggregate counts only -> single-row result.
            joins.append('LEFT JOIN user_album_xref_user_images t3 USING (album_id)')
            select.append('count(distinct t1.title) AS total_albums')
            select.append('count(distinct t1.title) AS count')
            select.append('count(t3.image_id) AS total_images')
            single = True
        else:
            # Full rows: per-album fields plus an image count subquery.
            extra_fields = [
                't1.album_id',
                'zoto_get_user_name(t1.owner_userid) AS owner_username',
                't1.owner_userid',
                't1.title',
                't1.description',
                'zoto_get_latest_id(t1.main_image_id) AS main_image',
                't1.main_image_id',
                't1.main_image_size',
                't1.per_page',
                't1.order_by',
                't1.order_dir',
                't1.thumb_size',
                't1.updated',
                't4.view_flag'
            ]
            select += extra_fields
            #group_by += extra_fields
            select.append("""
                (
                SELECT
                    count(*)
                FROM
                    user_album_xref_user_images t3
                    JOIN user_images t5 USING (image_id)
                WHERE
                    album_id = t1.album_id
                ) AS total_images
                """)
        if limit:
            limit_sql = "LIMIT %s" % limit
        if offset:
            offset_sql = "OFFSET %s" % offset
        order_by = 'album_id'
        order_dir = 'desc'
        if glob.has_key('order_by'):
            order_by = glob['order_by']
        if glob.has_key('order_dir'):
            order_dir = glob['order_dir']
        # Sorting is restricted to the valid_sorts whitelist.
        sort = "%s-%s" % (order_by, order_dir)
        if self.valid_sorts.has_key(sort):
            sort_item = self.valid_sorts[sort]
        else:
            # NOTE(review): the default 'album_id-desc' is not in
            # valid_sorts, so callers that pass no sort options always
            # hit this warning and get an arbitrary whitelist entry --
            # confirm intent.
            self.log.warning("Invalid sort specified: %s" % sort)
            sort_item = self.valid_sorts[self.valid_sorts.keys()[0]]
        order_by_sql = "ORDER BY %s %s" % (sort_item[0], sort_item[1])
        if len(group_by):
            group_by_sql = "GROUP BY %s" % ", ".join(group_by)
        query = """
            SELECT
                %s
            FROM
                %s
            WHERE
                %s
            %s -- group_by
            %s -- order_by
            %s -- limit
            %s -- offset
            """ % (", ".join(select), " ".join(joins), " AND ".join(where), group_by_sql, order_by_sql, limit_sql, offset_sql)
        self.log.debug("sets.get_albums() query:\n%s" % query)
        d = self.app.db.query(query, query_args, single_row=single)
        d.addCallback(lambda _: (0, _))
        d.addErrback(lambda _: (-1, _.getErrorMessage()))
        return d

    @zapi("Gets a list of a user's albums",
        [('owner_username', "User to get a list for", basestring),
        ('glob', "Dictionary of query options", dict, False, {}),
        ('limit', "Maximum number of albums to return", int, False, 0),
        ('offset', "Offset to begin returning results", int, False, 0)],
        target_user_index=0)
    def xmlrpc_get_albums(self, info, owner_userid, glob, limit, offset):
        return self.get_albums(owner_userid, info['userid'], glob, limit, offset)
|
|
#!/usr/bin/env python
import json
import os
import pytz
import requests
import threading
import time
from collections import OrderedDict
from datetime import datetime
class Error(Exception):
    """Base exception for auditor client failures; server responses of
    type "error" are re-raised as this."""
    pass
class Auditor(object):
    """
    Thin HTTP client for the auditing service.

    @param hostname: server host
    @param port: server port
    @param secure: build https:// URLs instead of http://
    @param buffer_secs: when set, Event detail updates are batched and
        flushed on this interval instead of being committed per call
    """

    def __init__(self, hostname, port, secure=False, buffer_secs=None):
        self.hostname = hostname
        self.port = port
        self.secure = secure
        self.buffer_secs = buffer_secs
        self.events = Events(self)

    def _base_url(self):
        # BUG FIX: honor self.secure -- it was stored but the scheme was
        # hard-coded to http:// everywhere URLs were built.
        scheme = "https" if self.secure else "http"
        return "%s://%s:%s" % (scheme, self.hostname, self.port)

    def _request(self, caller, handler, key=None, value=None):
        """
        Issue a JSON request through ``caller`` (a requests.* function)
        and return the "data" member of the server payload.

        Raises Error when the server answers with type == "error".
        """
        headers = {'Content-type': 'application/json'}
        kwargs = {
            "headers": headers,
            "timeout": 10,
        }
        if key and value:
            kwargs["data"] = json.dumps({key: value})
        response = caller(
            "%s%s" % (self._base_url(), handler), **kwargs)
        data = json.loads(response.text)
        if data["type"] == "error":
            raise Error(data["data"]["msg"])
        return data["data"]

    # NOTE: the parameters of _put/_post are named to match the actual
    # positional call order used throughout this module (handler first).
    # The old (key, value, handler) signatures silently re-ordered the
    # arguments back, which was misleading but produced the same calls.
    def _put(self, handler, key, value):
        return self._request(requests.put, handler, key, value)

    def _post(self, handler, key, value):
        return self._request(requests.post, handler, key, value)

    def _get(self, handler):
        return self._request(requests.get, handler)

    def alog(self, summary, tags="", user=None, level=1, close=True):
        """
        Create an event. With close=True (the default) the event is
        opened and immediately ended -- a plain summary log entry -- and
        nothing is returned; otherwise an open Event is returned.
        """
        data = {
            "summary": summary,
            "user": get_user(user),
            "level": level,
            "start": pytz.UTC.localize(datetime.utcnow()),
        }
        if isinstance(tags, list):
            tags = ", ".join(tags)
        if tags: data["tags"] = tags
        if close:
            data["end"] = data["start"]
        response = json.loads(requests.post("%s/event/" % self._base_url(), data=data).text)
        if response["type"] == "error":
            raise Error(response["data"]["msg"])
        # Don't return an Event at all when doing a simple
        # summary log.
        if close:
            return
        return Event(self, response["data"])
class EventCommiter(threading.Thread):
    """
    Background thread that periodically flushes an Event's batched
    details, using the connection's buffer_secs as the flush interval.
    Exits once the event is marked closing.
    """

    def __init__(self, event):
        self.event = event
        super(EventCommiter, self).__init__()

    def run(self):
        last_flush = 0
        while not self.event._closing:
            current = time.time()
            # Re-read buffer_secs each pass so interval changes apply.
            if (current - last_flush) >= self.event._connection.buffer_secs:
                self.event.commit()
                last_flush = time.time()
            time.sleep(.2)
class Events(object):
    """
    Lazy, paginated view over the audit server's event list.

    Indexing/slicing maps to offset+limit query parameters rather than
    true item access: ``events[a:b]`` fetches offset a through record b,
    and a bare integer is treated as a limit. Iteration walks *all*
    events, ``limit`` records per request.
    """

    def __init__(self, connection):
        self._connection = connection
        # Page size for iteration and the default slice span.
        self.limit = 50

    def __getitem__(self, val):
        offset = 0
        if not isinstance(val, int):
            # val is a slice; a falsy start means offset 0. Slice steps
            # are ignored.
            if val.start:
                offset = val.start
            limit = self.limit + offset
            if val.stop:
                limit = val.stop
        else:
            limit = val
        events, total = self._get_events(offset, limit)
        return events

    def _get_events(self, offset, limit):
        # One round trip; returns (page of Event objects, total count).
        response = self._connection._get("/event/?offset=%s&limit=%s" % (offset, limit))
        total = response["total"]
        events = [Event(self._connection, event) for event in response["events"]]
        return events, total

    def __iter__(self):
        events, total = self._get_events(0, self.limit)
        for event in events:
            yield event
        # If this is True we need to start paginating.
        if total > len(events):
            # BUG FIX: use floor division so the page count stays an
            # integer under Python 3 ("/" yields a float there, making
            # range() raise TypeError); behavior on Python 2 is unchanged.
            for idx in range(1, (total // self.limit) + 1):
                offset = idx * self.limit
                events, _ = self._get_events(offset, offset + self.limit)
                for event in events:
                    yield event
class Event(object):
    """
    A single audit event on the server.

    Detail mutations made through ``attrs`` / ``streams`` are batched
    per detail name and either committed synchronously (no buffer_secs
    on the connection) or flushed periodically by an EventCommiter
    background thread.
    """

    def __init__(self, connection, payload):
        self._connection = connection
        self._update(payload)
        # Checked by the EventCommiter loop; set by close().
        self._closing = False
        self._commiter = None
        # Pending detail mutations, keyed by detail name, insertion-ordered.
        self._batched_details = {
            "attribute": OrderedDict(),
            "stream": OrderedDict(),
        }
        self._batched_details_lock = threading.RLock()
        # Proxies: event.attrs.foo.set(...) / event.streams.log.append(...)
        self.attrs = DetailsDescriptor(self, "attribute")
        self.streams = DetailsDescriptor(self, "stream")

    def _add_detail(self, details_type, name, value, mode="set"):
        """
        Record one mutation ("set" or "append") for a named detail;
        commit immediately when the connection is unbuffered.
        """
        if self._commiter is None:
            self._start_commiter()
        with self._batched_details_lock:
            detail = self._batched_details[details_type]
            if name not in detail:
                detail[name] = {
                    "details_type": details_type,
                    "name": name,
                    "value": [],
                    "mode": "append",
                }
            if mode == "set":
                # A "set" supersedes anything batched so far for this name.
                detail[name]["mode"] = "set"
                detail[name]["value"] = [value]
            elif mode == "append":
                detail[name]["value"].append(value)
        if not self._connection.buffer_secs:
            self.commit()

    def _start_commiter(self):
        # Only buffered connections get a background flusher.
        if self._connection.buffer_secs:
            # This must be started last so that it has access to all
            # of the attributes when it is started.
            self._commiter = EventCommiter(self)
            self._commiter.daemon = True
            self._commiter.start()

    @staticmethod
    def _build_payload(values):
        """
        Flatten batched detail records into the wire format: stream
        chunks are joined into a single value; attribute values are
        emitted one record each, with only the first marked "set" when
        the batch was a set.
        """
        payload = []
        for detail in values:
            if detail["details_type"] == "stream":
                payload.append({
                    "details_type": "stream",
                    "name": detail["name"],
                    "value": "".join(detail["value"]),
                    "mode": detail["mode"],
                })
            elif detail["details_type"] == "attribute":
                for idx, val in enumerate(detail["value"]):
                    mode = "append"
                    if detail["mode"] == "set" and idx == 0:
                        mode = "set"
                    payload.append({
                        "details_type": "attribute",
                        "name": detail["name"],
                        "value": val,
                        "mode": mode,
                    })
        return payload

    def commit(self):
        """Flush all batched details in one POST; no-op when empty."""
        with self._batched_details_lock:
            # NOTE(review): relies on dict.values() returning a list
            # (Python 2); under Python 3 the += below would need list()
            # wrappers.
            values = self._batched_details["attribute"].values()
            values += self._batched_details["stream"].values()
            if not len(values):
                return
            self._batched_details["attribute"] = OrderedDict()
            self._batched_details["stream"] = OrderedDict()
            self._connection._post("/event/%s/details/" % self.id,
                "details", self._build_payload(values))

    def _update(self, payload):
        # Mirror the server-side representation onto this instance.
        self.id = payload.get("id")
        self.summary = payload.get("summary")
        self.user = payload.get("user")
        self.tags = payload.get("tags", "").split(", ")
        self.start = payload.get("start")
        self.end = payload.get("end")

    def close(self):
        """Mark the event ended on the server, stop the background
        commiter (if any), and flush the remaining batched details."""
        self._closing = True
        self._update(self._connection._put(
            "/event/%s/" % self.id, "end", str(pytz.UTC.localize(datetime.utcnow()))
        ))
        if self._commiter:
            self._commiter.join()
        self.commit()
class DetailsContainer(object):
    """
    Holds the local value list for one named detail and mirrors every
    mutation through the owning event's batching machinery.
    """

    def __init__(self, parent, name):
        self.parent = parent
        self.name = name
        self.value = []

    def set(self, elem):
        # Replace the whole value and record a "set" mutation.
        self.value = [elem]
        self.parent.event._add_detail(
            self.parent.name, self.name, elem, mode="set")

    def append(self, elem):
        # Accumulate locally and record an "append" mutation.
        self.value.append(elem)
        self.parent.event._add_detail(
            self.parent.name, self.name, elem, mode="append")
class DetailsDescriptor(object):
    """
    Proxy between a detail type ("attribute" or "stream") on an event
    and the per-name DetailsContainer objects, created lazily on first
    access via attribute or item syntax.
    """

    def __init__(self, event, name):
        self.event = event
        self.name = name
        self._values = {}

    def __getattr__(self, name):
        # Only called for names not found by normal lookup, so the
        # attributes assigned in __init__ are never shadowed here.
        if name not in self._values:
            container = DetailsContainer(self, name)
            self._values[name] = container
        return self._values[name]

    def __getitem__(self, key):
        return self.__getattr__(key)
def get_user(user=None):
    """
    Return the username to record on audit events.

    An explicit ``user`` argument wins; under sudo the result is
    "<USER>(<SUDO_USER>)"; otherwise the plain $USER value is used.
    """
    if user is not None:
        return user
    sudo_user = os.environ.get("SUDO_USER")
    if sudo_user is not None:
        return "%s(%s)" % (os.environ["USER"], sudo_user)
    return os.environ["USER"]
|
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing for rewrite_json."""
import unittest
from tools.build_defs.docker.rewrite_json import _DOCKER_VERSION
from tools.build_defs.docker.rewrite_json import _OPERATING_SYSTEM
from tools.build_defs.docker.rewrite_json import _PROCESSOR_ARCHITECTURE
from tools.build_defs.docker.rewrite_json import MetadataOptions
from tools.build_defs.docker.rewrite_json import RewriteMetadata
class RewriteJsonTest(unittest.TestCase):
"""Testing for rewrite_json."""
def testNewEntrypoint(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor'
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, parent=parent))
self.assertEquals(expected, actual)
def testOverrideEntrypoint(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/sh', 'does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, parent=parent))
self.assertEquals(expected, actual)
def testNewCmd(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
}
}
name = 'deadbeef'
parent = 'blah'
cmd = ['/bin/bash']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideCmd(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': ['does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
cmd = ['does', 'matter']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/bash'],
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideBoth(self):
in_data = {
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': ['/bin/sh'],
'Cmd': ['does', 'not', 'matter'],
}
}
name = 'deadbeef'
parent = 'blah'
entrypoint = ['/bin/bash', '-c']
cmd = ['my-command', 'my-arg1', 'my-arg2']
expected = {
'id': name,
'parent': parent,
'config': {
'User': 'mattmoor',
'WorkingDir': '/usr/home/mattmoor',
'Entrypoint': entrypoint,
'Cmd': cmd
},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, entrypoint=entrypoint, cmd=cmd, parent=parent))
self.assertEquals(expected, actual)
def testOverrideParent(self):
name = 'me!'
parent = 'parent'
# In the typical case, we expect the parent to
# come in as the 'id', and our grandparent to
# be its 'parent'.
in_data = {
'id': parent,
'parent': 'grandparent',
}
expected = {
'id': name,
'parent': parent,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, parent=parent))
self.assertEquals(expected, actual)
def testNewSize(self):
# Size is one of the few fields that, when omitted,
# should be removed.
in_data = {
'id': 'you',
'Size': '124',
}
name = 'me'
parent = 'blah'
size = '4321'
expected = {
'id': name,
'parent': parent,
'Size': size,
'config': {},
'docker_version': _DOCKER_VERSION,
'architecture': _PROCESSOR_ARCHITECTURE,
'os': _OPERATING_SYSTEM,
}
actual = RewriteMetadata(in_data, MetadataOptions(
name=name, size=size, parent=parent))
self.assertEquals(expected, actual)
def testOmitSize(self):
    """Size is one of the few fields that, when omitted, should be removed."""
    in_data = {
        'id': 'you',
        'Size': '124',
    }
    name = 'me'
    parent = 'blah'
    expected = {
        'id': name,
        'parent': parent,
        'config': {},
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testOmitName(self):
    """A name is mandatory; rewriting without one must raise."""
    with self.assertRaises(Exception):
        RewriteMetadata({}, MetadataOptions(name=None))
def testStripContainerConfig(self):
    # NOTE: the original comment here was copy-pasted from the Size tests;
    # this test actually verifies 'container_config' is dropped entirely.
    """The 'container_config' field is stripped from the rewritten metadata."""
    in_data = {
        'id': 'you',
        'container_config': {},
    }
    name = 'me'
    parent = 'blah'
    expected = {
        'id': name,
        'parent': parent,
        'config': {},
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testEmptyBase(self):
    """Rewriting empty input builds a complete metadata record from options."""
    in_data = {}
    name = 'deadbeef'
    entrypoint = ['/bin/bash', '-c']
    cmd = ['my-command', 'my-arg1', 'my-arg2']
    size = '999'
    expected = {
        'id': name,
        'config': {
            'Entrypoint': entrypoint,
            'Cmd': cmd,
            'ExposedPorts': {
                '80/tcp': {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
        'Size': size,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, entrypoint=entrypoint, cmd=cmd, size=size,
        ports=['80']))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testOmitParentWithBase(self):
    """Non-empty input metadata without a parent option must raise."""
    # Our input data should be empty when parent is omitted.
    base_metadata = {
        'id': 'you',
    }
    with self.assertRaises(Exception):
        RewriteMetadata(base_metadata, MetadataOptions(name='me'))
def testNewPort(self):
    """A bare port number is exposed as '<port>/tcp' in a fresh ExposedPorts."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    port = '80'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                port + '/tcp': {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, ports=[port]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testAugmentPort(self):
    """New ports are merged with, not replacing, existing ExposedPorts."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                '443/tcp': {}
            }
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    port = '80'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                '443/tcp': {},
                port + '/tcp': {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, ports=[port]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testMultiplePorts(self):
    """Every port in the options list is exposed."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    port1 = '80'
    port2 = '8080'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                port1 + '/tcp': {},
                port2 + '/tcp': {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, ports=[port1, port2]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testPortCollision(self):
    """Re-exposing an already-exposed port leaves a single entry."""
    port = '80'
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                port + '/tcp': {}
            }
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                port + '/tcp': {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, ports=[port]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testPortWithProtocol(self):
    """A port spec that already names a protocol is used verbatim."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    port = '80/tcp'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'ExposedPorts': {
                port: {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, ports=[port]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testNewVolume(self):
    """A volume option creates a fresh Volumes mapping in the config."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    volume = '/logs'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': {
                volume: {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, volumes=[volume]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testAugmentVolume(self):
    """New volumes are merged with, not replacing, existing Volumes."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': {
                '/original': {}
            }
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    volume = '/data'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': {
                '/original': {},
                volume: {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, volumes=[volume]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testMultipleVolumes(self):
    """Every volume in the options list appears in the config."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    volume1 = '/input'
    volume2 = '/output'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': {
                volume1: {},
                volume2: {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, volumes=[volume1, volume2]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testEnv(self):
    """Env options are written into the config as-is when no base Env exists."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor'
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    env = [
        'baz=blah',
        'foo=bar',
    ]
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Env': env,
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, env=env, parent=parent))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testEnvResolveReplace(self):
    """New env entries replace same-named base entries, '$var' references are
    resolved against the base image's values, and the result is sorted."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Env': [
                'foo=bar',
                'baz=blah',
                'blah=still around',
            ],
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    env = [
        'baz=replacement',
        'foo=$foo:asdf',
    ]
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Env': [
                'baz=replacement',
                'blah=still around',
                # $foo resolved against the base image's 'foo=bar'.
                'foo=bar:asdf',
            ],
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, env=env, parent=parent))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
def testAugmentVolumeWithNullInput(self):
    """A base config whose Volumes is explicitly None is treated as empty."""
    in_data = {
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': None,
        }
    }
    name = 'deadbeef'
    parent = 'blah'
    volume = '/data'
    expected = {
        'id': name,
        'parent': parent,
        'config': {
            'User': 'mattmoor',
            'WorkingDir': '/usr/home/mattmoor',
            'Volumes': {
                volume: {}
            }
        },
        'docker_version': _DOCKER_VERSION,
        'architecture': _PROCESSOR_ARCHITECTURE,
        'os': _OPERATING_SYSTEM,
    }
    actual = RewriteMetadata(in_data, MetadataOptions(
        name=name, parent=parent, volumes=[volume]))
    # assertEquals is a deprecated alias (removed in Python 3.12).
    self.assertEqual(expected, actual)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: PyParse.py
import re
import sys
# Continuation kinds for a line of Python source:
# C_NONE (complete), C_BACKSLASH (trailing backslash), C_STRING_FIRST_LINE /
# C_STRING_NEXT_LINES (inside a multi-line string), C_BRACKET (inside an
# unclosed bracket).
C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE, C_STRING_NEXT_LINES, C_BRACKET = range(5)
# Matches a line that starts with a keyword which (almost) always begins a
# new statement -- used as a resynchronization point for parsing.
_synchre = re.compile('\n ^\n [ \\t]*\n (?: while\n | else\n | def\n | return\n | assert\n | break\n | class\n | continue\n | elif\n | try\n | except\n | raise\n | import\n | yield\n )\n \\b\n', re.VERBOSE | re.MULTILINE).search
# Matches a blank line or a comment-only line ("junk" for statement purposes).
_junkre = re.compile('\n [ \\t]*\n (?: \\# \\S .* )?\n \\n\n', re.VERBOSE).match
# Matches a triple- or single-quoted string (possibly unterminated).
_match_stringre = re.compile('\n \\""" [^"\\\\]* (?:\n (?: \\\\. | "(?!"") )\n [^"\\\\]*\n )*\n (?: \\""" )?\n\n| " [^"\\\\\\n]* (?: \\\\. [^"\\\\\\n]* )* "?\n\n| \'\'\' [^\'\\\\]* (?:\n (?: \\\\. | \'(?!\'\') )\n [^\'\\\\]*\n )*\n (?: \'\'\' )?\n\n| \' [^\'\\\\\\n]* (?: \\\\. [^\'\\\\\\n]* )* \'?\n', re.VERBOSE | re.DOTALL).match
# Matches the first interesting (non-space, non-comment, non-backslash) char.
_itemre = re.compile('\n [ \\t]*\n [^\\s#\\\\] # if we match, m.end()-1 is the interesting char\n', re.VERBOSE).match
# Matches statements that close a block (dedent next line).
_closere = re.compile('\n \\s*\n (?: return\n | break\n | continue\n | raise\n | pass\n )\n \\b\n', re.VERBOSE).match
# Matches a run of characters with no structural meaning to the scanner.
_chew_ordinaryre = re.compile('\n [^[\\](){}#\'"\\\\]+\n', re.VERBOSE).match
# Translation table: collapse all open brackets to '(', all close brackets
# to ')', keep quotes/backslash/newline/'#', and map everything else to 'x'.
_tran = [
    'x'] * 256
for ch in '({[':
    _tran[ord(ch)] = '('
for ch in ')}]':
    _tran[ord(ch)] = ')'
for ch in '"\'\\\n#':
    _tran[ord(ch)] = ch
_tran = ''.join(_tran)
del ch
# Python 2 'unicode' type when available; None on Python 3 (NameError path).
try:
    UnicodeType = type(unicode(''))
except NameError:
    UnicodeType = None
class Parser:
def __init__(self, indentwidth, tabwidth):
    """Create a parser configured with the editor's indent and tab widths."""
    self.tabwidth = tabwidth
    self.indentwidth = indentwidth
def set_str(self, str):
    """Store the text to analyze and reset the study level.

    On Python 2, non-ASCII characters in a unicode string are replaced
    with the placeholder 'x' so later scanning only sees ASCII.
    """
    if type(str) is UnicodeType:
        chars = []
        for code in map(ord, str):
            chars.append(chr(code) if code < 127 else 'x')
        str = ''.join(chars)
    self.str = str
    self.study_level = 0
def find_good_parse_start(self, is_char_in_string=None, _synchre=_synchre):
    """Return an index into self.str where parsing can safely (re)start.

    Looks backward for a line beginning with a statement keyword
    (via _synchre) that is not inside a string literal, as judged by
    the caller-supplied is_char_in_string predicate.  Returns None when
    no predicate is given or no safe position is found.
    """
    str, pos = self.str, None
    if not is_char_in_string:
        # Caller can't tell us about strings; be conservative.
        return
    else:
        # Peek back from the end for a "colon then newline", a likely
        # block opener, retrying a few times with an earlier limit.
        limit = len(str)
        for tries in range(5):
            i = str.rfind(':\n', 0, limit)
            if i < 0:
                break
            # Rewind to the start of that candidate line.
            i = str.rfind('\n', 0, i) + 1
            m = _synchre(str, i, limit)
            if m and not is_char_in_string(m.start()):
                pos = m.start()
                break
            limit = i
        if pos is None:
            # Nothing found peeking back; scan from the very start once.
            m = _synchre(str)
            if m and not is_char_in_string(m.start()):
                pos = m.start()
            return pos
        # Found a safe start; advance over any later keyword lines that
        # are also outside strings, to get as close to the end as possible.
        i = pos + 1
        while 1:
            m = _synchre(str, i)
            if m:
                s, i = m.span()
                if not is_char_in_string(s):
                    pos = s
            else:
                break
        return pos
def set_lo(self, lo):
    """Discard the first lo characters of the stored text (no-op for lo <= 0)."""
    if lo > 0:
        self.str = self.str[lo:]
def _study1--- This code section failed: ---
209 0 LOAD_FAST 0 'self'
3 LOAD_ATTR 0 'study_level'
6 LOAD_CONST 1 1
9 COMPARE_OP 5 '>='
12 POP_JUMP_IF_FALSE 19 'to 19'
210 15 LOAD_CONST 0 ''
18 RETURN_END_IF
19_0 COME_FROM '12'
211 19 LOAD_CONST 1 1
22 LOAD_FAST 0 'self'
25 STORE_ATTR 0 'study_level'
217 28 LOAD_FAST 0 'self'
31 LOAD_ATTR 1 'str'
34 STORE_FAST 1 'str'
218 37 LOAD_FAST 1 'str'
40 LOAD_ATTR 2 'translate'
43 LOAD_GLOBAL 3 '_tran'
46 CALL_FUNCTION_1 1
49 STORE_FAST 1 'str'
219 52 LOAD_FAST 1 'str'
55 LOAD_ATTR 4 'replace'
58 LOAD_CONST 2 'xxxxxxxx'
61 LOAD_CONST 3 'x'
64 CALL_FUNCTION_2 2
67 STORE_FAST 1 'str'
220 70 LOAD_FAST 1 'str'
73 LOAD_ATTR 4 'replace'
76 LOAD_CONST 4 'xxxx'
79 LOAD_CONST 3 'x'
82 CALL_FUNCTION_2 2
85 STORE_FAST 1 'str'
221 88 LOAD_FAST 1 'str'
91 LOAD_ATTR 4 'replace'
94 LOAD_CONST 5 'xx'
97 LOAD_CONST 3 'x'
100 CALL_FUNCTION_2 2
103 STORE_FAST 1 'str'
222 106 LOAD_FAST 1 'str'
109 LOAD_ATTR 4 'replace'
112 LOAD_CONST 5 'xx'
115 LOAD_CONST 3 'x'
118 CALL_FUNCTION_2 2
121 STORE_FAST 1 'str'
223 124 LOAD_FAST 1 'str'
127 LOAD_ATTR 4 'replace'
130 LOAD_CONST 6 '\nx'
133 LOAD_CONST 7 '\n'
136 CALL_FUNCTION_2 2
139 STORE_FAST 1 'str'
230 142 LOAD_GLOBAL 5 'C_NONE'
145 STORE_FAST 2 'continuation'
231 148 LOAD_CONST 8 ''
151 DUP_TOP
152 STORE_FAST 3 'level'
155 STORE_FAST 4 'lno'
232 158 LOAD_CONST 8 ''
161 BUILD_LIST_1 1
164 DUP_TOP
165 LOAD_FAST 0 'self'
168 STORE_ATTR 6 'goodlines'
171 STORE_FAST 5 'goodlines'
233 174 LOAD_FAST 5 'goodlines'
177 LOAD_ATTR 7 'append'
180 STORE_FAST 6 'push_good'
234 183 LOAD_CONST 8 ''
186 LOAD_GLOBAL 8 'len'
189 LOAD_FAST 1 'str'
192 CALL_FUNCTION_1 1
195 ROT_TWO
196 STORE_FAST 7 'i'
199 STORE_FAST 8 'n'
235 202 SETUP_LOOP 639 'to 844'
205 LOAD_FAST 7 'i'
208 LOAD_FAST 8 'n'
211 COMPARE_OP 0 '<'
214 POP_JUMP_IF_FALSE 843 'to 843'
236 217 LOAD_FAST 1 'str'
220 LOAD_FAST 7 'i'
223 BINARY_SUBSCR
224 STORE_FAST 9 'ch'
237 227 LOAD_FAST 7 'i'
230 LOAD_CONST 1 1
233 BINARY_ADD
234 STORE_FAST 7 'i'
240 237 LOAD_FAST 9 'ch'
240 LOAD_CONST 3 'x'
243 COMPARE_OP 2 '=='
246 POP_JUMP_IF_FALSE 255 'to 255'
241 249 CONTINUE 205 'to 205'
252 JUMP_FORWARD 0 'to 255'
255_0 COME_FROM '252'
243 255 LOAD_FAST 9 'ch'
258 LOAD_CONST 7 '\n'
261 COMPARE_OP 2 '=='
264 POP_JUMP_IF_FALSE 308 'to 308'
244 267 LOAD_FAST 4 'lno'
270 LOAD_CONST 1 1
273 BINARY_ADD
274 STORE_FAST 4 'lno'
245 277 LOAD_FAST 3 'level'
280 LOAD_CONST 8 ''
283 COMPARE_OP 2 '=='
286 POP_JUMP_IF_FALSE 205 'to 205'
246 289 LOAD_FAST 6 'push_good'
292 LOAD_FAST 4 'lno'
295 CALL_FUNCTION_1 1
298 POP_TOP
299 JUMP_BACK 205 'to 205'
248 302 CONTINUE 205 'to 205'
305 JUMP_FORWARD 0 'to 308'
308_0 COME_FROM '305'
250 308 LOAD_FAST 9 'ch'
311 LOAD_CONST 9 '('
314 COMPARE_OP 2 '=='
317 POP_JUMP_IF_FALSE 336 'to 336'
251 320 LOAD_FAST 3 'level'
323 LOAD_CONST 1 1
326 BINARY_ADD
327 STORE_FAST 3 'level'
252 330 CONTINUE 205 'to 205'
333 JUMP_FORWARD 0 'to 336'
336_0 COME_FROM '333'
254 336 LOAD_FAST 9 'ch'
339 LOAD_CONST 10 ')'
342 COMPARE_OP 2 '=='
345 POP_JUMP_IF_FALSE 373 'to 373'
255 348 LOAD_FAST 3 'level'
351 POP_JUMP_IF_FALSE 205 'to 205'
256 354 LOAD_FAST 3 'level'
357 LOAD_CONST 1 1
360 BINARY_SUBTRACT
361 STORE_FAST 3 'level'
364 JUMP_BACK 205 'to 205'
258 367 CONTINUE 205 'to 205'
370 JUMP_FORWARD 0 'to 373'
373_0 COME_FROM '370'
260 373 LOAD_FAST 9 'ch'
376 LOAD_CONST 11 '"'
379 COMPARE_OP 2 '=='
382 POP_JUMP_IF_TRUE 397 'to 397'
385 LOAD_FAST 9 'ch'
388 LOAD_CONST 12 "'"
391 COMPARE_OP 2 '=='
394_0 COME_FROM '382'
394 POP_JUMP_IF_FALSE 740 'to 740'
262 397 LOAD_FAST 9 'ch'
400 STORE_FAST 10 'quote'
263 403 LOAD_FAST 1 'str'
406 LOAD_FAST 7 'i'
409 LOAD_CONST 1 1
412 BINARY_SUBTRACT
413 LOAD_FAST 7 'i'
416 LOAD_CONST 13 2
419 BINARY_ADD
420 SLICE+3
421 LOAD_FAST 10 'quote'
424 LOAD_CONST 14 3
427 BINARY_MULTIPLY
428 COMPARE_OP 2 '=='
431 POP_JUMP_IF_FALSE 447 'to 447'
264 434 LOAD_FAST 10 'quote'
437 LOAD_CONST 14 3
440 BINARY_MULTIPLY
441 STORE_FAST 10 'quote'
444 JUMP_FORWARD 0 'to 447'
447_0 COME_FROM '444'
265 447 LOAD_FAST 4 'lno'
450 STORE_FAST 11 'firstlno'
266 453 LOAD_GLOBAL 8 'len'
456 LOAD_FAST 10 'quote'
459 CALL_FUNCTION_1 1
462 LOAD_CONST 1 1
465 BINARY_SUBTRACT
466 STORE_FAST 12 'w'
267 469 LOAD_FAST 7 'i'
472 LOAD_FAST 12 'w'
475 BINARY_ADD
476 STORE_FAST 7 'i'
268 479 SETUP_LOOP 252 'to 734'
482 LOAD_FAST 7 'i'
485 LOAD_FAST 8 'n'
488 COMPARE_OP 0 '<'
491 POP_JUMP_IF_FALSE 702 'to 702'
269 494 LOAD_FAST 1 'str'
497 LOAD_FAST 7 'i'
500 BINARY_SUBSCR
501 STORE_FAST 9 'ch'
270 504 LOAD_FAST 7 'i'
507 LOAD_CONST 1 1
510 BINARY_ADD
511 STORE_FAST 7 'i'
272 514 LOAD_FAST 9 'ch'
517 LOAD_CONST 3 'x'
520 COMPARE_OP 2 '=='
523 POP_JUMP_IF_FALSE 532 'to 532'
273 526 CONTINUE 482 'to 482'
529 JUMP_FORWARD 0 'to 532'
532_0 COME_FROM '529'
275 532 LOAD_FAST 1 'str'
535 LOAD_FAST 7 'i'
538 LOAD_CONST 1 1
541 BINARY_SUBTRACT
542 LOAD_FAST 7 'i'
545 LOAD_FAST 12 'w'
548 BINARY_ADD
549 SLICE+3
550 LOAD_FAST 10 'quote'
553 COMPARE_OP 2 '=='
556 POP_JUMP_IF_FALSE 573 'to 573'
276 559 LOAD_FAST 7 'i'
562 LOAD_FAST 12 'w'
565 BINARY_ADD
566 STORE_FAST 7 'i'
277 569 BREAK_LOOP
570 JUMP_FORWARD 0 'to 573'
573_0 COME_FROM '570'
279 573 LOAD_FAST 9 'ch'
576 LOAD_CONST 7 '\n'
579 COMPARE_OP 2 '=='
582 POP_JUMP_IF_FALSE 642 'to 642'
280 585 LOAD_FAST 4 'lno'
588 LOAD_CONST 1 1
591 BINARY_ADD
592 STORE_FAST 4 'lno'
281 595 LOAD_FAST 12 'w'
598 LOAD_CONST 8 ''
601 COMPARE_OP 2 '=='
604 POP_JUMP_IF_FALSE 482 'to 482'
283 607 LOAD_FAST 3 'level'
610 LOAD_CONST 8 ''
613 COMPARE_OP 2 '=='
616 POP_JUMP_IF_FALSE 632 'to 632'
284 619 LOAD_FAST 6 'push_good'
622 LOAD_FAST 4 'lno'
625 CALL_FUNCTION_1 1
628 POP_TOP
629 JUMP_FORWARD 0 'to 632'
632_0 COME_FROM '629'
285 632 BREAK_LOOP
633 JUMP_BACK 482 'to 482'
286 636 CONTINUE 482 'to 482'
639 JUMP_FORWARD 0 'to 642'
642_0 COME_FROM '639'
288 642 LOAD_FAST 9 'ch'
645 LOAD_CONST 15 '\\'
648 COMPARE_OP 2 '=='
651 POP_JUMP_IF_FALSE 482 'to 482'
290 654 LOAD_FAST 1 'str'
657 LOAD_FAST 7 'i'
660 BINARY_SUBSCR
661 LOAD_CONST 7 '\n'
664 COMPARE_OP 2 '=='
667 POP_JUMP_IF_FALSE 683 'to 683'
291 670 LOAD_FAST 4 'lno'
673 LOAD_CONST 1 1
676 BINARY_ADD
677 STORE_FAST 4 'lno'
680 JUMP_FORWARD 0 'to 683'
683_0 COME_FROM '680'
292 683 LOAD_FAST 7 'i'
686 LOAD_CONST 1 1
689 BINARY_ADD
690 STORE_FAST 7 'i'
293 693 CONTINUE 482 'to 482'
696 JUMP_BACK 482 'to 482'
699 JUMP_BACK 482 'to 482'
702 POP_BLOCK
300 703 LOAD_FAST 4 'lno'
706 LOAD_CONST 1 1
709 BINARY_SUBTRACT
710 LOAD_FAST 11 'firstlno'
713 COMPARE_OP 2 '=='
716 POP_JUMP_IF_FALSE 728 'to 728'
303 719 LOAD_GLOBAL 9 'C_STRING_FIRST_LINE'
722 STORE_FAST 2 'continuation'
725 JUMP_BACK 205 'to 205'
305 728 LOAD_GLOBAL 10 'C_STRING_NEXT_LINES'
731 STORE_FAST 2 'continuation'
734_0 COME_FROM '479'
306 734 CONTINUE 205 'to 205'
737 JUMP_FORWARD 0 'to 740'
740_0 COME_FROM '737'
308 740 LOAD_FAST 9 'ch'
743 LOAD_CONST 16 '#'
746 COMPARE_OP 2 '=='
749 POP_JUMP_IF_FALSE 776 'to 776'
310 752 LOAD_FAST 1 'str'
755 LOAD_ATTR 11 'find'
758 LOAD_CONST 7 '\n'
761 LOAD_FAST 7 'i'
764 CALL_FUNCTION_2 2
767 STORE_FAST 7 'i'
312 770 CONTINUE 205 'to 205'
773 JUMP_FORWARD 0 'to 776'
776_0 COME_FROM '773'
316 776 LOAD_FAST 1 'str'
779 LOAD_FAST 7 'i'
782 BINARY_SUBSCR
783 LOAD_CONST 7 '\n'
786 COMPARE_OP 2 '=='
789 POP_JUMP_IF_FALSE 830 'to 830'
317 792 LOAD_FAST 4 'lno'
795 LOAD_CONST 1 1
798 BINARY_ADD
799 STORE_FAST 4 'lno'
318 802 LOAD_FAST 7 'i'
805 LOAD_CONST 1 1
808 BINARY_ADD
809 LOAD_FAST 8 'n'
812 COMPARE_OP 2 '=='
815 POP_JUMP_IF_FALSE 830 'to 830'
319 818 LOAD_GLOBAL 12 'C_BACKSLASH'
821 STORE_FAST 2 'continuation'
824 JUMP_ABSOLUTE 830 'to 830'
827 JUMP_FORWARD 0 'to 830'
830_0 COME_FROM '827'
320 830 LOAD_FAST 7 'i'
833 LOAD_CONST 1 1
836 BINARY_ADD
837 STORE_FAST 7 'i'
840 JUMP_BACK 205 'to 205'
843 POP_BLOCK
844_0 COME_FROM '202'
325 844 LOAD_FAST 2 'continuation'
847 LOAD_GLOBAL 9 'C_STRING_FIRST_LINE'
850 COMPARE_OP 3 '!='
853 POP_JUMP_IF_FALSE 889 'to 889'
326 856 LOAD_FAST 2 'continuation'
859 LOAD_GLOBAL 10 'C_STRING_NEXT_LINES'
862 COMPARE_OP 3 '!='
865 POP_JUMP_IF_FALSE 889 'to 889'
868 LOAD_FAST 3 'level'
871 LOAD_CONST 8 ''
874 COMPARE_OP 4 '>'
877_0 COME_FROM '865'
877_1 COME_FROM '853'
877 POP_JUMP_IF_FALSE 889 'to 889'
327 880 LOAD_GLOBAL 13 'C_BRACKET'
883 STORE_FAST 2 'continuation'
886 JUMP_FORWARD 0 'to 889'
889_0 COME_FROM '886'
328 889 LOAD_FAST 2 'continuation'
892 LOAD_FAST 0 'self'
895 STORE_ATTR 14 'continuation'
333 898 LOAD_FAST 5 'goodlines'
901 LOAD_CONST 17 -1
904 BINARY_SUBSCR
905 LOAD_FAST 4 'lno'
908 COMPARE_OP 3 '!='
911 POP_JUMP_IF_FALSE 927 'to 927'
334 914 LOAD_FAST 6 'push_good'
917 LOAD_FAST 4 'lno'
920 CALL_FUNCTION_1 1
923 POP_TOP
924 JUMP_FORWARD 0 'to 927'
927_0 COME_FROM '924'
Parse error at or near `COME_FROM' instruction at offset 734_0
def get_continuation_type(self):
    """Return the continuation kind (one of the C_* constants) after _study1."""
    self._study1()
    return self.continuation
def _study2(self):
    """Locate and analyze the last interesting statement.

    Sets stmt_start/stmt_end to the span of the last non-junk statement,
    lastch to its last non-whitespace character, lastopenbracketpos to
    the innermost unclosed bracket position (if any), and stmt_bracketing
    to a tuple of (index, depth) pairs tracking bracket/string/comment
    nesting.  Idempotent via study_level; relies on _study1's goodlines.
    """
    if self.study_level >= 2:
        return
    self._study1()
    self.study_level = 2
    str, goodlines = self.str, self.goodlines
    # Walk backward over "good" line boundaries, skipping statements that
    # are only blank/comment junk, to find the last real statement [p, q).
    i = len(goodlines) - 1
    p = len(str)
    while i:
        q = p
        for nothing in range(goodlines[i - 1], goodlines[i]):
            # Sets p to 0 if there is no preceding newline.
            p = str.rfind('\n', 0, p - 1) + 1
        if _junkre(str, p):
            i = i - 1
        else:
            break
    if i == 0:
        # Nothing but junk in the whole buffer.
        q = p
    self.stmt_start, self.stmt_end = p, q
    # Single pass over the statement, tracking brackets, strings, comments.
    lastch = ''
    stack = []  # positions of unclosed brackets
    push_stack = stack.append
    bracketing = [(p, 0)]
    while p < q:
        # Skip a run of characters with no structural meaning.
        m = _chew_ordinaryre(str, p, q)
        if m:
            newp = m.end()
            # Remember the last non-whitespace character seen.
            i = newp - 1
            while i >= p and str[i] in ' \t\n':
                i = i - 1
            if i >= p:
                lastch = str[i]
            p = newp
            if p >= q:
                break
        ch = str[p]
        if ch in '([{':
            push_stack(p)
            bracketing.append((p, len(stack)))
            lastch = ch
            p = p + 1
            continue
        if ch in ')]}':
            if stack:
                del stack[-1]
            lastch = ch
            p = p + 1
            bracketing.append((p, len(stack)))
            continue
        if ch == '"' or ch == "'":
            # Consume an entire (possibly multi-line) string literal.
            bracketing.append((p, len(stack) + 1))
            lastch = ch
            p = _match_stringre(str, p, q).end()
            bracketing.append((p, len(stack)))
            continue
        if ch == '#':
            # Consume a comment through its newline.
            bracketing.append((p, len(stack) + 1))
            p = str.find('\n', p, q) + 1
            bracketing.append((p, len(stack)))
            continue
        # Only a backslash reaches here: skip it and the escaped character
        # (unless the next char is a newline, which stays invisible).
        p = p + 1
        if str[p] != '\n':
            lastch = ch + str[p]
        p = p + 1
    self.lastch = lastch
    if stack:
        self.lastopenbracketpos = stack[-1]
    self.stmt_bracketing = tuple(bracketing)
def compute_bracket_indent(self):
    """Return the suggested indent (in columns) for a line continued
    inside an unclosed bracket.

    If something interesting follows the open bracket on its line, align
    under it; otherwise indent one level past the opening line's indent.
    """
    self._study2()
    j = self.lastopenbracketpos
    str = self.str
    n = len(str)
    # origi/i: start of the line containing the open bracket.
    origi = i = str.rfind('\n', 0, j) + 1
    j = j + 1  # first char after the open bracket
    while j < n:
        m = _itemre(str, j)
        if m:
            # Found something interesting after the bracket: line up under it.
            j = m.end() - 1
            extra = 0
            break
        else:
            # This line is junk past the bracket; try the next line.
            i = j = str.find('\n', j) + 1
    else:
        # Nothing interesting follows the bracket anywhere: indent one
        # level beyond the opening line's leading whitespace.
        j = i = origi
        while str[j] in ' \t':
            j = j + 1
        extra = self.indentwidth
    return len(str[i:j].expandtabs(self.tabwidth)) + extra
def get_num_lines_in_stmt(self):
    """Return how many physical lines the last statement spans."""
    self._study1()
    goodlines = self.goodlines
    # goodlines holds cumulative line numbers; the last two bracket the stmt.
    return goodlines[-1] - goodlines[-2]
def compute_backslash_indent(self):
    """Return the suggested indent for a line continued by a backslash.

    Prefers aligning one column past a top-level assignment '=' on the
    statement's first line; otherwise aligns past its first token.
    """
    self._study2()
    str = self.str
    i = self.stmt_start
    # Skip the statement's leading whitespace.
    while str[i] in ' \t':
        i = i + 1
    startpos = i
    endpos = str.find('\n', startpos) + 1
    found = level = 0
    # Scan the first physical line for a top-level assignment '='.
    while i < endpos:
        ch = str[i]
        if ch in '([{':
            level = level + 1
            i = i + 1
        elif ch in ')]}':
            if level:
                level = level - 1
            i = i + 1
        elif ch == '"' or ch == "'":
            i = _match_stringre(str, i, endpos).end()
        elif ch == '#':
            break
        elif level == 0 and ch == '=' and (i == 0 or str[i - 1] not in '=<>!') and str[i + 1] != '=':
            # A plain '=' not part of ==, <=, >=, != at bracket depth 0.
            found = 1
            break
        else:
            i = i + 1
    if found:
        # Don't align under '=' if only the backslash follows it.
        i = i + 1
        found = re.match('\\s*\\\\', str[i:endpos]) is None
    if not found:
        # Fall back: align one column past the first token.
        i = startpos
        while str[i] not in ' \t\n':
            i = i + 1
    return len(str[self.stmt_start:i].expandtabs(self.tabwidth)) + 1
def get_base_indent_string(self):
    """Return the leading whitespace of the current statement's first line."""
    self._study2()
    str = self.str
    start, n = self.stmt_start, self.stmt_end
    j = start
    while j < n and str[j] in ' \t':
        j = j + 1
    return str[start:j]
def is_block_opener(self):
    """Return True if the last statement opens a block (ends with ':')."""
    self._study2()
    return self.lastch == ':'
def is_block_closer(self):
    """Return True if the last statement closes a block
    (return/break/continue/raise/pass, per _closere)."""
    self._study2()
    return _closere(self.str, self.stmt_start) is not None
# Index of the innermost unclosed bracket; set by _study2 when one exists.
lastopenbracketpos = None

def get_last_open_bracket_pos(self):
    """Return the position of the innermost unclosed bracket, or None."""
    self._study2()
    return self.lastopenbracketpos

# (index, depth) pairs describing the last statement's nesting; set by _study2.
stmt_bracketing = None

def get_last_stmt_bracketing(self):
    """Return the bracketing tuple computed by _study2 for the last statement."""
    self._study2()
    return self.stmt_bracketing
|
|
import datetime
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_index_equal
import pandas_market_calendars as mcal
from pandas_market_calendars.exchange_calendar_nyse import NYSEExchangeCalendar
from tests.test_market_calendar import FakeCalendar, FakeBreakCalendar
def test_get_calendar():
    """get_calendar returns an NYSE calendar and honors custom open/close times."""
    assert isinstance(mcal.get_calendar('NYSE'), NYSEExchangeCalendar)
    custom = mcal.get_calendar('NYSE', datetime.time(10, 0), datetime.time(14, 30))
    assert isinstance(custom, NYSEExchangeCalendar)
    assert custom.open_time == datetime.time(10, 0)
    assert custom.close_time == datetime.time(14, 30)
    # confirm that import works properly
    _ = mcal.get_calendar('CME_Equity')
def test_get_calendar_names():
    """The registry of known calendar names includes ASX."""
    names = mcal.get_calendar_names()
    assert 'ASX' in names
def test_date_range_exceptions():
    """date_range raises clear ValueErrors for bad arguments, inverted
    schedules, and frequencies that would create overlapping bars."""
    cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
    schedule = cal.schedule("2021-01-05", "2021-01-05")
    ### invalid closed argument
    with pytest.raises(ValueError) as e:
        mcal.date_range(schedule, "15min", closed= "righ")
    assert e.exconly() == "ValueError: closed must be 'left', 'right', 'both' or None."
    ### invalid force_close argument
    with pytest.raises(ValueError) as e:
        mcal.date_range(schedule, "15min", force_close= "True")
    assert e.exconly() == "ValueError: force_close must be True, False or None."
    ### close_time is before open_time
    schedule = pd.DataFrame([["2020-01-01 12:00:00+00:00", "2020-01-01 11:00:00+00:00"]],
                            index= ["2020-01-01"], columns= ["market_open", "market_close"])
    with pytest.raises(ValueError) as e:
        mcal.date_range(schedule, "15min", closed="right", force_close= True)
    assert e.exconly() == "ValueError: Schedule contains rows where market_close < market_open,"\
                          " please correct the schedule"
    ### Overlap -
    ### the end of the last bar goes over the next start time
    bcal = FakeBreakCalendar()
    bschedule = bcal.schedule("2021-01-05", "2021-01-05")
    # NOTE(review): decompiled source lost block structure here; the 1H calls
    # are assumed to sit inside the raises-blocks after the raising 2H call,
    # matching the comment pairing -- confirm against upstream.
    with pytest.raises(ValueError) as e1:
        # this frequency overlaps
        mcal.date_range(bschedule, "2H", closed= "right", force_close= None)
        # this doesn't
        mcal.date_range(bschedule, "1H", closed="right", force_close=None)
    with pytest.raises(ValueError) as e2:
        mcal.date_range(bschedule, "2H", closed= "both", force_close= None)
        mcal.date_range(bschedule, "1H", closed="right", force_close=None)
    with pytest.raises(ValueError) as e3:
        mcal.date_range(bschedule, "2H", closed= None, force_close= None)
        mcal.date_range(bschedule, "1H", closed="right", force_close=None)
    for e in (e1, e2, e3):
        assert e.exconly() == "ValueError: The chosen frequency will lead to overlaps in the calculated index. "\
                              "Either choose a higher frequency or avoid setting force_close to None "\
                              "when setting closed to 'right', 'both' or None."
    try:
        # should all be fine, since force_close cuts the overlapping interval
        mcal.date_range(bschedule, "2H", closed="right", force_close=True)
        with pytest.warns(UserWarning): # should also warn about lost sessions
            mcal.date_range(bschedule, "2H", closed="right", force_close=False)
        mcal.date_range(bschedule, "2H", closed="both", force_close=True)
        mcal.date_range(bschedule, "2H", closed="both", force_close=False)
        # closed = "left" should never be a problem since it won't go outside market hours anyway
        mcal.date_range(bschedule, "2H", closed="left", force_close=True)
        mcal.date_range(bschedule, "2H", closed="left", force_close=False)
        mcal.date_range(bschedule, "2H", closed="left", force_close=None)
    except ValueError as e:
        pytest.fail(f"Unexpected Error: \n{e}")
def test_date_range_permutations():
    """Exhaustively check every closed/force_close combination at 1H on a
    9:00-11:30 session (expected timestamps below are UTC)."""
    # open_time = 9, close_time = 11.30, freq = "1H"
    cal = FakeCalendar(open_time= datetime.time(9), close_time= datetime.time(11, 30))
    schedule = cal.schedule("2021-01-05", "2021-01-05")
    # result matching values for:    closed force_close
    #  9 10 11          left False/ left None/ both False/ None False
    expected = pd.DatetimeIndex(
        ["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
         "2021-01-05 03:00:00+00:00"], tz= "UTC")
    actual = mcal.date_range(schedule, "1H", closed= "left", force_close= False)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed= "left", force_close= None)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed= "both", force_close= False)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed= None, force_close= False)
    assert_index_equal(actual, expected)
    #  9 10 11 11.30    left True/ both True/ None True
    expected = pd.DatetimeIndex(
        ["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
         "2021-01-05 03:00:00+00:00", "2021-01-05 03:30:00+00:00"], tz= "UTC")
    actual = mcal.date_range(schedule, "1H", closed= "left", force_close= True)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed= "both", force_close= True)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed= None, force_close= True)
    assert_index_equal(actual, expected)
    #  10 11            right False
    expected = pd.DatetimeIndex(
        ["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00"], tz="UTC")
    actual = mcal.date_range(schedule, "1H", closed="right", force_close=False)
    assert_index_equal(actual, expected)
    #  10 11 11.30      right True
    expected = pd.DatetimeIndex(
        ["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00",
         "2021-01-05 03:30:00+00:00"], tz="UTC")
    actual = mcal.date_range(schedule, "1H", closed="right", force_close=True)
    assert_index_equal(actual, expected)
    #  10 11 12         right None
    expected = pd.DatetimeIndex(
        ["2021-01-05 02:00:00+00:00", "2021-01-05 03:00:00+00:00",
         "2021-01-05 04:00:00+00:00"], tz="UTC")
    actual = mcal.date_range(schedule, "1H", closed="right", force_close=None)
    assert_index_equal(actual, expected)
    #  9 10 11 12       both None/ None None
    expected = pd.DatetimeIndex(
        ["2021-01-05 01:00:00+00:00", "2021-01-05 02:00:00+00:00",
         "2021-01-05 03:00:00+00:00", "2021-01-05 04:00:00+00:00"], tz="UTC")
    actual = mcal.date_range(schedule, "1H", closed="both", force_close=None)
    assert_index_equal(actual, expected)
    actual = mcal.date_range(schedule, "1H", closed=None, force_close=None)
    assert_index_equal(actual, expected)
def test_date_range_daily():
    """1D frequency: one bar per session, honoring holidays, early closes,
    late opens, and the closed/force_close options."""
    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(12, 0))
    # If closed='right' and force_close False for daily then the result is empty
    expected = pd.DatetimeIndex([], tz='UTC')
    schedule = cal.schedule('2015-12-31', '2016-01-06')
    with pytest.warns(UserWarning):
        actual = mcal.date_range(schedule, '1D', force_close=False, closed='right')
    assert_index_equal(actual, expected)
    # New years is holiday
    expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
                                 ['2015-12-31 12:00', '2016-01-04 12:00', '2016-01-05 12:00', '2016-01-06 12:00']])
    schedule = cal.schedule('2015-12-31', '2016-01-06')
    actual = mcal.date_range(schedule, '1D')
    assert_index_equal(actual, expected)
    # July 3 is early close
    expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
                                 ['2012-07-02 12:00', '2012-07-03 11:30', '2012-07-04 12:00']])
    schedule = cal.schedule('2012-07-02', '2012-07-04')
    actual = mcal.date_range(schedule, '1D')
    assert_index_equal(actual, expected)
    # Dec 14, 2016 is adhoc early close
    expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
                                 ['2016-12-13 12:00', '2016-12-14 11:40', '2016-12-15 12:00']])
    schedule = cal.schedule('2016-12-13', '2016-12-15')
    actual = mcal.date_range(schedule, '1D')
    assert_index_equal(actual, expected)
    # July 3 is late open
    expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
                                 ['2012-07-02 09:00', '2012-07-03 11:15', '2012-07-04 09:00']])
    schedule = cal.schedule('2012-07-02', '2012-07-04')
    actual = mcal.date_range(schedule, '1D', force_close=False, closed=None)
    assert_index_equal(actual, expected)
    # Dec 13, 2016 is adhoc late open
    expected = pd.DatetimeIndex([pd.Timestamp(x, tz=cal.tz).tz_convert('UTC') for x in
                                 ['2016-12-13 11:20', '2016-12-13 12:00', '2016-12-14 09:00', '2016-12-14 11:40',
                                  '2016-12-15 09:00', '2016-12-15 12:00']])
    schedule = cal.schedule('2016-12-13', '2016-12-15')
    actual = mcal.date_range(schedule, '1D', force_close=True, closed=None)
    assert_index_equal(actual, expected)
    # closed == "left" and force_close= True, should return the same thing
    actual = mcal.date_range(schedule, '1D', force_close=True, closed="left")
    assert_index_equal(actual, expected)
def test_date_range_lower_freq():
    """Frequencies below 1D are rejected; convert_freq downsamples instead."""
    cal = mcal.get_calendar("NYSE")
    schedule = cal.schedule(pd.Timestamp('2017-09-05 20:00', tz='UTC'), pd.Timestamp('2017-10-23 20:00', tz='UTC'))
    # cannot get date range of frequency lower than 1D
    with pytest.raises(ValueError) as e:
        mcal.date_range(schedule, frequency='3D')
    assert e.exconly() == "ValueError: Frequency must be 1D or higher frequency."
    # instead get for 1D and convert to lower frequency
    short = mcal.date_range(schedule, frequency='1D')
    actual = mcal.convert_freq(short, '3D')
    expected = pd.date_range('2017-09-05 20:00', '2017-10-23 20:00', freq='3D', tz='UTC')
    assert_index_equal(actual, expected)
    actual = mcal.convert_freq(short, '1W')
    expected = pd.date_range('2017-09-05 20:00', '2017-10-23 20:00', freq='1W', tz='UTC')
    assert_index_equal(actual, expected)
def test_date_range_hour():
    """Hourly ranges respect special opens/closes and the force_close flag."""
    def utc_index(cal, times):
        # Localize to the calendar's zone, then normalize to UTC.
        return pd.DatetimeIndex(
            [pd.Timestamp(t, tz=cal.tz).tz_convert('UTC') for t in times])

    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(10, 30))
    # New Years Eve and weekend skipped
    schedule = cal.schedule('2015-12-31', '2016-01-06')
    expected = utc_index(cal, ['2015-12-31 10:00', '2015-12-31 10:30',
                               '2016-01-04 10:00', '2016-01-04 10:30',
                               '2016-01-05 10:00', '2016-01-05 10:30',
                               '2016-01-06 10:00', '2016-01-06 10:30'])
    assert_index_equal(mcal.date_range(schedule, '1H', force_close=True), expected)

    # If force_close False for then result is missing close if not on even increment
    schedule = cal.schedule('2015-12-31', '2016-01-06')
    expected = utc_index(cal, ['2015-12-31 10:00', '2016-01-04 10:00',
                               '2016-01-05 10:00', '2016-01-06 10:00'])
    assert_index_equal(mcal.date_range(schedule, '1H', force_close=False), expected)

    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(12, 0))
    # July 3 is late open and early close
    schedule = cal.schedule('2012-07-02', '2012-07-04')
    expected = utc_index(cal, ['2012-07-02 10:00', '2012-07-02 11:00', '2012-07-02 12:00',
                               '2012-07-03 11:30',
                               '2012-07-04 10:00', '2012-07-04 11:00', '2012-07-04 12:00'])
    assert_index_equal(mcal.date_range(schedule, '1H'), expected)

    # Dec 14, 2016 is adhoc early close
    schedule = cal.schedule('2016-12-14', '2016-12-15')
    expected = utc_index(cal, ['2016-12-14 10:00', '2016-12-14 11:00', '2016-12-14 11:40',
                               '2016-12-15 10:00', '2016-12-15 11:00', '2016-12-15 12:00'])
    assert_index_equal(mcal.date_range(schedule, '1H'), expected)

    # Dec 13, 2016 is adhoc late open, include the open with closed=True
    schedule = cal.schedule('2016-12-13', '2016-12-14')
    expected = utc_index(cal, ['2016-12-13 11:20', '2016-12-13 12:00',
                               '2016-12-14 09:00', '2016-12-14 10:00',
                               '2016-12-14 11:00', '2016-12-14 11:40'])
    assert_index_equal(mcal.date_range(schedule, '1H', closed=None), expected)
def test_date_range_minute():
    """Minute ranges: length, endpoints and spot membership checks."""
    def check_membership(cal, actual, present, absent):
        for ts in present:
            assert pd.Timestamp(ts, tz=cal.tz) in actual
        for ts in absent:
            assert pd.Timestamp(ts, tz=cal.tz) not in actual

    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(10, 30))
    # New Years Eve and weekend skipped
    schedule = cal.schedule('2015-12-31', '2016-01-06')
    actual = mcal.date_range(schedule, '1min', force_close=True)
    assert len(actual) == 4 * 90
    assert actual[0] == pd.Timestamp('2015-12-31 09:01', tz=cal.tz)
    assert actual[len(actual) - 1] == pd.Timestamp('2016-01-06 10:30', tz=cal.tz)
    check_membership(
        cal, actual,
        present=['2015-12-31 09:02', '2015-12-31 10:30', '2016-01-04 09:01',
                 '2016-01-06 09:01'],
        absent=['2015-12-31 09:00', '2015-12-31 10:31', '2016-01-02 09:01',
                '2016-01-03 09:01', '2016-01-06 09:00'])

    # July 3 is late open and early close
    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(12, 0))
    schedule = cal.schedule('2012-07-02', '2012-07-04')
    actual = mcal.date_range(schedule, '1min')
    assert len(actual) == 375  # 2 days of 3 hours, and one day of 15 mins
    assert actual[0] == pd.Timestamp('2012-07-02 09:01', tz=cal.tz)
    assert actual[len(actual) - 1] == pd.Timestamp('2012-07-04 12:00', tz=cal.tz)
    check_membership(
        cal, actual,
        present=['2012-07-02 09:02', '2012-07-02 12:00', '2012-07-03 11:16',
                 '2012-07-03 11:30', '2012-07-04 09:01'],
        absent=['2012-07-02 09:00', '2012-07-02 12:01', '2012-07-03 11:15',
                '2012-07-03 11:31', '2012-07-04 09:00'])

    # Dec 13, 2016 is ad-hoc late open, include the open with closed=True, Dec 14 is ad-hoc early close
    cal = FakeCalendar(open_time=datetime.time(9, 0), close_time=datetime.time(12, 0))
    schedule = cal.schedule('2016-12-13', '2016-12-14')
    actual = mcal.date_range(schedule, '1min', closed=None)
    assert len(actual) == 41 + (61 + 60 + 40)
    assert actual[0] == pd.Timestamp('2016-12-13 11:20', tz=cal.tz)
    assert actual[len(actual) - 1] == pd.Timestamp('2016-12-14 11:40', tz=cal.tz)
    check_membership(
        cal, actual,
        present=['2016-12-13 11:21', '2016-12-13 12:00', '2016-12-14 09:00'],
        absent=['2016-12-13 11:19', '2016-12-13 12:01', '2016-12-14 08:59',
                '2016-12-14 11:41'])
def test_date_range_w_breaks():
    """Ranges over a calendar with a mid-day break skip the break period."""
    cal = FakeBreakCalendar()

    def check(schedule, freq, expected, **kwargs):
        actual = mcal.date_range(schedule, freq, **kwargs)
        assert len(actual) == len(expected)
        for ts in expected:
            assert pd.Timestamp(ts) in actual

    schedule = cal.schedule('2016-12-28', '2016-12-28')
    # A frequency longer than the sessions around the break warns.
    with pytest.warns(UserWarning):
        mcal.date_range(schedule, "1H", closed="right", force_close=False)

    check(schedule, '30min',
          ['2016-12-28 14:30:00+00:00', '2016-12-28 15:00:00+00:00',
           '2016-12-28 16:00:00+00:00', '2016-12-28 16:30:00+00:00',
           '2016-12-28 17:00:00+00:00'],
          closed=None)
    check(schedule, '30min',
          ['2016-12-28 15:00:00+00:00', '2016-12-28 16:30:00+00:00',
           '2016-12-28 17:00:00+00:00'],
          closed='right')
    check(schedule, '30min',
          ['2016-12-28 14:30:00+00:00', '2016-12-28 16:00:00+00:00',
           '2016-12-28 16:30:00+00:00'],
          closed='left', force_close=False)
    check(schedule, '30min',
          ['2016-12-28 14:30:00+00:00', '2016-12-28 15:00:00+00:00',
           '2016-12-28 16:00:00+00:00', '2016-12-28 16:30:00+00:00',
           '2016-12-28 17:00:00+00:00'],
          closed='left', force_close=True)

    # when the open is the break start
    schedule = cal.schedule('2016-12-29', '2016-12-29')
    check(schedule, '15min',
          ['2016-12-29 16:00:00+00:00', '2016-12-29 16:15:00+00:00',
           '2016-12-29 16:30:00+00:00', '2016-12-29 16:45:00+00:00',
           '2016-12-29 17:00:00+00:00'],
          closed=None)
    check(schedule, '15min',
          ['2016-12-29 16:15:00+00:00', '2016-12-29 16:30:00+00:00',
           '2016-12-29 16:45:00+00:00', '2016-12-29 17:00:00+00:00'],
          closed='right')

    # when the close is the break end
    schedule = cal.schedule('2016-12-30', '2016-12-30')
    # force close True
    check(schedule, '15min',
          ['2016-12-30 14:30:00+00:00', '2016-12-30 14:45:00+00:00',
           '2016-12-30 15:00:00+00:00'],
          closed=None, force_close=True)
    # force close False
    check(schedule, '15min',
          ['2016-12-30 14:30:00+00:00', '2016-12-30 14:45:00+00:00',
           '2016-12-30 15:00:00+00:00'],
          closed=None, force_close=False)
    check(schedule, '15min',
          ['2016-12-30 14:45:00+00:00', '2016-12-30 15:00:00+00:00'],
          closed='right', force_close=False)
def test_merge_schedules():
    """Outer/inner merges combine schedules; unsupported joins raise."""
    cal1 = FakeCalendar()
    cal2 = NYSEExchangeCalendar()
    # cal1 is open on 2016-07-04 and cal2 is not
    sch1 = cal1.schedule('2016-07-01', '2016-07-06')
    sch2 = cal2.schedule('2016-07-01', '2016-07-06')

    def build(index, opens, closes):
        return pd.DataFrame(
            {'market_open': [pd.Timestamp(t, tz='UTC') for t in opens],
             'market_close': [pd.Timestamp(t, tz='UTC') for t in closes]},
            columns=['market_open', 'market_close'],
            index=pd.DatetimeIndex(index))

    # outer join will include July 4th and have
    expected = build(
        ['2016-07-01', '2016-07-04', '2016-07-05', '2016-07-06'],
        ['2016-07-01 02:13', '2016-07-04 02:13', '2016-07-05 02:13', '2016-07-06 02:13'],
        ['2016-07-01 20:00', '2016-07-04 02:49', '2016-07-05 20:00', '2016-07-06 20:00'])
    assert_frame_equal(mcal.merge_schedules([sch1, sch2], how='outer'), expected)

    # inner join will exclude July 4th because not open for both
    expected = build(
        ['2016-07-01', '2016-07-05', '2016-07-06'],
        ['2016-07-01 13:30', '2016-07-05 13:30', '2016-07-06 13:30'],
        ['2016-07-01 02:49', '2016-07-05 02:49', '2016-07-06 02:49'])
    assert_frame_equal(mcal.merge_schedules([sch1, sch2], how='inner'), expected)

    # joining more than two calendars works correctly
    assert_frame_equal(mcal.merge_schedules([sch1, sch1, sch1], how='inner'), sch1)

    with pytest.raises(ValueError):
        mcal.merge_schedules([sch1, sch2], how='left')
def test_merge_schedules_w_break():
    """Merging a break calendar warns and drops the break columns."""
    # this currently does not work as all breaks are lost
    cal = FakeCalendar()
    cal_breaks = FakeBreakCalendar()
    schedule = cal.schedule('2016-12-20', '2016-12-30')
    schedule_breaks = cal_breaks.schedule('2016-12-20', '2016-12-30')

    with pytest.warns(Warning) as warning_info:
        merged = mcal.merge_schedules([schedule, schedule_breaks])

    assert warning_info[0].message.args[0] == (
        'Merge schedules will drop the break_start and break_end from result.')
    for dropped in ('break_start', 'break_end'):
        assert dropped not in merged.columns
|
|
# Village People, 2017
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from models import FuN
from collections import namedtuple
from termcolor import colored as clr
# One recorded step of experience: model inputs/outputs plus the
# bookkeeping needed to remap rows when games in the batch finish.
_fields = ('state policy action goals m_value w_value latents'
           ' reward new_state back_idx alive_no')
Transition = namedtuple('Transition', _fields)
class FeudalAgent():
    """FeudalNet-style agent driving a batch of environments with a
    manager/worker (FuN) model.

    NOTE(review): the original implementation referenced several undefined
    names (``transitions``, ``cfg``, ``ep``, ``coeff``, ``best_reward``, the
    ``last_te``/``last_ten`` typo, ``self.new_states``) and never wrote the
    ``prev_*`` bookkeeping back onto ``self``, so it crashed at runtime.
    This version stores the required objects on the instance instead; the
    training logic itself is unchanged.
    """

    def __init__(self, name, action_space, cfg, shared_objects=None):
        # ``shared_objects`` was a mutable default ({}); it is unused here but
        # kept for interface compatibility with a safe ``None`` default.
        super(FeudalAgent, self).__init__()
        self.name = name
        self.actions_no = len(action_space)
        self.batch_size = batch_size = cfg.general.batch_size
        self.cfg = cfg                      # needed later by _improve_policy
        self.m_gamma = cfg.agent.m_gamma    # manager discount (was a dead local)
        self.w_gamma = cfg.agent.w_gamma    # worker discount (was a dead local)
        # Keep the original (misspelled) attribute in case the model reads it,
        # but also provide the correctly spelled one. TODO confirm which one
        # FuN actually consumes.
        cfg.model.actons_no = self.actions_no
        cfg.model.actions_no = self.actions_no
        self.fun = fun = FuN(cfg.model)
        if cfg.general.use_cuda:
            fun.cuda()
        self.best_reward = None
        print(clr("Training starts.", "green"))
        Optimizer = getattr(optim, cfg.training.algorithm)
        self.optimizer = Optimizer(fun.parameters(),
                                   **vars(cfg.training.algorithm_args))
        self.coeff = cfg.training.feudal_coeff
        # Cross-episode statistics: these must NOT be cleared by _reset(),
        # otherwise the 10-episode reward average can never accumulate.
        self.ep = 0
        self.last_ten = []
        self._reset()

    def _reset(self):
        """Clear all per-episode bookkeeping (not the cross-episode stats)."""
        self.last_done = None
        self.transitions = []
        self.clock = 0
        self.prev_done = prev_done = torch.ByteTensor(self.batch_size) \
            .fill_(0)  # .cuda()
        self.prev_not_done = prev_not_done = 1 - prev_done
        self.prev_alive_idx = prev_not_done.nonzero().squeeze(1)
        # All environments start alive (the original never initialized
        # ``alive_idx``, crashing on the very first act()).
        self.alive_idx = self.prev_alive_idx
        self.back_idx = None
        self.latents = []
        self.back_links = []
        self.alive_no = [self.batch_size]
        self.goals = None
        self.model_state = None
        self.total_reward = .0

    def act(self, states, rewards, done, is_training):
        """Advance one step: record the previous transition, run the model on
        the still-alive games and sample one action per alive game.

        Returns a tensor of ``batch_size`` actions (zeros for finished games),
        or ``None`` when the episode just ended for every game in the batch.
        """
        batch_size = self.batch_size
        states = states.float()
        done = done.byte()
        assert states.size(0) == batch_size
        assert rewards.size(0) == batch_size
        assert done.size(0) == batch_size

        if self.clock > 0:
            prev_done = self.prev_done
            prev_alive_idx = self.prev_alive_idx
            if prev_done.any():
                # Keep only rows of games that were alive last step.
                new_states = states.index_select(0, prev_alive_idx)
                rewards = rewards.index_select(0, prev_alive_idx)
            else:
                new_states = states
            self.total_reward = self.total_reward + rewards.sum()
            # was: ``transitions.append`` (NameError in the original)
            self.transitions.append(
                Transition(
                    state=self.prev_states,
                    policy=self.policy,
                    action=self.actions,
                    goals=self.goals,
                    m_value=self.m_value,
                    w_value=self.w_value,
                    latents=self.latents,
                    reward=rewards,
                    new_state=new_states,  # was undefined ``self.new_states``
                    back_idx=self.back_idx,
                    alive_no=self.alive_no[-1]
                )
            )
            done = done | self.prev_done
            if done.all():
                # Episode over for every game: learn, then start fresh.
                self._improve_policy()
                self._reset()
                return None

        not_done = 1 - done
        self.alive_no.append(not_done.nonzero().nelement())
        alive_no = self.alive_no
        prev_done = self.prev_done
        prev_alive_idx = self.prev_alive_idx
        goals = self.goals
        model_state = self.model_state
        if done.any():
            # -- If there are some dead games: slice the states
            self.alive_idx = alive_idx = not_done.nonzero().squeeze(1)
            states = states.index_select(0, alive_idx)
            if (done - prev_done).nonzero().nelement() > 0:
                # Newly finished games: remap recurrent state, goals and
                # latents onto the surviving rows.
                self.back_idx = not_done.index_select(0, prev_alive_idx) \
                    .nonzero().squeeze(1)
                _idx = Variable(self.back_idx)
                if self.clock > 0:
                    model_state = [s.index_select(0, _idx)
                                   for s in model_state]
                    self.goals = goals = [g.index_select(0, _idx)
                                          for g in goals]
                    self.latents = [l.index_select(0, _idx)
                                    for l in self.latents]
            else:
                self.back_idx = None  # no remapping happened this step
        else:
            self.alive_idx = alive_idx = not_done.nonzero().squeeze(1)
            self.back_idx = None  # was a dead local in the original

        assert states.size(0) == alive_no[-1]

        policy, latent, goals, self.m_value, self.w_value, self.model_state = \
            self.fun(Variable(states), self.clock, goals, model_state)
        # torch.multinomial requires the number of samples; downstream code
        # squeezes dim 1, i.e. expects shape (alive, 1).
        actions = torch.multinomial(policy, 1)
        assert actions.size(0) == alive_no[-1]
        full_actions = actions.data.new().resize_(batch_size).fill_(0)
        full_actions.scatter_(0, self.alive_idx, actions.data.squeeze(1))

        self.latents.append(latent)
        self.goals = goals
        self.policy = policy
        self.actions = actions
        # was: plain locals, so the next call never saw these updates
        self.prev_states = states
        self.prev_done = done
        self.prev_alive_idx = alive_idx
        # (the original appended to ``alive_no`` a second time here,
        # double-counting every step; dropped)
        self.clock += 1
        return full_actions

    def _improve_policy(self):
        """Compute the FuN losses over the recorded episode (transition policy
        gradient, manager critic, worker actor/critic) and take one
        optimization step."""
        total_reward = self.total_reward
        last_ten = self.last_ten        # was the ``last_te`` typo
        transitions = self.transitions
        m_gamma = self.m_gamma          # was never stored on self
        cfg = self.cfg                  # was an undefined global
        coeff = self.coeff
        self.ep += 1

        last_ten.append(total_reward)

        # -- Sanity-check every recorded transition.
        for tr in transitions:
            assert tr.state.size(0) == tr.alive_no
            assert tr.action.size(0) == tr.alive_no
            for l in tr.latents:
                assert l.size(0) == tr.alive_no
            for g in tr.goals:
                assert g.size(0) == tr.alive_no
            assert tr.m_value.size(0) == tr.alive_no
            assert tr.w_value.size(0) == tr.alive_no
            assert tr.reward.size(0) == tr.alive_no
            assert tr.new_state.size(0) == tr.alive_no
            assert (tr.back_idx is None) or (tr.back_idx.size(0) == tr.alive_no)

        m_return = None
        fwd_idx = None
        T = len(transitions)
        tpgs, m_critics, w_actors, w_critics = [], [], [], []
        for (t, tr) in reversed(list(enumerate(transitions))):
            # -- Compute manager's discounted return
            if m_return is None:
                m_return = tr.reward
            elif fwd_idx is None:
                m_return = tr.reward + m_gamma * m_return
            else:
                # Scatter the future return back onto this step's rows.
                m_return = tr.reward.clone().fill_(0) \
                    .scatter_(0, fwd_idx, m_return)
                m_return = tr.reward + m_gamma * m_return

            # -- Transition policy gradient: cosine between the latent-state
            # displacement over horizon c and the goal, weighted by advantage.
            c = cfg.model.c
            if t + c < T:
                m_adv = m_return - tr.m_value.data
                s_t = tr.latents[-1].data
                g_t = tr.goals[-1]
                for i in range(t + 1, t + c + 1):
                    tr_i = transitions[i]
                    if tr_i.back_idx is not None:
                        g_t = g_t.index_select(0, Variable(tr_i.back_idx))
                        s_t = s_t.index_select(0, tr_i.back_idx)
                        m_adv = m_adv.index_select(0, tr_i.back_idx)
                s_t_c = tr_i.latents[-1].data
                cos = F.cosine_similarity(Variable(s_t_c - s_t), g_t, 1)
                tpgs.append(torch.dot(cos, Variable(m_adv)))
            m_critics.append(
                F.smooth_l1_loss(tr.m_value, Variable(m_return))
            )

            # -- Worker intrinsic reward and actor/critic losses. Guarded by
            # ``c > 0`` so ``intr_r`` is always defined when used.
            c = min(cfg.model.c, len(tr.latents) - 1)
            if c > 0:
                last_l = tr.latents[-1].data
                intr_r = last_l.new().resize_(tr.alive_no).fill_(0)
                for l, g in zip(tr.latents[-(c + 1):-1],
                                tr.goals[-(c + 1):-1]):
                    intr_r += F.cosine_similarity(last_l - l.data, g.data, 1)
                intr_r /= c
                if cfg.shortcut:
                    w_return = m_return
                else:
                    w_return = m_return + cfg.model.alpha * intr_r
                w_adv = w_return - tr.w_value.data
                # Hand-built policy-gradient "gradient" tensor: advantage on
                # the taken action, scaled by -1/pi and the batch size.
                grad = w_adv.new().resize_(tr.policy.size()).fill_(0)
                grad.scatter_(1, tr.action.data, w_adv.unsqueeze(1))
                grad /= -tr.policy.data
                grad /= tr.policy.size(0)
                w_actors.append(torch.dot(Variable(grad), tr.policy))
                w_critics.append(
                    F.smooth_l1_loss(tr.w_value, Variable(w_return)))
            fwd_idx = tr.back_idx

        # NOTE(review): if the episode is shorter than c+1 steps these sums
        # are plain ints and the ``.data[0]`` prints below would fail, as in
        # the original.
        t_loss, mc_loss = sum(tpgs), sum(m_critics)
        wa_loss, wc_loss = sum(w_actors), sum(w_critics)

        if len(last_ten) == 10:
            print("// Episode " + clr("{:d}".format(self.ep), "yellow") + ".")
            mean_r = sum(last_ten) / 10.0
            if self.best_reward is None or self.best_reward < mean_r:
                print(clr("Reward : ") +
                      clr("{:.2f}".format(mean_r), "white", "on_magenta")
                      )
                self.best_reward = mean_r
            else:
                print(clr("Reward : ") + clr("{:.2f}".format(mean_r), "red"))
            self.last_ten = []  # was a dead local rebind in the original

        sep = clr(" | ", "green")
        print("TPG: " +
              clr("{:f}".format(t_loss.data[0]), "yellow") + sep +
              "M critic: " +
              clr("{:f}".format(mc_loss.data[0]), "yellow") + sep +
              "W actor: " +
              clr("{:f}".format(wa_loss.data[0]), "yellow") + sep +
              "W critic: " +
              clr("{:f}".format(wc_loss.data[0]), "yellow"))

        if cfg.shortcut:  # was undefined ``self.shortcut``
            (coeff["WACTOR"] * wa_loss +
             coeff["WCRITIC"] * wc_loss
             ).backward()
        else:
            (coeff["TPG"] * t_loss +
             coeff["MCRITIC"] * mc_loss +
             coeff["WACTOR"] * wa_loss +
             coeff["WCRITIC"] * wc_loss
             ).backward()
        self.optimizer.step()
        self.optimizer.zero_grad()
|
|
# Copyright (C) 2017, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import fnmatch
import importlib
import inspect
import re
import sys
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils import statemachine
from cliff import app
from cliff import commandmanager
def _indent(text):
"""Indent by four spaces."""
prefix = ' ' * 4
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return ''.join(prefixed_lines())
def _format_description(parser):
    """Yield the parser description, split into reStructuredText lines.

    We parse this as reStructuredText, allowing users to embed rich
    information in their help messages if they so choose.
    """
    lines = statemachine.string2lines(
        parser.description, tab_width=4, convert_whitespace=True)
    for line in lines:
        yield line
def _format_usage(parser):
"""Get usage without a prefix."""
fmt = argparse.HelpFormatter(parser.prog)
optionals = parser._get_optional_actions()
positionals = parser._get_positional_actions()
groups = parser._mutually_exclusive_groups
# hacked variant of the regex used by the actual argparse module. Unlike
# that version, this one attempts to group long and short opts with their
# optional arguments ensuring that, for example, '--format <FORMAT>'
# becomes ['--format <FORMAT>'] and not ['--format', '<FORMAT>'].
# Yes, they really do use regexes to break apart and rewrap their help
# string. Don't ask me why.
part_regexp = re.compile(r"""
\(.*?\)+ |
\[.*?\]+ |
(?:(?:-\w|--\w+(?:-\w+)*)(?:\s+<?\w[\w-]*>?)?) |
\S+
""", re.VERBOSE)
opt_usage = fmt._format_actions_usage(optionals, groups)
pos_usage = fmt._format_actions_usage(positionals, groups)
opt_parts = part_regexp.findall(opt_usage)
pos_parts = part_regexp.findall(pos_usage)
parts = opt_parts + pos_parts
if len(' '.join([parser.prog] + parts)) < 72:
return [' '.join([parser.prog] + parts)]
return [parser.prog] + [_indent(x) for x in parts]
def _format_epilog(parser):
    """Yield the parser epilog, split into reStructuredText lines.

    We parse this as reStructuredText, allowing users to embed rich
    information in their help messages if they so choose.
    """
    lines = statemachine.string2lines(
        parser.epilog, tab_width=4, convert_whitespace=True)
    for line in lines:
        yield line
def _format_positional_action(action):
"""Format a positional action."""
if action.help == argparse.SUPPRESS:
return
# NOTE(stephenfin): We strip all types of brackets from 'metavar' because
# the 'option' directive dictates that only option argument names should be
# surrounded by angle brackets
yield '.. option:: {}'.format(
(action.metavar or action.dest).strip('<>[]() '))
if action.help:
yield ''
for line in statemachine.string2lines(
action.help, tab_width=4, convert_whitespace=True):
yield _indent(line)
def _format_optional_action(action):
"""Format an optional action."""
if action.help == argparse.SUPPRESS:
return
if action.nargs == 0:
yield '.. option:: {}'.format(', '.join(action.option_strings))
else:
# TODO(stephenfin): At some point, we may wish to provide more
# information about the options themselves, for example, if nargs is
# specified
option_strings = [' '.join(
[x, action.metavar or '<{}>'.format(action.dest.upper())])
for x in action.option_strings]
yield '.. option:: {}'.format(', '.join(option_strings))
if action.help:
yield ''
for line in statemachine.string2lines(
action.help, tab_width=4, convert_whitespace=True):
yield _indent(line)
def _format_parser(parser):
    """Yield an rST rendering of an argparse ``ArgumentParser``.

    Output order: description (if any), a ``program`` directive, a shell
    code block with the usage string, one ``option`` entry per optional then
    positional action, and finally the epilog (if any).
    """
    if parser.description:
        for line in _format_description(parser):
            yield line
        yield ''

    yield '.. program:: {}'.format(parser.prog)

    yield '.. code-block:: shell'
    yield ''
    for line in _format_usage(parser):
        yield _indent(line)
    yield ''

    # In argparse, all arguments and parameters are known as "actions".
    # Optional actions are what would be known as flags or options in other
    # libraries, while positional actions would generally be known as
    # arguments. We present these slightly differently.
    for formatter, actions in (
            (_format_optional_action, parser._get_optional_actions()),
            (_format_positional_action, parser._get_positional_actions())):
        for action in actions:
            for line in formatter(action):
                yield line
            yield ''

    if parser.epilog:
        for line in _format_epilog(parser):
            yield line
        yield ''
class AutoprogramCliffDirective(rst.Directive):
    """Auto-document a subclass of `cliff.command.Command`.

    The single required argument is either a cliff command namespace (one
    node is generated per matching command) or the dotted path of a
    `cliff.app.App` subclass (one node for the application parser itself).
    """

    has_content = False
    required_arguments = 1
    option_spec = {
        'command': directives.unchanged,      # wildcard pattern of commands
        'arguments': directives.unchanged,    # argv passed to an App subclass
        'ignored': directives.unchanged,      # comma-separated opts to hide
        'application': directives.unchanged,  # prefix used on command lines
    }

    def _get_ignored_opts(self):
        """Merge the site-wide ignored options with this directive's own."""
        global_ignored = self.env.config.autoprogram_cliff_ignored
        local_ignored = self.options.get('ignored', '')
        local_ignored = [x.strip() for x in local_ignored.split(',')
                         if x.strip()]
        return list(set(global_ignored + local_ignored))

    def _drop_ignored_options(self, parser, ignored_opts):
        """Remove any parser action whose option string is ignored."""
        # iterate over a copy since we delete from parser._actions
        for action in list(parser._actions):
            for option_string in action.option_strings:
                if option_string in ignored_opts:
                    del parser._actions[parser._actions.index(action)]
                    break

    def _load_app(self):
        """Import and instantiate the App subclass named by the argument.

        Returns None (silently) when the argument does not resolve to a
        `cliff.app.App` subclass, letting run() fall back to command mode.
        """
        mod_str, _sep, class_str = self.arguments[0].rpartition('.')
        if not mod_str:
            return
        try:
            importlib.import_module(mod_str)
        except ImportError:
            return
        try:
            cliff_app_class = getattr(sys.modules[mod_str], class_str)
        except AttributeError:
            return
        if not inspect.isclass(cliff_app_class):
            return
        if not issubclass(cliff_app_class, app.App):
            return
        app_arguments = self.options.get('arguments', '').split()
        return cliff_app_class(*app_arguments)

    def _load_command(self, manager, command_name):
        """Load a command using an instance of a `CommandManager`."""
        try:
            # find_command expects the value of argv so split to emulate that
            return manager.find_command(command_name.split())[0]
        except ValueError:
            raise self.error('"{}" is not a valid command in the "{}" '
                             'namespace'.format(
                                 command_name, manager.namespace))

    def _load_commands(self):
        """Return a {name: command_class} dict for the target namespace."""
        # TODO(sfinucan): We should probably add this wildcarding functionality
        # to the CommandManager itself to allow things like "show me the
        # commands like 'foo *'"
        command_pattern = self.options.get('command')
        manager = commandmanager.CommandManager(self.arguments[0])
        if command_pattern:
            commands = [x for x in manager.commands
                        if fnmatch.fnmatch(x, command_pattern)]
        else:
            commands = manager.commands.keys()

        if not commands:
            msg = 'No commands found in the "{}" namespace'
            if command_pattern:
                msg += ' using the "{}" command name/pattern'
            msg += ('. Are you sure this is correct and the application being '
                    'documented is installed?')
            raise self.warning(msg.format(self.arguments[0], command_pattern))

        return dict((name, self._load_command(manager, name))
                    for name in commands)

    def _generate_app_node(self, app, application_name):
        """Build the docutils nodes documenting an App's own parser."""
        ignored_opts = self._get_ignored_opts()

        parser = app.parser

        self._drop_ignored_options(parser, ignored_opts)

        parser.prog = application_name

        source_name = '<{}>'.format(app.__class__.__name__)
        result = statemachine.ViewList()
        for line in _format_parser(parser):
            result.append(line, source_name)

        section = nodes.section()
        self.state.nested_parse(result, 0, section)

        # return [section.children]
        return section.children

    def _generate_nodes_per_command(self, title, command_name, command_class,
                                    ignored_opts):
        """Generate the relevant Sphinx nodes.

        This doesn't bother using raw docutils nodes as they simply don't offer
        the power of directives, like Sphinx's 'option' directive. Instead, we
        generate reStructuredText and parse this in a nested context (to obtain
        correct header levels). Refer to [1] for more information.

        [1] http://www.sphinx-doc.org/en/stable/extdev/markupapi.html

        :param title: Title of command
        :param command_name: Name of command, as used on the command line
        :param command_class: Subclass of :py:class:`cliff.command.Command`
        :param prefix: Prefix to apply before command, if any
        :param ignored_opts: A list of options to exclude from output, if any
        :returns: A list of nested docutil nodes
        """
        command = command_class(None, None)
        parser = command.get_parser(command_name)
        ignored_opts = ignored_opts or []

        self._drop_ignored_options(parser, ignored_opts)

        section = nodes.section(
            '',
            nodes.title(text=title),
            ids=[nodes.make_id(title)],
            names=[nodes.fully_normalize_name(title)])

        source_name = '<{}>'.format(command.__class__.__name__)
        result = statemachine.ViewList()
        for line in _format_parser(parser):
            result.append(line, source_name)

        self.state.nested_parse(result, 0, section)

        return [section]

    def _generate_command_nodes(self, commands, application_name):
        """Build one titled section per command, sorted by command name."""
        ignored_opts = self._get_ignored_opts()
        output = []
        for command_name in sorted(commands):
            command_class = commands[command_name]
            title = command_name
            if application_name:
                # prefix the command with the application name on usage lines
                command_name = ' '.join([application_name, command_name])
            output.extend(self._generate_nodes_per_command(
                title, command_name, command_class, ignored_opts))

        return output

    def run(self):
        """Directive entry point: document an App or a command namespace."""
        self.env = self.state.document.settings.env

        application_name = (self.options.get('application')
                            or self.env.config.autoprogram_cliff_application)

        app = self._load_app()
        if app:
            return self._generate_app_node(app, application_name)

        commands = self._load_commands()
        return self._generate_command_nodes(commands, application_name)
def setup(app):
    """Sphinx extension entry point: register the directive and its config."""
    app.add_directive('autoprogram-cliff', AutoprogramCliffDirective)
    for option, default in (('autoprogram_cliff_application', ''),
                            ('autoprogram_cliff_ignored', ['--help'])):
        app.add_config_value(option, default, True)
|
|
'''
Created on Jul 14, 2011
@author: sean
'''
from __future__ import print_function
from opcode import *
import _ast
import sys
from meta.utils import py3, py3op, py2op
from meta.asttools.visitors.print_visitor import print_ast, dump_ast
from meta.asttools import cmp_ast
from meta.decompiler.expression_mutator import ExpressionMutator
# Compatibility shim: Python 3 has no ``print`` statement AST node and no
# ``basestring`` type, so provide stand-ins with the Python 2 names.
if py3:
    class _ast_Print: pass  # placeholder; _ast.Print does not exist on py3
    basestring = str
else:
    _ast_Print = _ast.Print
def isNone(node):
    """Return True for ``None`` itself or an AST load of the name ``None``."""
    if node is None:
        return True
    return (isinstance(node, _ast.Name)
            and node.id == 'None'
            and isinstance(node.ctx, _ast.Load))
def BINARY_(OP):
    """Build a handler for a binary opcode that pushes an ``_ast.BinOp``."""
    def BINARY_OP(self, instr):
        rhs = self.pop_ast_item()
        lhs = self.pop_ast_item()
        node = _ast.BinOp(left=lhs, right=rhs, op=OP(),
                          lineno=instr.lineno, col_offset=0)
        self.push_ast_item(node)
    return BINARY_OP
def INPLACE_(OP):
    """Build a handler for an in-place opcode that pushes an AugAssign."""
    def INPLACE_OP(self, instr):
        rhs = self.pop_ast_item()
        target = self.pop_ast_item()
        target.ctx = _ast.Store()  # the target is written, not read
        node = _ast.AugAssign(target=target, op=OP(), value=rhs,
                              lineno=instr.lineno, col_offset=0)
        self.push_ast_item(node)
    return INPLACE_OP
def UNARY_(OP):
    """Build a handler for a unary opcode that pushes an ``_ast.UnaryOp``."""
    def UNARY_OP(self, instr):
        operand = self.pop_ast_item()
        node = _ast.UnaryOp(op=OP(), operand=operand,
                            lineno=instr.lineno, col_offset=0)
        self.push_ast_item(node)
    return UNARY_OP
# Map the textual comparison operators found in COMPARE_OP bytecode
# arguments to their corresponding ``_ast`` comparison node classes.
CMP_OPMAP = {'>=' :_ast.GtE,
             '<=' :_ast.LtE,
             '>' :_ast.Gt,
             '<' :_ast.Lt,
             '==': _ast.Eq,
             '!=': _ast.NotEq,
             'in': _ast.In,
             'not in': _ast.NotIn,
             'is':_ast.Is,
             'is not':_ast.IsNot,
             }
def make_const(arg, lineno=0, col_offset=0):
    """Wrap a Python constant in the matching ``_ast`` literal node.

    Strings become Str, numbers Num, None a Name load, tuples a Tuple of
    recursively converted elements; anything else is returned unchanged.
    """
    kw = {'lineno': lineno, 'col_offset': col_offset}
    if isinstance(arg, basestring):
        return _ast.Str(s=arg, **kw)
    if isinstance(arg, (int, float, complex)):
        return _ast.Num(n=arg, **kw)
    if arg is None:
        return _ast.Name(id='None', ctx=_ast.Load(), **kw)
    if isinstance(arg, tuple):
        items = [make_const(item, **kw) for item in arg]
        return _ast.Tuple(elts=items, ctx=_ast.Load(), **kw)
    return arg
class SimpleInstructions(object):
def LOAD_CONST(self, instr):
const = make_const(instr.arg, lineno=instr.lineno, col_offset=0)
self.push_ast_item(const)
def LOAD_NAME(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
self.push_ast_item(name)
def LOAD_DEREF(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
self.push_ast_item(name)
def CALL_FUNCTION_VAR(self, instr):
arg = self.pop_ast_item()
self.CALL_FUNCTION(instr)
callfunc = self.pop_ast_item()
callfunc.starargs = arg
self.push_ast_item(callfunc)
def CALL_FUNCTION_KW(self, instr):
kwarg = self.pop_ast_item()
self.CALL_FUNCTION(instr)
callfunc = self.pop_ast_item()
callfunc.kwargs = kwarg
self.push_ast_item(callfunc)
def CALL_FUNCTION_VAR_KW(self, instr):
kwarg = self.pop_ast_item()
arg = self.pop_ast_item()
self.CALL_FUNCTION(instr)
callfunc = self.pop_ast_item()
callfunc.starargs = arg
callfunc.kwargs = kwarg
self.push_ast_item(callfunc)
    def CALL_FUNCTION(self, instr):
        """Pop keyword and positional arguments and push an ``_ast.Call``.

        The oparg packs the positional-argument count into its low byte and
        the keyword-argument count into its second byte (the classic
        CALL_FUNCTION encoding).
        """
        nkwargs = instr.oparg >> 8
        # mask the keyword-count byte back out of oparg, leaving nargs
        nargs = (~(nkwargs << 8)) & instr.oparg
        args = []
        keywords = []
        for _ in range(nkwargs):
            expr = self.pop_ast_item()
            name = self.pop_ast_item()
            # the keyword name was pushed as a Str constant, hence ``name.s``
            keyword = _ast.keyword(arg=name.s, value=expr, lineno=instr.lineno)
            keywords.insert(0, keyword)
        for _ in range(nargs):
            arg = self.pop_ast_item()
            args.insert(0, arg)
        if len(args) == 1 and isinstance(args[0], (_ast.FunctionDef, _ast.ClassDef)):
            # A call whose single argument is a fresh function/class def is a
            # decorator application, not a real call: record the decorator.
            function = args[0]
            if function.decorator_list is None:
                function.decorator_list = []
            node = self.pop_ast_item()
            function.decorator_list.insert(0, node)
            self.push_ast_item(function)
            return
        node = self.pop_ast_item()
        callfunc = _ast.Call(func=node, args=args, keywords=keywords, starargs=None, kwargs=None,
                             lineno=instr.lineno, col_offset=0)
        self.push_ast_item(callfunc)
def LOAD_FAST(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
self.push_ast_item(name)
def LOAD_GLOBAL(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
self.push_ast_item(name)
    def STORE_FAST(self, instr):
        # Local-variable stores are handled exactly like name stores.
        self.STORE_NAME(instr)
    def STORE_DEREF(self, instr):
        # Closure-cell stores are handled exactly like name stores.
        self.STORE_NAME(instr)
    def STORE_NAME(self, instr):
        """Pop a value and emit the statement binding it to ``instr.arg``.

        Imports, class/function definitions, augmented and chained
        assignments all end in a plain STORE_NAME in bytecode, so each is
        special-cased here.
        """
        value = self.pop_ast_item()
        value = self.process_ifexpr(value)
        if isinstance(value, _ast.Import):
            if value.from_:
                # ``from mod import name [as asname]``
                assert isinstance(self._ast_stack[-1], _ast.ImportFrom)
                from_ = self.pop_ast_item()
                as_name = instr.arg
                name = from_.names[0].name
                if as_name != name:
                    from_.names[0].asname = as_name
                self.push_ast_item(from_)
            else:
                # ``import mod [as asname]``; only record an alias when the
                # stored name differs from the module's top-level package.
                as_name = instr.arg
                if value.names[0].asname is None:
                    base_name = value.names[0].name.split('.')[0]
                    if base_name != as_name:
                        value.names[0].asname = as_name
                self.push_ast_item(value)
        elif isinstance(value, (_ast.Attribute)) and isinstance(value.value, (_ast.Import)):
            # ``import a.b as name`` leaves an Attribute over the Import node
            asname = instr.arg
            value = value.value
            value.names[0].asname = asname
            self.push_ast_item(value)
        elif isinstance(value, (_ast.ClassDef, _ast.FunctionDef)):
            # the definition's STORE_NAME carries its final bound name
            as_name = instr.arg
            value.name = as_name
            self.push_ast_item(value)
        elif isinstance(value, _ast.AugAssign):
            self.push_ast_item(value)
        elif isinstance(value, _ast.Assign):
            # chained assignment ``a = b = value``: discard the duplicated
            # value (presumably pushed by DUP_TOP — TODO confirm) and add a
            # new target to the existing Assign.
            _ = self.pop_ast_item()
            assname = _ast.Name(instr.arg, _ast.Store(), lineno=instr.lineno, col_offset=0)
            value.targets.append(assname)
            self.push_ast_item(value)
        else:
            # ordinary single assignment
            assname = _ast.Name(instr.arg, _ast.Store(), lineno=instr.lineno, col_offset=0)
            assign = _ast.Assign(targets=[assname], value=value, lineno=instr.lineno, col_offset=0)
            self.push_ast_item(assign)
@py3op
def STORE_LOCALS(self, instr):
    'remove Locals from class def'
    # Python 3 class bodies store a locals artefact; drop it so it does
    # not leak into the reconstructed class body.
    self.pop_ast_item()
def STORE_GLOBAL(self, instr):
    """Handle STORE_GLOBAL: record the name in a single Global statement
    kept at the bottom of the AST stack, then store like STORE_NAME."""
    if not isinstance(self._ast_stack[0], _ast.Global):
        self._ast_stack.insert(0, _ast.Global(names=[]))
    if instr.arg not in self._ast_stack[0].names:
        self._ast_stack[0].names.append(instr.arg)
    self.STORE_NAME(instr)
def RETURN_VALUE(self, instr):
value = self.pop_ast_item()
value = self.process_ifexpr(value)
ret = _ast.Return(value=value, lineno=instr.lineno, col_offset=0)
self.push_ast_item(ret)
def LOAD_ATTR(self, instr):
name = self.pop_ast_item()
attr = instr.arg
get_attr = _ast.Attribute(value=name, attr=attr, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
self.push_ast_item(get_attr)
def STORE_ATTR(self, instr):
    """Handle STORE_ATTR: pop object then RHS and push ``obj.<attr> = expr``."""
    attrname = instr.arg
    node = self.pop_ast_item()   # object whose attribute is being set
    expr = self.pop_ast_item()   # assigned value
    expr = self.process_ifexpr(expr)
    assattr = _ast.Attribute(value=node, attr=attrname, ctx=_ast.Store(), lineno=instr.lineno, col_offset=0)
    set_attr = _ast.Assign(targets=[assattr], value=expr, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(set_attr)
def IMPORT_NAME(self, instr):
    """Handle IMPORT_NAME: build an Import node from the stack operands.

    The two popped operands are the fromlist and (presumably) the import
    level — the latter is unused here. A non-None fromlist marks the
    ``from ... import ...`` form via the extra ``from_`` attribute, which
    STORE_NAME consumes later.
    """
    from_ = self.pop_ast_item()
    hmm = self.pop_ast_item()  # import level; discarded — TODO confirm
    names = [_ast.alias(name=instr.arg, asname=None)]
    import_ = _ast.Import(names=names, lineno=instr.lineno, col_offset=0)
    import_.from_ = not isNone(from_)
    self.push_ast_item(import_)
def IMPORT_FROM(self, instr):
    """Handle IMPORT_FROM: derive an ImportFrom from the pending Import.

    Both nodes are pushed back: STORE_NAME pops the Import marker first
    and then fills in the alias on the ImportFrom beneath it.
    """
    import_ = self.pop_ast_item()
    names = [_ast.alias(instr.arg, None)]
    modname = import_.names[0].name
    from_ = _ast.ImportFrom(module=modname, names=names, level=0, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(from_)
    self.push_ast_item(import_)
def IMPORT_STAR(self, instr):
    """Handle IMPORT_STAR: turn the pending Import into ``from mod import *``."""
    import_ = self.pop_ast_item()
    names = import_.names
    alias = _ast.alias(name='*', asname=None)
    from_ = _ast.ImportFrom(module=names[0].name, names=[alias], level=0, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(from_)
def process_ifexpr(self, node):
    """Normalise conditional-expression artefacts in *node*.

    The 'LOAD_LOCALS' in-band directive passes through untouched;
    everything else is rewritten by ExpressionMutator.
    """
    if node == 'LOAD_LOCALS': #Special directive
        return node
    return ExpressionMutator().visit(node)
def POP_TOP(self, instr):
    """Handle POP_TOP: discard top of stack, wrapping it as an Expr statement.

    Imports are dropped entirely (the store handlers emit them); a Print
    node being assembled keeps accumulating instead of being wrapped.
    """
    node = self.pop_ast_item()
    node = self.process_ifexpr(node)
    if isinstance(node, _ast.Import):
        return
    if isinstance(node, _ast_Print):
        # Drop the item beneath (presumably a duplicate from DUP_TOP —
        # confirm) and keep the in-progress Print on the stack.
        _ = self.pop_ast_item()
        self.push_ast_item(node)
        return
    discard = _ast.Expr(value=node, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(discard)
def ROT_TWO(self, instr):
    """Handle ROT_TWO: swap the two top stack items.

    When immediately followed by consecutive STORE_NAME instructions this
    is pattern-matched into a tuple assignment (``a, b = b, a``);
    otherwise the two items are simply re-pushed in swapped order.
    """
    one = self.pop_ast_item()
    two = self.pop_ast_item()
    if self.ilst[0].opname == 'STORE_NAME':
        kw = dict(lineno=instr.lineno, col_offset=0)
        stores = []
        while self.ilst[0].opname == 'STORE_NAME':
            stores.append(self.ilst.pop(0))
        assert len(stores) <= 3, stores
        elts_load = [one, two]
        if len(stores) == 3:
            # Three stores implies a third rotated value is still on the stack.
            elts_load.insert(0, self.pop_ast_item())
        tup_load = _ast.Tuple(elts=elts_load[::-1], ctx=_ast.Load(), **kw)
        elts_store = [_ast.Name(id=store.arg, ctx=_ast.Store(), **kw) for store in stores]
        tup_store = _ast.Tuple(elts=elts_store, ctx=_ast.Store(), **kw)
        assgn = _ast.Assign(value=tup_load, targets=[tup_store], **kw)
        self.push_ast_item(assgn)
    else:
        self.push_ast_item(one)
        self.push_ast_item(two)
# Generated handlers for binary operators (built by the BINARY_ factory
# defined earlier in this class/module).
BINARY_ADD = BINARY_(_ast.Add)
BINARY_SUBTRACT = BINARY_(_ast.Sub)
BINARY_DIVIDE = BINARY_(_ast.Div)
BINARY_TRUE_DIVIDE = BINARY_(_ast.Div)
BINARY_MULTIPLY = BINARY_(_ast.Mult)
BINARY_FLOOR_DIVIDE = BINARY_(_ast.FloorDiv)
BINARY_POWER = BINARY_(_ast.Pow)
BINARY_AND = BINARY_(_ast.BitAnd)
BINARY_OR = BINARY_(_ast.BitOr)
BINARY_XOR = BINARY_(_ast.BitXor)
BINARY_LSHIFT = BINARY_(_ast.LShift)
BINARY_RSHIFT = BINARY_(_ast.RShift)
BINARY_MODULO = BINARY_(_ast.Mod)

# Generated handlers for in-place (augmented assignment) operators.
INPLACE_ADD = INPLACE_(_ast.Add)
INPLACE_SUBTRACT = INPLACE_(_ast.Sub)
INPLACE_DIVIDE = INPLACE_(_ast.Div)
INPLACE_FLOOR_DIVIDE = INPLACE_(_ast.FloorDiv)
INPLACE_MULTIPLY = INPLACE_(_ast.Mult)
INPLACE_AND = INPLACE_(_ast.BitAnd)
INPLACE_OR = INPLACE_(_ast.BitOr)
INPLACE_LSHIFT = INPLACE_(_ast.LShift)
INPLACE_RSHIFT = INPLACE_(_ast.RShift)
INPLACE_POWER = INPLACE_(_ast.Pow)
INPLACE_MODULO = INPLACE_(_ast.Mod)
INPLACE_XOR = INPLACE_(_ast.BitXor)

# Generated handlers for unary operators.
UNARY_NOT = UNARY_(_ast.Not)
UNARY_NEGATIVE = UNARY_(_ast.USub)
UNARY_INVERT = UNARY_(_ast.Invert)
UNARY_POSITIVE = UNARY_(_ast.UAdd)
def COMPARE_OP(self, instr):
    """Handle COMPARE_OP: pop right then left operand, push a Compare node
    with the operator looked up in CMP_OPMAP."""
    rhs = self.pop_ast_item()
    lhs = self.pop_ast_item()
    op_node = CMP_OPMAP[instr.arg]()
    self.push_ast_item(
        _ast.Compare(left=lhs, ops=[op_node], comparators=[rhs],
                     lineno=instr.lineno, col_offset=0))
def YIELD_VALUE(self, instr):
value = self.pop_ast_item()
yield_ = _ast.Yield(value=value, lineno=instr.lineno, col_offset=0)
self.push_ast_item(yield_)
self.seen_yield = True
def BUILD_LIST(self, instr):
nitems = instr.oparg
nodes = []
list_ = _ast.List(elts=nodes, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
for i in range(nitems):
nodes.insert(0, self.pop_ast_item())
self.push_ast_item(list_)
def BUILD_TUPLE(self, instr):
    """Handle BUILD_TUPLE: pop ``oparg`` items and push a Tuple node.

    A tuple made up entirely of 'CLOSURE' markers is the free-variable
    tuple of a closure and is dropped rather than pushed.
    """
    nitems = instr.oparg
    nodes = []
    list_ = _ast.Tuple(elts=nodes, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
    for i in range(nitems):
        nodes.insert(0, self.pop_ast_item())
    if any([item == 'CLOSURE' for item in nodes]):
        assert all([item == 'CLOSURE' for item in nodes])
        return
    self.push_ast_item(list_)
def BUILD_SET(self, instr):
nitems = instr.oparg
nodes = []
list_ = _ast.Set(elts=nodes, ctx=_ast.Load(), lineno=instr.lineno, col_offset=0)
for i in range(nitems):
nodes.insert(0, self.pop_ast_item())
self.push_ast_item(list_)
def BUILD_MAP(self, instr):
    """Handle BUILD_MAP: reconstruct a dict display.

    For each entry, the instructions up to the matching STORE_MAP are
    decompiled as a sub-block that yields a (value, key) statement pair.
    """
    nitems = instr.oparg
    keys = []
    values = []
    for i in range(nitems):
        map_instrs = []
        while 1:
            new_instr = self.ilst.pop(0)
            if new_instr.opname == 'STORE_MAP':
                break
            map_instrs.append(new_instr)
        items = self.decompile_block(map_instrs).stmnt()
        assert len(items) == 2
        # The sub-block produces the value first, then the key.
        values.append(items[0])
        keys.append(items[1])
    list_ = _ast.Dict(keys=keys, values=values, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(list_)
def UNPACK_SEQUENCE(self, instr):
    """Handle UNPACK_SEQUENCE: rebuild ``a, b, ... = expr``.

    Each following store instruction is visited against a sentinel None
    value; the resulting per-name Assign targets are collected into a
    single Tuple target.
    """
    nargs = instr.oparg
    nodes = []
    ast_tuple = _ast.Tuple(elts=nodes, ctx=_ast.Store(), lineno=instr.lineno, col_offset=0)
    for i in range(nargs):
        nex_instr = self.ilst.pop(0)
        self.push_ast_item(None)  # sentinel value for the store handler
        self.visit(nex_instr)
        node = self.pop_ast_item()
        nodes.append(node.targets[0])
    expr = self.pop_ast_item()
    if isinstance(expr, _ast.Assign):
        # Chained unpacking: extend the existing assignment and verify
        # the duplicated RHS matches.
        assgn = expr
        assgn.targets.append(ast_tuple)
        value_dup = self.pop_ast_item()
        assert cmp_ast(assgn.value, value_dup)
    else:
        assgn = _ast.Assign(targets=[ast_tuple], value=expr, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(assgn)
def DELETE_NAME(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Del(), lineno=instr.lineno, col_offset=0)
delete = _ast.Delete(targets=[name], lineno=instr.lineno, col_offset=0)
self.push_ast_item(delete)
def DELETE_FAST(self, instr):
name = _ast.Name(id=instr.arg, ctx=_ast.Del(), lineno=instr.lineno, col_offset=0)
delete = _ast.Delete(targets=[name], lineno=instr.lineno, col_offset=0)
self.push_ast_item(delete)
def DELETE_ATTR(self, instr):
expr = self.pop_ast_item()
attr = _ast.Attribute(value=expr, attr=instr.arg, ctx=_ast.Del(), lineno=instr.lineno, col_offset=0)
delete = _ast.Delete(targets=[attr], lineno=instr.lineno, col_offset=0)
self.push_ast_item(delete)
def EXEC_STMT(self, instr):
    """Handle EXEC_STMT (Python 2): pop locals, globals, code; push an Exec.

    ``exec expr`` with no explicit namespaces is normalised back to
    absent arguments (identical locals/globals collapse, a literal None
    global becomes no globals).
    """
    locals_ = self.pop_ast_item()
    globals_ = self.pop_ast_item()
    expr = self.pop_ast_item()
    if locals_ is globals_:
        locals_ = None
    if isinstance(globals_, _ast.Name) and getattr(globals_, 'id',) == 'None':
        globals_ = None
    exec_ = _ast.Exec(body=expr, globals=globals_, locals=locals_, lineno=instr.lineno, col_offset=0)
    self.push_ast_item(exec_)
def DUP_TOP(self, instr):
    """Handle DUP_TOP: duplicate the top stack item."""
    top = self.pop_ast_item()
    for _ in (0, 1):
        self.push_ast_item(top)
@py3op
def DUP_TOP_TWO(self, instr):
    """Handle DUP_TOP_TWO (Python 3): duplicate the two top stack items,
    preserving their relative order."""
    expr1 = self.pop_ast_item()
    expr2 = self.pop_ast_item()
    self.push_ast_item(expr2)
    self.push_ast_item(expr1)
    self.push_ast_item(expr2)
    self.push_ast_item(expr1)
def DUP_TOPX(self, instr):
    """Handle DUP_TOPX (Python 2): duplicate the top ``oparg`` stack items.

    NOTE(review): extends self._ast_stack directly instead of going
    through push_ast_item — presumably equivalent; confirm push_ast_item
    has no extra side effects.
    """
    exprs = []
    for i in range(instr.oparg):
        expr = self.pop_ast_item()
        exprs.insert(0, expr)
    self._ast_stack.extend(exprs)
    self._ast_stack.extend(exprs)
def ROT_THREE(self, instr):
    """Handle ROT_THREE: rotate the three top stack items.

    Pops (top, second, third) and re-pushes them as top, third, second.
    """
    top = self.pop_ast_item()
    second = self.pop_ast_item()
    third = self.pop_ast_item()
    for item in (top, third, second):
        self.push_ast_item(item)
def ROT_FOUR(self, instr):
    """Handle ROT_FOUR: rotate the four top stack items.

    Pops (top, second, third, fourth) and re-pushes as top, fourth,
    third, second.
    """
    top = self.pop_ast_item()
    second = self.pop_ast_item()
    third = self.pop_ast_item()
    fourth = self.pop_ast_item()
    for item in (top, fourth, third, second):
        self.push_ast_item(item)
def PRINT_ITEM(self, instr):
    """Handle PRINT_ITEM (Python 2): append one item to the current print.

    Consecutive PRINT_ITEMs merge into the open Print node (no dest, not
    yet newline-terminated); otherwise a new Print is started.
    """
    item = self.pop_ast_item()
    if self._ast_stack:
        print_ = self._ast_stack[-1]  # peek at the node beneath
    else:
        print_ = None
    if isinstance(print_, _ast_Print) and not print_.nl and print_.dest == None:
        print_.values.append(item)
    else:
        print_ = _ast_Print(dest=None, values=[item], nl=False, lineno=instr.lineno, col_offset=0)
        self.push_ast_item(print_)
def PRINT_NEWLINE(self, instr):
    """Handle PRINT_NEWLINE (Python 2): terminate the open Print node.

    Marks the in-progress Print as newline-terminated, or pushes a bare
    ``print`` statement when nothing is being assembled.
    """
    item = self._ast_stack[-1]  # peek only; the node stays on the stack
    if isinstance(item, _ast_Print) and not item.nl and item.dest == None:
        item.nl = True
    else:
        print_ = _ast_Print(dest=None, values=[], nl=True, lineno=instr.lineno, col_offset=0)
        self.push_ast_item(print_)
def PRINT_ITEM_TO(self, instr):
    """Handle PRINT_ITEM_TO (Python 2): one item of ``print >>stream, ...``.

    Either extends the Print already being assembled for this stream
    (dropping the duplicated Print beneath it) or starts a new Print
    targeting the popped stream.
    """
    stream = self.pop_ast_item()
    print_ = None
    if isinstance(stream, _ast_Print) and not stream.nl:
        print_ = stream
        stream = self.pop_ast_item()
        dup_print = self.pop_ast_item()
        assert dup_print is print_
        self.push_ast_item(stream)
    else:
        print_ = _ast_Print(dest=stream, values=[], nl=False, lineno=instr.lineno, col_offset=0)
    item = self.pop_ast_item()
    print_.values.append(item)
    self.push_ast_item(print_)
def PRINT_NEWLINE_TO(self, instr):
    """Handle PRINT_NEWLINE_TO (Python 2): finish a ``print >>stream``.

    Marks the matching in-progress Print as newline-terminated, or pushes
    a bare ``print >>stream`` when none is open for this stream.
    """
    item = self.pop_ast_item()
    stream = self.pop_ast_item()
    self.push_ast_item(item)
    if isinstance(item, _ast_Print) and not item.nl and item.dest is stream:
        item.nl = True
    else:
        print_ = _ast_Print(dest=stream, values=[], nl=True, lineno=instr.lineno, col_offset=0)
        self.push_ast_item(print_)
def format_slice(self, index, kw):
    """Wrap *index* for use as a Subscript slice.

    Tuples containing at least one Slice become an ExtSlice of the
    per-dimension wrappers. Note the subtlety in the plain-tuple branch:
    the ORIGINAL tuple is wrapped in a single Index, so the Index nodes
    built for each dim in the loop are deliberately unused there.
    """
    if isinstance(index, _ast.Tuple):
        dims = []
        have_slice = False
        for dim in index.elts:
            if not isinstance(dim, _ast.Slice):
                dim = _ast.Index(value=dim, **kw)
            else:
                have_slice = True
            dims.append(dim)
        if have_slice:
            index = _ast.ExtSlice(dims=dims, **kw)
        else:
            index = _ast.Index(value=index, **kw)
    elif not isinstance(index, _ast.Slice):
        index = _ast.Index(value=index, **kw)
    return index
def BINARY_SUBSCR(self, instr):
index = self.pop_ast_item()
value = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
index = self.format_slice(index, kw)
subscr = _ast.Subscript(value=value, slice=index, ctx=_ast.Load(), **kw)
self.push_ast_item(subscr)
def SLICE_0(self, instr):
'obj[:]'
value = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)
self.push_ast_item(subscr)
def SLICE_1(self, instr):
'obj[lower:]'
lower = self.pop_ast_item()
value = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)
self.push_ast_item(subscr)
def SLICE_2(self, instr):
'obj[:stop]'
upper = self.pop_ast_item()
value = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=upper, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Load(), **kw)
self.push_ast_item(subscr)
def SLICE_3(self, instr):
    """Handle SLICE+3: pop upper, lower, container; push ``obj[lower:upper]``."""
    up = self.pop_ast_item()
    low = self.pop_ast_item()
    container = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    bounds = _ast.Slice(lower=low, upper=up, step=None, **kw)
    self.push_ast_item(
        _ast.Subscript(value=container, slice=bounds, ctx=_ast.Load(), **kw))
def BUILD_SLICE(self, instr):
    """Handle BUILD_SLICE: pop up to three operands and push a Slice node.

    Operands equal to the None constant are normalised to absent bounds.
    """
    step = None
    upper = None
    lower = None
    if instr.oparg > 2:
        step = self.pop_ast_item()
    if instr.oparg > 1:
        upper = self.pop_ast_item()
    if instr.oparg > 0:
        lower = self.pop_ast_item()
    upper = None if isNone(upper) else upper
    lower = None if isNone(lower) else lower
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=lower, step=step, upper=upper, **kw)
    self.push_ast_item(slice)
def STORE_SLICE_0(self, instr):
'obj[:] = expr'
value = self.pop_ast_item()
expr = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.push_ast_item(assign)
def STORE_SLICE_1(self, instr):
'obj[lower:] = expr'
lower = self.pop_ast_item()
value = self.pop_ast_item()
expr = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=lower, step=None, upper=None, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.push_ast_item(assign)
def STORE_SLICE_2(self, instr):
'obj[:upper] = expr'
upper = self.pop_ast_item()
value = self.pop_ast_item()
expr = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
slice = _ast.Slice(lower=None, step=None, upper=upper, **kw)
subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
assign = _ast.Assign(targets=[subscr], value=expr, **kw)
self.push_ast_item(assign)
def STORE_SLICE_3(self, instr):
    'obj[lower:upper] = expr'
    # Pops upper, lower, container, RHS in stack order. An AugAssign RHS
    # means this is ``obj[l:u] += ...``: re-use it after checking its
    # target matches the subscript rebuilt here.
    upper = self.pop_ast_item()
    lower = self.pop_ast_item()
    value = self.pop_ast_item()
    expr = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=lower, step=None, upper=upper, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
    if isinstance(expr, _ast.AugAssign):
        assign = expr
        result = cmp_ast(expr.target, subscr)
        assert result
    else:
        assign = _ast.Assign(targets=[subscr], value=expr, **kw)
    self.push_ast_item(assign)
def DELETE_SLICE_0(self, instr):
    'del obj[:]'
    # Docstring fixed: this handler emits a Delete, not an assignment.
    value = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=None, step=None, upper=None, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
    delete = _ast.Delete(targets=[subscr], **kw)
    self.push_ast_item(delete)
def DELETE_SLICE_1(self, instr):
    'del obj[lower:]'
    # Docstring fixed: this handler emits a Delete, not an assignment.
    lower = self.pop_ast_item()
    value = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=lower, step=None, upper=None, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
    delete = _ast.Delete(targets=[subscr], **kw)
    self.push_ast_item(delete)
def DELETE_SLICE_2(self, instr):
    'del obj[:upper]'
    # Docstring fixed: this handler emits a Delete, not an assignment.
    upper = self.pop_ast_item()
    value = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=None, step=None, upper=upper, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
    delete = _ast.Delete(targets=[subscr], **kw)
    self.push_ast_item(delete)
def DELETE_SLICE_3(self, instr):
    'del obj[lower:upper]'
    # Docstring fixed: this handler emits a Delete, not an assignment.
    upper = self.pop_ast_item()
    lower = self.pop_ast_item()
    value = self.pop_ast_item()
    kw = dict(lineno=instr.lineno, col_offset=0)
    slice = _ast.Slice(lower=lower, step=None, upper=upper, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Del(), **kw)
    delete = _ast.Delete(targets=[subscr], **kw)
    self.push_ast_item(delete)
def STORE_SUBSCR(self, instr):
    """Handle STORE_SUBSCR: push ``container[index] = expr``.

    An AugAssign RHS (``container[index] += ...``) already carries its
    own target and is pushed through unchanged.
    """
    index = self.pop_ast_item()
    value = self.pop_ast_item()
    expr = self.pop_ast_item()
    expr = self.process_ifexpr(expr)
    if isinstance(expr, _ast.AugAssign):
        self.push_ast_item(expr)
    else:
        kw = dict(lineno=instr.lineno, col_offset=0)
        index = self.format_slice(index, kw)
        subscr = _ast.Subscript(value=value, slice=index, ctx=_ast.Store(), **kw)
        assign = _ast.Assign(targets=[subscr], value=expr, **kw)
        self.push_ast_item(assign)
def DELETE_SUBSCR(self, instr):
index = self.pop_ast_item()
value = self.pop_ast_item()
kw = dict(lineno=instr.lineno, col_offset=0)
index = self.format_slice(index, kw)
subscr = _ast.Subscript(value=value, slice=index, ctx=_ast.Del(), **kw)
delete = _ast.Delete(targets=[subscr], **kw)
self.push_ast_item(delete)
@py2op
def RAISE_VARARGS(self, instr):
    """Handle RAISE_VARARGS (Python 2): pop up to three operands —
    traceback, instance, type — and push a Raise node."""
    nargs = instr.oparg
    tback = None
    inst = None
    type = None
    if nargs > 2:
        tback = self.pop_ast_item()
    if nargs > 1:
        inst = self.pop_ast_item()
    if nargs > 0:
        type = self.pop_ast_item()
    raise_ = _ast.Raise(tback=tback, inst=inst, type=type,
                        lineno=instr.lineno, col_offset=0)
    self.push_ast_item(raise_)
@RAISE_VARARGS.py3op
def RAISE_VARARGS(self, instr):
    """Handle RAISE_VARARGS (Python 3): pop up to two operands —
    cause, exception — and push a Raise node."""
    nargs = instr.oparg
    cause = None
    exc = None
    if nargs > 1:
        cause = self.pop_ast_item()
    if nargs > 0:
        exc = self.pop_ast_item()
    raise_ = _ast.Raise(exc=exc, cause=cause,
                        lineno=instr.lineno, col_offset=0)
    self.push_ast_item(raise_)
@py3op
def EXTENDED_ARG(self, instr):
    """Rebuild annotated-argument nodes (Python 3).

    NOTE(review): this pops a code object and a tuple of argument-name
    constants and rebuilds ``_ast.arg`` nodes with their annotations —
    behaviour tied to this decompiler's MAKE_FUNCTION handling, not the
    generic CPython EXTENDED_ARG prefix semantics; confirm against the
    caller.
    """
    code = self.pop_ast_item()
    argument_names = self.pop_ast_item()
    assert len(argument_names.elts) == (instr.oparg - 1)
    args = []
    kw = dict(lineno=instr.lineno, col_offset=0)
    for argument_name in argument_names.elts[::-1]:
        annotation = self.pop_ast_item()
        arg = _ast.arg(annotation=annotation, arg=argument_name.s, **kw) #@UndefinedVariable
        args.append(arg)
    for arg in args:
        self.push_ast_item(arg)
    self.push_ast_item(code)
@EXTENDED_ARG.py2op
def EXTENDED_ARG(self, instr):
    # Argument annotations do not exist in Python 2 bytecode.
    raise Exception("This is not available in python 2.x")
|
|
# stdlib
from collections import defaultdict
from Queue import Empty, Queue
import threading
import time
# project
from checks import AgentCheck
from checks.libs.thread_pool import Pool
from config import _is_affirmative
TIMEOUT = 180               # seconds before a stuck job triggers a pool restart
DEFAULT_SIZE_POOL = 6       # default upper bound on worker threads
MAX_LOOP_ITERATIONS = 1000  # max results drained per _process_results call
FAILURE = "FAILURE"         # sentinel queued when a worker raises
class Status:
    # Possible service states reported by network checks.
    DOWN = "DOWN"
    WARNING = "WARNING"
    CRITICAL = "CRITICAL"
    UP = "UP"
class EventType:
    # Event type strings emitted on service state transitions.
    DOWN = "servicecheck.state_change.down"
    UP = "servicecheck.state_change.up"
class NetworkCheck(AgentCheck):
    """
    Services checks inherits from this class.
    This class should never be directly instanciated.

    Work flow:
        The main agent loop will call the check function for each instance for
        each iteration of the loop.
        The check method will make an asynchronous call to the _process method in
        one of the thread initiated in the thread pool created in this class constructor.
        The _process method will call the _check method of the inherited class
        which will perform the actual check.

        The _check method must return a tuple which first element is either
        Status.UP or Status.DOWN.
        The second element is a short error message that will be displayed
        when the service turns down.
    """
    # NOTE: the docstring above was previously a no-op string expression in
    # the middle of the class body; it is now the actual class docstring.

    SOURCE_TYPE_NAME = 'servicecheck'
    SERVICE_CHECK_PREFIX = 'network_check'

    # Map internal Status values onto AgentCheck service-check constants.
    STATUS_TO_SERVICE_CHECK = {
        Status.UP: AgentCheck.OK,
        Status.WARNING: AgentCheck.WARNING,
        Status.CRITICAL: AgentCheck.CRITICAL,
        Status.DOWN: AgentCheck.CRITICAL,
    }

    def __init__(self, name, init_config, agentConfig, instances):
        AgentCheck.__init__(self, name, init_config, agentConfig, instances)

        # A dictionary to keep track of service statuses
        self.statuses = {}
        self.notified = {}
        self.nb_failures = 0
        self.pool_started = False

        # Make sure every instance has a name that we use as a unique key
        # to keep track of statuses
        names = []
        for inst in instances:
            if 'name' not in inst:
                raise Exception("All instances should have a 'name' parameter,"
                                " error on instance: {0}".format(inst))
            if inst['name'] in names:
                raise Exception("Duplicate names for instances with name {0}"
                                .format(inst['name']))
            # BUG FIX: the name was never recorded, so the duplicate check
            # above could never trigger.
            names.append(inst['name'])

    def stop(self):
        """Stop the worker pool and mark it as not started."""
        self.stop_pool()
        self.pool_started = False

    def start_pool(self):
        """Create the worker pool, results queue and job bookkeeping."""
        # The pool size should be the minimum between the number of instances
        # and the DEFAULT_SIZE_POOL. It can also be overridden by the
        # 'threads_count' parameter in the init_config of the check.
        self.log.info("Starting Thread Pool")
        default_size = min(self.instance_count(), DEFAULT_SIZE_POOL)
        self.pool_size = int(self.init_config.get('threads_count', default_size))

        self.pool = Pool(self.pool_size)

        self.resultsq = Queue()
        self.jobs_status = {}
        self.jobs_results = {}
        self.pool_started = True

    def stop_pool(self):
        """Terminate the pool (if started) and forget all job bookkeeping."""
        self.log.info("Stopping Thread Pool")
        if self.pool_started:
            self.pool.terminate()
            self.pool.join()
            self.jobs_status.clear()
            assert self.pool.get_nworkers() == 0

    def restart_pool(self):
        """Tear the pool down and bring it back up."""
        self.stop_pool()
        self.start_pool()

    def check(self, instance):
        """Schedule one asynchronous run of ``_check`` for this instance."""
        if not self.pool_started:
            self.start_pool()
        if threading.activeCount() > 5 * self.pool_size + 5:
            # On Windows the agent runs on multiple threads so we need to
            # have an offset of 5 in case the pool_size is 1.
            raise Exception("Thread number (%s) is exploding. Skipping this check" % threading.activeCount())
        self._process_results()
        self._clean()
        name = instance.get('name', None)
        if name is None:
            self.log.error('Each service check must have a name')
            return

        if name not in self.jobs_status:
            # A given instance should be processed one at a time
            self.jobs_status[name] = time.time()
            self.jobs_results[name] = self.pool.apply_async(self._process, args=(instance,))
        else:
            self.log.error("Instance: %s skipped because it's already running." % name)

    def _process(self, instance):
        """Worker-thread entry point: run ``_check`` and queue its results."""
        try:
            statuses = self._check(instance)

            if isinstance(statuses, tuple):
                # Assume the check only returns one service check
                status, msg = statuses
                self.resultsq.put((status, msg, None, instance))

            elif isinstance(statuses, list):
                for status in statuses:
                    sc_name, status, msg = status
                    self.resultsq.put((status, msg, sc_name, instance))

        except Exception:
            # Report any failure through the queue so the main thread can
            # count failures and restart the pool if needed.
            result = (FAILURE, FAILURE, FAILURE, FAILURE)
            self.resultsq.put(result)

    def _process_results(self):
        """Drain the results queue, emit service checks and legacy events."""
        for i in xrange(MAX_LOOP_ITERATIONS):
            try:
                # We want to fetch the result in a non blocking way
                status, msg, sc_name, instance = self.resultsq.get_nowait()
            except Empty:
                break

            if status == FAILURE:
                self.nb_failures += 1
                if self.nb_failures >= self.pool_size - 1:
                    self.nb_failures = 0
                    self.restart_pool()
                continue

            self.report_as_service_check(sc_name, status, instance, msg)

            # FIXME: 5.3, this has been deprecated before, get rid of events
            # Don't create any event to avoid duplicates with server side
            # service_checks
            skip_event = _is_affirmative(instance.get('skip_event', False))
            instance_name = instance['name']
            if not skip_event:
                self.warning("Using events for service checks is deprecated in favor of monitors and will be removed in future versions of the Datadog Agent.")
                event = None

                if instance_name not in self.statuses:
                    self.statuses[instance_name] = defaultdict(list)

                self.statuses[instance_name][sc_name].append(status)

                window = int(instance.get('window', 1))

                if window > 256:
                    self.log.warning("Maximum window size (256) exceeded, defaulting it to 256")
                    window = 256

                threshold = instance.get('threshold', 1)

                if len(self.statuses[instance_name][sc_name]) > window:
                    self.statuses[instance_name][sc_name].pop(0)

                nb_failures = self.statuses[instance_name][sc_name].count(Status.DOWN)

                if nb_failures >= threshold:
                    # Transition to DOWN: notify only once per transition.
                    if self.notified.get((instance_name, sc_name), Status.UP) != Status.DOWN:
                        event = self._create_status_event(sc_name, status, msg, instance)
                        self.notified[(instance_name, sc_name)] = Status.DOWN
                else:
                    # Transition back to UP: notify only once per transition.
                    if self.notified.get((instance_name, sc_name), Status.UP) != Status.UP:
                        event = self._create_status_event(sc_name, status, msg, instance)
                        self.notified[(instance_name, sc_name)] = Status.UP

                if event is not None:
                    self.events.append(event)

            # The job is finished here, this instance can be re processed
            if instance_name in self.jobs_status:
                del self.jobs_status[instance_name]

            # if an exception happened, log it
            if instance_name in self.jobs_results:
                ret = self.jobs_results[instance_name].get()
                if isinstance(ret, Exception):
                    self.log.exception("Exception in worker thread: {0}".format(ret))
                del self.jobs_results[instance_name]

    def _check(self, instance):
        """This function should be implemented by inherited classes"""
        raise NotImplementedError

    def _clean(self):
        """Restart the pool if any job has run longer than TIMEOUT seconds."""
        now = time.time()
        for name, start_time in self.jobs_status.iteritems():
            if now - start_time > TIMEOUT:
                self.log.critical("Restarting Pool. One check is stuck: %s" % name)
                self.restart_pool()
                break
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""URL downloading API.
Methods defined in this module:
   Fetch(): fetches a given URL using an HTTP request with one of the
   methods GET, POST, HEAD, PUT, DELETE or PATCH.
"""
import httplib
import os
import StringIO
import threading
import UserDict
import urllib2
import urlparse
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import urlfetch_service_pb
from google.appengine.api.urlfetch_errors import *
from google.appengine.runtime import apiproxy_errors
MAX_REDIRECTS = 5  # redirect limit when follow_redirects is enabled

# Numeric HTTP method constants accepted by fetch()/make_fetch_call().
GET = 1
POST = 2
HEAD = 3
PUT = 4
DELETE = 5
PATCH = 6

# Mapping from method-name strings to the numeric constants above.
_URL_STRING_MAP = {
    'GET': GET,
    'POST': POST,
    'HEAD': HEAD,
    'PUT': PUT,
    'DELETE': DELETE,
    'PATCH': PATCH,
}

_VALID_METHODS = frozenset(_URL_STRING_MAP.values())

# Per-thread settings storage (presumably holds the default fetch
# deadline used by create_rpc — confirm against get_default_fetch_deadline).
_thread_local_settings = threading.local()
class _CaselessDict(UserDict.IterableUserDict):
  """Case insensitive dictionary.

  This class was lifted from os.py and slightly modified.
  """

  def __init__(self, dict=None, **kwargs):
    # Maps lowercased key -> the original-cased key stored in self.data.
    self.caseless_keys = {}
    UserDict.IterableUserDict.__init__(self, dict, **kwargs)

  def __setitem__(self, key, item):
    """Set dictionary item.

    Args:
      key: Key of new item.  Key is case insensitive, so "d['Key'] = value "
        will replace previous values set by "d['key'] = old_value".
      item: Item to store.
    """
    caseless_key = key.lower()
    if caseless_key in self.caseless_keys:
      # Replacing under a different casing: drop the previously-cased
      # entry so only one spelling of the key survives.
      del self.data[self.caseless_keys[caseless_key]]
    self.caseless_keys[caseless_key] = key
    self.data[key] = item

  def __getitem__(self, key):
    """Get dictionary item.

    Args:
      key: Key of item to get.  Key is case insensitive, so "d['Key']" is the
        same as "d['key']".

    Returns:
      Item associated with key.
    """
    return self.data[self.caseless_keys[key.lower()]]

  def __delitem__(self, key):
    """Remove item from dictionary.

    Args:
      key: Key of item to remove.  Key is case insensitive, so "del d['Key']"
        is the same as "del d['key']"
    """
    caseless_key = key.lower()
    del self.data[self.caseless_keys[caseless_key]]
    del self.caseless_keys[caseless_key]

  def has_key(self, key):
    """Determine if dictionary has item with specific key.

    Args:
      key: Key to check for presence.  Key is case insensitive, so
        "d.has_key('Key')" evaluates to the same value as "d.has_key('key')".

    Returns:
      True if dictionary contains key, else False.
    """
    return key.lower() in self.caseless_keys

  def __contains__(self, key):
    """Same as 'has_key', but used for 'in' operator.'"""
    return self.has_key(key)

  def get(self, key, failobj=None):
    """Get dictionary item, defaulting to another value if it does not exist.

    Args:
      key: Key of item to get.  Key is case insensitive, so "d['Key']" is the
        same as "d['key']".
      failobj: Value to return if key not in dictionary.
    """
    try:
      cased_key = self.caseless_keys[key.lower()]
    except KeyError:
      return failobj
    return self.data[cased_key]

  def update(self, dict=None, **kwargs):
    """Update dictionary using values from another dictionary and keywords.

    Args:
      dict: Dictionary to update from.
      kwargs: Keyword arguments to update from.
    """
    if dict:
      try:
        keys = dict.keys()
      except AttributeError:
        # No keys() method: assume a sequence of (key, value) pairs.
        for k, v in dict:
          self[k] = v
      else:
        for k in keys:
          self[k] = dict[k]
    if kwargs:
      # Recurse once so keyword pairs go through __setitem__ as well.
      self.update(kwargs)

  def copy(self):
    """Make a shallow, case sensitive copy of self."""
    return dict(self)
def _is_fetching_self(url, method):
  """Checks if the fetch is for the same URL from which it originated.

  Args:
    url: str, The URL being fetched.
    method: value from _VALID_METHODS.

  Returns:
    boolean indicating whether or not it seems that the app is trying to
    fetch itself.
  """
  # Only a GET of the currently-served path can be a self-fetch, and we
  # need the CGI environment to decide that.
  if method != GET:
    return False
  if "HTTP_HOST" not in os.environ or "PATH_INFO" not in os.environ:
    return False

  _, host_port, path, _, _ = urlparse.urlsplit(url)
  if host_port != os.environ['HTTP_HOST']:
    return False

  current_path = urllib2.unquote(os.environ['PATH_INFO'])
  desired_path = urllib2.unquote(path)
  return (current_path == desired_path or
          (current_path in ('', '/') and desired_path in ('', '/')))
def create_rpc(deadline=None, callback=None):
  """Creates an RPC object for use with the urlfetch API.

  Args:
    deadline: Optional deadline in seconds for the operation; the default
      is a system-specific deadline (typically 5 seconds).
    callback: Optional callable to invoke on completion.

  Returns:
    An apiproxy_stub_map.UserRPC object specialized for this service.
  """
  effective_deadline = (get_default_fetch_deadline()
                        if deadline is None else deadline)
  return apiproxy_stub_map.UserRPC('urlfetch', effective_deadline, callback)
def fetch(url, payload=None, method=GET, headers=None,
          allow_truncated=False, follow_redirects=True,
          deadline=None, validate_certificate=None):
  """Fetches the given HTTP URL, blocking until the result is returned.

  Other optional parameters are:
     method: The constants GET, POST, HEAD, PUT, DELETE, or PATCH or the
       same HTTP methods as strings.
     payload: POST, PUT, or PATCH payload (implies method is not GET, HEAD,
       or DELETE). this is ignored if the method is not POST, PUT, or PATCH.
     headers: dictionary of HTTP headers to send with the request
     allow_truncated: if true, truncate large responses and return them
       without error. Otherwise, ResponseTooLargeError is raised when a
       response is truncated.
     follow_redirects: if true (the default), redirects are
       transparently followed and the response (if less than 5
       redirects) contains the final destination's payload and the
       response status is 200.  You lose, however, the redirect chain
       information.  If false, you see the HTTP response yourself,
       including the 'Location' header, and redirects are not
       followed.
     deadline: deadline in seconds for the operation.
     validate_certificate: if true, do not send request to server unless the
       certificate is valid, signed by a trusted CA and the hostname matches
       the certificate. A value of None indicates that the behaviour will be
       chosen by the underlying urlfetch implementation.

  We use a HTTP/1.1 compliant proxy to fetch the result.

  The returned data structure has the following fields:
     content: string containing the response from the server
     status_code: HTTP status code returned by the server
     headers: dictionary of headers returned by the server

  If the URL is an empty string or obviously invalid, we throw an
  urlfetch.InvalidURLError. If the server cannot be contacted, we throw a
  urlfetch.DownloadError.  Note that HTTP errors are returned as a part
  of the returned structure, so HTTP errors like 404 do not result in an
  exception.
  """
  # FIX: the default used to be a shared mutable ``{}``; use a None
  # sentinel and build a fresh dict per call (passing None is now valid).
  if headers is None:
    headers = {}
  rpc = create_rpc(deadline=deadline)
  make_fetch_call(rpc, url, payload, method, headers,
                  allow_truncated, follow_redirects, validate_certificate)
  return rpc.get_result()
def make_fetch_call(rpc, url, payload=None, method=GET, headers={},
                    allow_truncated=False, follow_redirects=True,
                    validate_certificate=None):
  """Executes the RPC call to fetch a given HTTP URL.

  The first argument is a UserRPC instance.  See urlfetch.fetch for a
  thorough description of remaining arguments.

  NOTE(review): ``headers={}`` is a shared mutable default; it is only
  iterated here, never mutated, so it is harmless — but callers should
  not rely on mutating it.

  Raises:
    InvalidMethodError: if requested method is not in _VALID_METHODS
    ResponseTooLargeError: if the response payload is too large
    InvalidURLError: if there are issues with the content/size of the
      requested URL

  Returns:
    The rpc object passed into the function.
  """
  assert rpc.service == 'urlfetch', repr(rpc.service)
  # Normalise string method names ('get', 'POST', ...) to the numeric
  # constants before validation.
  if isinstance(method, basestring):
    method = method.upper()
  method = _URL_STRING_MAP.get(method, method)
  if method not in _VALID_METHODS:
    raise InvalidMethodError('Invalid method %s.' % str(method))

  if _is_fetching_self(url, method):
    raise InvalidURLError("App cannot fetch the same URL as the one used for "
                          "the request.")

  request = urlfetch_service_pb.URLFetchRequest()
  response = urlfetch_service_pb.URLFetchResponse()

  if isinstance(url, unicode):
    url = url.encode('UTF-8')
  request.set_url(url)

  if method == GET:
    request.set_method(urlfetch_service_pb.URLFetchRequest.GET)
  elif method == POST:
    request.set_method(urlfetch_service_pb.URLFetchRequest.POST)
  elif method == HEAD:
    request.set_method(urlfetch_service_pb.URLFetchRequest.HEAD)
  elif method == PUT:
    request.set_method(urlfetch_service_pb.URLFetchRequest.PUT)
  elif method == DELETE:
    request.set_method(urlfetch_service_pb.URLFetchRequest.DELETE)
  elif method == PATCH:
    request.set_method(urlfetch_service_pb.URLFetchRequest.PATCH)

  # Payload is only meaningful for body-carrying methods.
  if payload and method in (POST, PUT, PATCH):
    request.set_payload(payload)

  for key, value in headers.iteritems():
    header_proto = request.add_header()
    header_proto.set_key(key)
    # Header values are coerced to str (e.g. ints are accepted).
    header_proto.set_value(str(value))

  request.set_followredirects(follow_redirects)
  if validate_certificate is not None:
    request.set_mustvalidateservercertificate(validate_certificate)

  if rpc.deadline is not None:
    request.set_deadline(rpc.deadline)

  rpc.make_call('Fetch', request, response, _get_fetch_result, allow_truncated)
  return rpc
def _get_fetch_result(rpc):
  """Check success, handle exceptions, and return converted RPC result.

  This method waits for the RPC if it has not yet finished, and calls the
  post-call hooks on the first invocation.

  Args:
    rpc: A UserRPC object whose service is 'urlfetch' and method is 'Fetch'.

  Raises:
    InvalidURLError: if the url was invalid.
    DownloadError: if there was a problem fetching the url.
    ResponseTooLargeError: if the response was either truncated (and
      allow_truncated=False was passed to make_fetch_call()), or if it
      was too big for us to download.

  Returns:
    A _URLFetchResult object.
  """
  assert rpc.service == 'urlfetch', repr(rpc.service)
  assert rpc.method == 'Fetch', repr(rpc.method)
  url = rpc.request.url()
  try:
    rpc.check_success()
  except apiproxy_errors.RequestTooLargeError, err:
    # Oversized request bodies are surfaced as an invalid-URL condition.
    # (Python 2 except syntax; `err` is intentionally unused here.)
    raise InvalidURLError(
        'Request body too large fetching URL: ' + url)
  except apiproxy_errors.ApplicationError, err:
    error_detail = ''
    if err.error_detail:
      error_detail = ' Error: ' + err.error_detail
    # Translate each URLFetchServiceError code into the matching public
    # exception type; any unrecognized code falls through to `raise err`.
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.INVALID_URL):
      raise InvalidURLError(
          'Invalid request URL: ' + url + error_detail)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.CLOSED):
      raise ConnectionClosedError(
          'Connection closed unexpectedly by server at URL: ' + url)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.TOO_MANY_REDIRECTS):
      raise TooManyRedirectsError(
          'Too many redirects at URL: ' + url + ' with redirect=true')
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.MALFORMED_REPLY):
      raise MalformedReplyError(
          'Malformed HTTP reply received from server at URL: '
          + url + error_detail)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.INTERNAL_TRANSIENT_ERROR):
      raise InternalTransientError(
          'Temporary error in fetching URL: ' + url + ', please re-try')
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.DNS_ERROR):
      raise DNSLookupFailedError('DNS lookup failed for URL: ' + url)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.UNSPECIFIED_ERROR):
      raise DownloadError('Unspecified error in fetching URL: '
                          + url + error_detail)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR):
      raise DownloadError("Unable to fetch URL: " + url + error_detail)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.RESPONSE_TOO_LARGE):
      raise ResponseTooLargeError('HTTP response too large from URL: ' + url)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.DEADLINE_EXCEEDED):
      raise DeadlineExceededError(
          'Deadline exceeded while waiting for HTTP response from URL: ' + url)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.SSL_CERTIFICATE_ERROR):
      raise SSLCertificateError(
          'Invalid and/or missing SSL certificate for URL: ' + url)
    if (err.application_error ==
        urlfetch_service_pb.URLFetchServiceError.CONNECTION_ERROR):
      raise DownloadError('Unable to connect to server at URL: ' + url)
    # Unknown application error: re-raise as-is.
    raise err
  response = rpc.response
  # allow_truncated was threaded through rpc.make_call() as user_data.
  allow_truncated = rpc.user_data
  result = _URLFetchResult(response)
  if response.contentwastruncated() and not allow_truncated:
    raise ResponseTooLargeError(result)
  return result
# Capitalized alias so both spellings resolve to the same function.
Fetch = fetch
class _URLFetchResult(object):
  """A Pythonic representation of our fetch response protocol buffer.
  """

  def __init__(self, response_proto):
    """Constructor.

    Args:
      response_proto: the URLFetchResponse proto buffer to wrap.
    """
    # Keep the raw proto around (name-mangled; internal use only).
    self.__pb = response_proto
    # Response body bytes.
    self.content = response_proto.content()
    # Numeric HTTP status code from the response proto.
    self.status_code = response_proto.statuscode()
    # True when the service truncated the response body.
    self.content_was_truncated = response_proto.contentwastruncated()
    # Final URL after any redirects; empty proto value is mapped to None.
    self.final_url = response_proto.finalurl() or None
    # Render headers as "key: value" lines (plus a terminating blank line)
    # and hand them to httplib's message parser.
    self.header_msg = httplib.HTTPMessage(
        StringIO.StringIO(''.join(['%s: %s\n' % (h.key(), h.value())
                                   for h in response_proto.header_list()]
                                  + ['\n'])))
    # Case-insensitive dictionary view of the parsed headers.
    self.headers = _CaselessDict(self.header_msg.items())
def get_default_fetch_deadline():
  """Return the thread-local default for create_rpc()'s deadline (or None)."""
  deadline = getattr(_thread_local_settings, "default_fetch_deadline", None)
  return deadline
def set_default_fetch_deadline(value):
  """Set the default value for create_rpc()'s deadline parameter.

  The setting lives in a thread local, so it is per-thread. No range or
  type checking is performed on *value*; the initial default is None.

  See also: create_rpc(), fetch()
  """
  setattr(_thread_local_settings, "default_fetch_deadline", value)
|
|
"""
Low-level BLAS functions (:mod:`scipy.linalg.blas`)
===================================================
This module contains low-level functions from the BLAS library.
.. versionadded:: 0.12.0
.. note::
The common ``overwrite_<>`` option in many routines, allows the
input arrays to be overwritten to avoid extra memory allocation.
However this requires the array to satisfy two conditions
which are memory order and the data type to match exactly the
order and the type expected by the routine.
As an example, if you pass a double precision float array to any
``S....`` routine which expects single precision arguments, f2py
will create an intermediate array to match the argument types and
overwriting will be performed on that intermediate array.
Similarly, if a C-contiguous array is passed, f2py will pass a
FORTRAN-contiguous array internally. Please make sure that these
details are satisfied. More information can be found in the f2py
documentation.
.. warning::
These functions do little to no error checking.
It is possible to cause crashes by mis-using them,
so prefer using the higher-level routines in `scipy.linalg`.
Finding functions
-----------------
.. autosummary::
:toctree: generated/
get_blas_funcs
find_best_blas_type
BLAS Level 1 functions
----------------------
.. autosummary::
:toctree: generated/
caxpy
ccopy
cdotc
cdotu
crotg
cscal
csrot
csscal
cswap
dasum
daxpy
dcopy
ddot
dnrm2
drot
drotg
drotm
drotmg
dscal
dswap
dzasum
dznrm2
icamax
idamax
isamax
izamax
sasum
saxpy
scasum
scnrm2
scopy
sdot
snrm2
srot
srotg
srotm
srotmg
sscal
sswap
zaxpy
zcopy
zdotc
zdotu
zdrot
zdscal
zrotg
zscal
zswap
BLAS Level 2 functions
----------------------
.. autosummary::
:toctree: generated/
sgbmv
sgemv
sger
ssbmv
sspr
sspr2
ssymv
ssyr
ssyr2
stbmv
stpsv
strmv
strsv
dgbmv
dgemv
dger
dsbmv
dspr
dspr2
dsymv
dsyr
dsyr2
dtbmv
dtpsv
dtrmv
dtrsv
cgbmv
cgemv
cgerc
cgeru
chbmv
chemv
cher
cher2
chpmv
chpr
chpr2
ctbmv
ctbsv
ctpmv
ctpsv
ctrmv
ctrsv
csyr
zgbmv
zgemv
zgerc
zgeru
zhbmv
zhemv
zher
zher2
zhpmv
zhpr
zhpr2
ztbmv
ztbsv
ztpmv
ztrmv
ztrsv
zsyr
BLAS Level 3 functions
----------------------
.. autosummary::
:toctree: generated/
sgemm
ssymm
ssyr2k
ssyrk
strmm
strsm
dgemm
dsymm
dsyr2k
dsyrk
dtrmm
dtrsm
cgemm
chemm
cher2k
cherk
csymm
csyr2k
csyrk
ctrmm
ctrsm
zgemm
zhemm
zher2k
zherk
zsymm
zsyr2k
zsyrk
ztrmm
ztrsm
"""
#
# Author: Pearu Peterson, March 2002
# refactoring by Fabian Pedregosa, March 2010
#
__all__ = ['get_blas_funcs', 'find_best_blas_type']
import numpy as _np
import functools
from scipy.linalg import _fblas
try:
from scipy.linalg import _cblas
except ImportError:
_cblas = None
# Expose all functions (only fblas --- cblas is an implementation detail)
empty_module = None
from scipy.linalg._fblas import *
del empty_module
# all numeric dtypes '?bBhHiIlLqQefdgFDGO' that are safe to be converted to
# single precision float : '?bBhH!!!!!!ef!!!!!!'
# double precision float : '?bBhHiIlLqQefdg!!!!'
# single precision complex : '?bBhH!!!!!!ef!!F!!!'
# double precision complex : '?bBhHiIlLqQefdgFDG!'

# Score 1: dtype chars safely representable in single precision float.
_type_score = {x: 1 for x in '?bBhHef'}
# Score 2: dtype chars that need double precision float.
_type_score.update({x: 2 for x in 'iIlLqQd'})

# Handle float128(g) and complex256(G) separately in case non-Windows systems.
# On Windows, the values will be rewritten to the same key with the same value.
_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4})

# Final mapping to the actual prefixes and dtypes
_type_conv = {1: ('s', _np.dtype('float32')),
              2: ('d', _np.dtype('float64')),
              3: ('c', _np.dtype('complex64')),
              4: ('z', _np.dtype('complex128'))}

# some convenience alias for complex functions
_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2',
               'cdot': 'cdotc', 'zdot': 'zdotc',
               'cger': 'cgerc', 'zger': 'zgerc',
               'sdotc': 'sdot', 'sdotu': 'sdot',
               'ddotc': 'ddot', 'ddotu': 'ddot'}
def find_best_blas_type(arrays=(), dtype=None):
    """Find best-matching BLAS/LAPACK type.

    Arrays are used to determine the optimal prefix of BLAS routines.

    Parameters
    ----------
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of BLAS
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.

    Returns
    -------
    prefix : str
        BLAS/LAPACK prefix character.
    dtype : dtype
        Inferred Numpy data type.
    prefer_fortran : bool
        Whether to prefer Fortran order routines over C order.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy.linalg.blas as bla
    >>> a = np.random.rand(10,15)
    >>> b = np.asfortranarray(a)  # Change the memory layout order
    >>> bla.find_best_blas_type((a,))
    ('d', dtype('float64'), False)
    >>> bla.find_best_blas_type((a*1j,))
    ('z', dtype('complex128'), False)
    >>> bla.find_best_blas_type((b,))
    ('d', dtype('float64'), True)
    """
    dtype = _np.dtype(dtype)
    prefer_fortran = False

    if not arrays:
        # No arrays: score purely from the requested dtype.
        score = _type_score.get(dtype.char, 5)
    elif len(arrays) == 1:
        # Fast path: a single array is the common case.
        only = arrays[0]
        score = _type_score.get(only.dtype.char, 5)
        prefer_fortran = only.flags['FORTRAN']
    else:
        # Use the most generic type among all arrays.
        scores = [_type_score.get(arr.dtype.char, 5) for arr in arrays]
        score = max(scores)
        # Identify the leading array before any score adjustment, mirroring
        # the index lookup semantics of scores.index(max(scores)).
        leader = arrays[scores.index(score)]
        # Safe upcasting for a mix of float64 and complex64 --> prefix 'z'.
        if score == 3 and (2 in scores):
            score = 4
        # Prefer Fortran when the leading array is column-major.
        prefer_fortran = leader.flags['FORTRAN']

    # Unknown scores fall back to 'd' / double precision float.
    prefix, dtype = _type_conv.get(score, ('d', _np.dtype('float64')))

    return prefix, dtype, prefer_fortran
def _get_funcs(names, arrays, dtype,
               lib_name, fmodule, cmodule,
               fmodule_name, cmodule_name, alias):
    """
    Return available BLAS/LAPACK functions.

    Used also in lapack.py. See get_blas_funcs for docstring.
    """
    # A bare string means the caller wants a single function back, not a list.
    unpack = isinstance(names, str)
    if unpack:
        names = (names,)

    dtype = _np.dtype(dtype)
    prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype)

    # Search order: C module first, unless Fortran order is preferred.
    lookup_order = [(cmodule, cmodule_name), (fmodule, fmodule_name)]
    if prefer_fortran:
        lookup_order.reverse()

    funcs = []
    for name in names:
        func_name = prefix + name
        func_name = alias.get(func_name, func_name)

        func = None
        module_name = None
        for module, module_name in lookup_order:
            func = getattr(module, func_name, None)
            if func is not None:
                break
        if func is None:
            raise ValueError(
                '%s function %s could not be found' % (lib_name, func_name))

        # Annotate the f2py function object with lookup metadata.
        func.module_name, func.typecode = module_name, prefix
        func.dtype = dtype
        func.prefix = prefix  # Backward compatibility
        funcs.append(func)

    return funcs[0] if unpack else funcs
def _memoize_get_funcs(func):
"""
Memoized fast path for _get_funcs instances
"""
memo = {}
func.memo = memo
@functools.wraps(func)
def getter(names, arrays=(), dtype=None):
key = (names, dtype)
for array in arrays:
# cf. find_blas_funcs
key += (array.dtype.char, array.flags.fortran)
try:
value = memo.get(key)
except TypeError:
# unhashable key etc.
key = None
value = None
if value is not None:
return value
value = func(names, arrays, dtype)
if key is not None:
memo[key] = value
return value
return getter
@_memoize_get_funcs
def get_blas_funcs(names, arrays=(), dtype=None):
    """Return available BLAS function objects from names.

    Arrays are used to determine the optimal prefix of BLAS routines.

    Parameters
    ----------
    names : str or sequence of str
        Name(s) of BLAS functions without type prefix.
    arrays : sequence of ndarrays, optional
        Arrays can be given to determine optimal prefix of BLAS
        routines. If not given, double-precision routines will be
        used, otherwise the most generic type in arrays will be used.
    dtype : str or dtype, optional
        Data-type specifier. Not used if `arrays` is non-empty.

    Returns
    -------
    funcs : list
        List containing the found function(s).

    Notes
    -----
    This routine automatically chooses between Fortran/C interfaces:
    Fortran code is used whenever possible for arrays with column major
    order; in all other cases, C code is preferred.

    In BLAS, the naming convention is that all functions start with a
    type prefix, which depends on the type of the principal matrix.
    These can be one of {'s', 'd', 'c', 'z'} for the NumPy types
    {float32, float64, complex64, complex128} respectively.

    The code and the dtype are stored in attributes `typecode` and
    `dtype` of the returned functions.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy.linalg as LA
    >>> a = np.random.rand(3,2)
    >>> x_gemv = LA.get_blas_funcs('gemv', (a,))
    >>> x_gemv.typecode
    'd'
    >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,))
    >>> x_gemv.typecode
    'z'
    """
    blas_funcs = _get_funcs(names, arrays, dtype,
                            "BLAS", _fblas, _cblas, "fblas", "cblas",
                            _blas_alias)
    return blas_funcs
|
|
"""The tests for MQTT device triggers."""
import json
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.mqtt import DOMAIN, debug_info
from homeassistant.components.mqtt.device_trigger import async_attach_trigger
from homeassistant.helpers import device_registry as dr
from homeassistant.setup import async_setup_component
from tests.common import (
assert_lists_same,
async_fire_mqtt_message,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Provide an empty, loaded device registry."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide an empty, loaded entity registry."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Record calls made to the mocked test.automation service."""
    service_calls = async_mock_service(hass, "test", "automation")
    return service_calls
async def test_get_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test we get the expected triggers from a discovered mqtt device."""
    # Discovery payload announcing one device trigger for device 0AFFD2.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
    await hass.async_block_till_done()
    # Discovery should have created a device registry entry for 0AFFD2.
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla",
            "type": "button_short_press",
            "subtype": "button_1",
        },
    ]
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, expected_triggers)
async def test_get_unknown_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test we don't get unknown triggers."""
    # Discover a sensor (without device triggers)
    data1 = (
        '{ "device":{"identifiers":["0AFFD2"]},'
        ' "state_topic": "foobar/sensor",'
        ' "unique_id": "unique" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})

    # Set up an automation referencing a trigger that was never discovered.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
            ]
        },
    )

    # Only the sensor was discovered, so the device exposes no triggers.
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, [])
async def test_get_non_existing_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test getting non existing triggers."""
    # Discover a sensor (without device triggers)
    data1 = (
        '{ "device":{"identifiers":["0AFFD2"]},'
        ' "state_topic": "foobar/sensor",'
        ' "unique_id": "unique" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    # A device with no discovered triggers must report an empty list.
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, [])
@pytest.mark.no_fail_on_log_exception
async def test_discover_bad_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test bad discovery message."""
    # Test sending bad data ("payloads" instead of "payload"); the config
    # must be rejected and no device created.
    data0 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payloads": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data0)
    await hass.async_block_till_done()
    assert device_reg.async_get_device({("mqtt", "0AFFD2")}) is None

    # Test sending correct data
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
    await hass.async_block_till_done()
    # The valid payload creates the device and exposes its trigger.
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    expected_triggers = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla",
            "type": "button_short_press",
            "subtype": "button_1",
        },
    ]
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, expected_triggers)
async def test_update_remove_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test triggers can be updated and removed."""
    # Two versions of the same discovery topic; data2 only changes subtype.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    data2 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    expected_triggers1 = [
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla",
            "type": "button_short_press",
            "subtype": "button_1",
        },
    ]
    # Expected after the update: identical except for the subtype.
    expected_triggers2 = [dict(expected_triggers1[0])]
    expected_triggers2[0]["subtype"] = "button_2"

    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, expected_triggers1)

    # Update trigger
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data2)
    await hass.async_block_till_done()
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert_lists_same(triggers, expected_triggers2)

    # Remove trigger: an empty payload deletes the config, and the device
    # registry entry disappears with it.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", "")
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert device_entry is None
async def test_if_fires_on_mqtt_message(hass, device_reg, calls, mqtt_mock):
    """Test triggers firing."""
    # Discover two triggers on the same device: short press and long press.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    data2 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "long_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_long_press",'
        ' "subtype": "button_2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
                {
                    # NOTE(review): type/subtype here do not match the bla2
                    # discovery payload, yet the long-press assertions below
                    # still pass — matching appears keyed on discovery_id.
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla2",
                        "type": "button_1",
                        "subtype": "button_long_press",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("long_press")},
                    },
                },
            ]
        },
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "short_press"

    # Fake long press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "long_press")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_template(hass, device_reg, calls, mqtt_mock):
    """Test triggers firing when the discovery payload contains templates."""
    # Templated configs: the topic template resolves to .../button4 and the
    # payload templates to "short_press"/"long_press"; value_template extracts
    # the "button" key from the incoming JSON.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        " \"payload\": \"{{ 'foo_press'|regex_replace('foo', 'short') }}\","
        ' "topic": "foobar/triggers/button{{ sqrt(16)|round }}",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1",'
        ' "value_template": "{{ value_json.button }}"}'
    )
    data2 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        " \"payload\": \"{{ 'foo_press'|regex_replace('foo', 'long') }}\","
        ' "topic": "foobar/triggers/button{{ sqrt(16)|round }}",'
        ' "type": "button_long_press",'
        ' "subtype": "button_2",'
        ' "value_template": "{{ value_json.button }}"}'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla2",
                        "type": "button_1",
                        "subtype": "button_long_press",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("long_press")},
                    },
                },
            ]
        },
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button4", '{"button":"short_press"}')
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "short_press"

    # Fake long press.
    async_fire_mqtt_message(hass, "foobar/triggers/button4", '{"button":"long_press"}')
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_late_discover(
    hass, device_reg, calls, mqtt_mock
):
    """Test triggers firing of MQTT device triggers discovered after setup."""
    # First discover only a sensor so the device registry entry exists.
    data0 = (
        '{ "device":{"identifiers":["0AFFD2"]},'
        ' "state_topic": "foobar/sensor",'
        ' "unique_id": "unique" }'
    )
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    data2 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "long_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_long_press",'
        ' "subtype": "button_2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})

    # Attach the automations before the triggers themselves are discovered.
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla2",
                        "type": "button_1",
                        "subtype": "button_long_press",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("long_press")},
                    },
                },
            ]
        },
    )

    # Late discovery of the two triggers the automations reference.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
    await hass.async_block_till_done()

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "short_press"

    # Fake long press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "long_press")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "long_press"
async def test_if_fires_on_mqtt_message_after_update(
    hass, device_reg, calls, mqtt_mock
):
    """Test triggers firing after update."""
    # data2 moves the trigger to a different topic (buttonOne).
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    data2 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "topic": "foobar/triggers/buttonOne",'
        ' "type": "button_long_press",'
        ' "subtype": "button_2" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
            ]
        },
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Update the trigger with different topic: the old topic no longer
    # fires, the new one does.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data2)
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
    await hass.async_block_till_done()
    assert len(calls) == 1

    async_fire_mqtt_message(hass, "foobar/triggers/buttonOne", "")
    await hass.async_block_till_done()
    assert len(calls) == 2

    # Update the trigger with same topic: behavior is unchanged.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data2)
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "foobar/triggers/button1", "")
    await hass.async_block_till_done()
    assert len(calls) == 2

    async_fire_mqtt_message(hass, "foobar/triggers/buttonOne", "")
    await hass.async_block_till_done()
    assert len(calls) == 3
async def test_no_resubscribe_same_topic(hass, device_reg, mqtt_mock):
    """Test subscription to topics without change."""
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
            ]
        },
    )

    # Re-sending the identical config must not create a new subscription.
    call_count = mqtt_mock.async_subscribe.call_count
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    assert mqtt_mock.async_subscribe.call_count == call_count
async def test_not_fires_on_mqtt_message_after_remove_by_mqtt(
    hass, device_reg, calls, mqtt_mock
):
    """Test triggers not firing after removal."""
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
            ]
        },
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Remove the trigger via an empty discovery payload: no further firing.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Rediscover the trigger: firing resumes.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 2
async def test_not_fires_on_mqtt_message_after_remove_from_registry(
    hass, device_reg, calls, mqtt_mock
):
    """Test triggers not firing after removal."""
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {
                        "platform": "device",
                        "domain": DOMAIN,
                        "device_id": device_entry.id,
                        "discovery_id": "bla1",
                        "type": "button_short_press",
                        "subtype": "button_1",
                    },
                    "action": {
                        "service": "test.automation",
                        "data_template": {"some": ("short_press")},
                    },
                },
            ]
        },
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Remove the device from the registry directly (not via MQTT);
    # the trigger must stop firing.
    device_reg.async_remove_device(device_entry.id)
    await hass.async_block_till_done()

    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_attach_remove(hass, device_reg, mqtt_mock):
    """Test attach and removal of trigger."""
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})

    calls = []

    def callback(trigger):
        # Record the MQTT payload that fired the trigger.
        calls.append(trigger["trigger"]["payload"])

    # Attach the trigger directly via the platform API; the returned
    # callable detaches it again.
    remove = await async_attach_trigger(
        hass,
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla1",
            "type": "button_short_press",
            "subtype": "button_1",
        },
        callback,
        None,
    )

    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0] == "short_press"

    # Remove the trigger
    remove()
    await hass.async_block_till_done()

    # Verify the triggers are no longer active
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_attach_remove_late(hass, device_reg, mqtt_mock):
    """Test attaching a trigger before its discovery message arrives.

    A sensor discovery first creates the device registry entry; the trigger
    is attached next, and only afterwards is the device trigger itself
    discovered. The late-discovered trigger must still become active.
    """
    # Sensor discovery payload: only creates the device registry entry.
    data0 = (
        '{ "device":{"identifiers":["0AFFD2"]},'
        ' "state_topic": "foobar/sensor",'
        ' "unique_id": "unique" }'
    )
    # Device trigger discovery payload for the same device.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    calls = []
    def callback(trigger):
        # Record the MQTT payload that fired the trigger.
        calls.append(trigger["trigger"]["payload"])
    # Attach while the trigger is still undiscovered; it must activate later.
    remove = await async_attach_trigger(
        hass,
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla1",
            "type": "button_short_press",
            "subtype": "button_1",
        },
        callback,
        None,
    )
    # Now discover the trigger itself.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    # Fake short press.
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0] == "short_press"
    # Remove the trigger
    remove()
    await hass.async_block_till_done()
    # Verify the triggers are no longer active
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_attach_remove_late2(hass, device_reg, mqtt_mock):
    """Test detaching a trigger before its discovery message arrives.

    The trigger is attached and then removed while still undiscovered;
    the later discovery must not re-activate it, so no calls are recorded.
    """
    # Sensor discovery payload: only creates the device registry entry.
    data0 = (
        '{ "device":{"identifiers":["0AFFD2"]},'
        ' "state_topic": "foobar/sensor",'
        ' "unique_id": "unique" }'
    )
    # Device trigger discovery payload for the same device.
    data1 = (
        '{ "automation_type":"trigger",'
        ' "device":{"identifiers":["0AFFD2"]},'
        ' "payload": "short_press",'
        ' "topic": "foobar/triggers/button1",'
        ' "type": "button_short_press",'
        ' "subtype": "button_1" }'
    )
    async_fire_mqtt_message(hass, "homeassistant/sensor/bla0/config", data0)
    await hass.async_block_till_done()
    device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")})
    calls = []
    def callback(trigger):
        # Record the MQTT payload that fired the trigger.
        calls.append(trigger["trigger"]["payload"])
    remove = await async_attach_trigger(
        hass,
        {
            "platform": "device",
            "domain": DOMAIN,
            "device_id": device_entry.id,
            "discovery_id": "bla1",
            "type": "button_short_press",
            "subtype": "button_1",
        },
        callback,
        None,
    )
    # Remove the trigger
    remove()
    await hass.async_block_till_done()
    # Discovery after detach must not re-activate the trigger.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    # Verify the triggers are no longer active
    async_fire_mqtt_message(hass, "foobar/triggers/button1", "short_press")
    await hass.async_block_till_done()
    assert len(calls) == 0
async def test_entity_device_info_with_connection(hass, mqtt_mock):
    """Test that trigger discovery fills the device registry via connections."""
    registry = dr.async_get(hass)
    discovery_payload = json.dumps(
        {
            "automation_type": "trigger",
            "topic": "test-topic",
            "type": "foo",
            "subtype": "bar",
            "device": {
                "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
                "manufacturer": "Whatever",
                "name": "Beer",
                "model": "Glass",
                "sw_version": "0.1-beta",
            },
        }
    )
    async_fire_mqtt_message(
        hass, "homeassistant/device_automation/bla/config", discovery_payload
    )
    await hass.async_block_till_done()
    # Look the device up by its connection tuple rather than identifiers.
    entry = registry.async_get_device(
        set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
    )
    assert entry is not None
    assert entry.connections == {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
    assert entry.manufacturer == "Whatever"
    assert entry.name == "Beer"
    assert entry.model == "Glass"
    assert entry.sw_version == "0.1-beta"
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
    """Test that trigger discovery fills the device registry via identifiers."""
    registry = dr.async_get(hass)
    discovery_payload = json.dumps(
        {
            "automation_type": "trigger",
            "topic": "test-topic",
            "type": "foo",
            "subtype": "bar",
            "device": {
                "identifiers": ["helloworld"],
                "manufacturer": "Whatever",
                "name": "Beer",
                "model": "Glass",
                "sw_version": "0.1-beta",
            },
        }
    )
    async_fire_mqtt_message(
        hass, "homeassistant/device_automation/bla/config", discovery_payload
    )
    await hass.async_block_till_done()
    # Look the device up by its (domain, identifier) tuple.
    entry = registry.async_get_device({("mqtt", "helloworld")})
    assert entry is not None
    assert entry.identifiers == {("mqtt", "helloworld")}
    assert entry.manufacturer == "Whatever"
    assert entry.name == "Beer"
    assert entry.model == "Glass"
    assert entry.sw_version == "0.1-beta"
async def test_entity_device_info_update(hass, mqtt_mock):
    """Test device registry update.

    Re-sending the discovery payload with changed device info must update
    the existing registry entry rather than create a new one.
    """
    registry = dr.async_get(hass)
    config = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {
            "identifiers": ["helloworld"],
            "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
    }
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Beer"
    # Re-discover on the same topic with a changed device name.
    config["device"]["name"] = "Milk"
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device({("mqtt", "helloworld")})
    assert device is not None
    assert device.name == "Milk"
async def test_cleanup_trigger(hass, device_reg, entity_reg, mqtt_mock):
    """Test trigger discovery topic is cleaned when device is removed from registry."""
    config = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {"identifiers": ["helloworld"]},
    }
    data = json.dumps(config)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", data)
    await hass.async_block_till_done()
    # Verify device registry entry is created
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert triggers[0]["type"] == "foo"
    device_reg.async_remove_device(device_entry.id)
    await hass.async_block_till_done()
    # NOTE(review): second wait presumably lets a scheduled discovery-cleanup
    # task finish before asserting on its effects — confirm it is required.
    await hass.async_block_till_done()
    # Verify device registry entry is cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is None
    # Verify retained discovery topic has been cleared
    # (an empty retained payload removes the config from the broker).
    mqtt_mock.async_publish.assert_called_once_with(
        "homeassistant/device_automation/bla/config", "", 0, True
    )
async def test_cleanup_device(hass, device_reg, entity_reg, mqtt_mock):
    """Test that the device registry entry is deleted with its only trigger."""
    discovery_payload = json.dumps(
        {
            "automation_type": "trigger",
            "topic": "test-topic",
            "type": "foo",
            "subtype": "bar",
            "device": {"identifiers": ["helloworld"]},
        }
    )
    async_fire_mqtt_message(
        hass, "homeassistant/device_automation/bla/config", discovery_payload
    )
    await hass.async_block_till_done()
    # Discovery must have created the device registry entry.
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert triggers[0]["type"] == "foo"
    # An empty retained payload removes the discovered trigger ...
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla/config", "")
    await hass.async_block_till_done()
    # ... and with its last item gone the device entry is removed too.
    assert device_reg.async_get_device({("mqtt", "helloworld")}) is None
async def test_cleanup_device_several_triggers(hass, device_reg, entity_reg, mqtt_mock):
    """Test removal from device registry when the last trigger is removed."""
    config1 = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {"identifiers": ["helloworld"]},
    }
    # Second trigger on the same device, differing only in type.
    config2 = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo2",
        "subtype": "bar",
        "device": {"identifiers": ["helloworld"]},
    }
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data2)
    await hass.async_block_till_done()
    # Verify device registry entry is created
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 2
    assert triggers[0]["type"] == "foo"
    assert triggers[1]["type"] == "foo2"
    # Remove the first trigger (empty retained payload).
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is not cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 1
    assert triggers[0]["type"] == "foo2"
    # Remove the second (last) trigger as well.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is None
async def test_cleanup_device_with_entity1(hass, device_reg, entity_reg, mqtt_mock):
    """Test removal from device registry for device with entity.
    Trigger removed first, then entity.
    """
    # Device trigger on the shared device.
    config1 = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {"identifiers": ["helloworld"]},
    }
    # Binary sensor entity on the same device.
    config2 = {
        "name": "test_binary_sensor",
        "state_topic": "test-topic",
        "device": {"identifiers": ["helloworld"]},
        "unique_id": "veryunique",
    }
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", data2)
    await hass.async_block_till_done()
    # Verify device registry entry is created
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 3  # 2 binary_sensor triggers + device trigger
    # Remove the device trigger first.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is not cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 2  # 2 binary_sensor triggers
    # Remove the entity; device now has no items left.
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is None
async def test_cleanup_device_with_entity2(hass, device_reg, entity_reg, mqtt_mock):
    """Test removal from device registry for device with entity.
    Entity removed first, then trigger.
    """
    # Device trigger on the shared device.
    config1 = {
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {"identifiers": ["helloworld"]},
    }
    # Binary sensor entity on the same device.
    config2 = {
        "name": "test_binary_sensor",
        "state_topic": "test-topic",
        "device": {"identifiers": ["helloworld"]},
        "unique_id": "veryunique",
    }
    data1 = json.dumps(config1)
    data2 = json.dumps(config2)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data1)
    await hass.async_block_till_done()
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", data2)
    await hass.async_block_till_done()
    # Verify device registry entry is created
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 3  # 2 binary_sensor triggers + device trigger
    # Remove the entity first.
    async_fire_mqtt_message(hass, "homeassistant/binary_sensor/bla2/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is not cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is not None
    triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
    assert len(triggers) == 1  # device trigger
    # Remove the trigger; device now has no items left.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
    await hass.async_block_till_done()
    # Verify device registry entry is cleared
    device_entry = device_reg.async_get_device({("mqtt", "helloworld")})
    assert device_entry is None
async def test_trigger_debug_info(hass, mqtt_mock):
    """Test debug_info.
    Verify discovered device triggers are reported by the MQTT debug_info
    helper and disappear from it when a trigger is removed.
    """
    registry = dr.async_get(hass)
    config1 = {
        "platform": "mqtt",
        "automation_type": "trigger",
        "topic": "test-topic",
        "type": "foo",
        "subtype": "bar",
        "device": {
            "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
            "manufacturer": "Whatever",
            "name": "Beer",
            "model": "Glass",
            "sw_version": "0.1-beta",
        },
    }
    # Second trigger on the same device (matched via the MAC connection).
    config2 = {
        "platform": "mqtt",
        "automation_type": "trigger",
        "topic": "test-topic2",
        "type": "foo",
        "subtype": "bar",
        "device": {
            "connections": [[dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12"]],
        },
    }
    data = json.dumps(config1)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", data)
    data = json.dumps(config2)
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla2/config", data)
    await hass.async_block_till_done()
    device = registry.async_get_device(
        set(), {(dr.CONNECTION_NETWORK_MAC, "02:5b:26:a8:dc:12")}
    )
    assert device is not None
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"]) == 0
    assert len(debug_info_data["triggers"]) == 2
    # Map each discovery topic to its config so the checks below do not
    # depend on the order debug_info reports the two triggers in.
    topic_map = {
        "homeassistant/device_automation/bla1/config": config1,
        "homeassistant/device_automation/bla2/config": config2,
    }
    assert (
        topic_map[debug_info_data["triggers"][0]["discovery_data"]["topic"]]
        != topic_map[debug_info_data["triggers"][1]["discovery_data"]["topic"]]
    )
    assert (
        debug_info_data["triggers"][0]["discovery_data"]["payload"]
        == topic_map[debug_info_data["triggers"][0]["discovery_data"]["topic"]]
    )
    assert (
        debug_info_data["triggers"][1]["discovery_data"]["payload"]
        == topic_map[debug_info_data["triggers"][1]["discovery_data"]["topic"]]
    )
    # Remove the first trigger; only the second must remain in debug_info.
    async_fire_mqtt_message(hass, "homeassistant/device_automation/bla1/config", "")
    await hass.async_block_till_done()
    debug_info_data = await debug_info.info_for_device(hass, device.id)
    assert len(debug_info_data["entities"]) == 0
    assert len(debug_info_data["triggers"]) == 1
    assert (
        debug_info_data["triggers"][0]["discovery_data"]["topic"]
        == "homeassistant/device_automation/bla2/config"
    )
    assert debug_info_data["triggers"][0]["discovery_data"]["payload"] == config2
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from webob import exc
from senlin.api.common import util
from senlin.api.middleware import fault
from senlin.api.openstack.v1 import cluster_policies as cp_mod
from senlin.common import exception as senlin_exc
from senlin.common import policy
from senlin.rpc import client as rpc_client
from senlin.tests.unit.api import shared
from senlin.tests.unit.common import base
@mock.patch.object(policy, 'enforce')
class ClusterPolicyControllerTest(shared.ControllerTest, base.SenlinTestCase):
    """Tests the API class which acts as the WSGI controller."""
    def setUp(self):
        """Build a ClusterPolicyController with minimal dummy options."""
        super(ClusterPolicyControllerTest, self).setUp()
        # Create WSGI controller instance
        class DummyConfig(object):
            bind_port = 8777
        cfgopts = DummyConfig()
        self.controller = cp_mod.ClusterPolicyController(options=cfgopts)
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index(self, mock_call, mock_parse, mock_enforce):
        """index() returns the engine reply under 'cluster_policies'."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'test_cluster'
        req = self._get('/cluster_policies/%s' % cid)
        engine_resp = [
            {
                'id': 'fake_id',
                'cluster_id': 'fake cluster id',
                'policy_id': 'fake policy id',
                'enabled': True,
                'data': {},
                'cluster_name': 'test_cluster',
                'policy_name': 'test_policy',
                'policy_type': 'ScalingPolicy',
            }
        ]
        mock_call.return_value = engine_resp
        obj = mock.Mock()
        mock_parse.return_value = obj
        result = self.controller.index(req, cluster_id=cid)
        self.assertEqual(engine_resp, result['cluster_policies'])
        mock_parse.assert_called_once_with(
            'ClusterPolicyListRequest', req, mock.ANY)
        mock_call.assert_called_once_with(
            req.context, 'cluster_policy_list', obj)
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index_with_params(self, mock_call, mock_parse,
                                              mock_enforce):
        """Query params are normalized and forwarded to parse_request."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'FAKE_CLUSTER'
        params = {
            'sort': 'enabled',
            'enabled': 'True',
        }
        req = self._get('/cluster_policies/%s' % cid, params=params)
        mock_call.return_value = []
        obj = mock.Mock()
        mock_parse.return_value = obj
        result = self.controller.index(req, cluster_id=cid)
        self.assertEqual([], result['cluster_policies'])
        # Note: the string 'True' must arrive as a real boolean.
        mock_parse.assert_called_once_with(
            'ClusterPolicyListRequest', req,
            {
                'sort': 'enabled',
                'enabled': True,
                'identity': 'FAKE_CLUSTER'
            })
        mock_call.assert_called_once_with(
            req.context, 'cluster_policy_list', obj)
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index_invalid_params(self, mock_call,
                                                 mock_parse,
                                                 mock_enforce):
        """Unknown query parameters are rejected before parse/RPC."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'FAKE_CLUSTER'
        params = {
            'enabled': 'True',
            'balrog': 'you shall not pass!'
        }
        req = self._get('/cluster_policies/%s' % cid, params=params)
        ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index,
                               req, cluster_id=cid)
        self.assertEqual('Invalid parameter balrog',
                         str(ex))
        self.assertEqual(0, mock_parse.call_count)
        self.assertEqual(0, mock_call.call_count)
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_index_invalid_sort(self, mock_call,
                                               mock_parse, mock_enforce):
        """A parse_request failure propagates and skips the RPC call."""
        self._mock_enforce_setup(mock_enforce, 'index', True)
        cid = 'FAKE_CLUSTER'
        params = {
            'enabled': 'True',
            'sort': 'bad sort'
        }
        req = self._get('/cluster_policies/%s' % cid, params=params)
        mock_parse.side_effect = exc.HTTPBadRequest("bad sort")
        ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index,
                               req, cluster_id=cid)
        self.assertEqual("bad sort", str(ex))
        mock_parse.assert_called_once_with(
            'ClusterPolicyListRequest', req, mock.ANY)
        self.assertEqual(0, mock_call.call_count)
    def test_cluster_policy_index_denied_policy(self, mock_enforce):
        """index() is denied with 403 when policy enforcement fails."""
        self._mock_enforce_setup(mock_enforce, 'index', False)
        cid = 'FAKE_CLUSTER'
        req = self._get('/cluster_policy/%s' % cid)
        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.index,
                                              req, cluster_id=cid)
        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_get_success(self, mock_call,
                                        mock_parse, mock_enforce):
        """get() returns the engine reply under 'cluster_policy'."""
        self._mock_enforce_setup(mock_enforce, 'get', True)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})
        engine_resp = {
            'id': 'fake_id',
            'cluster_id': cid,
            'policy_id': pid,
            'enabled': True,
            'data': {},
            'cluster_name': 'test_cluster',
            'policy_name': 'test_policy',
            'policy_type': 'ScalingPolicy',
        }
        obj = mock.Mock()
        mock_parse.return_value = obj
        mock_call.return_value = engine_resp
        response = self.controller.get(req, cluster_id=cid, policy_id=pid)
        self.assertEqual(engine_resp, response['cluster_policy'])
        mock_parse.assert_called_once_with(
            'ClusterPolicyGetRequest', req,
            {
                'identity': cid,
                'policy_id': pid
            })
        mock_call.assert_called_once_with(
            req.context, 'cluster_policy_get', obj)
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_cluster_policy_get_not_found(self, mock_call,
                                          mock_parse, mock_enforce):
        """A missing binding surfaces as a 404 PolicyBindingNotFound."""
        self._mock_enforce_setup(mock_enforce, 'get', True)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})
        error = senlin_exc.PolicyBindingNotFound(policy=pid, identity=cid)
        mock_call.side_effect = shared.to_remote_error(error)
        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, cluster_id=cid,
                                              policy_id=pid)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('PolicyBindingNotFound', resp.json['error']['type'])
        mock_parse.assert_called_once_with(
            'ClusterPolicyGetRequest', mock.ANY,
            {
                'identity': 'FAKE_CLUSTER',
                'policy_id': 'FAKE_POLICY'
            })
        mock_call.assert_called_once_with(
            req.context, 'cluster_policy_get', mock.ANY)
    def test_action_get_denied_policy(self, mock_enforce):
        """get() is denied with 403 when policy enforcement fails."""
        self._mock_enforce_setup(mock_enforce, 'get', False)
        cid = 'FAKE_CLUSTER'
        pid = 'FAKE_POLICY'
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})
        resp = shared.request_with_middleware(fault.FaultWrapper,
                                              self.controller.get,
                                              req, cluster_id=cid,
                                              policy_id=pid)
        self.assertEqual(403, resp.status_int)
        self.assertIn('403 Forbidden', str(resp))
    @mock.patch.object(util, 'parse_request')
    @mock.patch.object(rpc_client.EngineClient, 'call')
    def test_action_get_bad_params(self, mock_call, mock_parse,
                                   mock_enforce):
        """A parse_request failure in get() propagates and skips the RPC."""
        self._mock_enforce_setup(mock_enforce, 'get', True)
        cid = 'FAKE_CLUSTER'
        pid = ['Fake']
        req = self._get('/cluster_policies/%(cid)s/%(pid)s'
                        '' % {'cid': cid, 'pid': pid})
        mock_parse.side_effect = exc.HTTPBadRequest("bad param")
        ex = self.assertRaises(exc.HTTPBadRequest, self.controller.get,
                               req, cluster_id=cid, policy_id=pid)
        self.assertEqual("bad param", str(ex))
        mock_parse.assert_called_once_with(
            'ClusterPolicyGetRequest', req,
            {
                'identity': cid,
                'policy_id': pid
            })
        self.assertEqual(0, mock_call.call_count)
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test of Policy Engine For Nova."""
import os.path
import mock
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
import requests_mock
from nova import context
from nova import exception
from nova.policies import servers as servers_policy
from nova import policy
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import fake_policy
from nova import utils
class PolicyFileTestCase(test.NoDBTestCase):
    """Tests that policy rules are re-read from the policy file on change."""
    def setUp(self):
        super(PolicyFileTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.target = {}
    def test_modified_policy_reloads(self):
        """Rewriting the policy file and reloading flips the rule outcome."""
        with utils.tempdir() as tmpdir:
            tmpfilename = os.path.join(tmpdir, 'policy')
            self.flags(policy_file=tmpfilename, group='oslo_policy')
            # NOTE(uni): context construction invokes policy check to determine
            # is_admin or not. As a side-effect, policy reset is needed here
            # to flush existing policy cache.
            policy.reset()
            # NOTE(gmann): We do not need to log policy warnings for unit
            # tests.
            policy.init(suppress_deprecation_warnings=True)
            rule = oslo_policy.RuleDefault('example:test', "")
            policy._ENFORCER.register_defaults([rule])
            action = "example:test"
            # Empty rule string: everyone is authorized.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write('{"example:test": ""}')
            policy.authorize(self.context, action, self.target)
            # "!" rule: nobody is authorized after the rules are reloaded.
            with open(tmpfilename, "w") as policyfile:
                policyfile.write('{"example:test": "!"}')
            policy._ENFORCER.load_rules(True)
            self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                              self.context, action, self.target)
class PolicyTestCase(test.NoDBTestCase):
    """Tests of nova.policy.authorize against registered example rules."""
    def setUp(self):
        super(PolicyTestCase, self).setUp()
        # Example rules covering allow-all ('@'), deny-all ('!'), HTTP
        # checks, templated targets and role checks.
        rules = [
            oslo_policy.RuleDefault("true", '@'),
            oslo_policy.RuleDefault("example:allowed", '@'),
            oslo_policy.RuleDefault("example:denied", "!"),
            oslo_policy.RuleDefault("old_action_not_default", "@"),
            oslo_policy.RuleDefault("new_action", "@"),
            oslo_policy.RuleDefault("old_action_default", "rule:admin_api"),
            oslo_policy.RuleDefault("example:get_http",
                                    "http://www.example.com"),
            oslo_policy.RuleDefault("example:my_file",
                                    "role:compute_admin or "
                                    "project_id:%(project_id)s"),
            oslo_policy.RuleDefault("example:early_and_fail", "! and @"),
            oslo_policy.RuleDefault("example:early_or_success", "@ or !"),
            oslo_policy.RuleDefault("example:lowercase_admin",
                                    "role:admin or role:sysadmin"),
            oslo_policy.RuleDefault("example:uppercase_admin",
                                    "role:ADMIN or role:sysadmin"),
        ]
        policy.reset()
        # NOTE(gmann): We do not need to log policy warnings for unit
        # tests.
        policy.init(suppress_deprecation_warnings=True)
        # before a policy rule can be used, its default has to be registered.
        policy._ENFORCER.register_defaults(rules)
        self.context = context.RequestContext('fake', 'fake', roles=['member'])
        self.target = {}
    def test_authorize_nonexistent_action_throws(self):
        """An unregistered action raises PolicyNotRegistered."""
        action = "example:noexist"
        self.assertRaises(oslo_policy.PolicyNotRegistered, policy.authorize,
                          self.context, action, self.target)
    def test_authorize_bad_action_throws(self):
        """A deny-all rule raises PolicyNotAuthorized by default."""
        action = "example:denied"
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, self.target)
    def test_authorize_bad_action_noraise(self):
        """With do_raise=False a denied action returns False instead."""
        action = "example:denied"
        result = policy.authorize(self.context, action, self.target, False)
        self.assertFalse(result)
    def test_authorize_good_action(self):
        """An allow-all rule authorizes and returns True."""
        action = "example:allowed"
        result = policy.authorize(self.context, action, self.target)
        self.assertTrue(result)
    @requests_mock.mock()
    def test_authorize_http_true(self, req_mock):
        """An HTTP check passes when the endpoint answers 'True'."""
        req_mock.post('http://www.example.com/',
                      text='True')
        action = "example:get_http"
        target = {}
        result = policy.authorize(self.context, action, target)
        self.assertTrue(result)
    @requests_mock.mock()
    def test_authorize_http_false(self, req_mock):
        """An HTTP check fails when the endpoint answers 'False'."""
        req_mock.post('http://www.example.com/',
                      text='False')
        action = "example:get_http"
        target = {}
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, target)
    def test_templatized_authorization(self):
        """A project_id-templated rule only matches the caller's project."""
        target_mine = {'project_id': 'fake'}
        target_not_mine = {'project_id': 'another'}
        action = "example:my_file"
        policy.authorize(self.context, action, target_mine)
        # check we fallback to context.project_id
        # TODO(johngarbutt): longer term we need to remove this and make
        # the target a required param.
        policy.authorize(self.context, action)
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, target_not_mine)
    def test_early_AND_authorization(self):
        """'! and @' denies: AND fails as soon as one side fails."""
        action = "example:early_and_fail"
        self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                          self.context, action, self.target)
    def test_early_OR_authorization(self):
        """'@ or !' allows: OR succeeds as soon as one side passes."""
        action = "example:early_or_success"
        policy.authorize(self.context, action, self.target)
    def test_ignore_case_role_check(self):
        """Role matching is case-insensitive."""
        lowercase_action = "example:lowercase_admin"
        uppercase_action = "example:uppercase_admin"
        # NOTE(dprince) we mix case in the Admin role here to ensure
        # case is ignored
        admin_context = context.RequestContext('admin',
                                               'fake',
                                               roles=['AdMiN'])
        policy.authorize(admin_context, lowercase_action, self.target)
        policy.authorize(admin_context, uppercase_action, self.target)
    @mock.patch.object(policy.LOG, 'warning')
    def test_warning_when_deprecated_user_based_rule_used(self, mock_warning):
        """A user_id-based rule on a non-exempt policy logs a warning."""
        policy._warning_for_deprecated_user_based_rules(
            [("os_compute_api:servers:index",
              "project_id:%(project_id)s or user_id:%(user_id)s")])
        mock_warning.assert_called_once_with(
            u"The user_id attribute isn't supported in the rule "
            "'%s'. All the user_id based policy enforcement will be removed "
            "in the future.", "os_compute_api:servers:index")
    @mock.patch.object(policy.LOG, 'warning')
    def test_no_warning_for_user_based_resource(self, mock_warning):
        """user_id rules on user-owned resources (keypairs) do not warn."""
        policy._warning_for_deprecated_user_based_rules(
            [("os_compute_api:os-keypairs:index",
              "user_id:%(user_id)s")])
        mock_warning.assert_not_called()
    @mock.patch.object(policy.LOG, 'warning')
    def test_no_warning_for_no_user_based_rule(self, mock_warning):
        """Rules without user_id do not warn."""
        policy._warning_for_deprecated_user_based_rules(
            [("os_compute_api:servers:index",
              "project_id:%(project_id)s")])
        mock_warning.assert_not_called()
    @requests_mock.mock()
    def test_authorize_raise_invalid_scope(self, req_mock):
        """oslo InvalidScope is translated to PolicyNotAuthorized."""
        req_mock.post('http://www.example.com/',
                      text='False')
        action = "example:get_http"
        target = {}
        with mock.patch('oslo_policy.policy.Enforcer.authorize') as auth_mock:
            auth_mock.side_effect = oslo_policy.InvalidScope(
                action, self.context.system_scope, 'invalid_scope')
            exc = self.assertRaises(exception.PolicyNotAuthorized,
                                    policy.authorize, self.context,
                                    action, target)
            self.assertEqual(
                "Policy doesn't allow %s to be performed." % action,
                exc.format_message())
    @mock.patch.object(policy.LOG, 'warning')
    def test_verify_deprecated_policy_using_old_action(self, mock_warning):
        """Using a non-default old action warns and reports True."""
        old_policy = "old_action_not_default"
        new_policy = "new_action"
        default_rule = "rule:admin_api"
        using_old_action = policy.verify_deprecated_policy(
            old_policy, new_policy, default_rule, self.context)
        mock_warning.assert_called_once_with("Start using the new "
            "action '%(new_policy)s'. The existing action '%(old_policy)s' "
            "is being deprecated and will be removed in future release.",
            {'new_policy': new_policy, 'old_policy': old_policy})
        self.assertTrue(using_old_action)
    def test_verify_deprecated_policy_using_new_action(self):
        """An old action still at its default reports False (new in use)."""
        old_policy = "old_action_default"
        new_policy = "new_action"
        default_rule = "rule:admin_api"
        using_old_action = policy.verify_deprecated_policy(
            old_policy, new_policy, default_rule, self.context)
        self.assertFalse(using_old_action)
class IsAdminCheckTestCase(test.NoDBTestCase):
    """Tests of the custom is_admin policy check and check_is_admin."""
    def setUp(self):
        super(IsAdminCheckTestCase, self).setUp()
        # NOTE(gmann): We do not need to log policy warnings for unit
        # tests.
        policy.init(suppress_deprecation_warnings=True)
    def test_init_true(self):
        """'True' match string yields expected=True."""
        check = policy.IsAdminCheck('is_admin', 'True')
        self.assertEqual(check.kind, 'is_admin')
        self.assertEqual(check.match, 'True')
        self.assertTrue(check.expected)
    def test_init_false(self):
        """Any non-'True' match string is normalized to 'False'."""
        check = policy.IsAdminCheck('is_admin', 'nottrue')
        self.assertEqual(check.kind, 'is_admin')
        self.assertEqual(check.match, 'False')
        self.assertFalse(check.expected)
    def test_call_true(self):
        """expected=True matches only credentials with is_admin=True."""
        check = policy.IsAdminCheck('is_admin', 'True')
        self.assertTrue(check('target', dict(is_admin=True),
                              policy._ENFORCER))
        self.assertFalse(check('target', dict(is_admin=False),
                               policy._ENFORCER))
    def test_call_false(self):
        """expected=False matches only credentials with is_admin=False."""
        check = policy.IsAdminCheck('is_admin', 'False')
        self.assertFalse(check('target', dict(is_admin=True),
                               policy._ENFORCER))
        self.assertTrue(check('target', dict(is_admin=False),
                              policy._ENFORCER))
    def test_check_is_admin(self):
        """check_is_admin delegates to the enforcer's context_is_admin rule."""
        ctxt = context.RequestContext(
            user_id='fake-user', project_id='fake-project')
        with mock.patch('oslo_policy.policy.Enforcer.authorize') as mock_auth:
            result = policy.check_is_admin(ctxt)
        self.assertEqual(mock_auth.return_value, result)
        mock_auth.assert_called_once_with(
            'context_is_admin',
            {'user_id': 'fake-user', 'project_id': 'fake-project'},
            ctxt)
class AdminRolePolicyTestCase(test.NoDBTestCase):
    """Tests that admin-only actions reject a non-admin context."""
    def setUp(self):
        super(AdminRolePolicyTestCase, self).setUp()
        self.policy = self.useFixture(nova_fixtures.RoleBasedPolicyFixture())
        self.context = context.RequestContext('fake', 'fake', roles=['member'])
        # The fixture's rules make every registered action admin-only.
        self.actions = policy.get_rules().keys()
        self.target = {}
    def test_authorize_admin_actions_with_nonadmin_context_throws(self):
        """Check if non-admin context passed to admin actions throws
           Policy not authorized exception
        """
        for action in self.actions:
            self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                              self.context, action, self.target)
class RealRolePolicyTestCase(test.NoDBTestCase):
    """Check every rule in the real (sample) policy file against admin and
    non-admin request contexts, grouped by the access level each rule
    is expected to require.
    """

    def setUp(self):
        """Build admin/non-admin contexts and the expected rule groupings."""
        super(RealRolePolicyTestCase, self).setUp()
        self.policy = self.useFixture(nova_fixtures.RealPolicyFixture())
        self.non_admin_context = context.RequestContext('fake', 'fake',
                                                        roles=['member'])
        self.admin_context = context.RequestContext('fake', 'fake', True,
                                                    roles=['admin', 'member'])
        self.target = {}
        self.fake_policy = jsonutils.loads(fake_policy.policy_data)
        # Rules that only an admin context may pass.
        self.admin_only_rules = (
            "compute:aggregates:images",
            "compute:server:topology:host:index",
            "network:attach_external_network",
            "os_compute_api:servers:create:forced_host",
            "compute:servers:create:requested_destination",
            "os_compute_api:servers:detail:get_all_tenants",
            "os_compute_api:servers:index:get_all_tenants",
            "os_compute_api:servers:allow_all_filters",
            "os_compute_api:servers:show:host_status",
            "os_compute_api:servers:show:host_status:unknown-only",
            "os_compute_api:servers:migrations:force_complete",
            "os_compute_api:servers:migrations:delete",
            "os_compute_api:os-admin-actions:inject_network_info",
            "os_compute_api:os-admin-actions:reset_state",
            "os_compute_api:os-aggregates:index",
            "os_compute_api:os-aggregates:create",
            "os_compute_api:os-aggregates:show",
            "os_compute_api:os-aggregates:update",
            "os_compute_api:os-aggregates:delete",
            "os_compute_api:os-aggregates:add_host",
            "os_compute_api:os-aggregates:remove_host",
            "os_compute_api:os-aggregates:set_metadata",
            "os_compute_api:os-evacuate",
            "os_compute_api:os-extended-server-attributes",
            "os_compute_api:os-flavor-access:remove_tenant_access",
            "os_compute_api:os-flavor-access:add_tenant_access",
            "os_compute_api:os-flavor-extra-specs:create",
            "os_compute_api:os-flavor-extra-specs:update",
            "os_compute_api:os-flavor-extra-specs:delete",
            "os_compute_api:os-flavor-manage:create",
            "os_compute_api:os-flavor-manage:update",
            "os_compute_api:os-flavor-manage:delete",
            "os_compute_api:os-hosts:update",
            "os_compute_api:os-hosts:reboot",
            "os_compute_api:os-hosts:shutdown",
            "os_compute_api:os-hosts:start",
            "os_compute_api:os-instance-actions:events",
            "os_compute_api:os-lock-server:unlock:unlock_override",
            "os_compute_api:os-migrate-server:migrate",
            "os_compute_api:os-migrate-server:migrate_live",
            "os_compute_api:os-quota-sets:update",
            "os_compute_api:os-quota-sets:delete",
            "os_compute_api:os-server-diagnostics",
            "os_compute_api:os-server-groups:index:all_projects",
            "os_compute_api:os-services:update",
            "os_compute_api:os-services:delete",
            "os_compute_api:os-shelve:shelve_offload",
            "os_compute_api:os-availability-zone:detail",
            "os_compute_api:os-assisted-volume-snapshots:create",
            "os_compute_api:os-assisted-volume-snapshots:delete",
            "os_compute_api:os-console-auth-tokens",
            "os_compute_api:os-quota-class-sets:update",
            "os_compute_api:os-server-external-events:create",
            "os_compute_api:os-volumes-attachments:swap",
            "os_compute_api:servers:create:zero_disk_flavor",
            "os_compute_api:os-baremetal-nodes:list",
            "os_compute_api:os-baremetal-nodes:show",
            "os_compute_api:servers:migrations:index",
            "os_compute_api:servers:migrations:show",
            "os_compute_api:os-simple-tenant-usage:list",
            "os_compute_api:os-migrations:index",
            "os_compute_api:os-services:list",
            "os_compute_api:os-instance-actions:events:details",
            "os_compute_api:os-instance-usage-audit-log:list",
            "os_compute_api:os-instance-usage-audit-log:show",
            "os_compute_api:os-hosts:list",
            "os_compute_api:os-hosts:show",
            "os_compute_api:os-hypervisors:list",
            "os_compute_api:os-hypervisors:list-detail",
            "os_compute_api:os-hypervisors:show",
            "os_compute_api:os-hypervisors:statistics",
            "os_compute_api:os-hypervisors:uptime",
            "os_compute_api:os-hypervisors:search",
            "os_compute_api:os-hypervisors:servers",
            "os_compute_api:limits:other_project",
        )
        # Rules that pass for an admin, or for a non-admin whose
        # project/user matches the target.
        self.admin_or_owner_rules = (
            "compute:server:topology:index",
            "os_compute_api:servers:start",
            "os_compute_api:servers:stop",
            "os_compute_api:servers:trigger_crash_dump",
            "os_compute_api:os-create-backup",
            "os_compute_api:ips:index",
            "os_compute_api:ips:show",
            "os_compute_api:os-keypairs:create",
            "os_compute_api:os-keypairs:delete",
            "os_compute_api:os-keypairs:index",
            "os_compute_api:os-keypairs:show",
            "os_compute_api:os-lock-server:lock",
            "os_compute_api:os-lock-server:unlock",
            "os_compute_api:os-pause-server:pause",
            "os_compute_api:os-pause-server:unpause",
            "os_compute_api:os-quota-sets:show",
            "os_compute_api:os-quota-sets:detail",
            "os_compute_api:server-metadata:index",
            "os_compute_api:server-metadata:show",
            "os_compute_api:server-metadata:delete",
            "os_compute_api:server-metadata:create",
            "os_compute_api:server-metadata:update",
            "os_compute_api:server-metadata:update_all",
            "os_compute_api:os-suspend-server:suspend",
            "os_compute_api:os-suspend-server:resume",
            "os_compute_api:servers:confirm_resize",
            "os_compute_api:servers:create",
            "os_compute_api:servers:create:attach_network",
            "os_compute_api:servers:create:attach_volume",
            "os_compute_api:servers:create:trusted_certs",
            "os_compute_api:servers:create_image",
            "os_compute_api:servers:delete",
            "os_compute_api:servers:detail",
            "os_compute_api:servers:index",
            "os_compute_api:servers:reboot",
            "os_compute_api:servers:rebuild",
            "os_compute_api:servers:rebuild:trusted_certs",
            "os_compute_api:servers:resize",
            "os_compute_api:servers:revert_resize",
            "os_compute_api:servers:show",
            "os_compute_api:servers:show:flavor-extra-specs",
            "os_compute_api:servers:update",
            "os_compute_api:servers:create_image:allow_volume_backed",
            "os_compute_api:os-admin-password",
            "os_compute_api:os-attach-interfaces:create",
            "os_compute_api:os-attach-interfaces:delete",
            "os_compute_api:os-console-output",
            "os_compute_api:os-remote-consoles",
            "os_compute_api:os-deferred-delete:restore",
            "os_compute_api:os-deferred-delete:force",
            "os_compute_api:os-flavor-access",
            "os_compute_api:os-flavor-extra-specs:index",
            "os_compute_api:os-flavor-extra-specs:show",
            "os_compute_api:os-floating-ips:add",
            "os_compute_api:os-floating-ips:remove",
            "os_compute_api:os-floating-ips:create",
            "os_compute_api:os-floating-ips:delete",
            "os_compute_api:os-multinic:add",
            "os_compute_api:os-multinic:remove",
            "os_compute_api:os-rescue",
            "os_compute_api:os-unrescue",
            "os_compute_api:os-security-groups:create",
            "os_compute_api:os-security-groups:update",
            "os_compute_api:os-security-groups:delete",
            "os_compute_api:os-security-groups:rule:create",
            "os_compute_api:os-security-groups:rule:delete",
            "os_compute_api:os-security-groups:add",
            "os_compute_api:os-security-groups:remove",
            "os_compute_api:os-server-password:clear",
            "os_compute_api:os-server-tags:delete",
            "os_compute_api:os-server-tags:delete_all",
            "os_compute_api:os-server-tags:update",
            "os_compute_api:os-server-tags:update_all",
            "os_compute_api:os-server-groups:index",
            "os_compute_api:os-server-groups:show",
            "os_compute_api:os-server-groups:create",
            "os_compute_api:os-server-groups:delete",
            "os_compute_api:os-shelve:shelve",
            "os_compute_api:os-shelve:unshelve",
            "os_compute_api:os-volumes:create",
            "os_compute_api:os-volumes:delete",
            "os_compute_api:os-volumes:snapshots:create",
            "os_compute_api:os-volumes:snapshots:delete",
            "os_compute_api:os-volumes-attachments:create",
            "os_compute_api:os-volumes-attachments:delete",
            "os_compute_api:os-volumes-attachments:update",
            "os_compute_api:os-simple-tenant-usage:show",
            "os_compute_api:os-security-groups:get",
            "os_compute_api:os-security-groups:show",
            "os_compute_api:os-security-groups:list",
            "os_compute_api:os-volumes-attachments:index",
            "os_compute_api:os-volumes-attachments:show",
            "os_compute_api:os-attach-interfaces:list",
            "os_compute_api:os-attach-interfaces:show",
            "os_compute_api:os-instance-actions:list",
            "os_compute_api:os-instance-actions:show",
            "os_compute_api:os-server-password:show",
            "os_compute_api:os-server-tags:index",
            "os_compute_api:os-server-tags:show",
            "os_compute_api:os-floating-ips:list",
            "os_compute_api:os-floating-ips:show",
            "os_compute_api:os-volumes:list",
            "os_compute_api:os-volumes:detail",
            "os_compute_api:os-volumes:show",
            "os_compute_api:os-volumes:snapshots:show",
            "os_compute_api:os-volumes:snapshots:list",
            "os_compute_api:os-volumes:snapshots:detail",
            "os_compute_api:os-networks:list",
            "os_compute_api:os-networks:show",
            "os_compute_api:os-tenant-networks:list",
            "os_compute_api:os-tenant-networks:show",
        )
        # Rules any caller may pass, even unprivileged ones.
        self.allow_all_rules = (
            "os_compute_api:os-quota-sets:defaults",
            "os_compute_api:os-availability-zone:list",
            "os_compute_api:limits",
            "os_compute_api:extensions",
            "os_compute_api:os-floating-ip-pools",
        )
        # Rules that nobody — not even an admin — may pass.
        self.allow_nobody_rules = (
            servers_policy.CROSS_CELL_RESIZE,
        )

    def test_all_rules_in_sample_file(self):
        """Every rule in the sample policy data must be registered."""
        special_rules = ["context_is_admin", "admin_or_owner", "default"]
        for (name, rule) in self.fake_policy.items():
            if name in special_rules:
                continue
            self.assertIn(name, policy.get_rules())

    def test_admin_only_rules(self):
        """Admin-only rules reject non-admins and accept admins."""
        for rule in self.admin_only_rules:
            self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                              self.non_admin_context, rule,
                              {'project_id': 'fake', 'user_id': 'fake'})
            policy.authorize(self.admin_context, rule, self.target)

    def test_admin_or_owner_rules(self):
        """Owner rules reject mismatched targets and accept the owner."""
        for rule in self.admin_or_owner_rules:
            # Empty target -> no project/user match -> denied.
            self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                              self.non_admin_context, rule, self.target)
            policy.authorize(self.non_admin_context, rule,
                             {'project_id': 'fake', 'user_id': 'fake'})

    def test_allow_all_rules(self):
        """Open rules must accept a plain non-admin context."""
        for rule in self.allow_all_rules:
            policy.authorize(self.non_admin_context, rule, self.target)

    def test_allow_nobody_rules(self):
        """No one can perform these operations, not even admin."""
        for rule in self.allow_nobody_rules:
            self.assertRaises(exception.PolicyNotAuthorized, policy.authorize,
                              self.admin_context, rule, self.target)

    def test_rule_missing(self):
        """Every registered rule must appear in exactly one group above."""
        rules = policy.get_rules()
        # eliqiao os_compute_api:os-quota-class-sets:show requires
        # admin=True or quota_class match, this rule won't belong to
        # admin_only, non_admin, admin_or_user, empty_rule
        special_rules = ('admin_api', 'admin_or_owner', 'context_is_admin',
                         'os_compute_api:os-quota-class-sets:show',
                         'project_admin_api', 'project_member_api',
                         'project_reader_api', 'project_reader_or_admin')
        result = set(rules.keys()) - set(self.admin_only_rules +
                                         self.admin_or_owner_rules +
                                         self.allow_all_rules +
                                         self.allow_nobody_rules + special_rules)
        self.assertEqual(set([]), result)
|
|
"""The Synology DSM component."""
from __future__ import annotations

import asyncio
from datetime import timedelta
import logging
from typing import Any

import async_timeout
from synology_dsm import SynologyDSM
from synology_dsm.api.core.security import SynoCoreSecurity
from synology_dsm.api.core.system import SynoCoreSystem
from synology_dsm.api.core.upgrade import SynoCoreUpgrade
from synology_dsm.api.core.utilization import SynoCoreUtilization
from synology_dsm.api.dsm.information import SynoDSMInformation
from synology_dsm.api.dsm.network import SynoDSMNetwork
from synology_dsm.api.storage.storage import SynoStorage
from synology_dsm.api.surveillance_station import SynoSurveillanceStation
from synology_dsm.exceptions import (
    SynologyDSMAPIErrorException,
    SynologyDSMLoginFailedException,
    SynologyDSMRequestException,
)
import voluptuous as vol

from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
    ATTR_ATTRIBUTION,
    CONF_DISKS,
    CONF_HOST,
    CONF_MAC,
    CONF_PASSWORD,
    CONF_PORT,
    CONF_SCAN_INTERVAL,
    CONF_SSL,
    CONF_TIMEOUT,
    CONF_USERNAME,
    CONF_VERIFY_SSL,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
    CoordinatorEntity,
    DataUpdateCoordinator,
    UpdateFailed,
)

from .const import (
    CONF_DEVICE_TOKEN,
    CONF_SERIAL,
    CONF_VOLUMES,
    COORDINATOR_CAMERAS,
    COORDINATOR_CENTRAL,
    COORDINATOR_SWITCHES,
    DEFAULT_SCAN_INTERVAL,
    DEFAULT_USE_SSL,
    DEFAULT_VERIFY_SSL,
    DOMAIN,
    ENTITY_CLASS,
    ENTITY_ENABLE,
    ENTITY_ICON,
    ENTITY_NAME,
    ENTITY_UNIT,
    PLATFORMS,
    SERVICE_REBOOT,
    SERVICE_SHUTDOWN,
    SERVICES,
    STORAGE_DISK_BINARY_SENSORS,
    STORAGE_DISK_SENSORS,
    STORAGE_VOL_SENSORS,
    SYNO_API,
    SYSTEM_LOADED,
    UNDO_UPDATE_LISTENER,
    UTILISATION_SENSORS,
)
# Schema for a single DSM device entry in the legacy YAML configuration.
# Kept private: only the top-level CONFIG_SCHEMA below is the module's API.
# (Previously this intermediate schema was also named CONFIG_SCHEMA and was
# immediately shadowed by the second assignment, which obscured intent.)
_DSM_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Optional(CONF_PORT): cv.port,
        vol.Optional(CONF_SSL, default=DEFAULT_USE_SSL): cv.boolean,
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_DISKS): cv.ensure_list,
        vol.Optional(CONF_VOLUMES): cv.ensure_list,
    }
)

# Top-level schema: the integration accepts a list of DSM device entries.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.Schema(vol.All(cv.ensure_list, [_DSM_SCHEMA]))},
    extra=vol.ALLOW_EXTRA,
)
ATTRIBUTION = "Data provided by Synology"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
    """Set up Synology DSM sensors from legacy config file."""
    conf = config.get(DOMAIN)
    if conf is None:
        # Nothing configured via YAML; config entries handle the rest.
        return True
    # Forward each YAML device entry through the import flow so it
    # becomes a proper config entry.
    for device_conf in conf:
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data=device_conf
            )
        )
    return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Set up Synology DSM sensors.

    Performs, in order: unique_id migration for old registry entries,
    config-entry migration (CONF_VERIFY_SSL default), API login, service
    registration, MAC discovery for SSDP, creation of the three update
    coordinators (cameras, central, switches), and platform forwarding.
    """
    # Migrate old unique_id
    @callback
    def _async_migrator(entity_entry: entity_registry.RegistryEntry):
        """Migrate away from ID using label."""
        # Reject if new unique_id
        if "SYNO." in entity_entry.unique_id:
            return None
        # Union of every sensor description keyed by entity type.
        entries = {
            **STORAGE_DISK_BINARY_SENSORS,
            **STORAGE_DISK_SENSORS,
            **STORAGE_VOL_SENSORS,
            **UTILISATION_SENSORS,
        }
        # Old unique_id layout: "<serial>_<label>[_<device_id parts>]".
        infos = entity_entry.unique_id.split("_")
        serial = infos.pop(0)
        label = infos.pop(0)
        device_id = "_".join(infos)

        # Removed entity
        if (
            "Type" in entity_entry.unique_id
            or "Device" in entity_entry.unique_id
            or "Name" in entity_entry.unique_id
        ):
            return None

        entity_type = None
        for entity_key, entity_attrs in entries.items():
            # Disambiguate "Status" entities: a disk device_id contains
            # "sd", a volume device_id contains "volume".
            if (
                device_id
                and entity_attrs[ENTITY_NAME] == "Status"
                and "Status" in entity_entry.unique_id
                and "(Smart)" not in entity_entry.unique_id
            ):
                if "sd" in device_id and "disk" in entity_key:
                    entity_type = entity_key
                    continue
                if "volume" in device_id and "volume" in entity_key:
                    entity_type = entity_key
                    continue
            if entity_attrs[ENTITY_NAME] == label:
                entity_type = entity_key

        # NOTE(review): if no description matched, entity_type is still
        # None here and the join below would raise TypeError — presumably
        # unreachable for real registry entries; confirm.
        new_unique_id = "_".join([serial, entity_type])
        if device_id:
            new_unique_id += f"_{device_id}"

        _LOGGER.info(
            "Migrating unique_id from [%s] to [%s]",
            entity_entry.unique_id,
            new_unique_id,
        )
        return {"new_unique_id": new_unique_id}

    await entity_registry.async_migrate_entries(hass, entry.entry_id, _async_migrator)

    # Migrate existing entry configuration
    if entry.data.get(CONF_VERIFY_SSL) is None:
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, CONF_VERIFY_SSL: DEFAULT_VERIFY_SSL}
        )

    # Continue setup
    api = SynoApi(hass, entry)
    try:
        await api.async_setup()
    except (SynologyDSMLoginFailedException, SynologyDSMRequestException) as err:
        _LOGGER.debug(
            "Unable to connect to DSM '%s' during setup: %s", entry.unique_id, err
        )
        # ConfigEntryNotReady makes HA retry the setup later.
        raise ConfigEntryNotReady from err

    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN][entry.unique_id] = {
        UNDO_UPDATE_LISTENER: entry.add_update_listener(_async_update_listener),
        SYNO_API: api,
        SYSTEM_LOADED: True,
    }

    # Services
    await _async_setup_services(hass)

    # For SSDP compat
    if not entry.data.get(CONF_MAC):
        network = await hass.async_add_executor_job(getattr, api.dsm, "network")
        hass.config_entries.async_update_entry(
            entry, data={**entry.data, CONF_MAC: network.macs}
        )

    async def async_coordinator_update_data_cameras():
        """Fetch all camera data from api."""
        if not hass.data[DOMAIN][entry.unique_id][SYSTEM_LOADED]:
            raise UpdateFailed("System not fully loaded")

        if SynoSurveillanceStation.CAMERA_API_KEY not in api.dsm.apis:
            return None

        surveillance_station = api.surveillance_station

        try:
            async with async_timeout.timeout(10):
                await hass.async_add_executor_job(surveillance_station.update)
        except SynologyDSMAPIErrorException as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err

        return {
            "cameras": {
                camera.id: camera for camera in surveillance_station.get_all_cameras()
            }
        }

    async def async_coordinator_update_data_central():
        """Fetch all device and sensor data from api."""
        try:
            await api.async_update()
        except Exception as err:
            raise UpdateFailed(f"Error communicating with API: {err}") from err
        return None

    async def async_coordinator_update_data_switches():
        """Fetch all switch data from api."""
        if not hass.data[DOMAIN][entry.unique_id][SYSTEM_LOADED]:
            raise UpdateFailed("System not fully loaded")

        if SynoSurveillanceStation.HOME_MODE_API_KEY not in api.dsm.apis:
            return None

        surveillance_station = api.surveillance_station

        return {
            "switches": {
                "home_mode": await hass.async_add_executor_job(
                    surveillance_station.get_home_mode_status
                )
            }
        }

    # Cameras and switches poll every 30s; the central coordinator uses
    # the user-configurable scan interval (minutes).
    hass.data[DOMAIN][entry.unique_id][COORDINATOR_CAMERAS] = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"{entry.unique_id}_cameras",
        update_method=async_coordinator_update_data_cameras,
        update_interval=timedelta(seconds=30),
    )

    hass.data[DOMAIN][entry.unique_id][COORDINATOR_CENTRAL] = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"{entry.unique_id}_central",
        update_method=async_coordinator_update_data_central,
        update_interval=timedelta(
            minutes=entry.options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
        ),
    )

    hass.data[DOMAIN][entry.unique_id][COORDINATOR_SWITCHES] = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name=f"{entry.unique_id}_switches",
        update_method=async_coordinator_update_data_switches,
        update_interval=timedelta(seconds=30),
    )

    for platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, platform)
        )

    return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
    """Unload Synology DSM sensors."""
    # Unload every platform concurrently; success only if all succeed.
    results = await asyncio.gather(
        *[
            hass.config_entries.async_forward_entry_unload(entry, platform)
            for platform in PLATFORMS
        ]
    )
    unload_ok = all(results)
    if unload_ok:
        entry_data = hass.data[DOMAIN][entry.unique_id]
        # Detach the options listener, log out from the NAS, drop state.
        entry_data[UNDO_UPDATE_LISTENER]()
        await entry_data[SYNO_API].async_unload()
        hass.data[DOMAIN].pop(entry.unique_id)
    return unload_ok
async def _async_update_listener(hass: HomeAssistantType, entry: ConfigEntry):
    """Reload the config entry whenever its options change."""
    await hass.config_entries.async_reload(entry.entry_id)
async def _async_setup_services(hass: HomeAssistantType):
    """Service handler setup."""

    async def service_handler(call: ServiceCall):
        """Route a reboot/shutdown call to the targeted NAS."""
        serial = call.data.get(CONF_SERIAL)
        dsm_devices = hass.data[DOMAIN]

        # Resolve which device the call targets: explicit serial,
        # or the single configured device.
        if serial:
            dsm_device = dsm_devices.get(serial)
        elif len(dsm_devices) == 1:
            serial = next(iter(dsm_devices))
            dsm_device = dsm_devices[serial]
        else:
            _LOGGER.error(
                "More than one DSM configured, must specify one of serials %s",
                sorted(dsm_devices),
            )
            return

        if not dsm_device:
            _LOGGER.error("DSM with specified serial %s not found", serial)
            return

        _LOGGER.debug("%s DSM with serial %s", call.service, serial)
        dsm_api = dsm_device[SYNO_API]
        # Flag the system as unloaded so coordinators stop polling.
        dsm_device[SYSTEM_LOADED] = False
        if call.service == SERVICE_REBOOT:
            await dsm_api.async_reboot()
        elif call.service == SERVICE_SHUTDOWN:
            await dsm_api.async_shutdown()

    for service in SERVICES:
        hass.services.async_register(DOMAIN, service, service_handler)
class SynoApi:
    """Class to interface with Synology DSM API.

    Wraps a SynologyDSM client and tracks, per DSM sub-API, whether any
    entity currently needs its data, so unused APIs are not polled.
    """

    def __init__(self, hass: HomeAssistantType, entry: ConfigEntry):
        """Initialize the API wrapper class."""
        self._hass = hass
        self._entry = entry

        # DSM APIs — populated by _fetch_device_configuration() after login.
        self.dsm: SynologyDSM = None
        self.information: SynoDSMInformation = None
        self.network: SynoDSMNetwork = None
        self.security: SynoCoreSecurity = None
        self.storage: SynoStorage = None
        self.surveillance_station: SynoSurveillanceStation = None
        self.system: SynoCoreSystem = None
        self.upgrade: SynoCoreUpgrade = None
        self.utilisation: SynoCoreUtilization = None

        # Should we fetch them — all True until entities subscribe,
        # then _async_setup_api_requests() narrows the set.
        self._fetching_entities = {}
        self._with_information = True
        self._with_security = True
        self._with_storage = True
        self._with_surveillance_station = True
        self._with_system = True
        self._with_upgrade = True
        self._with_utilisation = True

    async def async_setup(self):
        """Start interacting with the NAS: login and initial fetch."""
        self.dsm = SynologyDSM(
            self._entry.data[CONF_HOST],
            self._entry.data[CONF_PORT],
            self._entry.data[CONF_USERNAME],
            self._entry.data[CONF_PASSWORD],
            self._entry.data[CONF_SSL],
            self._entry.data[CONF_VERIFY_SSL],
            timeout=self._entry.options.get(CONF_TIMEOUT),
            device_token=self._entry.data.get(CONF_DEVICE_TOKEN),
        )
        await self._hass.async_add_executor_job(self.dsm.login)

        # check if surveillance station is used
        self._with_surveillance_station = bool(
            self.dsm.apis.get(SynoSurveillanceStation.CAMERA_API_KEY)
        )
        _LOGGER.debug(
            "State of Surveillance_station during setup of '%s': %s",
            self._entry.unique_id,
            self._with_surveillance_station,
        )

        self._async_setup_api_requests()

        await self._hass.async_add_executor_job(self._fetch_device_configuration)
        await self.async_update()

    @callback
    def subscribe(self, api_key, unique_id):
        """Subscribe an entity to API fetches; returns an unsubscribe callback."""
        _LOGGER.debug("Subscribe new entity: %s", unique_id)
        if api_key not in self._fetching_entities:
            self._fetching_entities[api_key] = set()
        self._fetching_entities[api_key].add(unique_id)

        @callback
        def unsubscribe() -> None:
            """Unsubscribe an entity from API fetches (when disable)."""
            _LOGGER.debug("Unsubscribe entity: %s", unique_id)
            self._fetching_entities[api_key].remove(unique_id)
            if len(self._fetching_entities[api_key]) == 0:
                self._fetching_entities.pop(api_key)

        return unsubscribe

    @callback
    def _async_setup_api_requests(self):
        """Determine if we should fetch each API, if one entity needs it."""
        # Entities not added yet, fetch all
        if not self._fetching_entities:
            _LOGGER.debug(
                "Entities not added yet, fetch all for '%s'", self._entry.unique_id
            )
            return

        # surveillance_station is updated by own coordinator
        self.dsm.reset(self.surveillance_station)

        # Determine if we should fetch an API.  Note: system is keyed on
        # availability in dsm.apis, not on entity subscriptions.
        self._with_system = bool(self.dsm.apis.get(SynoCoreSystem.API_KEY))
        self._with_security = bool(
            self._fetching_entities.get(SynoCoreSecurity.API_KEY)
        )
        self._with_storage = bool(self._fetching_entities.get(SynoStorage.API_KEY))
        self._with_upgrade = bool(self._fetching_entities.get(SynoCoreUpgrade.API_KEY))
        self._with_utilisation = bool(
            self._fetching_entities.get(SynoCoreUtilization.API_KEY)
        )
        self._with_information = bool(
            self._fetching_entities.get(SynoDSMInformation.API_KEY)
        )

        # Reset not used API, information is not reset since it's used in device_info
        if not self._with_security:
            _LOGGER.debug(
                "Disable security api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.dsm.reset(self.security)
            self.security = None

        if not self._with_storage:
            # NOTE: fixed garbled log message ("updatedf or" -> "updated for").
            _LOGGER.debug(
                "Disable storage api from being updated for '%s'", self._entry.unique_id
            )
            self.dsm.reset(self.storage)
            self.storage = None

        if not self._with_system:
            _LOGGER.debug(
                "Disable system api from being updated for '%s'", self._entry.unique_id
            )
            self.dsm.reset(self.system)
            self.system = None

        if not self._with_upgrade:
            _LOGGER.debug(
                "Disable upgrade api from being updated for '%s'", self._entry.unique_id
            )
            self.dsm.reset(self.upgrade)
            self.upgrade = None

        if not self._with_utilisation:
            _LOGGER.debug(
                "Disable utilisation api from being updated for '%s'",
                self._entry.unique_id,
            )
            self.dsm.reset(self.utilisation)
            self.utilisation = None

    def _fetch_device_configuration(self):
        """Fetch initial device config (blocking; run in executor)."""
        self.information = self.dsm.information
        self.network = self.dsm.network
        self.network.update()

        if self._with_security:
            _LOGGER.debug("Enable security api updates for '%s'", self._entry.unique_id)
            self.security = self.dsm.security

        if self._with_storage:
            _LOGGER.debug("Enable storage api updates for '%s'", self._entry.unique_id)
            self.storage = self.dsm.storage

        if self._with_upgrade:
            _LOGGER.debug("Enable upgrade api updates for '%s'", self._entry.unique_id)
            self.upgrade = self.dsm.upgrade

        if self._with_system:
            _LOGGER.debug("Enable system api updates for '%s'", self._entry.unique_id)
            self.system = self.dsm.system

        if self._with_utilisation:
            _LOGGER.debug(
                "Enable utilisation api updates for '%s'", self._entry.unique_id
            )
            self.utilisation = self.dsm.utilisation

        if self._with_surveillance_station:
            _LOGGER.debug(
                "Enable surveillance_station api updates for '%s'",
                self._entry.unique_id,
            )
            self.surveillance_station = self.dsm.surveillance_station

    async def async_reboot(self):
        """Reboot NAS."""
        try:
            await self._hass.async_add_executor_job(self.system.reboot)
        except (SynologyDSMLoginFailedException, SynologyDSMRequestException) as err:
            # Best-effort: log and keep the integration alive.
            _LOGGER.error(
                "Reboot of '%s' not possible, please try again later",
                self._entry.unique_id,
            )
            _LOGGER.debug("Exception:%s", err)

    async def async_shutdown(self):
        """Shutdown NAS."""
        try:
            await self._hass.async_add_executor_job(self.system.shutdown)
        except (SynologyDSMLoginFailedException, SynologyDSMRequestException) as err:
            # Best-effort: log and keep the integration alive.
            _LOGGER.error(
                "Shutdown of '%s' not possible, please try again later",
                self._entry.unique_id,
            )
            _LOGGER.debug("Exception:%s", err)

    async def async_unload(self):
        """Stop interacting with the NAS and prepare for removal from hass."""
        try:
            await self._hass.async_add_executor_job(self.dsm.logout)
        except (SynologyDSMAPIErrorException, SynologyDSMRequestException) as err:
            _LOGGER.debug(
                "Logout from '%s' not possible:%s", self._entry.unique_id, err
            )

    async def async_update(self, now=None):
        """Update function for updating API information."""
        _LOGGER.debug("Start data update for '%s'", self._entry.unique_id)
        self._async_setup_api_requests()
        try:
            await self._hass.async_add_executor_job(
                self.dsm.update, self._with_information
            )
        except (SynologyDSMLoginFailedException, SynologyDSMRequestException) as err:
            # Connection lost: reload the whole entry to re-login cleanly.
            _LOGGER.warning(
                "Connection error during update, fallback by reloading the entry"
            )
            _LOGGER.debug(
                "Connection error during update of '%s' with exception: %s",
                self._entry.unique_id,
                err,
            )
            await self._hass.config_entries.async_reload(self._entry.entry_id)
            return
class SynologyDSMBaseEntity(CoordinatorEntity):
    """Representation of a Synology NAS entry."""

    def __init__(
        self,
        api: SynoApi,
        entity_type: str,
        entity_info: dict[str, Any],
        coordinator: DataUpdateCoordinator,
    ):
        """Initialize the Synology DSM entity.

        entity_type is "<API key>:<attribute>"; entity_info is the
        description dict (name/class/icon/unit/enable-default values,
        hence dict[str, Any] rather than dict[str, str]).
        """
        super().__init__(coordinator)
        self._api = api
        self._api_key = entity_type.split(":")[0]
        self.entity_type = entity_type.split(":")[-1]
        self._name = f"{api.network.hostname} {entity_info[ENTITY_NAME]}"
        self._class = entity_info[ENTITY_CLASS]
        self._enable_default = entity_info[ENTITY_ENABLE]
        self._icon = entity_info[ENTITY_ICON]
        self._unit = entity_info[ENTITY_UNIT]
        self._unique_id = f"{self._api.information.serial}_{entity_type}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name."""
        return self._name

    @property
    def icon(self) -> str:
        """Return the icon."""
        return self._icon

    @property
    def device_class(self) -> str:
        """Return the class of this device."""
        return self._class

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}

    @property
    def device_info(self) -> dict[str, Any]:
        """Return the device information."""
        return {
            "identifiers": {(DOMAIN, self._api.information.serial)},
            "name": "Synology NAS",
            "manufacturer": "Synology",
            "model": self._api.information.model,
            "sw_version": self._api.information.version_string,
        }

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._enable_default

    async def async_added_to_hass(self):
        """Register entity for updates from API."""
        self.async_on_remove(self._api.subscribe(self._api_key, self.unique_id))
        await super().async_added_to_hass()
class SynologyDSMDeviceEntity(SynologyDSMBaseEntity):
    """Representation of a Synology NAS disk or volume entry."""

    def __init__(
        self,
        api: SynoApi,
        entity_type: str,
        entity_info: dict[str, Any],
        coordinator: DataUpdateCoordinator,
        device_id: str | None = None,
    ):
        """Initialize the Synology DSM disk or volume entity.

        device_id selects the disk ("sd...") or volume ("volume_...")
        this entity describes.
        """
        super().__init__(api, entity_type, entity_info, coordinator)
        self._device_id = device_id
        self._device_name = None
        self._device_manufacturer = None
        self._device_model = None
        self._device_firmware = None
        self._device_type = None

        if "volume" in entity_type:
            volume = self._api.storage.get_volume(self._device_id)
            # Volume does not have a name
            self._device_name = volume["id"].replace("_", " ").capitalize()
            self._device_manufacturer = "Synology"
            self._device_model = self._api.information.model
            self._device_firmware = self._api.information.version_string
            # Pretty-print the RAID/SHR device type for the device card.
            self._device_type = (
                volume["device_type"]
                .replace("_", " ")
                .replace("raid", "RAID")
                .replace("shr", "SHR")
            )
        elif "disk" in entity_type:
            disk = self._api.storage.get_disk(self._device_id)
            self._device_name = disk["name"]
            self._device_manufacturer = disk["vendor"]
            self._device_model = disk["model"].strip()
            self._device_firmware = disk["firm"]
            self._device_type = disk["diskType"]
        self._name = f"{self._api.network.hostname} {self._device_name} {entity_info[ENTITY_NAME]}"
        self._unique_id += f"_{self._device_id}"

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return bool(self._api.storage)

    @property
    def device_info(self) -> dict[str, Any]:
        """Return the device information."""
        return {
            "identifiers": {(DOMAIN, self._api.information.serial, self._device_id)},
            "name": f"Synology NAS ({self._device_name} - {self._device_type})",
            "manufacturer": self._device_manufacturer,
            "model": self._device_model,
            "sw_version": self._device_firmware,
            "via_device": (DOMAIN, self._api.information.serial),
        }
|
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from . import bitcoin
from .bitcoin import *
import struct
import traceback
import sys
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
    """Raised when serializing or deserializing transaction data fails."""
class UnknownTxinType(Exception):
    """Raised for a transaction input whose type cannot be determined."""
class NotRecognizedRedeemScript(Exception):
    """Raised when a redeem script does not match any known template."""
class BCDataStream(object):
    """Workalike of Bitcoin Core's CDataStream.

    A little-endian binary stream supporting Bitcoin's compact-size
    length encoding.  Fill the buffer with write() (repeatable), then
    deserialize with the read_* methods; ``read_cursor`` tracks the
    current read offset into ``input``.
    """

    def __init__(self):
        self.input = None       # bytearray buffer, or None before the first write
        self.read_cursor = 0    # offset of the next byte to read

    def clear(self):
        """Drop the buffer and reset the read position."""
        self.input = None
        self.read_cursor = 0

    def write(self, _bytes):  # Initialize with string of _bytes
        """Append a bytes-like object to the buffer, creating it if needed."""
        if self.input is None:
            self.input = bytearray(_bytes)
        else:
            self.input += bytearray(_bytes)

    def read_string(self, encoding='ascii'):
        """Read a compact-size-prefixed string and decode it.

        Strings are encoded depending on length:
        0 to 252 :                   1-byte-length followed by bytes (if any)
        253 to 65,535 :              byte '253' 2-byte-length followed by bytes
        65,536 to 4,294,967,295 :    byte '254' 4-byte-length followed by bytes
        ... and the Bitcoin client is coded to understand:
        greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes
        ... but I don't think it actually handles any strings that big.
        """
        if self.input is None:
            raise SerializationError("call write(bytes) before trying to deserialize")
        length = self.read_compact_size()
        return self.read_bytes(length).decode(encoding)

    def write_string(self, string, encoding='ascii'):
        """Write a string with a compact-size length prefix (see read_string)."""
        string = to_bytes(string, encoding)
        # Length-encoded as with read-string
        self.write_compact_size(len(string))
        self.write(string)

    def read_bytes(self, length):
        """Read exactly ``length`` bytes, raising SerializationError if the
        buffer holds fewer than that past the cursor.

        BUGFIX: the original guarded this with ``except IndexError``, but a
        slice never raises IndexError, so short reads were returned silently;
        check the result length explicitly instead.
        """
        result = self.input[self.read_cursor:self.read_cursor + length]
        if len(result) != length:
            raise SerializationError("attempt to read past end of buffer")
        self.read_cursor += length
        return result

    def read_boolean(self):
        # BUGFIX: was ``!= chr(0)``, comparing an int to a str — always True
        # on Python 3.  A single 0x00 byte is False, anything else True.
        return self.read_bytes(1)[0] != 0

    def read_int16(self): return self._read_num('<h')
    def read_uint16(self): return self._read_num('<H')
    def read_int32(self): return self._read_num('<i')
    def read_uint32(self): return self._read_num('<I')
    def read_int64(self): return self._read_num('<q')
    def read_uint64(self): return self._read_num('<Q')

    def write_boolean(self, val):
        # BUGFIX: was write(chr(...)), which is a str and raises TypeError
        # in bytearray() on Python 3; write real single bytes.
        return self.write(b'\x01' if val else b'\x00')

    def write_int16(self, val): return self._write_num('<h', val)
    def write_uint16(self, val): return self._write_num('<H', val)
    def write_int32(self, val): return self._write_num('<i', val)
    def write_uint32(self, val): return self._write_num('<I', val)
    def write_int64(self, val): return self._write_num('<q', val)
    def write_uint64(self, val): return self._write_num('<Q', val)

    def read_compact_size(self):
        """Read a Bitcoin compact-size integer (1, 3, 5 or 9 bytes)."""
        try:
            size = self.input[self.read_cursor]
            self.read_cursor += 1
            if size == 253:
                size = self._read_num('<H')
            elif size == 254:
                size = self._read_num('<I')
            elif size == 255:
                size = self._read_num('<Q')
            return size
        except IndexError:
            raise SerializationError("attempt to read past end of buffer")

    def write_compact_size(self, size):
        """Write a Bitcoin compact-size integer (see read_string for format)."""
        if size < 0:
            raise SerializationError("attempt to write size < 0")
        elif size < 253:
            self.write(bytes([size]))
        elif size < 2**16:
            self.write(b'\xfd')
            self._write_num('<H', size)
        elif size < 2**32:
            self.write(b'\xfe')
            self._write_num('<I', size)
        elif size < 2**64:
            self.write(b'\xff')
            self._write_num('<Q', size)

    def _read_num(self, format):
        """Unpack one number with struct format string `format` at the cursor."""
        try:
            (i,) = struct.unpack_from(format, self.input, self.read_cursor)
            self.read_cursor += struct.calcsize(format)
        except Exception as e:
            raise SerializationError(e)
        return i

    def _write_num(self, format, num):
        """Pack `num` with struct format string `format` and append it."""
        s = struct.pack(format, num)
        self.write(s)
# enum-like type
# From the Python Cookbook, downloaded from http://code.activestate.com/recipes/67107/
class EnumException(Exception):
    """Raised by Enumeration on malformed or duplicate enum definitions."""
    pass
class Enumeration:
    """C-style enumeration mapping names to integer values and back.

    ``enumList`` items are either plain names (numbered sequentially from
    the previous value) or ``(name, value)`` tuples that reset the counter.
    Values are exposed as attributes; whatis() does the reverse lookup.
    Adapted from the Python Cookbook (activestate recipe 67107).
    """
    def __init__(self, name, enumList):
        self.__doc__ = name
        lookup = { }
        reverseLookup = { }
        i = 0
        uniqueNames = [ ]
        uniqueValues = [ ]
        for x in enumList:
            if isinstance(x, tuple):
                x, i = x
            if not isinstance(x, str):
                # BUGFIX: use str() — concatenating a non-str here used to
                # raise TypeError instead of the intended EnumException
                raise EnumException("enum name is not a string: " + str(x))
            if not isinstance(i, int):
                raise EnumException("enum value is not an integer: " + str(i))
            if x in uniqueNames:
                raise EnumException("enum name is not unique: " + x)
            if i in uniqueValues:
                raise EnumException("enum value is not unique for " + x)
            uniqueNames.append(x)
            uniqueValues.append(i)
            lookup[x] = i
            reverseLookup[i] = x
            i = i + 1
        self.lookup = lookup
        self.reverseLookup = reverseLookup

    def __getattr__(self, attr):
        # unknown names must raise AttributeError so hasattr() works
        if attr not in self.lookup:
            raise AttributeError
        return self.lookup[attr]

    def whatis(self, value):
        """Return the enum name for an integer value (KeyError if unknown)."""
        return self.reverseLookup[value]
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
    """Return the full lowercase hex encoding of a byte string.

    BUGFIX: ``bytes.encode('hex_codec')`` is Python-2-only and raises on
    Python 3; use bytes.hex() instead.
    """
    return bytes.hex()
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
    """Hex-encode a byte string, abbreviating long values as 'head...tail'.

    Values whose hex form is under 11 characters are returned in full.
    BUGFIX: ``bytes.encode('hex_codec')`` is Python-2-only; use .hex().
    """
    t = bytes.hex()
    if len(t) < 11:
        return t
    return t[0:4]+"..."+t[-4:]
# Bitcoin script opcode table.  Plain names are numbered sequentially from
# the previous entry; (name, value) tuples pin explicit values (so
# OP_PUSHDATA1 is 0x4c, OP_CHECKMULTISIG lands at 0xae, etc.).
opcodes = Enumeration("Opcodes", [
    ("OP_0", 0), ("OP_PUSHDATA1",76), "OP_PUSHDATA2", "OP_PUSHDATA4", "OP_1NEGATE", "OP_RESERVED",
    "OP_1", "OP_2", "OP_3", "OP_4", "OP_5", "OP_6", "OP_7",
    "OP_8", "OP_9", "OP_10", "OP_11", "OP_12", "OP_13", "OP_14", "OP_15", "OP_16",
    "OP_NOP", "OP_VER", "OP_IF", "OP_NOTIF", "OP_VERIF", "OP_VERNOTIF", "OP_ELSE", "OP_ENDIF", "OP_VERIFY",
    "OP_RETURN", "OP_TOALTSTACK", "OP_FROMALTSTACK", "OP_2DROP", "OP_2DUP", "OP_3DUP", "OP_2OVER", "OP_2ROT", "OP_2SWAP",
    "OP_IFDUP", "OP_DEPTH", "OP_DROP", "OP_DUP", "OP_NIP", "OP_OVER", "OP_PICK", "OP_ROLL", "OP_ROT",
    "OP_SWAP", "OP_TUCK", "OP_CAT", "OP_SUBSTR", "OP_LEFT", "OP_RIGHT", "OP_SIZE", "OP_INVERT", "OP_AND",
    "OP_OR", "OP_XOR", "OP_EQUAL", "OP_EQUALVERIFY", "OP_RESERVED1", "OP_RESERVED2", "OP_1ADD", "OP_1SUB", "OP_2MUL",
    "OP_2DIV", "OP_NEGATE", "OP_ABS", "OP_NOT", "OP_0NOTEQUAL", "OP_ADD", "OP_SUB", "OP_MUL", "OP_DIV",
    "OP_MOD", "OP_LSHIFT", "OP_RSHIFT", "OP_BOOLAND", "OP_BOOLOR",
    "OP_NUMEQUAL", "OP_NUMEQUALVERIFY", "OP_NUMNOTEQUAL", "OP_LESSTHAN",
    "OP_GREATERTHAN", "OP_LESSTHANOREQUAL", "OP_GREATERTHANOREQUAL", "OP_MIN", "OP_MAX",
    "OP_WITHIN", "OP_RIPEMD160", "OP_SHA1", "OP_SHA256", "OP_HASH160",
    "OP_HASH256", "OP_CODESEPARATOR", "OP_CHECKSIG", "OP_CHECKSIGVERIFY", "OP_CHECKMULTISIG",
    "OP_CHECKMULTISIGVERIFY",
    ("OP_SINGLEBYTE_END", 0xF0),
    ("OP_DOUBLEBYTE_BEGIN", 0xF000),
    "OP_PUBKEY", "OP_PUBKEYHASH",
    ("OP_INVALIDOPCODE", 0xFFFF),
])
def script_GetOp(_bytes):
    """Iterate over a script, yielding (opcode, data, next_index) triples.

    For push opcodes ``data`` is the pushed bytes (may be shorter than the
    declared size if the script is truncated); for other opcodes it is None.
    Two-byte opcodes (>= OP_SINGLEBYTE_END) are folded into one 16-bit value.
    """
    i = 0
    while i < len(_bytes):
        vch = None
        opcode = _bytes[i]
        i += 1
        if opcode >= opcodes.OP_SINGLEBYTE_END:
            # two-byte opcode: high byte already read, pull in the low byte
            opcode <<= 8
            opcode |= _bytes[i]
            i += 1
        if opcode <= opcodes.OP_PUSHDATA4:
            # data push: the opcode itself is the byte count, unless it is
            # an OP_PUSHDATA form carrying an explicit little-endian length
            nSize = opcode
            if opcode == opcodes.OP_PUSHDATA1:
                nSize = _bytes[i]
                i += 1
            elif opcode == opcodes.OP_PUSHDATA2:
                (nSize,) = struct.unpack_from('<H', _bytes, i)
                i += 2
            elif opcode == opcodes.OP_PUSHDATA4:
                (nSize,) = struct.unpack_from('<I', _bytes, i)
                i += 4
            vch = _bytes[i:i + nSize]
            i += nSize
        yield opcode, vch, i
def script_GetOpName(opcode):
    """Return the opcode's symbolic name with its 'OP_' prefix stripped."""
    name = opcodes.whatis(opcode)
    return name.replace("OP_", "")
def decode_script(bytes):
    """Render a script as readable text: data pushes as 'size:hexdata',
    other opcodes by name, all joined by single spaces."""
    parts = []
    for opcode, vch, i in script_GetOp(bytes):
        if opcode <= opcodes.OP_PUSHDATA4:
            parts.append("%d:%s" % (opcode, short_hex(vch)))
        else:
            parts.append(script_GetOpName(opcode))
    return " ".join(parts)
def match_decoded(decoded, to_match):
    """Return True when the decoded script matches the template `to_match`.

    A template entry of OP_PUSHDATA4 accepts any data-push opcode, since
    every opcode at or below OP_PUSHDATA4 just pushes data onto the stack.
    """
    if len(decoded) != len(to_match):
        return False
    for (op, _vch, _idx), want in zip(decoded, to_match):
        if want == opcodes.OP_PUSHDATA4 and 0 < op <= opcodes.OP_PUSHDATA4:
            continue  # any push opcode satisfies a generic push slot
        if want != op:
            return False
    return True
def parse_sig(x_sig):
    """Map the NO_SIGNATURE placeholder to None, keeping real signatures."""
    result = []
    for sig in x_sig:
        result.append(sig if sig != NO_SIGNATURE else None)
    return result
def safe_parse_pubkey(x):
    """Convert an extended pubkey to a plain pubkey, returning the input
    unchanged on any failure (e.g. when x is already a plain pubkey)."""
    try:
        return xpubkey_to_pubkey(x)
    except Exception:
        # narrowed from a bare except so KeyboardInterrupt/SystemExit escape
        return x
def parse_scriptSig(d, _bytes):
    """Parse a txin's scriptSig and fill dict ``d`` in place.

    Depending on what is recognized, sets some of: type, address,
    signatures, x_pubkeys, pubkeys, num_sig, redeemScript.  Unrecognized
    scripts only log an error and leave ``d`` untouched.
    """
    try:
        decoded = [ x for x in script_GetOp(_bytes) ]
    except Exception as e:
        # coinbase transactions raise an exception
        print_error("parse_scriptSig: cannot find address in input script (coinbase?)",
                    bh2u(_bytes))
        return
    # single data push: either a nested-segwit redeem script or a p2pk sig
    match = [ opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        item = decoded[0][1]
        if item[0] == 0:
            # segwit embedded into p2sh
            # witness version 0
            d['address'] = bitcoin.hash160_to_p2sh(bitcoin.hash_160(item))
            if len(item) == 22:
                d['type'] = 'p2wpkh-p2sh'
            elif len(item) == 34:
                d['type'] = 'p2wsh-p2sh'
            else:
                print_error("unrecognized txin type", bh2u(item))
        elif opcodes.OP_1 <= item[0] <= opcodes.OP_16:
            # segwit embedded into p2sh
            # witness version 1-16
            pass
        else:
            # assert item[0] == 0x30
            # pay-to-pubkey
            d['type'] = 'p2pk'
            d['address'] = "(pubkey)"
            d['signatures'] = [bh2u(item)]
            d['num_sig'] = 1
            d['x_pubkeys'] = ["(pubkey)"]
            d['pubkeys'] = ["(pubkey)"]
        return
    # p2pkh TxIn transactions push a signature
    # (71-73 bytes) and then their public key
    # (33 or 65 bytes) onto the stack:
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        sig = bh2u(decoded[0][1])
        x_pubkey = bh2u(decoded[1][1])
        try:
            signatures = parse_sig([sig])
            pubkey, address = xpubkey_to_address(x_pubkey)
        except:
            print_error("parse_scriptSig: cannot find address in input script (p2pkh?)",
                        bh2u(_bytes))
            return
        d['type'] = 'p2pkh'
        d['signatures'] = signatures
        d['x_pubkeys'] = [x_pubkey]
        d['num_sig'] = 1
        d['pubkeys'] = [pubkey]
        d['address'] = address
        return
    # p2sh transaction, m of n
    match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
    if match_decoded(decoded, match):
        x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
        try:
            m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
        except NotRecognizedRedeemScript:
            print_error("parse_scriptSig: cannot find address in input script (p2sh?)",
                        bh2u(_bytes))
            # we could still guess:
            # d['address'] = hash160_to_p2sh(hash_160(decoded[-1][1]))
            return
        # write result in d
        d['type'] = 'p2sh'
        d['num_sig'] = m
        d['signatures'] = parse_sig(x_sig)
        d['x_pubkeys'] = x_pubkeys
        d['pubkeys'] = pubkeys
        d['redeemScript'] = redeemScript
        d['address'] = hash160_to_p2sh(hash_160(bfh(redeemScript)))
        return
    print_error("parse_scriptSig: cannot find address in input script (unknown)",
                bh2u(_bytes))
def parse_redeemScript(s):
    """Parse an m-of-n multisig redeem script.

    Returns (m, n, x_pubkeys, pubkeys, redeemScript-hex); raises
    NotRecognizedRedeemScript for anything that is not a standard
    <OP_m> <pubkey>*n <OP_n> OP_CHECKMULTISIG script.
    """
    dec2 = [ x for x in script_GetOp(s) ]
    try:
        # first opcode encodes m, second-to-last encodes n (OP_1-based)
        m = dec2[0][0] - opcodes.OP_1 + 1
        n = dec2[-2][0] - opcodes.OP_1 + 1
    except IndexError:
        raise NotRecognizedRedeemScript()
    op_m = opcodes.OP_1 + m - 1
    op_n = opcodes.OP_1 + n - 1
    match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
    if not match_decoded(dec2, match_multisig):
        raise NotRecognizedRedeemScript()
    x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
    pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
    # re-serialize canonically from the parsed pubkeys
    redeemScript = multisig_script(pubkeys, m)
    return m, n, x_pubkeys, pubkeys, redeemScript
def get_address_from_output_script(_bytes):
    """Classify a scriptPubKey.

    Returns a (type, payload) pair: (TYPE_PUBKEY, pubkey-hex) for p2pk,
    (TYPE_ADDRESS, address) for p2pkh/p2sh/segwit outputs, or
    (TYPE_SCRIPT, script-hex) for anything unrecognized.
    """
    decoded = [x for x in script_GetOp(_bytes)]
    # The Genesis Block, self-payments, and pay-by-IP-address payments look like:
    # 65 BYTES:... CHECKSIG
    match = [ opcodes.OP_PUSHDATA4, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return TYPE_PUBKEY, bh2u(decoded[0][1])
    # Pay-by-Bitcoin-address TxOuts look like:
    # DUP HASH160 20 BYTES:... EQUALVERIFY CHECKSIG
    match = [ opcodes.OP_DUP, opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUALVERIFY, opcodes.OP_CHECKSIG ]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash160_to_p2pkh(decoded[2][1])
    # p2sh
    match = [ opcodes.OP_HASH160, opcodes.OP_PUSHDATA4, opcodes.OP_EQUAL ]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash160_to_p2sh(decoded[1][1])
    # segwit address (witness version 0 program)
    match = [ opcodes.OP_0, opcodes.OP_PUSHDATA4 ]
    if match_decoded(decoded, match):
        return TYPE_ADDRESS, hash_to_segwit_addr(decoded[1][1])
    # fallback: hand back the raw script
    return TYPE_SCRIPT, bh2u(_bytes)
def parse_input(vds):
    """Deserialize one txin from the stream into a dict.

    Coinbase inputs (all-zero prevout hash) are tagged 'coinbase'; for
    ordinary inputs parse_scriptSig() is attempted to recover type,
    address, pubkeys and signatures — failures leave type 'unknown'.
    """
    d = {}
    prevout_hash = hash_encode(vds.read_bytes(32))
    prevout_n = vds.read_uint32()
    scriptSig = vds.read_bytes(vds.read_compact_size())
    sequence = vds.read_uint32()
    d['prevout_hash'] = prevout_hash
    d['prevout_n'] = prevout_n
    d['sequence'] = sequence
    if prevout_hash == '00'*32:
        d['type'] = 'coinbase'
        d['scriptSig'] = bh2u(scriptSig)
    else:
        # defaults for an input we cannot (yet) fully parse
        d['x_pubkeys'] = []
        d['pubkeys'] = []
        d['signatures'] = {}
        d['address'] = None
        d['type'] = 'unknown'
        d['num_sig'] = 0
        if scriptSig:
            d['scriptSig'] = bh2u(scriptSig)
            try:
                parse_scriptSig(d, scriptSig)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                print_error('failed to parse scriptSig', bh2u(scriptSig))
        else:
            d['scriptSig'] = ''
    return d
def parse_witness(vds, txin):
    """Read one input's witness stack from the stream and update ``txin``
    with signatures/pubkeys/witness fields inferred from it (best effort).

    A leading count of 0xffffffff is a sentinel (written by
    serialize_witness for incomplete txns) meaning an explicit input value
    precedes the real item count.
    """
    n = vds.read_compact_size()
    if n == 0:
        return
    if n == 0xffffffff:
        txin['value'] = vds.read_uint64()
        n = vds.read_compact_size()
    # now 'n' is the number of items in the witness
    w = list(bh2u(vds.read_bytes(vds.read_compact_size())) for i in range(n))
    add_w = lambda x: var_int(len(x) // 2) + x
    txin['witness'] = var_int(n) + ''.join(add_w(i) for i in w)
    # FIXME: witness version > 0 will probably fail here.
    # For native segwit, we would need the scriptPubKey of the parent txn
    # to determine witness program version, and properly parse the witness.
    # In case of p2sh-segwit, we can tell based on the scriptSig in this txn.
    # The code below assumes witness version 0.
    # p2sh-segwit should work in that case; for native segwit we need to tell
    # between p2wpkh and p2wsh; we do this based on number of witness items,
    # hence (FIXME) p2wsh with n==2 (maybe n==1 ?) will probably fail.
    # If v==0 and n==2, we need parent scriptPubKey to distinguish between p2wpkh and p2wsh.
    try:
        if txin['type'] == 'coinbase':
            pass
        elif txin['type'] == 'p2wsh-p2sh' or n > 2:
            try:
                m, n, x_pubkeys, pubkeys, witnessScript = parse_redeemScript(bfh(w[-1]))
            except NotRecognizedRedeemScript:
                raise UnknownTxinType()
            txin['signatures'] = parse_sig(w[1:-1])
            txin['num_sig'] = m
            txin['x_pubkeys'] = x_pubkeys
            txin['pubkeys'] = pubkeys
            txin['witnessScript'] = witnessScript
            if not txin.get('scriptSig'):  # native segwit script
                txin['type'] = 'p2wsh'
                txin['address'] = bitcoin.script_to_p2wsh(txin['witnessScript'])
        elif txin['type'] == 'p2wpkh-p2sh' or n == 2:
            txin['num_sig'] = 1
            txin['x_pubkeys'] = [w[1]]
            txin['pubkeys'] = [safe_parse_pubkey(w[1])]
            txin['signatures'] = parse_sig([w[0]])
            if not txin.get('scriptSig'):  # native segwit script
                txin['type'] = 'p2wpkh'
                txin['address'] = bitcoin.public_key_to_p2wpkh(bfh(txin['pubkeys'][0]))
        else:
            raise UnknownTxinType()
    except UnknownTxinType:
        txin['type'] = 'unknown'
        # FIXME: GUI might show 'unknown' address (e.g. for a non-multisig p2wsh)
    except BaseException:
        txin['type'] = 'unknown'
        traceback.print_exc(file=sys.stderr)
        print_error('failed to parse witness', txin.get('witness'))
def parse_output(vds, i):
    """Deserialize the i-th output from the stream into a dict with
    value, type, address, scriptPubKey (hex) and prevout_n."""
    value = vds.read_int64()
    scriptPubKey = vds.read_bytes(vds.read_compact_size())
    out_type, address = get_address_from_output_script(scriptPubKey)
    return {
        'value': value,
        'type': out_type,
        'address': address,
        'scriptPubKey': bh2u(scriptPubKey),
        'prevout_n': i,
    }
def deserialize(raw):
    """Deserialize a raw hex transaction into a dict with keys
    version, inputs, outputs and lockTime.

    A vin count of 0 signals the segwit marker; the flag byte must then
    be 0x01 and per-input witnesses follow the outputs (BIP-141 layout).
    """
    vds = BCDataStream()
    vds.write(bfh(raw))
    d = {}
    start = vds.read_cursor  # NOTE(review): unused; kept for compatibility
    d['version'] = vds.read_int32()
    n_vin = vds.read_compact_size()
    is_segwit = (n_vin == 0)
    if is_segwit:
        marker = vds.read_bytes(1)
        assert marker == b'\x01'
        n_vin = vds.read_compact_size()
    d['inputs'] = [parse_input(vds) for i in range(n_vin)]
    n_vout = vds.read_compact_size()
    d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
    if is_segwit:
        for i in range(n_vin):
            txin = d['inputs'][i]
            parse_witness(vds, txin)
    d['lockTime'] = vds.read_uint32()
    return d
# pay & redeem scripts
def multisig_script(public_keys, m):
    """Return the hex m-of-n CHECKMULTISIG script for the given pubkeys.

    ``public_keys`` are hex strings; n is their count.  m and n are
    encoded as OP_1..OP_15, so at most 15 keys are supported; the
    trailing 'ae' is OP_CHECKMULTISIG.
    """
    n = len(public_keys)
    assert n <= 15
    assert m <= n
    op_m = format(opcodes.OP_1 + m - 1, 'x')
    op_n = format(opcodes.OP_1 + n - 1, 'x')
    keylist = [op_push(len(k)//2) + k for k in public_keys]
    return op_m + ''.join(keylist) + op_n + 'ae'
class Transaction:
    """In-memory Bitcoin transaction.

    Wraps a raw hex string and/or parsed inputs/outputs, and supports
    (de)serialization (legacy and segwit), size/weight estimation,
    preimage construction and ECDSA signing of inputs.
    """
    def __str__(self):
        # lazily (re)serialize so str(tx) always reflects current state
        if self.raw is None:
            self.raw = self.serialize()
        return self.raw
    def __init__(self, raw):
        # raw may be None (build via from_io), a hex string, or a dict
        # carrying a 'hex' field (e.g. produced by as_dict())
        if raw is None:
            self.raw = None
        elif isinstance(raw, str):
            self.raw = raw.strip() if raw else None
        elif isinstance(raw, dict):
            self.raw = raw['hex']
        else:
            raise BaseException("cannot initialize transaction", raw)
        self._inputs = None
        self._outputs = None
        self.locktime = 0
        self.version = 1
    def update(self, raw):
        """Replace the raw tx and re-parse it."""
        self.raw = raw
        self._inputs = None
        self.deserialize()
    def inputs(self):
        """Return the parsed input dicts, deserializing on first access."""
        if self._inputs is None:
            self.deserialize()
        return self._inputs
    def outputs(self):
        """Return the parsed (type, address, value) outputs, deserializing lazily."""
        if self._outputs is None:
            self.deserialize()
        return self._outputs
    @classmethod
    def get_sorted_pubkeys(self, txin):
        # sort pubkeys and x_pubkeys, using the order of pubkeys
        x_pubkeys = txin['x_pubkeys']
        pubkeys = txin.get('pubkeys')
        if pubkeys is None:
            pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
            pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
            txin['pubkeys'] = pubkeys = list(pubkeys)
            txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
        return pubkeys, x_pubkeys
    def update_signatures(self, raw):
        """Add new signatures to a transaction"""
        # recovers, for each foreign signature, which of our pubkeys it
        # belongs to, verifies it, and stores it at the matching index
        d = deserialize(raw)
        for i, txin in enumerate(self.inputs()):
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            sigs1 = txin.get('signatures')
            sigs2 = d['inputs'][i].get('signatures')
            for sig in sigs2:
                if sig in sigs1:
                    continue
                pre_hash = Hash(bfh(self.serialize_preimage(i)))
                # der to string
                order = ecdsa.ecdsa.generator_secp256k1.order()
                r, s = ecdsa.util.sigdecode_der(bfh(sig[:-2]), order)
                sig_string = ecdsa.util.sigencode_string(r, s, order)
                compressed = True
                for recid in range(4):
                    public_key = MyVerifyingKey.from_signature(sig_string, recid, pre_hash, curve = SECP256k1)
                    pubkey = bh2u(point_to_ser(public_key.pubkey.point, compressed))
                    if pubkey in pubkeys:
                        public_key.verify_digest(sig_string, pre_hash, sigdecode = ecdsa.util.sigdecode_string)
                        j = pubkeys.index(pubkey)
                        print_error("adding sig", i, j, pubkey, sig)
                        self._inputs[i]['signatures'][j] = sig
                        #self._inputs[i]['x_pubkeys'][j] = pubkey
                        break
        # redo raw
        self.raw = self.serialize()
    def deserialize(self):
        """Parse self.raw into _inputs/_outputs/locktime/version (idempotent)."""
        if self.raw is None:
            return
            #self.raw = self.serialize()
        if self._inputs is not None:
            return
        d = deserialize(self.raw)
        self._inputs = d['inputs']
        self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
        self.locktime = d['lockTime']
        self.version = d['version']
        return d
    @classmethod
    def from_io(klass, inputs, outputs, locktime=0):
        """Alternate constructor from already-parsed inputs and outputs."""
        self = klass(None)
        self._inputs = inputs
        self._outputs = outputs
        self.locktime = locktime
        return self
    @classmethod
    def pay_script(self, output_type, addr):
        """Return the hex scriptPubKey paying to `addr` for the given output type."""
        if output_type == TYPE_SCRIPT:
            return addr
        elif output_type == TYPE_ADDRESS:
            return bitcoin.address_to_script(addr)
        elif output_type == TYPE_PUBKEY:
            return bitcoin.public_key_to_p2pk_script(addr)
        else:
            raise TypeError('Unknown output type')
    @classmethod
    def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
        """Guess the serialized pubkey length (bytes) from an x_pubkey prefix."""
        try:
            if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
                return 0x21
            elif x_pubkey[0:2] == '04': # uncompressed pubkey
                return 0x41
            elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
                return 0x21
            elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
                return 0x41
        except Exception as e:
            pass
        return 0x21 # just guess it is compressed
    @classmethod
    def estimate_pubkey_size_for_txin(cls, txin):
        """Guess the pubkey length for a txin from whichever key list it has."""
        pubkeys = txin.get('pubkeys', [])
        x_pubkeys = txin.get('x_pubkeys', [])
        if pubkeys and len(pubkeys) > 0:
            return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
        elif x_pubkeys and len(x_pubkeys) > 0:
            return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
        else:
            return 0x21 # just guess it is compressed
    @classmethod
    def get_siglist(self, txin, estimate_size=False):
        """Return (pubkey_list, sig_list) for serializing a txin's script."""
        # if we have enough signatures, we use the actual pubkeys
        # otherwise, use extended pubkeys (with bip32 derivation)
        num_sig = txin.get('num_sig', 1)
        if estimate_size:
            pubkey_size = self.estimate_pubkey_size_for_txin(txin)
            pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
            # we assume that signature will be 0x48 bytes long
            sig_list = [ "00" * 0x48 ] * num_sig
        else:
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            x_signatures = txin['signatures']
            signatures = list(filter(None, x_signatures))
            is_complete = len(signatures) == num_sig
            if is_complete:
                pk_list = pubkeys
                sig_list = signatures
            else:
                pk_list = x_pubkeys
                sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
        return pk_list, sig_list
    @classmethod
    def serialize_witness(self, txin, estimate_size=False):
        """Serialize a txin's witness stack as hex ('00' for non-segwit inputs).

        For incomplete inputs a sentinel count 0xffffffff plus the input
        value is prepended (consumed again by parse_witness)."""
        add_w = lambda x: var_int(len(x)//2) + x
        if not self.is_segwit_input(txin):
            return '00'
        pubkeys, sig_list = self.get_siglist(txin, estimate_size)
        if txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
            witness = var_int(2) + add_w(sig_list[0]) + add_w(pubkeys[0])
        elif txin['type'] in ['p2wsh', 'p2wsh-p2sh']:
            n = len(sig_list) + 2
            witness_script = multisig_script(pubkeys, txin['num_sig'])
            witness = var_int(n) + '00' + ''.join(add_w(x) for x in sig_list) + add_w(witness_script)
        else:
            witness = txin.get('witness', None)
            if not witness:
                raise BaseException('wrong txin type:', txin['type'])
        if self.is_txin_complete(txin) or estimate_size:
            value_field = ''
        else:
            value_field = var_int(0xffffffff) + int_to_hex(txin['value'], 8)
        return value_field + witness
    @classmethod
    def is_segwit_input(cls, txin):
        """True if the input is a segwit type or already carries a witness."""
        has_nonzero_witness = txin.get('witness', '00') != '00'
        return cls.is_segwit_inputtype(txin['type']) or has_nonzero_witness
    @classmethod
    def is_segwit_inputtype(cls, txin_type):
        """True for native and p2sh-nested segwit input types."""
        return txin_type in ('p2wpkh', 'p2wpkh-p2sh', 'p2wsh', 'p2wsh-p2sh')
    @classmethod
    def input_script(self, txin, estimate_size=False):
        """Serialize a txin's scriptSig as hex for its script type."""
        _type = txin['type']
        if _type == 'coinbase':
            return txin['scriptSig']
        pubkeys, sig_list = self.get_siglist(txin, estimate_size)
        script = ''.join(push_script(x) for x in sig_list)
        if _type == 'p2pk':
            pass
        elif _type == 'p2sh':
            # put op_0 before script
            script = '00' + script
            redeem_script = multisig_script(pubkeys, txin['num_sig'])
            script += push_script(redeem_script)
        elif _type == 'p2pkh':
            script += push_script(pubkeys[0])
        elif _type in ['p2wpkh', 'p2wsh']:
            return ''
        elif _type == 'p2wpkh-p2sh':
            pubkey = safe_parse_pubkey(pubkeys[0])
            scriptSig = bitcoin.p2wpkh_nested_script(pubkey)
            return push_script(scriptSig)
        elif _type == 'p2wsh-p2sh':
            witness_script = self.get_preimage_script(txin)
            scriptSig = bitcoin.p2wsh_nested_script(witness_script)
            return push_script(scriptSig)
        elif _type == 'address':
            script += push_script(pubkeys[0])
        elif _type == 'unknown':
            return txin['scriptSig']
        return script
    @classmethod
    def is_txin_complete(self, txin):
        """True when the input carries its full quota of signatures."""
        num_sig = txin.get('num_sig', 1)
        x_signatures = txin['signatures']
        signatures = list(filter(None, x_signatures))
        return len(signatures) == num_sig
    @classmethod
    def get_preimage_script(self, txin):
        """Return the scriptCode (hex) hashed when signing this input."""
        # only for non-segwit
        if txin['type'] == 'p2pkh':
            return bitcoin.address_to_script(txin['address'])
        elif txin['type'] in ['p2sh', 'p2wsh', 'p2wsh-p2sh']:
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            return multisig_script(pubkeys, txin['num_sig'])
        elif txin['type'] in ['p2wpkh', 'p2wpkh-p2sh']:
            pubkey = txin['pubkeys'][0]
            pkh = bh2u(bitcoin.hash_160(bfh(pubkey)))
            return '76a9' + push_script(pkh) + '88ac'
        elif txin['type'] == 'p2pk':
            pubkey = txin['pubkeys'][0]
            return bitcoin.public_key_to_p2pk_script(pubkey)
        else:
            raise TypeError('Unknown txin type', txin['type'])
    @classmethod
    def serialize_outpoint(self, txin):
        """Serialize the outpoint: little-endian prev hash + 4-byte index."""
        return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
    @classmethod
    def get_outpoint_from_txin(cls, txin):
        """Return 'txid:n' for an input, or None for coinbase."""
        if txin['type'] == 'coinbase':
            return None
        prevout_hash = txin['prevout_hash']
        prevout_n = txin['prevout_n']
        return prevout_hash + ':%d' % prevout_n
    @classmethod
    def serialize_input(self, txin, script):
        """Serialize one input with the given script (hex) as hex."""
        # Prev hash and index
        s = self.serialize_outpoint(txin)
        # Script length, script, sequence
        s += var_int(len(script)//2)
        s += script
        s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
        return s
    def set_rbf(self, rbf):
        """Mark all inputs as opt-in replace-by-fee (BIP-125) or not."""
        nSequence = 0xffffffff - (2 if rbf else 1)
        for txin in self.inputs():
            txin['sequence'] = nSequence
    def BIP_LI01_sort(self):
        """Canonically sort inputs and outputs (BIP-LI01)."""
        # See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
        self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
        self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[0], o[1])))
    def serialize_output(self, output):
        """Serialize one (type, address, amount) output as hex."""
        output_type, addr, amount = output
        s = int_to_hex(amount, 8)
        script = self.pay_script(output_type, addr)
        s += var_int(len(script)//2)
        s += script
        return s
    def serialize_preimage(self, i):
        """Return the hex signing preimage for input i.

        Segwit inputs use the BIP-143 digest layout; legacy inputs use the
        original scheme where only input i carries its scriptCode.
        """
        nVersion = int_to_hex(self.version, 4)
        nHashType = int_to_hex(1, 4)  # SIGHASH_ALL
        nLocktime = int_to_hex(self.locktime, 4)
        inputs = self.inputs()
        outputs = self.outputs()
        txin = inputs[i]
        # TODO: py3 hex
        if self.is_segwit_input(txin):
            hashPrevouts = bh2u(Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs))))
            hashSequence = bh2u(Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs))))
            hashOutputs = bh2u(Hash(bfh(''.join(self.serialize_output(o) for o in outputs))))
            outpoint = self.serialize_outpoint(txin)
            preimage_script = self.get_preimage_script(txin)
            scriptCode = var_int(len(preimage_script) // 2) + preimage_script
            amount = int_to_hex(txin['value'], 8)
            nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
            preimage = nVersion + hashPrevouts + hashSequence + outpoint + scriptCode + amount + nSequence + hashOutputs + nLocktime + nHashType
        else:
            txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.get_preimage_script(txin) if i==k else '') for k, txin in enumerate(inputs))
            txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
            preimage = nVersion + txins + txouts + nLocktime + nHashType
        return preimage
    def is_segwit(self):
        """True if any input is segwit (forces segwit serialization)."""
        return any(self.is_segwit_input(x) for x in self.inputs())
    def serialize(self, estimate_size=False, witness=True):
        """Serialize the full transaction as hex; segwit marker/flag and
        witness data are included when applicable and `witness` is True."""
        nVersion = int_to_hex(self.version, 4)
        nLocktime = int_to_hex(self.locktime, 4)
        inputs = self.inputs()
        outputs = self.outputs()
        txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size)) for txin in inputs)
        txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
        if witness and self.is_segwit():
            marker = '00'
            flag = '01'
            witness = ''.join(self.serialize_witness(x, estimate_size) for x in inputs)
            return nVersion + marker + flag + txins + txouts + witness + nLocktime
        else:
            return nVersion + txins + txouts + nLocktime
    def hash(self):
        """Deprecated alias for txid()."""
        print("warning: deprecated tx.hash()")
        return self.txid()
    def txid(self):
        """Return the txid (double-SHA256 of the non-witness serialization),
        or None for an incomplete non-segwit transaction (txid not final)."""
        all_segwit = all(self.is_segwit_input(x) for x in self.inputs())
        if not all_segwit and not self.is_complete():
            return None
        ser = self.serialize(witness=False)
        return bh2u(Hash(bfh(ser))[::-1])
    def wtxid(self):
        """Return the witness txid (hash of the full serialization)."""
        ser = self.serialize(witness=True)
        return bh2u(Hash(bfh(ser))[::-1])
    def add_inputs(self, inputs):
        """Append inputs and invalidate the cached raw serialization."""
        self._inputs.extend(inputs)
        self.raw = None
    def add_outputs(self, outputs):
        """Append outputs and invalidate the cached raw serialization."""
        self._outputs.extend(outputs)
        self.raw = None
    def input_value(self):
        """Sum of input values (requires 'value' on every input)."""
        return sum(x['value'] for x in self.inputs())
    def output_value(self):
        """Sum of output values."""
        return sum(val for tp, addr, val in self.outputs())
    def get_fee(self):
        """Fee = total inputs - total outputs."""
        return self.input_value() - self.output_value()
    def is_final(self):
        """True unless some input signals RBF (sequence < 0xfffffffe)."""
        return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1 for x in self.inputs()])
    @profiler
    def estimated_size(self):
        """Return an estimated virtual tx size in vbytes.
        BIP-0141 defines 'Virtual transaction size' to be weight/4 rounded up.
        This definition is only for humans, and has little meaning otherwise.
        If we wanted sub-byte precision, fee calculation should use transaction
        weights, but for simplicity we approximate that with (virtual_size)x4
        """
        weight = self.estimated_weight()
        return self.virtual_size_from_weight(weight)
    @classmethod
    def estimated_input_weight(cls, txin, is_segwit_tx):
        '''Return an estimate of serialized input weight in weight units.'''
        script = cls.input_script(txin, True)
        input_size = len(cls.serialize_input(txin, script)) // 2
        if cls.is_segwit_input(txin):
            assert is_segwit_tx
            witness_size = len(cls.serialize_witness(txin, True)) // 2
        else:
            witness_size = 1 if is_segwit_tx else 0
        return 4 * input_size + witness_size
    @classmethod
    def estimated_output_size(cls, address):
        """Return an estimate of serialized output size in bytes."""
        script = bitcoin.address_to_script(address)
        # 8 byte value + 1 byte script len + script
        return 9 + len(script) // 2
    @classmethod
    def virtual_size_from_weight(cls, weight):
        """vbytes = ceil(weight / 4), per BIP-141."""
        return weight // 4 + (weight % 4 > 0)
    def estimated_total_size(self):
        """Return an estimated total transaction size in bytes."""
        return len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None else len(self.raw) // 2  # ASCII hex string
    def estimated_witness_size(self):
        """Return an estimate of witness size in bytes."""
        if not self.is_segwit():
            return 0
        inputs = self.inputs()
        estimate = not self.is_complete()
        witness = ''.join(self.serialize_witness(x, estimate) for x in inputs)
        witness_size = len(witness) // 2 + 2  # include marker and flag
        return witness_size
    def estimated_base_size(self):
        """Return an estimated base transaction size in bytes."""
        return self.estimated_total_size() - self.estimated_witness_size()
    def estimated_weight(self):
        """Return an estimate of transaction weight."""
        total_tx_size = self.estimated_total_size()
        base_tx_size = self.estimated_base_size()
        return 3 * base_tx_size + total_tx_size
    def signature_count(self):
        """Return (signatures present, signatures required) over all inputs."""
        r = 0
        s = 0
        for txin in self.inputs():
            if txin['type'] == 'coinbase':
                continue
            signatures = list(filter(None, txin.get('signatures',[])))
            s += len(signatures)
            r += txin.get('num_sig',-1)
        return s, r
    def is_complete(self):
        """True when every input has all of its required signatures."""
        s, r = self.signature_count()
        return r == s
    def sign(self, keypairs):
        """Sign every input whose x_pubkey has a private key in `keypairs`
        (mapping x_pubkey -> (secret, compressed)); updates self.raw."""
        for i, txin in enumerate(self.inputs()):
            num = txin['num_sig']
            pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
            for j, x_pubkey in enumerate(x_pubkeys):
                signatures = list(filter(None, txin['signatures']))
                if len(signatures) == num:
                    # txin is complete
                    break
                if x_pubkey in keypairs.keys():
                    print_error("adding signature for", x_pubkey)
                    sec, compressed = keypairs.get(x_pubkey)
                    pubkey = public_key_from_private_key(sec, compressed)
                    # add signature
                    pre_hash = Hash(bfh(self.serialize_preimage(i)))
                    pkey = regenerate_key(sec)
                    secexp = pkey.secret
                    private_key = bitcoin.MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
                    public_key = private_key.get_verifying_key()
                    # RFC 6979 deterministic nonce, DER-encoded, SIGHASH_ALL
                    sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
                    assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
                    txin['signatures'][j] = bh2u(sig) + '01'
                    #txin['x_pubkeys'][j] = pubkey
                    txin['pubkeys'][j] = pubkey # needed for fd keys
                    self._inputs[i] = txin
        print_error("is_complete", self.is_complete())
        self.raw = self.serialize()
    def get_outputs(self):
        """convert pubkeys to addresses"""
        o = []
        for type, x, v in self.outputs():
            if type == TYPE_ADDRESS:
                addr = x
            elif type == TYPE_PUBKEY:
                addr = bitcoin.public_key_to_p2pkh(bfh(x))
            else:
                addr = 'SCRIPT ' + x
            o.append((addr,v))      # consider using yield (addr, v)
        return o
    def get_output_addresses(self):
        """Return just the addresses from get_outputs()."""
        return [addr for addr, val in self.get_outputs()]
    def has_address(self, addr):
        """True if `addr` appears in the outputs or in any input's address."""
        return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
    def as_dict(self):
        """Return {'hex', 'complete', 'final'}, serializing first if needed."""
        if self.raw is None:
            self.raw = self.serialize()
        self.deserialize()
        out = {
            'hex': self.raw,
            'complete': self.is_complete(),
            'final': self.is_final(),
        }
        return out
def tx_from_str(txt):
    """Accept raw hexadecimal or a JSON dict with a 'hex' field and return
    the raw hex transaction string.

    Raises ValueError for empty input; JSON/assertion errors propagate for
    malformed non-hex input.
    """
    import json
    txt = txt.strip()
    if not txt:
        raise ValueError("empty string")
    try:
        bfh(txt)
        is_hex = True
    except Exception:
        # narrowed from a bare except; non-hex input falls through to JSON
        is_hex = False
    if is_hex:
        return txt
    tx_dict = json.loads(txt)
    assert "hex" in tx_dict
    return tx_dict["hex"]
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:GtacoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
# (SRCDIR, BUILDDIR, EXEEXT and the ENABLE_* flags come from
# `from tests_config import *` above.)
BOLD = ("","")
if os.name == 'posix':
    # primitive formatting on supported
    # terminal via ANSI escape sequences:
    BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
    ENABLE_WALLET=0
if 'ENABLE_GTACOIND' not in vars():
    ENABLE_GTACOIND=0
if 'ENABLE_UTILS' not in vars():
    ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
    ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
# `--xxx` args are forwarded to the individual test scripts; `-parallel=N`
# sets the worker count; anything else is collected into `opts`.
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
    if arg == "--help" or arg == "-h" or arg == "-?":
        print_help = True
        break
    if arg == '--coverage':
        ENABLE_COVERAGE = 1
    elif PASSON_REGEX.match(arg):
        passon_args.append(arg)
    elif PARALLEL_REGEX.match(arg):
        run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
    else:
        opts.add(arg)
#Set env vars
if "GTACOIND" not in os.environ:
    os.environ["GTACOIND"] = BUILDDIR + '/src/gtacoind' + EXEEXT
if "GTACOINCLI" not in os.environ:
    os.environ["GTACOINCLI"] = BUILDDIR + '/src/gtacoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
    # https://github.com/gtacoin/gtacoin2/commit/d52802551752140cf41f0d9a225a43e84404d3e9
    # https://github.com/gtacoin/gtacoin2/pull/5677#issuecomment-136646964
    print("Win tests currently disabled by default. Use -win option to enable")
    sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_GTACOIND == 1):
    print("No rpc tests to run. Wallet, utils, and gtacoind must all be enabled")
    sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
    try:
        import zmq
    except ImportError:
        print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
              "to run zmq tests, see dependency info in /qa/README.md.")
        # ENABLE_ZMQ=0
        raise
# Default test suite, run on every invocation.
testScripts = [
    # longest test should go first, to favor running tests in parallel
    'p2p-fullblocktest.py',
    'walletbackup.py',
    'bip68-112-113-p2p.py',
    'wallet.py',
    'wallet-hd.py',
    'wallet-dump.py',
    'listtransactions.py',
    'receivedby.py',
    'mempool_resurrect_test.py',
    'txn_doublespend.py --mineblock',
    'txn_clone.py',
    'getchaintips.py',
    'rawtransactions.py',
    'rest.py',
    'mempool_spendcoinbase.py',
    'mempool_reorg.py',
    'mempool_limit.py',
    'httpbasics.py',
    'multi_rpc.py',
    'zapwallettxes.py',
    'proxy_test.py',
    'merkle_blocks.py',
    'fundrawtransaction.py',
    'signrawtransactions.py',
    'nodehandling.py',
    'reindex.py',
    'decodescript.py',
    'blockchain.py',
    'disablewallet.py',
    'sendheaders.py',
    'keypool.py',
    'prioritise_transaction.py',
    'invalidblockrequest.py',
    'invalidtxrequest.py',
    'abandonconflict.py',
    'p2p-versionbits-warning.py',
    'p2p-segwit.py',
    'segwit.py',
    'importprunedfunds.py',
    'signmessages.py',
    'p2p-compactblocks.py',
    'nulldummy.py',
]
# zmq_test.py requires the python3-zmq bindings; only added when available.
if ENABLE_ZMQ:
    testScripts.append('zmq_test.py')

# Extended suite: slower / less critical tests, run only with -extended.
testScriptsExt = [
    'bip9-softforks.py',
    'bip65-cltv.py',
    'bip65-cltv-p2p.py',
    'bip68-sequence.py',
    'bipdersig-p2p.py',
    'bipdersig.py',
    'getblocktemplate_longpoll.py',
    'getblocktemplate_proposals.py',
    'txn_doublespend.py',
    'txn_clone.py --mineblock',
    'forknotify.py',
    'invalidateblock.py',
    'rpcbind_test.py',
    'smartfees.py',
    'maxblocksinflight.py',
    'p2p-acceptblock.py',
    'mempool_packages.py',
    'maxuploadtarget.py',
    'replace-by-fee.py',
    'p2p-feefilter.py',
    'pruning.py', # leave pruning last as it takes a REALLY long time
]
def runtests():
    """Select, launch, and report on the RPC test scripts.

    Reads the module-level globals set up during argument parsing
    (opts, print_help, passon_args, run_parallel, ENABLE_COVERAGE, ...)
    and exits the process with a non-zero status if any test failed.
    """
    # Decide which scripts to run: all+extended, the default set, or only
    # those named on the command line (with or without the .py suffix).
    test_list = []
    if '-extended' in opts:
        test_list = testScripts + testScriptsExt
    elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
        test_list = testScripts
    else:
        for t in testScripts + testScriptsExt:
            if t in opts or re.sub(".py$", "", t) in opts:
                test_list.append(t)

    if print_help:
        # Only print help of the first script and exit
        subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
        sys.exit(0)

    coverage = None
    if ENABLE_COVERAGE:
        coverage = RPCCoverage()
        print("Initializing coverage directory at %s\n" % coverage.dir)

    # Flags forwarded to every test script.
    flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
    if coverage:
        flags.append(coverage.flag)

    if len(test_list) > 1 and run_parallel > 1:
        # Populate cache
        subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)

    #Run Tests
    max_len_name = len(max(test_list, key=len))
    time_sum = 0
    time0 = time.time()
    job_queue = RPCTestHandler(run_parallel, test_list, flags)
    results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
    all_passed = True
    # Drain the job queue: collect each script's result as it finishes.
    for _ in range(len(test_list)):
        (name, stdout, stderr, passed, duration) = job_queue.get_next()
        all_passed = all_passed and passed
        time_sum += duration
        print('\n' + BOLD[1] + name + BOLD[0] + ":")
        print(stdout)
        print('stderr:\n' if not stderr == '' else '', stderr)
        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
        print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
    print(results)
    print("\nRuntime: %s s" % (int(time.time() - time0)))

    if coverage:
        coverage.report_rpc_coverage()
        print("Cleaning up coverage data")
        coverage.cleanup()

    # Shell convention: exit status 0 only if every script passed.
    sys.exit(not all_passed)
class RPCTestHandler:
    """
    Runs the given test scripts, keeping at most ``num_tests_parallel``
    subprocesses alive at once, and hands back results one at a time.
    """

    def __init__(self, num_tests_parallel, test_list=None, flags=None):
        assert(num_tests_parallel >= 1)
        self.num_jobs = num_tests_parallel
        self.test_list = test_list
        self.flags = flags
        self.num_running = 0
        # In case there is a graveyard of zombie gtacoinds, we can apply a
        # pseudorandom offset to hopefully jump over them.
        # (625 is PORT_RANGE/MAX_NODES)
        self.portseed_offset = int(time.time() * 1000) % 625
        self.jobs = []

    def get_next(self):
        """Start pending scripts up to the limit, then block until one of
        the running jobs finishes and return its result tuple:
        (name, stdout, stderr, passed, duration_seconds)."""
        # Top up the pool of running jobs.
        while self.num_running < self.num_jobs and self.test_list:
            self.num_running += 1
            script = self.test_list.pop(0)
            port_seed = ["--portseed={}".format(len(self.test_list) + self.portseed_offset)]
            out_log = tempfile.SpooledTemporaryFile(max_size=2**16)
            err_log = tempfile.SpooledTemporaryFile(max_size=2**16)
            proc = subprocess.Popen((RPC_TESTS_DIR + script).split() + self.flags + port_seed,
                                    universal_newlines=True,
                                    stdout=out_log,
                                    stderr=err_log)
            self.jobs.append((script, time.time(), proc, out_log, err_log))
        if not self.jobs:
            raise IndexError('pop from empty list')
        # Poll the running jobs until one terminates, then report it.
        while True:
            time.sleep(.5)
            for job in self.jobs:
                (name, start_time, proc, out_log, err_log) = job
                if proc.poll() is None:
                    continue
                out_log.seek(0)
                err_log.seek(0)
                stdout = out_log.read().decode('utf-8')
                stderr = err_log.read().decode('utf-8')
                out_log.close()
                err_log.close()
                # A test passes only on exit code 0 with a silent stderr.
                passed = stderr == "" and proc.returncode == 0
                self.num_running -= 1
                self.jobs.remove(job)
                return name, stdout, stderr, passed, int(time.time() - start_time)
            print('.', end='', flush=True)
class RPCCoverage(object):
    """
    Coverage reporting utilities for pull-tester.

    Coverage calculation works by having each test script subprocess write
    coverage files into a particular directory. These files contain the RPC
    commands invoked during testing, as well as a complete listing of RPC
    commands per `gtacoin-cli help` (`rpc_interface.txt`).

    After all tests complete, the commands run are combined and diff'd
    against the complete list to calculate uncovered RPC commands.

    See also: qa/rpc-tests/test_framework/coverage.py
    """

    def __init__(self):
        # Each instance owns a fresh temp directory for coverage artifacts.
        self.dir = tempfile.mkdtemp(prefix="coverage")
        self.flag = '--coveragedir=%s' % self.dir

    def report_rpc_coverage(self):
        """
        Print out RPC commands that were unexercised by tests.
        """
        uncovered = self._get_uncovered_rpc_commands()
        if not uncovered:
            print("All RPC commands covered.")
        else:
            print("Uncovered RPC commands:")
            print("".join((" - %s\n" % i) for i in sorted(uncovered)))

    def cleanup(self):
        """Remove the coverage directory and everything under it."""
        return shutil.rmtree(self.dir)

    def _get_uncovered_rpc_commands(self):
        """
        Return a set of currently untested RPC commands.
        """
        # This is shared from `qa/rpc-tests/test-framework/coverage.py`
        reference_filename = 'rpc_interface.txt'
        coverage_file_prefix = 'coverage.'
        coverage_ref_filename = os.path.join(self.dir, reference_filename)
        if not os.path.isfile(coverage_ref_filename):
            raise RuntimeError("No coverage reference found")
        # Reference file: one RPC command per line, the full interface.
        with open(coverage_ref_filename, 'r') as ref_file:
            all_cmds = {line.strip() for line in ref_file}
        # Union of all per-test coverage files gives the exercised commands.
        covered_cmds = set()
        for root, _dirs, files in os.walk(self.dir):
            for filename in files:
                if not filename.startswith(coverage_file_prefix):
                    continue
                with open(os.path.join(root, filename), 'r') as cov_file:
                    covered_cmds.update(line.strip() for line in cov_file)
        return all_cmds - covered_cmds
if __name__ == '__main__':
    # Entry point: run the selected test scripts; exits non-zero on failure.
    runtests()
|
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import alarm_base
from heat.engine.resources.openstack.heat import none_resource
from heat.engine import support
from heat.engine import translation
class AodhAlarm(alarm_base.BaseAlarm):
    """A resource that implements alarming service of Aodh.

    A resource that allows for setting alarms based on threshold evaluation
    for a collection of samples. Also, you can define actions to take if the
    state of the watched resource satisfies specified conditions. For example,
    it can watch for the memory consumption and when it reaches 70% on a given
    instance if the instance has been up for more than 10 min, some action
    will be called.
    """

    # NOTE(review): "Theshold" below is a typo ("Threshold") in the released
    # message string; left unchanged here to avoid breaking translation
    # catalogs — fix alongside a catalog update.
    support_status = support.SupportStatus(
        status=support.DEPRECATED,
        message=_('Theshold alarm relies on ceilometer-api and has been '
                  'deprecated in aodh since Ocata. Use '
                  'OS::Aodh::GnocchiAggregationByResourcesAlarm instead.'),
        version='10.0.0',
        previous_status=support.SupportStatus(version='2014.1'))

    # Template-facing property names for this resource type.
    PROPERTIES = (
        COMPARISON_OPERATOR, EVALUATION_PERIODS, METER_NAME, PERIOD,
        STATISTIC, THRESHOLD, MATCHING_METADATA, QUERY,
    ) = (
        'comparison_operator', 'evaluation_periods', 'meter_name', 'period',
        'statistic', 'threshold', 'matching_metadata', 'query',
    )

    properties_schema = {
        COMPARISON_OPERATOR: properties.Schema(
            properties.Schema.STRING,
            _('Operator used to compare specified statistic with threshold.'),
            constraints=[alarm_base.BaseAlarm.QF_OP_VALS],
            update_allowed=True
        ),
        EVALUATION_PERIODS: properties.Schema(
            properties.Schema.INTEGER,
            _('Number of periods to evaluate over.'),
            update_allowed=True
        ),
        METER_NAME: properties.Schema(
            properties.Schema.STRING,
            _('Meter name watched by the alarm.'),
            required=True
        ),
        PERIOD: properties.Schema(
            properties.Schema.INTEGER,
            _('Period (seconds) to evaluate over.'),
            update_allowed=True
        ),
        STATISTIC: properties.Schema(
            properties.Schema.STRING,
            _('Meter statistic to evaluate.'),
            constraints=[
                constraints.AllowedValues(['count', 'avg', 'sum', 'min',
                                           'max']),
            ],
            update_allowed=True
        ),
        THRESHOLD: properties.Schema(
            properties.Schema.NUMBER,
            _('Threshold to evaluate against.'),
            required=True,
            update_allowed=True
        ),
        MATCHING_METADATA: properties.Schema(
            properties.Schema.MAP,
            _('Meter should match this resource metadata (key=value) '
              'additionally to the meter_name.'),
            default={},
            update_allowed=True
        ),
        QUERY: properties.Schema(
            properties.Schema.LIST,
            _('A list of query factors, each comparing '
              'a Sample attribute with a value. '
              'Implicitly combined with matching_metadata, if any.'),
            update_allowed=True,
            support_status=support.SupportStatus(version='2015.1'),
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    alarm_base.BaseAlarm.QF_FIELD: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of attribute to compare. '
                          'Names of the form metadata.user_metadata.X '
                          'or metadata.metering.X are equivalent to what '
                          'you can address through matching_metadata; '
                          'the former for Nova meters, '
                          'the latter for all others. '
                          'To see the attributes of your Samples, '
                          'use `ceilometer --debug sample-list`.')
                    ),
                    alarm_base.BaseAlarm.QF_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('The type of the attribute.'),
                        default='string',
                        constraints=[alarm_base.BaseAlarm.QF_TYPE_VALS],
                        support_status=support.SupportStatus(version='8.0.0')
                    ),
                    alarm_base.BaseAlarm.QF_OP: properties.Schema(
                        properties.Schema.STRING,
                        _('Comparison operator.'),
                        constraints=[alarm_base.BaseAlarm.QF_OP_VALS]
                    ),
                    alarm_base.BaseAlarm.QF_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        _('String value with which to compare.')
                    )
                }
            )
        )
    }

    # Merge in the alarm properties shared by every alarm resource type.
    properties_schema.update(alarm_base.common_properties_schema)

    def get_alarm_props(self, props):
        """Apply all relevant compatibility xforms.

        Converts the flat Heat property dict into the nested
        ``threshold_rule`` payload expected by the Aodh API.
        """
        kwargs = self.actions_to_urls(props)
        kwargs['type'] = self.alarm_type
        # Nova meters namespace their metadata differently from the others.
        if kwargs.get(self.METER_NAME) in alarm_base.NOVA_METERS:
            prefix = 'user_metadata.'
        else:
            prefix = 'metering.'

        rule = {}
        # Move the threshold-rule fields out of the top-level alarm kwargs.
        for field in ['period', 'evaluation_periods', 'threshold',
                      'statistic', 'comparison_operator', 'meter_name']:
            if field in kwargs:
                rule[field] = kwargs[field]
                del kwargs[field]
        mmd = props.get(self.MATCHING_METADATA) or {}
        query = props.get(self.QUERY) or []

        # make sure the matching_metadata appears in the query like this:
        # {field: metadata.$prefix.x, ...}
        for m_k, m_v in mmd.items():
            key = 'metadata.%s' % prefix
            if m_k.startswith('metadata.'):
                m_k = m_k[len('metadata.'):]
            if m_k.startswith('metering.') or m_k.startswith('user_metadata.'):
                # check prefix
                m_k = m_k.split('.', 1)[-1]
            key = '%s%s' % (key, m_k)
            # NOTE(prazumovsky): type of query value must be a string, but
            # matching_metadata value type can not be a string, so we
            # must convert value to a string type.
            query.append(dict(field=key, op='eq', value=str(m_v)))
        if self.MATCHING_METADATA in kwargs:
            del kwargs[self.MATCHING_METADATA]
        if self.QUERY in kwargs:
            del kwargs[self.QUERY]
        if query:
            rule['query'] = query
        kwargs['threshold_rule'] = rule
        return kwargs

    def handle_create(self):
        """Create the Aodh alarm and record its id as the resource id."""
        props = self.get_alarm_props(self.properties)
        props['name'] = self.physical_resource_name()
        alarm = self.client().alarm.create(props)
        self.resource_id_set(alarm['alarm_id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push updated properties to the existing Aodh alarm."""
        if prop_diff:
            new_props = json_snippet.properties(self.properties_schema,
                                                self.context)
            self.client().alarm.update(self.resource_id,
                                       self.get_alarm_props(new_props))

    def parse_live_resource_data(self, resource_properties, resource_data):
        """Map live alarm data back onto the updatable resource properties."""
        record_reality = {}
        # Flatten the nested threshold_rule into one dict so both rule
        # fields and common alarm fields can be looked up uniformly.
        threshold_data = resource_data.get('threshold_rule').copy()
        threshold_data.update(resource_data)
        props_upd_allowed = (set(self.PROPERTIES +
                                 alarm_base.COMMON_PROPERTIES) -
                             {self.METER_NAME, alarm_base.TIME_CONSTRAINTS} -
                             set(alarm_base.INTERNAL_PROPERTIES))
        for key in props_upd_allowed:
            record_reality.update({key: threshold_data.get(key)})
        return record_reality

    def handle_check(self):
        """Verify the alarm still exists in Aodh (GET raises if missing)."""
        self.client().alarm.get(self.resource_id)
class CombinationAlarm(none_resource.NoneResource):
    """A resource that implements combination of Aodh alarms.

    This resource is now deleted from Aodh, so will directly inherit from
    NoneResource (placeholder resource). For old resources (which are not
    placeholder resources), deletion through the client is still possible.
    Any newly created resources will be considered as placeholder resources
    like none resource. We will schedule to delete it from heat resources
    list.
    """

    # Kept so pre-existing (non-placeholder) alarms can still be deleted
    # through the Aodh client.
    default_client_name = 'aodh'
    entity = 'alarm'

    support_status = support.SupportStatus(
        status=support.HIDDEN,
        message=_('OS::Aodh::CombinationAlarm is deprecated and has been '
                  'removed from Aodh, use OS::Aodh::CompositeAlarm instead.'),
        version='9.0.0',
        previous_status=support.SupportStatus(
            status=support.DEPRECATED,
            version='7.0.0',
            previous_status=support.SupportStatus(version='2014.1')
        )
    )
class EventAlarm(alarm_base.BaseAlarm):
    """A resource that implements event alarms.

    Allows users to define alarms which can be evaluated based on events
    passed from other OpenStack services. The events can be emitted when
    the resources from other OpenStack services have been updated, created
    or deleted, such as 'compute.instance.reboot.end',
    'scheduler.select_destinations.end'.
    """

    alarm_type = 'event'

    support_status = support.SupportStatus(version='8.0.0')

    # Template-facing property names for this resource type.
    PROPERTIES = (
        EVENT_TYPE, QUERY
    ) = (
        'event_type', 'query'
    )

    properties_schema = {
        EVENT_TYPE: properties.Schema(
            properties.Schema.STRING,
            _('Event type to evaluate against. '
              'If not specified will match all events.'),
            update_allowed=True,
            default='*'
        ),
        QUERY: properties.Schema(
            properties.Schema.LIST,
            _('A list for filtering events. Query conditions used '
              'to filter specific events when evaluating the alarm.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.MAP,
                schema={
                    alarm_base.BaseAlarm.QF_FIELD: properties.Schema(
                        properties.Schema.STRING,
                        _('Name of attribute to compare.')
                    ),
                    alarm_base.BaseAlarm.QF_TYPE: properties.Schema(
                        properties.Schema.STRING,
                        _('The type of the attribute.'),
                        default='string',
                        constraints=[alarm_base.BaseAlarm.QF_TYPE_VALS]
                    ),
                    alarm_base.BaseAlarm.QF_OP: properties.Schema(
                        properties.Schema.STRING,
                        _('Comparison operator.'),
                        constraints=[alarm_base.BaseAlarm.QF_OP_VALS]
                    ),
                    alarm_base.BaseAlarm.QF_VALUE: properties.Schema(
                        properties.Schema.STRING,
                        _('String value with which to compare.')
                    )
                }
            )
        )
    }

    # Merge in the alarm properties shared by every alarm resource type.
    properties_schema.update(alarm_base.common_properties_schema)

    def get_alarm_props(self, props):
        """Apply all relevant compatibility xforms.

        Repackages the flat Heat properties into the nested ``event_rule``
        payload expected by the Aodh API.
        """
        kwargs = self.actions_to_urls(props)
        kwargs['type'] = self.alarm_type
        rule = {}
        # Rule fields live under 'event_rule', not at the alarm top level.
        for prop in (self.EVENT_TYPE, self.QUERY):
            if prop in kwargs:
                del kwargs[prop]
        query = props.get(self.QUERY)
        if query:
            rule[self.QUERY] = query
        event_type = props.get(self.EVENT_TYPE)
        if event_type:
            rule[self.EVENT_TYPE] = event_type
        kwargs['event_rule'] = rule
        return kwargs

    def handle_create(self):
        """Create the Aodh event alarm and record its id."""
        props = self.get_alarm_props(self.properties)
        props['name'] = self.physical_resource_name()
        alarm = self.client().alarm.create(props)
        self.resource_id_set(alarm['alarm_id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push updated properties to the existing Aodh event alarm."""
        if prop_diff:
            new_props = json_snippet.properties(self.properties_schema,
                                                self.context)
            self.client().alarm.update(self.resource_id,
                                       self.get_alarm_props(new_props))
class LBMemberHealthAlarm(alarm_base.BaseAlarm):
    """A resource that implements a Loadbalancer Member Health Alarm.

    Allows setting alarms based on the health of load balancer pool members,
    where the health of a member is determined by the member reporting an
    operating_status of ERROR beyond an initial grace period after creation
    (120 seconds by default).
    """

    alarm_type = "loadbalancer_member_health"

    support_status = support.SupportStatus(version='13.0.0')

    # Template-facing property names.
    PROPERTIES = (
        POOL, STACK, AUTOSCALING_GROUP_ID
    ) = (
        "pool", "stack", "autoscaling_group_id"
    )

    # Keys used inside the Aodh rule payload; POOL/STACK are mapped to
    # these id-based keys in get_alarm_props().
    RULE_PROPERTIES = (
        POOL_ID, STACK_ID
    ) = (
        "pool_id", "stack_id"
    )

    properties_schema = {
        POOL: properties.Schema(
            properties.Schema.STRING,
            _("Name or ID of the loadbalancer pool for which the health of "
              "each member will be evaluated."),
            update_allowed=True,
            required=True,
        ),
        STACK: properties.Schema(
            properties.Schema.STRING,
            _("Name or ID of the root / top level Heat stack containing the "
              "loadbalancer pool and members. An update will be triggered "
              "on the root Stack if an unhealthy member is detected in the "
              "loadbalancer pool."),
            update_allowed=False,
            required=True,
        ),
        AUTOSCALING_GROUP_ID: properties.Schema(
            properties.Schema.STRING,
            _("ID of the Heat autoscaling group that contains the "
              "loadbalancer members. Unhealthy members will be marked "
              "as such before an update is triggered on the root stack."),
            update_allowed=True,
            required=True,
        ),
    }

    # Merge in the alarm properties shared by every alarm resource type.
    properties_schema.update(alarm_base.common_properties_schema)

    def get_alarm_props(self, props):
        """Apply all relevant compatibility xforms.

        Builds the nested ``loadbalancer_member_health_rule`` payload from
        the flat Heat properties.
        """
        kwargs = self.actions_to_urls(props)
        kwargs['type'] = self.alarm_type
        # Rule fields live under the rule payload, not at the top level.
        for prop in (self.POOL, self.STACK, self.AUTOSCALING_GROUP_ID):
            if prop in kwargs:
                del kwargs[prop]
        rule = {
            self.POOL_ID: props[self.POOL],
            self.STACK_ID: props[self.STACK],
            self.AUTOSCALING_GROUP_ID: props[self.AUTOSCALING_GROUP_ID]
        }
        kwargs["loadbalancer_member_health_rule"] = rule
        return kwargs

    def translation_rules(self, properties):
        """Resolve the pool name/ID to a pool ID via the Octavia client."""
        translation_rules = [
            translation.TranslationRule(
                properties,
                translation.TranslationRule.RESOLVE,
                [self.POOL],
                client_plugin=self.client_plugin('octavia'),
                finder='get_pool'
            ),
        ]
        return translation_rules

    def handle_create(self):
        """Create the Aodh alarm and record its id as the resource id."""
        props = self.get_alarm_props(self.properties)
        props['name'] = self.physical_resource_name()
        alarm = self.client().alarm.create(props)
        self.resource_id_set(alarm['alarm_id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push updated properties to the existing Aodh alarm."""
        if prop_diff:
            new_props = json_snippet.properties(self.properties_schema,
                                                self.context)
            self.client().alarm.update(self.resource_id,
                                       self.get_alarm_props(new_props))
def resource_mapping():
    """Map Heat resource type names to the classes implementing them."""
    return {
        'OS::Aodh::Alarm': AodhAlarm,
        'OS::Aodh::CombinationAlarm': CombinationAlarm,
        'OS::Aodh::EventAlarm': EventAlarm,
        'OS::Aodh::LBMemberHealthAlarm': LBMemberHealthAlarm,
    }
|
|
#!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synchronize the install_requires sections in all setup.py files with the currently
installed versions of all packages.
The two required params are the root to the DataONE Python software stack and the new
version number to use in the next release of the stack. We keep the version numbers for
all the packages in the d1_python repository in sync.
"""
import argparse
import logging
import os
import pkgutil
import re
import sys
import pkg_resources
import d1_dev.util
import d1_common.iter.path
import d1_common.util
import d1_common.utils.ulog
def main():
    """Parse arguments, then sync setup.py deps and version constants."""
    parser = argparse.ArgumentParser(
        description="Sync the install_requires sections in setup.py files"
    )
    parser.add_argument("path", help="Root of Python source tree")
    parser.add_argument("d1_version", help="Version to use for new D1 Py stack release")
    parser.add_argument("--exclude", nargs="+", help="Exclude glob patterns")
    parser.add_argument(
        "--no-recursive",
        dest="recursive",
        action="store_false",
        help="Search directories recursively",
    )
    parser.add_argument(
        "--ignore-invalid", action="store_true", help="Ignore invalid paths"
    )
    parser.add_argument(
        "--no-default-excludes",
        dest="default_excludes",
        action="store_false",
        help="Don't add default glob exclude patterns",
    )
    parser.add_argument("--debug", action="store_true", help="Debug level logging")
    parser.add_argument(
        "--diff",
        dest="show_diff",
        action="store_true",
        help="Show diff and do not modify any files",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Perform a trial run without changing any files",
    )
    args = parser.parse_args()
    d1_common.utils.ulog.setup(args.debug)
    # Walk the tree for setup.py files and update each; a failure in one
    # file is logged but does not stop the remaining updates.
    for setup_path in d1_common.iter.path.path_generator(
        path_list=[args.path],
        include_glob_list=["setup.py"],
        exclude_glob_list=args.exclude,
        recursive=args.recursive,
        ignore_invalid=args.ignore_invalid,
        default_excludes=args.default_excludes,
    ):
        try:
            update_deps_on_file(args, setup_path, args.show_diff, args.d1_version)
        except Exception as e:
            logging.error(str(e))
    # Keep the stack's hard-coded version constants in sync as well.
    update_version_const(
        "d1_common", ["const.py"], args.d1_version, args.show_diff, args.dry_run
    )
    update_version_const(
        "d1_gmn", ["version.py"], args.d1_version, args.show_diff, args.dry_run
    )
    update_version_const(
        "d1_cli", ["version.py"], args.d1_version, args.show_diff, args.dry_run
    )
def update_deps_on_file(args, setup_path, show_diff, d1_version):
    """Rewrite one setup.py so its dependencies and version are current."""
    logging.info('Updating setup.py... path="{}"'.format(setup_path))
    try:
        tree = d1_dev.util.redbaron_module_path_to_tree(setup_path)
        tree = update_deps_on_tree(tree, d1_version)
    except Exception as e:
        logging.error('Update failed. error="{}" path="{}"'.format(str(e), setup_path))
        # In debug mode, surface the full traceback instead of continuing.
        if args.debug:
            raise
    else:
        d1_dev.util.update_module_file(tree, setup_path, show_diff, dry_run=args.dry_run)
def update_deps_on_tree(r, d1_version):
    """Apply both the dependency and version updates to a parsed setup.py."""
    return update_version(update_install_requires(r, d1_version), d1_version)
def update_install_requires(r, d1_version):
    """Bump each entry of the install_requires list in the parsed tree."""
    install_requires_node = find_call_argument_node(r, "install_requires")
    for entry_node in install_requires_node.value:
        update_dep_str(entry_node, d1_version)
    return r
def update_version(r, d1_version):
    """Set the version= keyword of setup() to *d1_version* in the tree."""
    version_node = find_call_argument_node(r, "version")
    version_node.value = "'{}'".format(d1_version)
    return r
def find_call_argument_node(r, value_str):
    """Return the first CallArgumentNode whose target name is *value_str*.

    Raises UpdateException when no such argument exists in the tree.
    """
    for node in r("CallArgumentNode"):
        if hasattr(node.target, "value") and node.target.value == value_str:
            return node
    raise UpdateException('CallArgumentNode not found. value="{}"'.format(value_str))
def update_dep_str(str_node, d1_version):
    """Rewrite a single '<pkg> >= <ver>' dependency string node in place."""
    try:
        package_name, old_version_str = parse_dep_str(str_node.value)
    except UpdateException as e:
        logging.debug(
            'Dependency not updated. dep="{}" cause="{}"'.format(str_node.value, str(e))
        )
        return
    new_version_str = get_package_version(package_name, d1_version)
    new_str_node_str = '"{} >= {}"'.format(package_name, new_version_str)
    if str_node.value == new_str_node_str:
        logging.debug(
            'Dependency update not required. package="{}" version="{}"'.format(
                package_name, old_version_str
            )
        )
        return
    str_node.value = new_str_node_str
    logging.debug(
        'Dependency updated. package="{}" old="{}" new="{}"'.format(
            package_name, old_version_str, new_version_str
        )
    )
def parse_dep_str(dep_str):
    """Split a '<pkg> >= <ver>' requirement into (name, version) strings.

    Raises UpdateException when the string does not use the ">=" form.
    """
    match = re.match(r"(.*)\s*>=\s*(.*)", dep_str)
    if match is None:
        raise UpdateException('Dependency not set to ">="')
    strip_chars = "'\" "
    return match.group(1).strip(strip_chars), match.group(2).strip(strip_chars)
def get_package_version(package_name, d1_version):
    """Return the version to pin: DataONE packages track the stack version,
    all other packages keep their currently installed version."""
    if package_name.startswith("dataone."):
        return d1_version
    return pkg_resources.get_distribution(package_name).version
def update_version_const(base_name, path_list, d1_version, only_diff, dry_run):
    """Update the first VERSION / __version__ constant in one module file."""
    module_path = get_module_path(base_name, path_list)
    logging.debug('Updating version in module. path="{}"'.format(module_path))
    tree = d1_dev.util.redbaron_module_path_to_tree(module_path)
    for assignment in tree("AssignmentNode"):
        if assignment.target.value not in ("VERSION", "__version__"):
            continue
        assignment.value.value = "'{}'".format(d1_version)
        d1_dev.util.update_module_file(tree, module_path, only_diff, dry_run)
        break
def get_module_path(base_str, path_list):
    """Join *path_list* onto the directory of the *base_str* package."""
    package_dir = os.path.split(pkgutil.get_loader(base_str).path)[0]
    return os.path.join(package_dir, *path_list)
class UpdateException(Exception):
    """Raised when a setup.py node cannot be located or parsed for update."""
    pass
if __name__ == "__main__":
    # Exit with main()'s return value (None -> exit status 0).
    sys.exit(main())
|
|
#!/usr/bin/python
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""
Responsible for generating the testing decoders based on
parsed table representations.
"""
# This file generates testing code for our class decoder. The decoder
# tables are specifically written to minimize the number of decoder
# classes needed to parse valid ARM instructions. For testing, this is
# a problem. We can't (easily) tell if the intended instruction rules
# of ARM are being met, since there is not a one-to-one mapping from
# class decoders to rules.
#
# For example, consider the following two rows (from armv7.table):
#
# | 0011x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
# Rsb_Rule_144_A1_P288
# cccc0000011snnnnddddssss0tt1mmmm
# RegsNotPc
# | 0100x - = Binary4RegisterShiftedOp => Defs12To15RdRnRsRmNotPc
# Add_Rule_7_A1_P26
# cccc0000100snnnnddddssss0tt1mmmm
# RegsNotPc
#
# Both rows state to return a Binary4RegisterShiftedOp class decoder.
# The sequence of four symbols correspond to (in order presented):
#
# baseline - The name of the class decoder that should be used for testing.
# actual - The name of the class decoder to use in sel_ldr
# rule - A unique name identifying the rule from the manual that
# defines what the selected class decoder is to decode.
# pattern - The sequence of bits defines by the rule (above)
# constraints - Any additional constraints assumed by the rule.
#
# All but the baseline is optional. The remaining fields provide
# additional documentation and information for testing (which is used
# by this file). If the actual is not specified (prefixed by '=>')
# then it is assumed to have the same value as the baseline.
#
# If these two rows had a mergable bit pattern (which they do not),
# these rows would still not mergable since the actions are
# different. However, for sel_ldr, they both state to use a
# Binary4RegisterShiftedOp. The remaining identifiers are added data
# for testing only.
#
# We fix this by defining a notion of "action_filter" where one can
# choose to keep only those fields that are applicable. For sel_ldr,
# it's only 'actual'. For testing, it will include other fields,
# depending on the context.
#
# Note: The current ARM instruction table has both new and old
# actions. Old actions only define the 'InstClass' entry. If the
# remaining fields are omitted, the corresponding testing for those
# entries are omitted.
#
# Note: See dgen_decoder_output.py for more details on how we build a
# decoder for sel_ldr.
#
# For testing, we would like to know the specific instruction rule
# that was being tested. Further, we would like to know what
# instruction rule was chosen for each decoder class selection made by
# the parse tables. To do this, we do two levels of wrapping.
#
# This file generates a set of wrapper classes, each a subclass of
# NamedClassDecoder. One is generated for each InstClass needed by
# sel_ldr (i.e. only the 'actual' field). These named classes correspond
# to what sel_ldr will select.
#
# The named version of each named InstClass is:
#
# class NamedInstClass : public NamedClassDecoder {
# public:
# NamedInstClass()
# : NamedClassDecoder(decoder_, "InstClass")
# {}
#
# private:
# Binary3RegisterShiftedTest decoder_;
# NACL_DISALLOW_COPY_AND_ASSIGN(NamedInstClass);
#};
#
# This makes sure that each decoder class can be identified using a
# separate class decoder. For rows without rules, the corresponding
# named class 'NamedInstClass' will be used. If a row also has
# a rule, the 'NamedInstClass' is converted to 'NamedRuleInstClass' where
# 'Rule' is the name of the rule.
#
# The base class for NamedClassDecoder is specified in
# "named_class_decoder.h". This file defines a class that takes a
# ClassDecoder (reference) C and a print name NAME, and builds a
# corresponding ClassDecoder that acts like C, but will print out
# NAME. The behaviour of C is maintained by dispatching each virtual
# on the NamedClassDecoder to the corresponding virtual on C.
#
# We then define the class decoder Decoder, by defining a derived
# instance of DecoderState as follows:
#
# class NamedDecoder : DecoderState {
# public:
# explicit NamedDecoder();
# const NamedClassDecoder& decode_named(const Instruction) const;
# virtual const ClassDecoder& decode(const Instruction) const;
# ...
# };
#
# The method decode is the expected API for the NamedDecoder, which is
# an instance of DecoderState (defined in decode.h). The method
# decode_named is the same, but returns NamedClassDecoder's so that
# good error messages can be generated by the test harnesses for
# ClassDecoder's (see decoder_tester.h for more details on
# ClassDecoder test harnesses).
#
# To the NamedDecoder, we add a constant field NamedClassDecoder for
# each possible class decoder method decode_named could return, or
# that we could use in automatically generated tests. These fields
# allow us to only create the corresponding decoder classes once
# (during constructor initialization).
#
# Finally, we add a method corresponding to each defined decoder
# table. The forms of these decoders is:
#
# inline const NamedClassDecoder& decode_TABLE(
# const nacl_arm_dec::Instruction inst) const;
#
# Each of these methods are defined as inline methods so that they can
# be optimized away in the corresponding top level methods (i.e.
# decode_named and decode).
#
# For testing, there are three files generated:
#
# decoder_named_classes.h
# decoder_named_decoder.h
# decoder_named.cc
# decoder_tests.cc
#
# File decoder_named_classes.h defines the class declarations for the
# generated Rule classes, and named class decoder classes. File
# decoder_named_decoder.h defines the decoder class NamedDecoder
# (discussed above). decoder_named.cc contains the corresponding
# implementations of the constructors and methods of these classes.
#
# decoder_tests.cc generates an automatic test harness executable,
# that will test each instruction Rule. Each test generates all
# possible matches to the corresponding Pattern of the table rule,
# and calls the corresponding tester associated with the class decoder
# of that row. By default, the tester is presumed to be named.
#
# InstClassTester
#
# If the row defines a Constraints identifier, then the tester
#
# InstClassTesterConstraints
#
# is used instead.
import dgen_core
import dgen_opt
import dgen_output
import dgen_decoder
import dgen_actuals
import dgen_baselines
# Module-level cache of the command-line arguments; each generate_*
# entry point stores its cl_args here so helpers (e.g. trace flags)
# can consult them.
"""The current command line arguments to use"""
_cl_args = {}

# The following defines naming conventions used for identifiers.
# Note: DECODER will be replaced by 'actual' and 'baseline', defining
# how both types of symbols are generated.

# Rule decoder class name (e.g. baseline_MyRule).
CLASS = '%(DECODER)s_%(rule)s'
# Named wrapper class for a rule decoder.
NAMED_CLASS = 'Named%(DECODER)s_%(rule)s'
# Instance field name holding a named decoder in the decoder state.
INSTANCE = '%(DECODER_class)s_instance_'
# Tester class names used by the generated test harness.
BASE_TESTER='%(decoder_base)sTester%(base_test_case)s'
BASE_BASE_TESTER='%(decoder_base)sTester%(qualifier)s'
DECODER_TESTER='%(baseline)sTester_%(test_case)s'
def _safety_to_check(safety):
return [s for s in safety if not isinstance(s, str)]
def _interesting_patterns(patterns):
  """Filters out non-interesting patterns."""
  # Skip rows tied to the '$pattern' column, and patterns that
  # trivially match every instruction.
  interesting = []
  for pat in patterns:
    if pat.column and pat.column.name() == '$pattern':
      continue
    if pat.matches_any():
      continue
    interesting.append(pat)
  return interesting
def _install_action(decoder, action, values):
  """Install common names needed to generate code for the given action,
  and adds it to the values map.
  """
  # This code is somewhat inefficient in that most cases, most of the
  # added strings are not needed. On the other hand, by having a
  # single routine that generates all action specific names at one
  # spot, it is much easier to change definitions.
  values['baseline'] = action.baseline()
  values['actual'] = action.actual()
  values['decoder_base'] = decoder.base_class(values['baseline'])
  values['rule'] = action.rule()
  # Qualifier: concatenation of the string-valued safety entries.
  values['qualifier'] = ''.join([s for s in action.safety()
                                 if isinstance(s, str)])
  if action.constraints():
    values['qualifier'] += (action.constraints().other
                            if action.constraints().other else '')
  else:
    # NOTE(review): this branch resets the qualifier to '', discarding
    # the safety-derived value computed above whenever the action has
    # no constraints — confirm this is intentional.
    values['qualifier'] =''
  values['pattern'] = action.pattern()
  # Add dummies for row cases, in case not set up. See
  # function _install_row_cases) for more details on these fields.
  for field in [ 'base_test_case', 'test_case', 'test_pattern' ]:
    if not values.get(field):
      values[field] = ''
  # Expand the naming-convention templates for both decoder flavors.
  values['baseline_class'] = _decoder_replace(CLASS, 'baseline') % values
  values['actual_class'] = _decoder_replace(CLASS, 'actual') % values
  _install_baseline_and_actuals('named_DECODER_class', NAMED_CLASS, values)
  _install_baseline_and_actuals('DECODER_instance', INSTANCE, values)
  values['base_tester'] = BASE_TESTER % values
  values['base_base_tester'] = BASE_BASE_TESTER % values
  values['decoder_tester'] = DECODER_TESTER % values
def _decoder_replace(string, basis):
return string.replace('DECODER', basis)
def _install_key_pattern(key, pattern, basis, values):
# Replace DECODER in key and pattern with basis, then
# install into values.
values[_decoder_replace(key, basis)] = (
_decoder_replace(pattern, basis) % values)
def _install_baseline_and_actuals(key, pattern, values):
  """Install both the 'baseline' and 'actual' expansions of key/pattern
  into the values map.
  """
  for flavor in ('baseline', 'actual'):
    _install_key_pattern(key, pattern, flavor, values)
def _generate_baseline_and_actual(code, symbol, decoder,
                                  values, out, actions=('rule',)):
  """ Generates code to define the given symbol. Does so for both
  baseline and actual decoders, filtering using actions.

  code - The code to generate.
  symbol - The symbol being defined.
  decoder - The decoder (tables) to use.
  values - The name map to use to generate code.
  out - The output stream to write generated code to.
  actions - The fields to keep when generating code.

  Note: the default for ``actions`` is a tuple rather than a list to
  avoid the shared-mutable-default pitfall; it is copied to a fresh
  list before being extended.
  """
  generated_symbols = set()
  # Generate one for each type of baseline decoder.
  baseline_actions = list(actions)
  baseline_actions.insert(0, 'baseline')
  baseline_code = _decoder_replace(code, 'baseline')
  baseline_symbol = _decoder_replace(symbol, 'baseline')
  for d in decoder.action_filter(baseline_actions).decoders():
    _install_action(decoder, d, values)
    sym_name = (baseline_symbol % values)
    # Emit each distinct symbol only once.
    if sym_name not in generated_symbols:
      out.write(baseline_code % values)
      generated_symbols.add(sym_name)
  # Generate one for each actual type that is different than the
  # baseline.
  actual_actions = list(actions)
  actual_actions.insert(0, 'actual-not-baseline')
  actual_code = _decoder_replace(code, 'actual')
  actual_symbol = _decoder_replace(symbol, 'actual')
  for d in decoder.action_filter(actual_actions).decoders():
    # Note: 'actual-not-baseline' sets actual to None if same as baseline.
    if d.actual():
      _install_action(decoder, d, values)
      sym_name = (actual_symbol % values)
      if sym_name not in generated_symbols:
        out.write(actual_code % values)
        generated_symbols.add(sym_name)
# Defines the header for decoder_bases.h
NAMED_BASES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_baselines.h"
namespace nacl_arm_test {
"""

# Banner emitted before the generated-baseline named decoder classes.
GENERATED_BASELINE_HEADER="""
/*
* Define named class decoders for each automatically generated baseline
* decoder.
*/
"""

# Template for one named wrapper class around a generated baseline
# decoder (keyed by %(gen_base)s).
NAMED_GEN_BASE_DECLARE="""class Named%(gen_base)s
: public NamedClassDecoder {
public:
Named%(gen_base)s()
: NamedClassDecoder(decoder_, "%(gen_base)s")
{}
private:
nacl_arm_dec::%(gen_base)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(Named%(gen_base)s);
};
"""

# Closes the namespace and include guard opened by the header above.
NAMED_BASES_H_FOOTER="""
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""

# Required filename suffix for generated named-bases headers.
NAMED_BASES_H_SUFFIX = '_named_bases.h'
def generate_named_bases_h(decoder, decoder_name, filename, out, cl_args):
  """Defines named classes needed for testing generated baselines.

  Args:
    decoder: The decoder (tables) to process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .h file; must end with
        NAMED_BASES_H_SUFFIX.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  if not decoder.primary: raise Exception('No tables provided.')
  assert filename.endswith(NAMED_BASES_H_SUFFIX)
  _cl_args = cl_args
  # Attach generated baseline decoders before emitting wrappers for them.
  decoder = dgen_baselines.AddBaselinesToDecoder(decoder)
  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
      'IFDEF_NAME' : dgen_output.ifdef_name(filename),
      'FILENAME_BASE': filename[:-len(NAMED_BASES_H_SUFFIX)],
      'decoder_name': decoder_name,
      }
  out.write(NAMED_BASES_H_HEADER % values)
  _generate_generated_baseline(decoder, out)
  out.write(NAMED_BASES_H_FOOTER % values)
def _generate_generated_baseline(decoder, out):
  """Emits one named wrapper class per distinct generated baseline
  decoder found in the decoder tables.
  """
  emitted = set()
  values = {}
  out.write(GENERATED_BASELINE_HEADER % values)
  for action in decoder.action_filter(['generated_baseline']).decoders():
    gen_base = action.find('generated_baseline')
    if not gen_base or gen_base in emitted:
      continue
    values['gen_base'] = gen_base
    out.write(NAMED_GEN_BASE_DECLARE % values)
    emitted.add(gen_base)
# Defines the header for decoder_named_classes.h
NAMED_CLASSES_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
#include "%(FILENAME_BASE)s_actuals.h"
#include "%(FILENAME_BASE)s_named_bases.h"
"""

# Banner preceding the generated rule decoder classes.
RULE_CLASSES_HEADER="""
/*
* Define rule decoder classes.
*/
namespace nacl_arm_dec {
"""

# Template (and its symbol name) for one rule decoder class.
RULE_CLASS="""class %(DECODER_class)s
: public %(DECODER)s {
};
"""
RULE_CLASS_SYM="%(DECODER_class)s"

# Closes nacl_arm_dec and opens nacl_arm_test for the named wrappers.
NAMED_DECODERS_HEADER="""} // nacl_arm_dec
namespace nacl_arm_test {
/*
* Define named class decoders for each class decoder.
* The main purpose of these classes is to introduce
* instances that are named specifically to the class decoder
* and/or rule that was used to parse them. This makes testing
* much easier in that error messages use these named classes
* to clarify what row in the corresponding table was used
* to select this decoder. Without these names, debugging the
* output of the test code would be nearly impossible
*/
"""

# Template (and its symbol name) for one named class decoder wrapper.
NAMED_CLASS_DECLARE="""class %(named_DECODER_class)s
: public NamedClassDecoder {
public:
%(named_DECODER_class)s()
: NamedClassDecoder(decoder_, "%(DECODER)s %(rule)s")
{}
private:
nacl_arm_dec::%(DECODER_class)s decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(%(named_DECODER_class)s);
};
"""
NAMED_CLASS_DECLARE_SYM="%(named_DECODER_class)s"

# Footer: fallback decoder used when a table defines no action, then
# closes the namespace and include guard.
NAMED_CLASSES_H_FOOTER="""
// Defines the default parse action if the table doesn't define
// an action.
class NotImplementedNamed : public NamedClassDecoder {
public:
NotImplementedNamed()
: NamedClassDecoder(decoder_, "not implemented")
{}
private:
nacl_arm_dec::NotImplemented decoder_;
NACL_DISALLOW_COPY_AND_ASSIGN(NotImplementedNamed);
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_classes_h(decoder, decoder_name, filename, out, cl_args):
  """Defines named classes needed for decoder testing.

  Args:
    decoder: The decoder (tables) to process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .h file; must end with
        '_named_classes.h'.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  if not decoder.primary: raise Exception('No tables provided.')
  assert filename.endswith('_named_classes.h')
  _cl_args = cl_args
  # Generate actuals from descriptions in tables, for each of the
  # tables that should automatically generate the corresponding
  # needed actual class decoders.
  actuals = cl_args.get('auto-actual')
  if actuals:
    decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
      'IFDEF_NAME' : dgen_output.ifdef_name(filename),
      'FILENAME_BASE': filename[:-len('_named_classes.h')],
      'decoder_name': decoder_name,
      }
  out.write(NAMED_CLASSES_H_HEADER % values)
  # Emit the rule decoder classes, then their named wrappers.
  out.write(RULE_CLASSES_HEADER)
  _generate_baseline_and_actual(RULE_CLASS, RULE_CLASS_SYM,
                                decoder, values, out)
  out.write(NAMED_DECODERS_HEADER)
  _generate_baseline_and_actual(NAMED_CLASS_DECLARE, NAMED_CLASS_DECLARE_SYM,
                                decoder, values, out)
  out.write(NAMED_CLASSES_H_FOOTER % values)
# Defines the header for decoder_named_decoder.h: the NamedDecoder
# state class that maps instructions to named class decoders.
NAMED_DECODER_H_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#ifndef %(IFDEF_NAME)s
#define %(IFDEF_NAME)s
#include "native_client/src/trusted/validator_arm/decode.h"
#include "%(FILENAME_BASE)s_named_classes.h"
#include "native_client/src/trusted/validator_arm/named_class_decoder.h"
namespace nacl_arm_test {
// Defines a (named) decoder class selector for instructions
class Named%(decoder_name)s : nacl_arm_dec::DecoderState {
public:
explicit Named%(decoder_name)s();
// Parses the given instruction, returning the named class
// decoder to use.
const NamedClassDecoder& decode_named(
const nacl_arm_dec::Instruction) const;
// Parses the given instruction, returning the class decoder
// to use.
virtual const nacl_arm_dec::ClassDecoder& decode(
const nacl_arm_dec::Instruction) const;
// The following fields define the set of class decoders
// that can be returned by the API function "decode_named". They
// are created once as instance fields, and then returned
// by the table methods above. This speeds up the code since
// the class decoders need to only be bulit once (and reused
// for each call to "decode_named")."""

# Template (and its symbol name) for one named-decoder instance field.
DECODER_STATE_FIELD="""
const %(named_DECODER_class)s %(DECODER_instance)s;"""
DECODER_STATE_FIELD_NAME="%(named_DECODER_class)s"

# Comment block introducing the per-table decode methods.
DECODER_STATE_DECODER_COMMENTS="""
private:
// The following list of methods correspond to each decoder table,
// and implements the pattern matching of the corresponding bit
// patterns. After matching the corresponding bit patterns, they
// either call other methods in this list (corresponding to another
// decoder table), or they return the instance field that implements
// the class decoder that should be used to decode the particular
// instruction."""

# Declaration template for one per-table decode method.
DECODER_STATE_DECODER="""
inline const NamedClassDecoder& decode_%(table)s(
const nacl_arm_dec::Instruction inst) const;"""

# Footer: fallback member, class close, namespace, include guard.
NAMED_DECODER_H_FOOTER="""
// Defines default action if parse tables don't define what action
// to take.
const NotImplementedNamed not_implemented_;
};
} // namespace nacl_arm_test
#endif // %(IFDEF_NAME)s
"""
def generate_named_decoder_h(decoder, decoder_name, filename, out, cl_args):
  """Generates the named decoder for testing.

  Args:
    decoder: The decoder (tables) to process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .h file; must end with
        '_named_decoder.h'.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  if not decoder.primary: raise Exception('No tables provided.')
  assert filename.endswith('_named_decoder.h')
  _cl_args = cl_args
  # Generate actuals from descriptions in tables, for each of the
  # tables that should automatically generate the corresponding
  # needed actual class decoders.
  actuals = cl_args.get('auto-actual')
  if actuals:
    decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
      'IFDEF_NAME' : dgen_output.ifdef_name(filename),
      'FILENAME_BASE': filename[:-len('_named_decoder.h')],
      'decoder_name': decoder_name,
      }
  out.write(NAMED_DECODER_H_HEADER % values)
  # One instance field per named class decoder.
  _generate_baseline_and_actual(DECODER_STATE_FIELD, DECODER_STATE_FIELD_NAME,
                                decoder, values, out)
  out.write(DECODER_STATE_DECODER_COMMENTS)
  # One decode method declaration per decoder table.
  for table in decoder.tables():
    values['table'] = table.name
    out.write(DECODER_STATE_DECODER % values)
  out.write(NAMED_DECODER_H_FOOTER % values)
# Defines the source for DECODER_named.cc
NAMED_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "%(FILENAME_BASE)s_decoder.h"
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Instruction;
namespace nacl_arm_test {
Named%(decoder_name)s::Named%(decoder_name)s()
{}
"""

# Opens the implementation of one per-table decode method.
PARSE_TABLE_METHOD_HEADER="""
/*
* Implementation of table %(table_name)s.
* Specified by: %(citation)s
*/
const NamedClassDecoder& Named%(decoder_name)s::decode_%(table_name)s(
const nacl_arm_dec::Instruction inst) const {
"""
# Optional trace output at method entry (enabled via --trace).
METHOD_HEADER_TRACE="""
fprintf(stderr, "decode %(table_name)s\\n");
"""

# Fragments assembling one row's anded bit-pattern dispatch test.
METHOD_DISPATCH_BEGIN="""
if (%s"""
METHOD_DISPATCH_CONTINUE=""" &&
%s"""
METHOD_DISPATCH_END=") {"""
METHOD_DISPATCH_TRACE="""
fprintf(stderr, "count = %s\\n");"""

# Returns the selected decoder (or sub-table dispatch) for a row.
PARSE_TABLE_METHOD_ROW="""
return %(action)s;
"""
METHOD_DISPATCH_CLOSE=""" }
"""
# Fallback return if no row matched.
PARSE_TABLE_METHOD_FOOTER="""
// Catch any attempt to fall through...
return not_implemented_;
}
"""

# Top-level decode entry points delegating to the primary table.
NAMED_CC_FOOTER="""
const NamedClassDecoder& Named%(decoder_name)s::
decode_named(const nacl_arm_dec::Instruction inst) const {
return decode_%(entry_table_name)s(inst);
}
const nacl_arm_dec::ClassDecoder& Named%(decoder_name)s::
decode(const nacl_arm_dec::Instruction inst) const {
return decode_named(inst).named_decoder();
}
} // namespace nacl_arm_test
"""
def generate_named_cc(decoder, decoder_name, filename, out, cl_args):
  """Implementation of the test decoder in .cc file

  Args:
    decoder: The decoder (tables) to process.
    decoder_name: The name of the decoder state to build.
    filename: The (localized) name for the .cc file.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
  """
  global _cl_args
  if not decoder.primary: raise Exception('No tables provided.')
  assert filename.endswith('.cc')
  _cl_args = cl_args
  # Generate actuals from descriptions in tables, for each of the
  # tables that should automatically generate the corresponding
  # needed actual class decoders.
  actuals = cl_args.get('auto-actual')
  if actuals:
    decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
      'FILENAME_BASE' : filename[:-len('.cc')],
      'decoder_name': decoder_name,
      # The primary table is the entry point for decode_named.
      'entry_table_name': decoder.primary.name,
      }
  out.write(NAMED_CC_HEADER % values)
  _generate_decoder_method_bodies(decoder, values, out)
  out.write(NAMED_CC_FOOTER % values)
def _generate_decoder_method_bodies(decoder, values, out):
  """Emits the implementation of decode_TABLE for each decoder table.

  Each table is row-minimized first; each optimized row is then emitted
  as a sequence of anded bit-pattern tests guarding the return of the
  class decoder instance (or sub-table dispatch) selected by that row.
  """
  global _cl_args
  for table in decoder.tables():
    # Add the default row as the last in the optimized row, so that
    # it is applied if all other rows do not.
    opt_rows = sorted(
        dgen_opt.optimize_rows(
            table.action_filter(['baseline', 'rule']).rows(False)))
    if table.default_row:
      opt_rows.append(table.default_row)
    opt_rows = table.add_column_to_rows(opt_rows)
    print ("Table %s: %d rows minimized to %d"
           % (table.name, len(table.rows()), len(opt_rows)))
    values['table_name'] = table.name
    # BUG FIX: a trailing comma previously made this a 1-tuple, so the
    # generated "Specified by:" comment rendered as ('...',) instead of
    # the citation text itself.
    values['citation'] = table.citation
    out.write(PARSE_TABLE_METHOD_HEADER % values)
    if _cl_args.get('trace') == 'True':
      out.write(METHOD_HEADER_TRACE % values)
    # Add message to stop compilation warnings if this table
    # doesn't require subtables to select a class decoder.
    if not table.methods():
      out.write(" UNREFERENCED_PARAMETER(inst);")
    count = 0
    for row in opt_rows:
      count = count + 1
      # Select what the generated row returns: a decoder instance or a
      # call into another table's decode method.
      if row.action.__class__.__name__ == 'DecoderAction':
        _install_action(decoder, row.action, values)
        action = '%(baseline_instance)s' % values
      elif row.action.__class__.__name__ == 'DecoderMethod':
        action = 'decode_%s(inst)' % row.action.name
      else:
        raise Exception('Bad table action: %s' % row.action)
      # Each row consists of a set of bit patterns defining if the row
      # is applicable. Convert this into a sequence of anded C test
      # expressions. For example, convert the following pair of bit
      # patterns:
      #
      #    xxxx1010xxxxxxxxxxxxxxxxxxxxxxxx
      #    xxxxxxxxxxxxxxxxxxxxxxxxxxxx0101
      #
      # Each instruction is masked to get the bits, and then
      # tested against the corresponding expected bits. Hence, the
      # above example is converted to:
      #
      #    ((inst & 0x0F000000) != 0x0C000000) &&
      #    ((inst & 0x0000000F) != 0x00000005)
      out.write(METHOD_DISPATCH_BEGIN %
                row.patterns[0].to_commented_bool())
      for p in row.patterns[1:]:
        out.write(METHOD_DISPATCH_CONTINUE % p.to_commented_bool())
      out.write(METHOD_DISPATCH_END)
      if _cl_args.get('trace') == 'True':
        out.write(METHOD_DISPATCH_TRACE % count)
      values['action'] = action
      out.write(PARSE_TABLE_METHOD_ROW % values)
      out.write(METHOD_DISPATCH_CLOSE)
    out.write(PARSE_TABLE_METHOD_FOOTER % values)
# Define the source for DECODER_tests.cc
TEST_CC_HEADER="""%(FILE_HEADER)s
%(NOT_TCB_MESSAGE)s
#include "gtest/gtest.h"
#include "native_client/src/trusted/validator_arm/actual_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/baseline_vs_baseline.h"
#include "native_client/src/trusted/validator_arm/actual_classes.h"
#include "native_client/src/trusted/validator_arm/baseline_classes.h"
#include "native_client/src/trusted/validator_arm/inst_classes_testers.h"
#include "native_client/src/trusted/validator_arm/arm_helpers.h"
#include "native_client/src/trusted/validator_arm/gen/arm32_decode_named_bases.h"
using nacl_arm_dec::Instruction;
using nacl_arm_dec::ClassDecoder;
using nacl_arm_dec::Register;
using nacl_arm_dec::RegisterList;
namespace nacl_arm_test {
// The following classes are derived class decoder testers that
// add row pattern constraints and decoder restrictions to each tester.
// This is done so that it can be used to make sure that the
// corresponding pattern is not tested for cases that would be excluded
// due to row checks, or restrictions specified by the row restrictions.
"""

# Declaration of one constraint tester class for a row.
CONSTRAINT_TESTER_CLASS_HEADER="""
// %(row_comment)s
class %(base_tester)s
: public %(base_base_tester)s {
public:
%(base_tester)s(const NamedClassDecoder& decoder)
: %(base_base_tester)s(decoder) {}"""
# Override declarations, emitted only when the row needs them.
CONSTRAINT_TESTER_RESTRICTIONS_HEADER="""
virtual bool PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_SANITY_HEADER="""
virtual bool ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder);"""
CONSTRAINT_TESTER_CLASS_CLOSE="""
};
"""

# Body fragments for PassesParsePreconditions.
CONSTRAINT_TESTER_PARSE_HEADER="""
bool %(base_tester)s
::PassesParsePreconditions(
nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {"""
ROW_CONSTRAINTS_HEADER="""
// Check that row patterns apply to pattern being checked.'"""
PATTERN_CONSTRAINT_RESTRICTIONS_HEADER="""
// Check pattern restrictions of row."""
# One negated pattern/restriction check.
CONSTRAINT_CHECK="""
// %(comment)s
if (%(code)s) return false;"""
CONSTRAINT_TESTER_CLASS_FOOTER="""
// Check other preconditions defined for the base decoder.
return %(base_base_tester)s::
PassesParsePreconditions(inst, decoder);
}
"""

# Body fragments for ApplySanityChecks (safety and defs expectations).
SAFETY_TESTER_HEADER="""
bool %(base_tester)s
::ApplySanityChecks(nacl_arm_dec::Instruction inst,
const NamedClassDecoder& decoder) {
NC_PRECOND(%(base_base_tester)s::
ApplySanityChecks(inst, decoder));"""
SAFETY_TESTER_CHECK="""
// safety: %(comment)s
EXPECT_TRUE(%(code)s);"""
DEFS_SAFETY_CHECK="""
// defs: %(comment)s;
EXPECT_TRUE(decoder.defs(inst).IsSame(%(code)s));"""
SAFETY_TESTER_FOOTER="""
return true;
}
"""

# Banner and template for the per-row decoder tester classes.
TESTER_CLASS_HEADER="""
// The following are derived class decoder testers for decoder actions
// associated with a pattern of an action. These derived classes introduce
// a default constructor that automatically initializes the expected decoder
// to the corresponding instance in the generated DecoderState.
"""
TESTER_CLASS="""
// %(row_comment)s
class %(decoder_tester)s
: public %(base_tester)s {
public:
%(decoder_tester)s()
: %(base_tester)s(
state_.%(baseline_instance)s)
{}
};
"""

# gtest fixture for the generated test functions.
TEST_HARNESS="""
// Defines a gtest testing harness for tests.
class %(decoder_name)sTests : public ::testing::Test {
protected:
%(decoder_name)sTests() {}
};
// The following functions test each pattern specified in parse
// decoder tables.
"""

# Test function templates: actual-vs-baseline, baseline-only, and
# (old) baseline-vs-(generated) baseline comparisons.
TEST_FUNCTION_ACTUAL_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s baseline_tester;
%(named_actual_class)s actual;
ActualVsBaselineTester a_vs_b_tester(actual, baseline_tester);
a_vs_b_tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s tester;
tester.Test("%(pattern)s");
}
"""
TEST_FUNCTION_BASELINE_VS_BASELINE="""
// %(row_comment)s
TEST_F(%(decoder_name)sTests,
BvB_%(decoder_tester)s_Test%(test_pattern)s) {
%(decoder_tester)s old_baseline_tester;
Named%(gen_decoder)s gen_baseline;
BaselineVsBaselineTester b_vs_b_tester(gen_baseline, old_baseline_tester);
b_vs_b_tester.Test("%(pattern)s");
}
"""

# Closes the namespace and defines the gtest main entry point.
TEST_CC_FOOTER="""
} // namespace nacl_arm_test
int main(int argc, char* argv[]) {
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
"""
def generate_tests_cc(decoder, decoder_name, out, cl_args, tables):
  """Generates pattern tests for the rows in the given list of tables
  in the given decoder.

  Args:
    decoder: The decoder (tables) to process.
    decoder_name: The name of the decoder state to build.
    out: a COutput object to write to.
    cl_args: A dictionary of additional command line arguments.
    tables: Names of tables to restrict testing to (empty = all).
  """
  global _cl_args
  if not decoder.primary: raise Exception('No tables provided.')
  _cl_args = cl_args
  # Generate actuals from descriptions in tables, for each of the
  # tables that should automatically generate the corresponding
  # needed actual class decoders.
  actuals = cl_args.get('auto-actual')
  if actuals:
    decoder = dgen_actuals.AddAutoActualsToDecoder(decoder, actuals)
  # Attach generated baselines before restricting to the named tables.
  decoder = dgen_baselines.AddBaselinesToDecoder(decoder, tables)
  # Tables for which baseline-vs-baseline tests should be emitted.
  baselines = cl_args.get('test-base')
  if not baselines: baselines = []
  decoder = _decoder_restricted_to_tables(decoder, tables)
  values = {
      'FILE_HEADER': dgen_output.HEADER_BOILERPLATE,
      'NOT_TCB_MESSAGE' : dgen_output.NOT_TCB_BOILERPLATE,
      'decoder_name': decoder_name,
      }
  out.write(TEST_CC_HEADER % values)
  _generate_constraint_testers(decoder, values, out)
  _generate_rule_testers(decoder, values, out)
  out.write(TEST_HARNESS % values)
  _generate_test_patterns_with_baseline_tests(decoder, values, out, baselines)
  out.write(TEST_CC_FOOTER % values)
def _filter_test_action(action, with_patterns, with_rules):
  """Filters the action down to the fields relevant for testing.

  Pattern and rule fields are optionally retained, controlled by
  with_patterns and with_rules respectively.
  """
  keep = ['actual', 'baseline', 'generated_baseline',
          'constraints'] + dgen_decoder.METHODS
  if with_patterns:
    keep.append('pattern')
  if with_rules:
    keep.append('rule')
  return action.action_filter(keep)
def _filter_test_row(row, with_patterns=False, with_rules=True):
  """Filters a row to pull out actions with relevant entries, based on
  whether we want to include patterns and rules.
  """
  return row.copy_with_action(
      _filter_test_action(row.action, with_patterns, with_rules))
def _install_row_cases(row, values):
  """Installs row case names, based on values entries."""

  def case_name(index_map, filtered_row):
    # Map the filtered row's neutral representation to 'Case<N>'.
    return 'Case%s' % index_map[dgen_core.neutral_repr(filtered_row)]

  # First define base testers that add row constraints and safety checks.
  constraint_rows = values.get('constraint_rows')
  values['base_test_case'] = (
      case_name(constraint_rows, _filter_test_row(row, with_rules=False))
      if constraint_rows else '')
  # Add test decoders associated with the row in the table.
  decoder_rows = values.get('decoder_rows')
  values['test_case'] = (
      case_name(decoder_rows, _filter_test_row(row))
      if decoder_rows else '')
  # Incorporate patterns with each row.
  pattern_rows = values.get('test_rows')
  values['test_pattern'] = (
      case_name(pattern_rows, _filter_test_row(row, with_patterns=True))
      if pattern_rows else '')
def _install_test_row(row, decoder, values,
                      with_patterns=False, with_rules=True):
  """Installs data associated with the given row into the values map.

  Installs the baseline class, rule name, and constraints associated
  with the row. If with_patterns is specified, then pattern information
  and actual class information is also inserted. Returns the filtered
  action.
  """
  action = _filter_test_action(row.action, with_patterns, with_rules)
  row_text = repr(row.copy_with_action(action))
  values['row_comment'] = dgen_output.commented_string(row_text)
  _install_action(decoder, action, values)
  return action
def _rows_to_test(decoder, values, with_patterns=False, with_rules=True):
  """Returns the rows of the decoder that define enough information
  that testing can be done (DecoderAction rows carrying a pattern),
  with duplicates removed by neutral representation.
  """
  seen = set()
  rows = []
  for table in decoder.tables():
    for row in table.rows():
      if not (isinstance(row.action, dgen_core.DecoderAction)
              and row.action.pattern()):
        continue
      new_row = row.copy_with_action(
          _install_test_row(row, decoder, values, with_patterns, with_rules))
      key = dgen_core.neutral_repr(new_row)
      if key not in seen:
        seen.add(key)
        rows.append(new_row)
  return sorted(rows)
def _row_filter_interesting_patterns(row):
  """Builds a copy of the row, removing uninteresting column patterns."""
  filtered = _interesting_patterns(row.patterns)
  return row.copy_with_patterns(filtered)
def _generate_constraint_testers(decoder, values, out):
  """Generates the testers needed to implement the constraints
  associated with each row having a pattern.
  """
  rows = _rows_to_test(decoder, values, with_rules=False)
  values['constraint_rows'] = _index_neutral_map(rows)
  for r in rows:
    _install_row_cases(r, values)
    row = _row_filter_interesting_patterns(r)
    action = _install_test_row(row, decoder, values)
    safety_to_check = _safety_to_check(action.safety())
    defs_to_check = action.defs()
    # Class declaration; override declarations are emitted only when
    # the row actually has preconditions or sanity checks.
    out.write(CONSTRAINT_TESTER_CLASS_HEADER % values)
    if row.patterns or action.constraints().restrictions:
      out.write(CONSTRAINT_TESTER_RESTRICTIONS_HEADER % values);
    if safety_to_check or defs_to_check:
      out.write(CONSTRAINT_TESTER_SANITY_HEADER % values)
    out.write(CONSTRAINT_TESTER_CLASS_CLOSE % values)
    # PassesParsePreconditions: reject instructions matching any
    # negated row pattern or constraint restriction.
    if row.patterns or action.constraints().restrictions:
      out.write(CONSTRAINT_TESTER_PARSE_HEADER % values)
      if row.patterns:
        out.write(ROW_CONSTRAINTS_HEADER % values);
        for p in row.patterns:
          not_p = p.negate()
          values['comment'] = dgen_output.commented_string(repr(not_p), ' ')
          values['code'] = not_p.to_bool()
          out.write(CONSTRAINT_CHECK % values)
      if action.constraints().restrictions:
        out.write(PATTERN_CONSTRAINT_RESTRICTIONS_HEADER)
        for c in action.constraints().restrictions:
          not_c = c.negate()
          values['comment'] = dgen_output.commented_string(repr(not_c), ' ')
          values['code'] = not_c.to_bool()
          out.write(CONSTRAINT_CHECK % values)
      out.write(CONSTRAINT_TESTER_CLASS_FOOTER % values)
    # ApplySanityChecks: safety conditions and defs expectations.
    if safety_to_check or defs_to_check:
      out.write(SAFETY_TESTER_HEADER % values)
      for check in safety_to_check:
        values['comment'] = dgen_output.commented_string(
            repr(check), ' ')
        values['code'] = check.to_bool()
        out.write(SAFETY_TESTER_CHECK % values)
      if defs_to_check:
        values['comment'] = dgen_output.commented_string(
            repr(defs_to_check), ' ')
        values['code'] = defs_to_check.to_register_list()
        out.write(DEFS_SAFETY_CHECK % values)
      out.write(SAFETY_TESTER_FOOTER % values)
def _generate_rule_testers(decoder, values, out):
  """Generates the testers that tests the rule associated with
  each row having a pattern.
  """
  out.write(TESTER_CLASS_HEADER % values)
  rows = _rows_to_test(decoder, values)
  values['decoder_rows'] = _index_neutral_map(rows)
  for unfiltered in rows:
    _install_row_cases(unfiltered, values)
    filtered = _row_filter_interesting_patterns(unfiltered)
    _install_test_row(filtered, decoder, values)
    out.write(TESTER_CLASS % values)
def _decoder_restricted_to_tables(decoder, tables):
  """Returns a copy of the decoder holding only the named tables
  (or the decoder itself if no names are specified).
  """
  if not tables:
    return decoder
  restricted = dgen_core.Decoder()
  for table in decoder.tables():
    if table.name in tables:
      restricted.add(table)
  restricted.set_class_defs(decoder.get_class_defs())
  return restricted
def _generate_test_patterns_with_baseline_tests(
    decoder, values, out, baseline_test_tables):
  # Emit the regular pattern tests for all tables, then a second pass
  # of baseline-vs-baseline tests restricted to the requested tables.
  _generate_test_patterns(decoder, values, out, False)
  _generate_test_patterns(
      _decoder_restricted_to_tables(decoder, baseline_test_tables),
      values, out, True)
def _generate_test_patterns(decoder, values, out, add_baseline_tests):
  """Generates a test function for each row having a pattern associated
  with the table row.

  If add_baseline_tests is True, only baseline-vs-baseline tests are
  emitted (for rows that define a generated baseline); otherwise each
  row gets either a plain baseline test or an actual-vs-baseline test.
  """
  rows = _rows_to_test(decoder, values, with_patterns=True)
  values['test_rows'] = _index_neutral_map(rows)
  for r in rows:
    _install_row_cases(r, values)
    row = _row_filter_interesting_patterns(r)
    action = _install_test_row(row, decoder, values, with_patterns=True)
    if add_baseline_tests:
      # Hoisted: the original looked up 'generated_baseline' twice.
      gen_decoder = action.find('generated_baseline')
      if gen_decoder:
        values['gen_decoder'] = gen_decoder
        out.write(TEST_FUNCTION_BASELINE_VS_BASELINE % values)
    elif action.actual() == action.baseline():
      out.write(TEST_FUNCTION_BASELINE % values)
    else:
      out.write(TEST_FUNCTION_ACTUAL_VS_BASELINE % values)
def _index_neutral_map(values):
  """Returns a dictionary from each neutral_repr(value) in list
  values, to its corresponding index. This is done to reduce the
  number of compares to find the index, speeding up code
  generation.
  """
  # Idiom fix: use enumerate in a dict comprehension instead of a
  # hand-rolled counter loop.
  return {dgen_core.neutral_repr(v): index
          for index, v in enumerate(values)}
|
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import numpy as np
import scipy as sp
import jax
import jax.numpy as jnp
import pytest
from tensornetwork.backends.jax import jax_backend
import jax.config as config
import tensornetwork.backends.jax.jitted_functions as jitted_functions
# pylint: disable=no-member
config.update("jax_enable_x64", True)
# Real (floating point) dtypes, used where random-normal initialization
# is exercised.
np_randn_dtypes = [np.float32, np.float16, np.float64]
# All dtypes under test: the real dtypes plus the complex ones.
np_dtypes = np_randn_dtypes + [np.complex64, np.complex128]
# All dtypes except np.float16.
np_not_half = [np.float32, np.float64, np.complex64, np.complex128]
def test_tensordot():
  """Contracting two matching axes pairs reduces to the expected matrix."""
  be = jax_backend.JaxBackend()
  lhs = be.convert_to_tensor(np.full((2, 3, 4), 2.0))
  rhs = be.convert_to_tensor(np.full((2, 3, 4), 1.0))
  result = be.tensordot(lhs, rhs, ((1, 2), (1, 2)))
  np.testing.assert_allclose(np.full((2, 2), 24.0), result)
def test_tensordot_int():
  """An integer `axes` argument behaves like jax.numpy.tensordot."""
  be = jax_backend.JaxBackend()
  lhs = be.convert_to_tensor(np.full((3, 3, 3), 2.0))
  rhs = be.convert_to_tensor(np.full((3, 3, 3), 1.0))
  result = be.tensordot(lhs, rhs, 1)
  np.testing.assert_allclose(jax.numpy.tensordot(lhs, rhs, 1), result)
def test_reshape():
  """reshape accepts a target shape given as a numpy array."""
  be = jax_backend.JaxBackend()
  tensor = be.convert_to_tensor(np.ones((2, 3, 4)))
  reshaped = be.reshape(tensor, np.array((6, 4, 1)))
  assert be.shape_tuple(reshaped) == (6, 4, 1)
def test_transpose():
  """An explicit permutation reorders the axes accordingly."""
  be = jax_backend.JaxBackend()
  data = np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]])
  permuted = be.transpose(be.convert_to_tensor(data), [2, 0, 1])
  np.testing.assert_allclose(
      np.array([[[1., 3.], [5., 7.]], [[2., 4.], [6., 8.]]]), permuted)
def test_transpose_noperm():
  """Without `perm`, transpose reverses all axes."""
  be = jax_backend.JaxBackend()
  data = np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]])
  flipped = be.transpose(be.convert_to_tensor(data))  # implicit [2, 1, 0]
  flipped = be.transpose(flipped, perm=[0, 2, 1])
  np.testing.assert_allclose(
      np.array([[[1., 3.], [5., 7.]], [[2., 4.], [6., 8.]]]), flipped)
def test_shape_concat():
  """Concatenation along axis 1 stacks both inputs' entries in order."""
  backend = jax_backend.JaxBackend()
  a = backend.convert_to_tensor(2 * np.ones((1, 3, 1)))
  b = backend.convert_to_tensor(np.ones((1, 2, 1)))
  # Fix: the original test had `expected` and `actual` swapped; the backend
  # result is the value under test, the numpy literal is the reference.
  actual = backend.shape_concat((a, b), axis=1)
  expected = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]])
  np.testing.assert_allclose(expected, actual)
def test_slice():
  """A (start, size) slice extracts the expected sub-matrix."""
  be = jax_backend.JaxBackend()
  grid = be.convert_to_tensor(
      np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))
  window = be.slice(grid, (1, 1), (2, 2))
  np.testing.assert_allclose(np.array([[5., 6.], [8., 9.]]), window)
def test_slice_raises_error():
  """A slice size whose rank differs from the tensor's is rejected."""
  be = jax_backend.JaxBackend()
  grid = be.convert_to_tensor(
      np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))
  with pytest.raises(ValueError):
    be.slice(grid, (1, 1), (2, 2, 2))
def test_shape_tensor():
  """shape_tensor returns a tuple holding the tensor's dimensions."""
  be = jax_backend.JaxBackend()
  tensor = be.convert_to_tensor(np.ones([2, 3, 4]))
  shape = be.shape_tensor(tensor)
  assert isinstance(shape, tuple)
  np.testing.assert_allclose(np.array([2, 3, 4]), shape)
def test_shape_tuple():
  """shape_tuple reports the tensor's static shape as a Python tuple."""
  be = jax_backend.JaxBackend()
  tensor = be.convert_to_tensor(np.ones([2, 3, 4]))
  assert be.shape_tuple(tensor) == (2, 3, 4)
def test_shape_prod():
  """shape_prod over a tensor of 2s with 1*2*3*4 = 24 entries is 2**24."""
  be = jax_backend.JaxBackend()
  tensor = be.convert_to_tensor(np.full([1, 2, 3, 4], 2.0))
  assert np.array(be.shape_prod(tensor)) == 2**24
def test_sqrt():
  """Elementwise square root of perfect squares."""
  be = jax_backend.JaxBackend()
  squares = be.convert_to_tensor(np.array([4., 9.]))
  np.testing.assert_allclose(np.array([2, 3]), be.sqrt(squares))
def test_convert_to_tensor():
  """Converted arrays have jax's device-array type and identical values."""
  be = jax_backend.JaxBackend()
  array = np.ones((2, 3, 4))
  converted = be.convert_to_tensor(array)
  reference = jax.jit(lambda x: x)(array)  # same data, canonical jax type
  assert isinstance(converted, type(reference))
  np.testing.assert_allclose(reference, converted)
def test_outer_product():
  """The outer product of (2,1) and (1,2,2) tensors has shape (2,1,1,2,2)."""
  be = jax_backend.JaxBackend()
  left = be.convert_to_tensor(np.full((2, 1), 2.0))
  right = be.convert_to_tensor(np.ones((1, 2, 2)))
  result = be.outer_product(left, right)
  np.testing.assert_allclose(np.full((2, 1, 1, 2, 2), 2.0), result)
def test_einsum():
  """An einsum contraction over shared indices yields the expected vector."""
  be = jax_backend.JaxBackend()
  left = be.convert_to_tensor(np.full((2, 1), 2.0))
  right = be.convert_to_tensor(np.ones((1, 2, 2)))
  contracted = be.einsum('ij,jil->l', left, right)
  np.testing.assert_allclose(np.array([4.0, 4.0]), contracted)
def test_convert_bad_test():
  """Tensors from another framework (tensorflow) are rejected."""
  be = jax_backend.JaxBackend()
  with pytest.raises(TypeError, match="Expected"):
    be.convert_to_tensor(tf.ones((2, 2)))
def test_norm():
  """Frobenius norm of a 2x2 all-ones matrix is sqrt(4) = 2."""
  be = jax_backend.JaxBackend()
  assert be.norm(be.convert_to_tensor(np.ones((2, 2)))) == 2
@pytest.mark.parametrize("dtype", np_dtypes)
def test_eye(dtype):
  """backend.eye matches np.eye for rectangular identity matrices."""
  be = jax_backend.JaxBackend()
  np.testing.assert_allclose(
      np.eye(N=4, M=5, dtype=dtype), be.eye(N=4, M=5, dtype=dtype))
@pytest.mark.parametrize("dtype", np_dtypes)
def test_ones(dtype):
  """backend.ones matches np.ones for every supported dtype."""
  be = jax_backend.JaxBackend()
  np.testing.assert_allclose(
      np.ones((4, 4), dtype=dtype), be.ones((4, 4), dtype=dtype))
@pytest.mark.parametrize("dtype", np_dtypes)
def test_zeros(dtype):
  """backend.zeros matches np.zeros for every supported dtype."""
  be = jax_backend.JaxBackend()
  np.testing.assert_allclose(
      np.zeros((4, 4), dtype=dtype), be.zeros((4, 4), dtype=dtype))
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_randn(dtype):
  """randn produces a tensor of the requested shape."""
  sample = jax_backend.JaxBackend().randn((4, 4), dtype=dtype)
  assert sample.shape == (4, 4)
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_random_uniform(dtype):
  """random_uniform produces a tensor of the requested shape."""
  sample = jax_backend.JaxBackend().random_uniform((4, 4), dtype=dtype)
  assert sample.shape == (4, 4)
@pytest.mark.parametrize("dtype", [np.complex64, np.complex128])
def test_randn_non_zero_imag(dtype):
  """Complex randn samples must have a non-trivial imaginary part."""
  sample = jax_backend.JaxBackend().randn((4, 4), dtype=dtype)
  assert np.linalg.norm(np.imag(sample)) != 0.0
@pytest.mark.parametrize("dtype", [np.complex64, np.complex128])
def test_random_uniform_non_zero_imag(dtype):
  """Complex random_uniform samples must have a non-trivial imaginary part."""
  sample = jax_backend.JaxBackend().random_uniform((4, 4), dtype=dtype)
  assert np.linalg.norm(np.imag(sample)) != 0.0
@pytest.mark.parametrize("dtype", np_dtypes)
def test_eye_dtype(dtype):
  """eye honours the requested dtype."""
  assert jax_backend.JaxBackend().eye(N=4, M=4, dtype=dtype).dtype == dtype
@pytest.mark.parametrize("dtype", np_dtypes)
def test_ones_dtype(dtype):
  """ones honours the requested dtype."""
  assert jax_backend.JaxBackend().ones((4, 4), dtype=dtype).dtype == dtype
@pytest.mark.parametrize("dtype", np_dtypes)
def test_zeros_dtype(dtype):
  """zeros honours the requested dtype."""
  assert jax_backend.JaxBackend().zeros((4, 4), dtype=dtype).dtype == dtype
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_randn_dtype(dtype):
  """randn honours the requested dtype."""
  assert jax_backend.JaxBackend().randn((4, 4), dtype=dtype).dtype == dtype
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_random_uniform_dtype(dtype):
  """random_uniform honours the requested dtype."""
  sample = jax_backend.JaxBackend().random_uniform((4, 4), dtype=dtype)
  assert sample.dtype == dtype
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_randn_seed(dtype):
  """Equal seeds yield identical randn samples."""
  be = jax_backend.JaxBackend()
  first = be.randn((4, 4), seed=10, dtype=dtype)
  second = be.randn((4, 4), seed=10, dtype=dtype)
  np.testing.assert_allclose(first, second)
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_random_uniform_seed(dtype):
  """Equal seeds yield identical random_uniform samples."""
  be = jax_backend.JaxBackend()
  first = be.random_uniform((4, 4), seed=10, dtype=dtype)
  second = be.random_uniform((4, 4), seed=10, dtype=dtype)
  np.testing.assert_allclose(first, second)
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_random_uniform_boundaries(dtype):
  """Default samples lie in [0, 1]; explicit boundaries are honoured."""
  low, high = 1.2, 4.8
  be = jax_backend.JaxBackend()
  default = be.random_uniform((4, 4), seed=10, dtype=dtype)
  bounded = be.random_uniform((4, 4), (low, high), seed=10, dtype=dtype)
  assert ((default >= 0).all() and (default <= 1).all() and
          (bounded >= low).all() and (bounded <= high).all())
def test_random_uniform_behavior():
  """random_uniform(seed=s) matches jax.random.uniform with PRNGKey(s)."""
  seed = 10
  be = jax_backend.JaxBackend()
  ours = be.random_uniform((4, 4), seed=seed)
  reference = jax.random.uniform(jax.random.PRNGKey(seed), (4, 4))
  np.testing.assert_allclose(ours, reference)
def test_conj():
  """conj negates the imaginary part elementwise."""
  be = jax_backend.JaxBackend()
  re = np.random.rand(2, 2, 2)
  im = np.random.rand(2, 2, 2)
  conjugated = be.conj(be.convert_to_tensor(re + 1j * im))
  np.testing.assert_allclose(re - 1j * im, conjugated)
@pytest.mark.parametrize("dtype", np_randn_dtypes)
def test_index_update_randn_dtypes(dtype):
  """index_update writes the value at every masked position.

  Fix: this test was previously named `index_update` (no `test_` prefix),
  so pytest never collected it; renamed so it actually runs. The name
  differs from `test_index_update` below, which covers np_dtypes.
  """
  backend = jax_backend.JaxBackend()
  tensor = backend.randn((4, 2, 3), dtype=dtype, seed=10)
  out = backend.index_update(tensor, tensor > 0.1, 0.0)
  # Reference: the same masked assignment on a mutable numpy copy.
  tensor = np.array(tensor)
  tensor[tensor > 0.1] = 0.0
  np.testing.assert_allclose(tensor, out)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_eigsh_valid_init_operator_with_shape(dtype):
  """eigsh_lanczos with an explicit initial state finds the ground state."""
  be = jax_backend.JaxBackend()
  dim = 16
  np.random.seed(10)
  init = be.randn((dim,), dtype=dtype, seed=10)
  rand = be.randn((dim, dim), dtype=dtype, seed=10)
  H = rand + be.transpose(be.conj(rand), (1, 0))  # hermitian by construction
  def mv(vec, mat):
    return jax.numpy.dot(mat, vec)
  eta, vecs = be.eigsh_lanczos(mv, [H], init)
  eta_exact, vecs_exact = np.linalg.eigh(H)
  ground_exact = vecs_exact[:, 0]
  ground_exact = ground_exact / sum(ground_exact)  # remove the free phase
  ground = np.reshape(vecs[0], (dim))
  ground = ground / sum(ground)
  np.testing.assert_allclose(eta[0], min(eta_exact))
  np.testing.assert_allclose(ground, ground_exact)
def test_eigsh_small_number_krylov_vectors():
  """A singular 2x2 matrix is solved exactly with two Krylov vectors."""
  be = jax_backend.JaxBackend()
  init = np.array([1, 1], dtype=np.float64)
  H = np.array([[1, 2], [2, 4]], dtype=np.float64)  # rank 1, eigenvalues 0 and 5
  def mv(vec, mat):
    return jax.numpy.dot(mat, vec)
  eta, _ = be.eigsh_lanczos(mv, [H], init, numeig=1, num_krylov_vecs=2)
  np.testing.assert_almost_equal(eta, [0])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_eigsh_lanczos_1(dtype):
  """Lanczos ground state of a random hermitian matrix, with initial state."""
  be = jax_backend.JaxBackend()
  dim = 16
  np.random.seed(10)
  init = be.randn((dim,), dtype=dtype, seed=10)
  rand = be.randn((dim, dim), dtype=dtype, seed=10)
  H = rand + be.transpose(be.conj(rand), (1, 0))
  def mv(vec, mat):
    return jax.numpy.dot(mat, vec)
  eta, vecs = be.eigsh_lanczos(mv, [H], init)
  eta_exact, vecs_exact = np.linalg.eigh(H)
  ground_exact = vecs_exact[:, 0]
  ground_exact = ground_exact / sum(ground_exact)  # remove the free phase
  ground = np.reshape(vecs[0], (dim))
  ground = ground / sum(ground)
  np.testing.assert_allclose(eta[0], min(eta_exact))
  np.testing.assert_allclose(ground, ground_exact)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_eigsh_lanczos_2(dtype):
  """Lanczos ground state with a backend-generated random initial state."""
  be = jax_backend.JaxBackend()
  dim = 16
  np.random.seed(10)
  rand = be.randn((dim, dim), dtype=dtype, seed=10)
  H = rand + be.transpose(be.conj(rand), (1, 0))
  def mv(vec, mat):
    return jax.numpy.dot(mat, vec)
  # No initial state: shape and dtype are provided instead.
  eta, vecs = be.eigsh_lanczos(mv, [H], shape=(dim,), dtype=dtype)
  eta_exact, vecs_exact = np.linalg.eigh(H)
  ground_exact = vecs_exact[:, 0]
  ground_exact = ground_exact / sum(ground_exact)  # remove the free phase
  ground = np.reshape(vecs[0], (dim))
  ground = ground / sum(ground)
  np.testing.assert_allclose(eta[0], min(eta_exact))
  np.testing.assert_allclose(ground, ground_exact)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("numeig", [1, 2, 3, 4])
def test_eigsh_lanczos_reorthogonalize(dtype, numeig):
  """With full reorthogonalization, eigsh_lanczos reproduces the lowest
  `numeig` eigenpairs of a random hermitian matrix."""
  backend = jax_backend.JaxBackend()
  D = 24
  np.random.seed(10)
  tmp = backend.randn((D, D), dtype=dtype, seed=10)
  # Symmetrize so H is hermitian.
  H = tmp + backend.transpose(backend.conj(tmp), (1, 0))
  def mv(x, H):
    return jax.numpy.dot(H, x)
  # num_krylov_vecs=D means the Krylov space spans the full matrix.
  eta1, U1 = backend.eigsh_lanczos(
      mv, [H],
      shape=(D,),
      dtype=dtype,
      numeig=numeig,
      num_krylov_vecs=D,
      reorthogonalize=True,
      ndiag=1,
      tol=1E-12,
      delta=1E-12)
  eta2, U2 = np.linalg.eigh(H)
  np.testing.assert_allclose(eta1[0:numeig], eta2[0:numeig])
  for n in range(numeig):
    v2 = U2[:, n]
    v2 /= np.sum(v2) #fix phases
    v1 = np.reshape(U1[n], (D))
    v1 /= np.sum(v1)
    np.testing.assert_allclose(v1, v2, rtol=1E-5, atol=1E-5)
def test_eigsh_lanczos_raises():
  """eigsh_lanczos validates its argument combinations."""
  backend = jax_backend.JaxBackend()
  # numeig must not exceed the Krylov-space dimension.
  with pytest.raises(
      ValueError, match='`num_krylov_vecs` >= `numeig` required!'):
    backend.eigsh_lanczos(lambda x: x, numeig=10, num_krylov_vecs=9)
  # More than one eigenpair requires reorthogonalization.
  with pytest.raises(
      ValueError,
      match="Got numeig = 2 > 1 and `reorthogonalize = False`. "
      "Use `reorthogonalize=True` for `numeig > 1`"):
    backend.eigsh_lanczos(lambda x: x, numeig=2, reorthogonalize=False)
  # Without an initial state, both shape and dtype are mandatory.
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    backend.eigsh_lanczos(lambda x: x, shape=(10,), dtype=None)
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    backend.eigsh_lanczos(lambda x: x, shape=None, dtype=np.float64)
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    backend.eigsh_lanczos(lambda x: x)
  # initial_state must be a jax array, not a plain list.
  with pytest.raises(
      TypeError, match="Expected a `jax.array`. Got <class 'list'>"):
    backend.eigsh_lanczos(lambda x: x, initial_state=[1, 2, 3])
@pytest.mark.parametrize("dtype", np_dtypes)
def test_index_update(dtype):
  """index_update matches boolean-mask assignment on a numpy copy."""
  be = jax_backend.JaxBackend()
  tensor = be.randn((4, 2, 3), dtype=dtype, seed=10)
  updated = be.index_update(tensor, tensor > 0.1, 0.0)
  reference = np.array(tensor)
  reference[reference > 0.1] = 0.0
  np.testing.assert_allclose(updated, reference)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_broadcast_right_multiplication(dtype):
  """Right-multiplying by a vector scales the trailing axis elementwise."""
  be = jax_backend.JaxBackend()
  mat = be.randn((2, 3), dtype=dtype, seed=10)
  vec = be.randn((3,), dtype=dtype, seed=10)
  result = be.broadcast_right_multiplication(mat, vec)
  np.testing.assert_allclose(result, np.array(mat) * np.array(vec))
def test_broadcast_right_multiplication_raises():
  """The right operand must be rank 1."""
  be = jax_backend.JaxBackend()
  mat = be.randn((2, 3))
  not_a_vector = be.randn((3, 3))
  with pytest.raises(ValueError):
    be.broadcast_right_multiplication(mat, not_a_vector)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_broadcast_left_multiplication(dtype):
  """Left-multiplying by a vector scales the leading axis elementwise."""
  be = jax_backend.JaxBackend()
  vec = be.randn((3,), dtype=dtype, seed=10)
  tensor = be.randn((3, 4, 2), dtype=dtype, seed=10)
  result = be.broadcast_left_multiplication(vec, tensor)
  np.testing.assert_allclose(result, np.reshape(vec, (3, 1, 1)) * tensor)
def test_broadcast_left_multiplication_raises():
  """The left operand must be rank 1."""
  be = jax_backend.JaxBackend()
  not_a_vector = be.randn((3, 3), dtype=np.float64, seed=10)
  tensor = be.randn((2, 4, 3), dtype=np.float64, seed=10)
  with pytest.raises(ValueError):
    be.broadcast_left_multiplication(not_a_vector, tensor)
def test_sparse_shape():
  """For dense jax tensors, sparse_shape is simply the tensor's shape."""
  be = jax_backend.JaxBackend()
  tensor = be.randn((2, 3, 4), dtype=np.float64, seed=10)
  np.testing.assert_allclose(be.sparse_shape(tensor), tensor.shape)
@pytest.mark.parametrize("dtype,method", [(np.float64, "sin"),
                                          (np.complex128, "sin"),
                                          (np.float64, "cos"),
                                          (np.complex128, "cos"),
                                          (np.float64, "exp"),
                                          (np.complex128, "exp"),
                                          (np.float64, "log"),
                                          (np.complex128, "log")])
def test_elementwise_ops(dtype, method):
  """Each elementwise backend op agrees with its numpy counterpart."""
  be = jax_backend.JaxBackend()
  tensor = be.randn((4, 3, 2), dtype=dtype, seed=10)
  if method == "log":
    tensor = np.abs(tensor)  # keep log's argument in its domain
  np.testing.assert_almost_equal(
      getattr(be, method)(tensor), getattr(np, method)(tensor))
@pytest.mark.parametrize("dtype,method", [(np.float64, "expm"),
                                          (np.complex128, "expm")])
def test_matrix_ops(dtype, method):
  """Backend matrix functions agree with scipy.linalg."""
  be = jax_backend.JaxBackend()
  matrix = be.randn((4, 4), dtype=dtype, seed=10)
  np.testing.assert_almost_equal(
      getattr(be, method)(matrix), getattr(sp.linalg, method)(matrix))
@pytest.mark.parametrize("dtype,method", [(np.float64, "expm"),
                                          (np.complex128, "expm")])
def test_matrix_ops_raises(dtype, method):
  """Matrix ops reject non-matrix and non-square inputs."""
  be = jax_backend.JaxBackend()
  rank3 = be.randn((4, 4, 4), dtype=dtype, seed=10)
  with pytest.raises(ValueError, match=r".*Only matrices.*"):
    getattr(be, method)(rank3)
  rectangular = be.randn((4, 3), dtype=dtype, seed=10)
  with pytest.raises(ValueError, match=r".*N\*N matrix.*"):
    getattr(be, method)(rectangular)
def test_jit():
  """backend.jit returns a callable computing the same values."""
  be = jax_backend.JaxBackend()
  def bilinear(x, A, y):
    return jax.numpy.dot(x, jax.numpy.dot(A, y))
  x = jax.numpy.array(np.random.rand(4))
  y = jax.numpy.array(np.random.rand(4))
  A = jax.numpy.array(np.random.rand(4, 4))
  np.testing.assert_allclose(bilinear(x, A, y), be.jit(bilinear)(x, A, y))
def test_jit_args():
  """Jitted functions accept positional and keyword arguments alike."""
  be = jax_backend.JaxBackend()
  def bilinear(x, A, y):
    return jax.numpy.dot(x, jax.numpy.dot(A, y))
  jitted = be.jit(bilinear)
  x = jax.numpy.array(np.random.rand(4))
  y = jax.numpy.array(np.random.rand(4))
  A = jax.numpy.array(np.random.rand(4, 4))
  reference = bilinear(x, A, y)
  np.testing.assert_allclose(reference, jitted(x, A, y))
  np.testing.assert_allclose(reference, jitted(x, y=y, A=A))
def compare_eigvals_and_eigvecs(U,
                                eta,
                                U_exact,
                                eta_exact,
                                rtol,
                                atol,
                                thresh=1E-8):
  """Assert eigenpairs (eta, U) match (eta_exact, U_exact) up to ordering.

  Eigenvalues are paired by proximity (|difference| < thresh); columns of
  both eigenvector matrices are normalised by their column sums to remove
  the free phase/scale before comparison.
  """
  _, permutation = np.nonzero(
      np.abs(eta[:, None] - eta_exact[None, :]) < thresh)
  matched = U_exact[:, permutation]
  matched = matched / np.expand_dims(np.sum(matched, axis=0), 0)
  normalized = U / np.expand_dims(np.sum(U, axis=0), 0)
  np.testing.assert_allclose(matched, normalized, atol=atol, rtol=rtol)
  np.testing.assert_allclose(eta, eta_exact[permutation], atol=atol, rtol=rtol)
##############################################################
# eigs and eigsh tests #
##############################################################
def generate_hermitian_matrix(be, dtype, D):
  """Return a random D x D hermitian matrix drawn through the backend."""
  mat = be.randn((D, D), dtype=dtype, seed=10)
  return mat + mat.T.conj()
def generate_matrix(be, dtype, D):
  """Return a random (generally non-hermitian) D x D matrix."""
  matrix = be.randn((D, D), dtype=dtype, seed=10)
  return matrix
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_all_eigvals_with_init(dtype, solver, matrix_generator,
                                          exact_decomp, which):
  """Requesting all D eigenpairs with an explicit initial state reproduces
  the full exact spectrum for each supported `which` mode."""
  backend = jax_backend.JaxBackend()
  D = 16
  np.random.seed(10)
  init = backend.randn((D,), dtype=dtype, seed=10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    return jax.numpy.dot(H, x)
  # numeig == num_krylov_vecs == D: the Krylov space spans the full matrix.
  eta, U = solver(mv, [H], init, numeig=D, num_krylov_vecs=D, which=which)
  eta_exact, U_exact = exact_decomp(H)
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1), eta, U_exact, eta_exact, rtol, atol, thresh=1E-4)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_all_eigvals_no_init(dtype, solver, matrix_generator,
                                        exact_decomp, which):
  """Same as the `with_init` variant, but the solver draws its own initial
  state from `shape` and `dtype`."""
  backend = jax_backend.JaxBackend()
  D = 16
  np.random.seed(10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    return jax.numpy.dot(H, x)
  eta, U = solver(
      mv, [H],
      shape=(D,),
      dtype=dtype,
      numeig=D,
      num_krylov_vecs=D,
      which=which)
  eta_exact, U_exact = exact_decomp(H)
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1), eta, U_exact, eta_exact, rtol, atol, thresh=1E-4)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_few_eigvals_with_init(dtype, solver, matrix_generator,
                                          exact_decomp, which):
  """Requesting only 4 of the 16 eigenpairs (with restarts, maxiter=50)
  still matches the exact decomposition."""
  backend = jax_backend.JaxBackend()
  D = 16
  np.random.seed(10)
  init = backend.randn((D,), dtype=dtype, seed=10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    return jax.numpy.dot(H, x)
  eta, U = solver(
      mv, [H], init, numeig=4, num_krylov_vecs=16, maxiter=50, which=which)
  eta_exact, U_exact = exact_decomp(H)
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1), eta, U_exact, eta_exact, rtol, atol, thresh=1E-4)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_few_eigvals_no_init(dtype, solver, matrix_generator,
                                        exact_decomp, which):
  """Requesting only 4 of the 16 eigenpairs without an initial state
  (solver draws its own from `shape`/`dtype`) matches the exact result."""
  backend = jax_backend.JaxBackend()
  D = 16
  np.random.seed(10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    return jax.numpy.dot(H, x)
  eta, U = solver(
      mv, [H],
      shape=(D,),
      dtype=dtype,
      numeig=4,
      num_krylov_vecs=16,
      which=which)
  eta_exact, U_exact = exact_decomp(H)
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1), eta, U_exact, eta_exact, rtol, atol, thresh=1E-4)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_large_ncv_with_init(dtype, solver, matrix_generator,
                                        exact_decomp, which):
  """A Krylov space (50) much larger than numeig (4) but smaller than the
  matrix dimension (D=100) still converges to the exact eigenpairs."""
  backend = jax_backend.JaxBackend()
  D = 100
  np.random.seed(10)
  init = backend.randn((D,), dtype=dtype, seed=10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    return jax.numpy.dot(H, x)
  eta, U = solver(
      mv, [H], init, numeig=4, num_krylov_vecs=50, maxiter=50, which=which)
  eta_exact, U_exact = exact_decomp(H)
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1), eta, U_exact, eta_exact, rtol, atol, thresh=1E-4)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize(
    "solver, matrix_generator, exact_decomp, which",
    [(jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LM"),
     (jax_backend.JaxBackend().eigs, generate_matrix, np.linalg.eig, "LR"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "SA"),
     (jax_backend.JaxBackend().eigsh, generate_hermitian_matrix, np.linalg.eigh,
      "LM")])
def test_eigs_eigsh_large_matrix_with_init(dtype, solver, matrix_generator,
                                           exact_decomp, which):
  """Convergence on a large (D=1000) matrix: 4 eigenpairs from a 40-vector
  Krylov space with up to 500 restarts."""
  backend = jax_backend.JaxBackend()
  D = 1000
  np.random.seed(10)
  init = backend.randn((D,), dtype=dtype, seed=10)
  H = matrix_generator(backend, dtype, D)
  def mv(x, H):
    # Highest matmul precision to keep the comparison to numpy meaningful.
    return jax.numpy.dot(H, x, precision=jax.lax.Precision.HIGHEST)
  eta, U = solver(
      mv, [H],
      init,
      numeig=4,
      num_krylov_vecs=40,
      maxiter=500,
      which=which,
      tol=1E-10)
  eta_exact, U_exact = exact_decomp(H)
  # Eigenvalue-matching threshold is looser for single precision dtypes.
  thresh = {
      np.complex64: 1E-3,
      np.float32: 1E-3,
      np.float64: 1E-4,
      np.complex128: 1E-4
  }
  rtol = 1E-8
  atol = 1E-8
  compare_eigvals_and_eigvecs(
      np.stack(U, axis=1),
      eta,
      U_exact,
      eta_exact,
      rtol,
      atol,
      thresh=thresh[dtype])
def get_ham_params(dtype, N, which):
  """Return (pot, hop) parameters of an N-site free-fermion Hamiltonian.

  Args:
    dtype: element dtype; for complex dtypes the hoppings additionally get
      an imaginary part.
    N: number of lattice sites.
    which: 'uniform' for constant parameters, 'rand' for random ones.

  Returns:
    pot: on-site potentials, shape (N,).
    hop: hopping amplitudes, shape (N - 1,).

  Raises:
    ValueError: if `which` is unknown. (Fix: the original fell through and
      raised UnboundLocalError on the `return` instead.)
  """
  if which == 'uniform':
    hop = -jnp.ones(N - 1, dtype=dtype)
    pot = jnp.ones(N, dtype=dtype)
    if dtype in (np.complex128, np.complex64):
      hop -= 1j * jnp.ones(N - 1, dtype)
  elif which == 'rand':
    hop = (-1) * jnp.array(np.random.rand(N - 1).astype(dtype) - 0.5)
    pot = jnp.array(np.random.rand(N).astype(dtype)) - 0.5
    if dtype in (np.complex128, np.complex64):
      hop -= 1j * jnp.array(np.random.rand(N - 1).astype(dtype) - 0.5)
  else:
    raise ValueError(
        f"which = {which} not supported; use 'uniform' or 'rand'")
  return pot, hop
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("param_type", ['uniform', 'rand'])
@pytest.mark.parametrize("N", [14])
def test_eigsh_free_fermions(N, dtype, param_type):
  """
  Find the lowest eigenvalues and eigenvectors
  of a 1d free-fermion Hamiltonian on N sites.
  The dimension of the hermitian matrix is
  (2**N, 2**N).
  """
  backend = jax_backend.JaxBackend(precision=jax.lax.Precision.HIGHEST)
  np.random.seed(10)
  pot, hop = get_ham_params(dtype, N, param_type)
  # Single-site operators: P (Jordan-Wigner-style sign matrix), the
  # annihilator c, the number operator n = c^dag c and the identity.
  P = jnp.diag(np.array([0, -1])).astype(dtype)
  c = jnp.array([[0, 1], [0, 0]], dtype)
  n = c.T @ c
  eye = jnp.eye(2, dtype=dtype)
  # Two-site building blocks acting on a pair of neighboring sites.
  neye = jnp.kron(n, eye)
  eyen = jnp.kron(eye, n)
  ccT = jnp.kron(c @ P, c.T)
  cTc = jnp.kron(c.T, c)
  @jax.jit
  def matvec(vec):
    # Apply H by sweeping a two-site gate along the chain; between gate
    # applications the state is cyclically permuted so the next pair of
    # sites occupies the leading (4, ...) axis.
    x = vec.reshape((4, 2**(N - 2)))
    out = jnp.zeros(x.shape, x.dtype)
    # Left boundary: full potential on site 0, half on site 1 (bulk sites
    # receive their potential from two gates).
    t1 = neye * pot[0] + eyen * pot[1] / 2
    t2 = cTc * hop[0] - ccT * jnp.conj(hop[0])
    out += jnp.einsum('ij,ki -> kj', x, t1 + t2)
    x = x.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape((4, 2**(N - 2)))
    out = out.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape(
        (4, 2**(N - 2)))
    # Bulk gates: half potential on both sites.
    for site in range(1, N - 2):
      t1 = neye * pot[site] / 2 + eyen * pot[site + 1] / 2
      t2 = cTc * hop[site] - ccT * jnp.conj(hop[site])
      out += jnp.einsum('ij,ki -> kj', x, t1 + t2)
      x = x.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape((4, 2**(N - 2)))
      out = out.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape(
          (4, 2**(N - 2)))
    # Right boundary: half potential on site N-2, full on site N-1.
    t1 = neye * pot[N - 2] / 2 + eyen * pot[N - 1]
    t2 = cTc * hop[N - 2] - ccT * jnp.conj(hop[N - 2])
    out += jnp.einsum('ij,ki -> kj', x, t1 + t2)
    x = x.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape((4, 2**(N - 2)))
    out = out.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape(
        (4, 2**(N - 2)))
    # Final cyclic permutation returns `out` to the original site ordering.
    x = x.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape(2**N)
    out = out.reshape((2, 2**(N - 1))).transpose((1, 0)).reshape(2**N)
    return out.ravel()
  # Exact reference: diagonalize the single-particle N x N Hamiltonian and
  # build many-body energies as sums over occupied single-particle levels
  # (the occupation pattern of state `n` is read off its binary digits).
  H = np.diag(pot) + np.diag(hop.conj(), 1) + np.diag(hop, -1)
  single_particle_energies = np.linalg.eigh(H)[0]
  many_body_energies = []
  for n in range(2**N):
    many_body_energies.append(
        np.sum(single_particle_energies[np.nonzero(
            np.array(list(bin(n)[2:]), dtype=int)[::-1])[0]]))
  many_body_energies = np.sort(many_body_energies)
  init = jnp.array(np.random.randn(2**N)).astype(dtype)
  init /= jnp.linalg.norm(init)
  ncv = 20
  numeig = 3
  which = 'SA'
  tol = 1E-10
  maxiter = 30
  atol = 1E-8
  eta, _ = backend.eigsh(
      A=matvec,
      args=[],
      initial_state=init,
      num_krylov_vecs=ncv,
      numeig=numeig,
      which=which,
      tol=tol,
      maxiter=maxiter)
  np.testing.assert_allclose(
      eta, many_body_energies[:numeig], atol=atol, rtol=atol)
@pytest.mark.parametrize(
    "solver, whichs",
    [(jax_backend.JaxBackend().eigs, ["SM", "SR", "LI", "SI"]),
     (jax_backend.JaxBackend().eigsh, ["SM", "BE"])])
def test_eigs_eigsh_raises(solver, whichs):
  """eigs/eigsh validate argument combinations and reject unsupported
  `which` modes (each solver has its own unsupported set)."""
  # numeig must not exceed the Krylov-space dimension.
  with pytest.raises(
      ValueError, match='`num_krylov_vecs` >= `numeig` required!'):
    solver(lambda x: x, numeig=10, num_krylov_vecs=9)
  # Without an initial state, both shape and dtype are mandatory.
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    solver(lambda x: x, shape=(10,), dtype=None)
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    solver(lambda x: x, shape=None, dtype=np.float64)
  with pytest.raises(
      ValueError,
      match="if no `initial_state` is passed, then `shape` and"
      "`dtype` have to be provided"):
    solver(lambda x: x)
  # initial_state must be a jax array, not a plain list.
  with pytest.raises(
      TypeError, match="Expected a `jax.array`. Got <class 'list'>"):
    solver(lambda x: x, initial_state=[1, 2, 3])
  for which in whichs:
    with pytest.raises(
        ValueError, match=f"which = {which}"
        f" is currently not supported."):
      solver(lambda x: x, which=which)
def test_eigs_dtype_raises():
  """eigs rejects integer dtypes."""
  be = jax_backend.JaxBackend()
  with pytest.raises(TypeError, match="dtype"):
    be.eigs(lambda x: x, shape=(10,), dtype=np.int32, num_krylov_vecs=10)
##################################################################
############# This test should just not crash ################
##################################################################
@pytest.mark.parametrize("dtype",
                         [np.float64, np.complex128, np.float32, np.complex64])
def test_eigs_bugfix(dtype):
  """Regression test: this call used to crash; it just has to run."""
  be = jax_backend.JaxBackend()
  dim = 200
  matrix = jax.numpy.array(np.random.rand(dim, dim).astype(dtype))
  vector = jax.numpy.array(np.random.rand(dim).astype(dtype))
  def matvec_jax(v, m):
    return m @ v
  be.eigs(
      matvec_jax, [matrix],
      numeig=1,
      initial_state=vector,
      which='LR',
      maxiter=10,
      num_krylov_vecs=100,
      tol=0.0001)
def test_sum():
  """backend.sum matches np.sum, with and without keepdims."""
  np.random.seed(10)
  backend = jax_backend.JaxBackend()
  tensor = np.random.rand(2, 3, 4)
  a = backend.convert_to_tensor(tensor)
  actual = backend.sum(a, axis=(1, 2))
  expected = np.sum(tensor, axis=(1, 2))
  np.testing.assert_allclose(expected, actual)
  actual = backend.sum(a, axis=(1, 2), keepdims=True)
  # Fix: compute the reference from the numpy source data, not the backend
  # tensor `a`, so the expected value does not depend on np.sum dispatching
  # over jax arrays (the non-keepdims branch already did this).
  expected = np.sum(tensor, axis=(1, 2), keepdims=True)
  np.testing.assert_allclose(expected, actual)
def test_matmul():
  """Batched matmul matches numpy; mismatched operands raise."""
  np.random.seed(10)
  be = jax_backend.JaxBackend()
  lhs_np = np.random.rand(10, 2, 3)
  rhs_np = np.random.rand(10, 3, 4)
  lhs = be.convert_to_tensor(lhs_np)
  rhs = be.convert_to_tensor(rhs_np)
  np.testing.assert_allclose(np.matmul(lhs_np, rhs_np), be.matmul(lhs, rhs))
  vec_a = be.convert_to_tensor(np.random.rand(10))
  vec_b = be.convert_to_tensor(np.random.rand(11))
  with pytest.raises(ValueError, match="inputs to"):
    be.matmul(vec_a, vec_b)
def test_gmres_raises():
  """gmres rejects malformed x0/b pairs, non-positive numeric options and
  the unsupported M / A_kwargs arguments."""
  backend = jax_backend.JaxBackend()
  dummy_mv = lambda x: x
  N = 10
  b = jax.numpy.zeros((N,))
  x0 = jax.numpy.zeros((N+1),)
  diff = "If x0 is supplied, its shape"
  with pytest.raises(ValueError, match=diff): # x0, b have different sizes
    backend.gmres(dummy_mv, b, x0=x0)
  x0 = jax.numpy.zeros((N,), dtype=jax.numpy.float32)
  b = jax.numpy.zeros((N,), dtype=jax.numpy.float64)
  diff = (f"If x0 is supplied, its dtype, {x0.dtype}, must match b's"
          f", {b.dtype}.")
  with pytest.raises(TypeError, match=diff): # x0, b have different dtypes
    backend.gmres(dummy_mv, b, x0=x0)
  x0 = jax.numpy.zeros((N,))
  b = jax.numpy.zeros((N,)).reshape(2, N//2)
  diff = "If x0 is supplied, its shape"
  with pytest.raises(ValueError, match=diff): # x0, b have different shapes
    backend.gmres(dummy_mv, b, x0=x0)
  num_krylov_vectors = 0
  diff = (f"num_krylov_vectors must be positive, not"
          f"{num_krylov_vectors}.")
  with pytest.raises(ValueError, match=diff): # num_krylov_vectors <= 0
    backend.gmres(dummy_mv, b, num_krylov_vectors=num_krylov_vectors)
  tol = -1.
  diff = (f"tol = {tol} must be positive.")
  with pytest.raises(ValueError, match=diff): # tol < 0
    backend.gmres(dummy_mv, b, tol=tol)
  atol = -1
  diff = (f"atol = {atol} must be positive.")
  with pytest.raises(ValueError, match=diff): # atol < 0
    backend.gmres(dummy_mv, b, atol=atol)
  # Preconditioners and extra operator kwargs are not implemented.
  M = lambda x: x
  diff = "M is not supported by the Jax backend."
  with pytest.raises(NotImplementedError, match=diff):
    backend.gmres(dummy_mv, b, M=M)
  A_kwargs = {"bee": "honey"}
  diff = "A_kwargs is not supported by the Jax backend."
  with pytest.raises(NotImplementedError, match=diff):
    backend.gmres(dummy_mv, b, A_kwargs=A_kwargs)
@pytest.mark.parametrize("dtype", np_dtypes)
def test_gmres_on_small_known_problem(dtype):
  """gmres recovers the known solution of a 2x2 linear system."""
  dtype = jax.numpy.zeros(1, dtype=dtype).dtype  # canonical jax dtype
  be = jax_backend.JaxBackend()
  A = jax.numpy.array(([[1, 1], [3, -4]]), dtype=dtype)
  b = jax.numpy.array([3, 2], dtype=dtype)
  def A_mv(x):
    return A @ x
  tol = 100*jax.numpy.finfo(dtype).eps
  x, _ = be.gmres(
      A_mv, b, x0=jax.numpy.ones(2, dtype=dtype), num_krylov_vectors=2,
      tol=tol)
  solution = jax.numpy.array([2., 1.], dtype=dtype)
  eps = jax.numpy.linalg.norm(jax.numpy.abs(solution) - jax.numpy.abs(x))
  assert eps < tol
@pytest.mark.parametrize("dtype", np_dtypes)
def test_gmres_with_args(dtype):
  """Extra operator arguments are forwarded to A_mv via A_args."""
  dtype = jax.numpy.zeros(1, dtype=dtype).dtype  # canonical jax dtype
  be = jax_backend.JaxBackend()
  A = jax.numpy.zeros((2, 2), dtype=dtype)
  B = jax.numpy.array(([[0, 1], [3, 0]]), dtype=dtype)
  C = jax.numpy.array(([[1, 0], [0, -4]]), dtype=dtype)
  b = jax.numpy.array([3, 2], dtype=dtype)
  def A_mv(x, B, C):
    return (A + B + C) @ x
  tol = 100*jax.numpy.finfo(dtype).eps
  x, _ = be.gmres(
      A_mv, b, A_args=[B, C], x0=jax.numpy.ones(2, dtype=dtype),
      num_krylov_vectors=2, tol=tol)
  solution = jax.numpy.array([2., 1.], dtype=dtype)
  eps = jax.numpy.linalg.norm(jax.numpy.abs(solution) - jax.numpy.abs(x))
  assert eps < tol
@pytest.mark.parametrize("dtype", np_dtypes)
def test_gmres_on_larger_random_problem(dtype):
  """GMRES recovers a random solution of a dense 100x100 system."""
  dtype = jax.numpy.zeros(1, dtype=dtype).dtype
  backend = jax_backend.JaxBackend()
  mat = backend.randn((100, 100), seed=10, dtype=dtype)
  solution = backend.randn((100,), seed=10, dtype=dtype)

  def matvec(x):
    return mat @ x

  # Manufacture a right-hand side whose solution is known.
  rhs = matvec(solution)
  tol = rhs.size * jax.numpy.finfo(dtype).eps
  result, _ = backend.gmres(matvec, rhs, tol=tol, num_krylov_vectors=100)
  err = jax.numpy.linalg.norm(jax.numpy.abs(result) - jax.numpy.abs(solution))
  rtol = tol * jax.numpy.linalg.norm(rhs)
  assert err < max(rtol, tol)
@pytest.mark.parametrize("dtype", np_dtypes)
def test_gmres_not_matrix(dtype):
  """GMRES works when the operator acts on reshaped (non-vector) tensors.

  The linear operator contracts a rank-4 tensor with a rank-2 input, so
  this exercises gmres with operands that are not flat vectors/matrices.
  """
  dummy = jax.numpy.zeros(1, dtype=dtype)
  dtype = dummy.dtype
  backend = jax_backend.JaxBackend()
  matshape = (100, 100)
  vecshape = (100,)
  A = backend.randn(matshape, dtype=dtype, seed=10)
  A = backend.reshape(A, (2, 50, 2, 50))
  solution = backend.randn(vecshape, dtype=dtype, seed=10)
  solution = backend.reshape(solution, (2, 50))
  def A_mv(x):
    return backend.einsum('ijkl,kl', A, x)
  b = A_mv(solution)
  # Fix: use jax.numpy.finfo for consistency with the sibling gmres tests
  # (previously np.finfo; the resulting eps value is identical).
  tol = b.size * jax.numpy.finfo(dtype).eps
  x, _ = backend.gmres(A_mv, b, tol=tol, num_krylov_vectors=100)
  err = jax.numpy.linalg.norm(jax.numpy.abs(x) - jax.numpy.abs(solution))
  rtol = tol * jax.numpy.linalg.norm(b)
  atol = tol
  assert err < max(rtol, atol)
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("offset", range(-2, 2))
@pytest.mark.parametrize("axis1", range(0, 3))
@pytest.mark.parametrize("axis2", range(0, 3))
def test_diagonal(dtype, offset, axis1, axis2):
  """backend.diagonal matches jax.numpy.diagonal; equal axes must raise."""
  backend = jax_backend.JaxBackend()
  array = backend.randn((5, 5, 5, 5), dtype=dtype, seed=10)
  if axis1 != axis2:
    actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
    expected = jax.numpy.diagonal(array, offset=offset, axis1=axis1,
                                  axis2=axis2)
    np.testing.assert_allclose(actual, expected)
  else:
    # Taking a diagonal over the same axis twice is invalid.
    with pytest.raises(ValueError):
      backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("offset", range(-2, 2))
def test_diagflat(dtype, offset):
  """backend.diagflat agrees with diag of the raveled input."""
  backend = jax_backend.JaxBackend()
  tensor = backend.randn((5, 5, 5, 5), dtype=dtype, seed=10)
  reference = jax.numpy.diag(jax.numpy.ravel(tensor), k=offset)
  np.testing.assert_allclose(backend.diagflat(tensor, k=offset), reference)
@pytest.mark.parametrize("dtype", np_dtypes)
@pytest.mark.parametrize("offset", range(-2, 2))
@pytest.mark.parametrize("axis1", range(0, 3))
@pytest.mark.parametrize("axis2", range(0, 3))
def test_trace(dtype, offset, axis1, axis2):
  """backend.trace matches jax.numpy.trace; equal axes must raise."""
  backend = jax_backend.JaxBackend()
  array = backend.randn((5, 5, 5, 5), dtype=dtype, seed=10)
  if axis1 != axis2:
    actual = backend.trace(array, offset=offset, axis1=axis1, axis2=axis2)
    expected = jax.numpy.trace(array, offset=offset, axis1=axis1, axis2=axis2)
    np.testing.assert_allclose(actual, expected)
  else:
    # Tracing over the same axis twice is invalid.
    with pytest.raises(ValueError):
      backend.trace(array, offset=offset, axis1=axis1, axis2=axis2)
@pytest.mark.parametrize("dtype", np_dtypes)
def test_abs(dtype):
  """backend.abs agrees elementwise with jax.numpy.abs."""
  backend = jax_backend.JaxBackend()
  tensor = backend.randn((4, 3, 2), dtype=dtype, seed=10)
  np.testing.assert_allclose(jax.numpy.abs(tensor), backend.abs(tensor))
@pytest.mark.parametrize("dtype", np_dtypes)
def test_sign(dtype):
  """backend.sign agrees elementwise with jax.numpy.sign."""
  backend = jax_backend.JaxBackend()
  tensor = backend.randn((4, 3, 2), dtype=dtype, seed=10)
  np.testing.assert_allclose(jax.numpy.sign(tensor), backend.sign(tensor))
@pytest.mark.parametrize("pivot_axis", [-1, 1, 2])
@pytest.mark.parametrize("dtype", np_dtypes)
def test_pivot(dtype, pivot_axis):
  """backend.pivot flattens a tensor into a matrix split at pivot_axis."""
  shape = (4, 3, 2, 8)
  backend = jax_backend.JaxBackend()
  tensor = backend.randn(shape, dtype=dtype, seed=10)
  # Dimensions before the pivot form the rows; the rest form the columns.
  matrix_shape = (np.prod(shape[:pivot_axis]), np.prod(shape[pivot_axis:]))
  np.testing.assert_allclose(tensor.reshape(matrix_shape),
                             backend.pivot(tensor, pivot_axis=pivot_axis))
@pytest.mark.parametrize("dtype, atol", [(np.float32, 1E-6),
                                         (np.float64, 1E-10),
                                         (np.complex64, 1E-6),
                                         (np.complex128, 1E-10)])
def test_inv(dtype, atol):
  """backend.inv returns a two-sided inverse and rejects non-matrices."""
  backend = jax_backend.JaxBackend()
  matrix = backend.randn((10, 10), dtype=dtype, seed=10)
  inverse = backend.inv(matrix)
  identity = np.eye(10)
  np.testing.assert_allclose(inverse @ matrix, identity, atol=atol)
  np.testing.assert_allclose(matrix @ inverse, identity, atol=atol)
  # Rank-3 input is not a matrix and must be rejected.
  with pytest.raises(ValueError, match="input to"):
    backend.inv(backend.randn((10, 10, 10), dtype=dtype, seed=10))
@pytest.mark.parametrize("dtype", np_dtypes)
def test_item(dtype):
  """backend.item extracts the scalar of a one-element tensor."""
  backend = jax_backend.JaxBackend()
  single = backend.randn((1,), dtype=dtype, seed=10)
  assert single.item() == backend.item(single)
@pytest.mark.parametrize("dtype", np_dtypes)
def test_power(dtype):
  """backend.power matches jax.numpy.power for tensor and scalar exponents."""
  backend = jax_backend.JaxBackend()
  base = backend.randn((4, 3, 2), dtype=dtype, seed=10)
  exponent = backend.randn((4, 3, 2), dtype=dtype, seed=10)
  # Elementwise tensor exponent.
  np.testing.assert_allclose(jax.numpy.power(base, exponent),
                             backend.power(base, exponent))
  # Scalar exponent.
  scalar = np.random.rand(1)[0]
  np.testing.assert_allclose(jax.numpy.power(base, scalar),
                             backend.power(base, scalar))
@pytest.mark.parametrize("dtype", np_dtypes)
def test_eps(dtype):
  """backend.eps reports the machine epsilon of the given dtype."""
  backend = jax_backend.JaxBackend()
  assert np.finfo(dtype).eps == backend.eps(dtype)
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Utilities to handle tensor tracer parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
TRACE_MODE_NAN_INF = 'nan-inf'
TRACE_MODE_PART_TENSOR = 'part-tensor'
TRACE_MODE_FULL_TENSOR = 'full-tensor'
TRACE_MODE_FULL_IF_NAN = 'trace-back-if-nan'
TRACE_MODE_NORM = 'norm'
TRACE_MODE_MAX_ABS = 'max-abs'
TRACE_MODE_SUMMARY = 'summary'
# summary mode to collects a finite set of signatures for each traced tensor,
# (such as norm, max, min, mean) and dumps it using tb summaries.
TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary'
# Full tensor mode dumps the whole tensor values for the traced tensors without
# any processing on them; using tb summaries.
_FLAG_NAME_TRACE_STACK_SIZE = 'trace_stack_size'
_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'
_FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')
_FLAG_NAME_ENABLE = 'enable'
_FLAG_NAME_TRACE_MODE = 'trace_mode'
_FLAG_NAME_USE_COMPACT_TRACE = 'compact_trace'
_FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
_FLAG_NAME_TRACE_BEFORE_OPS = 'trace_before_included_ops'
_FLAG_NAME_TRACE_AFTER_OPS = 'trace_after_included_ops'
_FLAG_NAME_SUBMODE = 'submode'
_FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS = 'include_less_interesting_ops'
_FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
_FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
_FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
_FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
_FLAG_NAME_INCLUDED_CORES = 'included_cores'
_FLAG_NAME_TRACE_LEVEL = 'trace_level'
_FLAG_NAME_TRACE_DIR = 'trace_dir'
_FLAG_NAME_REPORT_FILE = 'report_file'
_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
_FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
_FLAG_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
_FLAG_SUMMARY_SIGNATURES = 'signatures'
_FLAG_NAME_SUMMARY_PER_CORE = 'collect_summary_per_core'
_TT_DEFAULT_TRACE_LEVEL = 3
_TT_PREFIX = 'tensor_tracer'
_TT_NORM = 'norm'
_TT_MAX = 'max'
_TT_MIN = 'min'
_TT_MEAN = 'mean'
_TT_VAR = 'var'
_TT_SIZE = 'size'
TT_SUMMARY_NORM = '%s_%s' % (_TT_PREFIX, _TT_NORM)
TT_SUMMARY_MAX = '%s_%s' % (_TT_PREFIX, _TT_MAX)
TT_SUMMARY_MIN = '%s_%s' % (_TT_PREFIX, _TT_MIN)
TT_SUMMARY_MEAN = '%s_%s' % (_TT_PREFIX, _TT_MEAN)
TT_SUMMARY_VAR = '%s_%s' % (_TT_PREFIX, _TT_VAR)
TT_SUMMARY_SIZE = '%s_%s' % (_TT_PREFIX, _TT_SIZE)
TT_SUMMARY_SIGNATURES = (TT_SUMMARY_NORM, TT_SUMMARY_MAX, TT_SUMMARY_MIN,
TT_SUMMARY_MEAN, TT_SUMMARY_VAR, TT_SUMMARY_SIZE)
_TT_DEFAULT_TRACE_LEVEL = 3
class TTParameters(object):
  """A class that handles the parameters of Tensor Tracer."""

  def __init__(self, env=None):
    # A mapping can be injected as `env` (e.g. for tests); otherwise flags
    # are read from the real process environment.
    if env:
      self._env = env
    else:
      self._env = os.environ
    self._validate_flag_names()
    self.trace_mode = self._get_trace_mode()
    self.submode = self._get_submode()
    self.trace_dir = self._get_trace_dir()
    self.report_file_path = self._get_report_filepath()
    self.op_range = self._get_op_range()
    self.excluded_opname_re_list = self._flag_value_to_re_list(
        _FLAG_NAME_EXCLUDED_OPNAMES)
    self.excluded_optype_re_list = self._flag_value_to_re_list(
        _FLAG_NAME_EXCLUDED_OPTYPES)
    self.included_opname_re_list = self._flag_value_to_re_list(
        _FLAG_NAME_INCLUDED_OPNAMES)
    self.included_optype_re_list = self._flag_value_to_re_list(
        _FLAG_NAME_INCLUDED_OPTYPES)
    self.is_conditional_trace = self._is_conditional_trace_mode()
    self.trace_scalar_ops = self.is_flag_on(_FLAG_NAME_TRACE_SCALAR_OPS)
    self.use_compact_trace = self.is_flag_on(_FLAG_NAME_USE_COMPACT_TRACE)
    # _trace_ops_before_included and _trace_ops_after_included denotes to depth
    # of tracing relative to the ops given in --included_opnames or
    # --included_optypes
    # For example, in the below graph
    #   op1 --> op2 --> op3 --> op4 --> op5
    # If --included_opnames=op3 then only op3 will be traced.
    # If also --trace_before_included_ops=2 (_trace_ops_before_included), then
    # op1 and op2 will be traced as they are at most 2 hops apart from an
    # included op. Similarly, if --trace_after_included_ops=2, then op4 and op5
    # will also be traced.
    self.trace_ops_before_included = self._get_flag_int_value(
        _FLAG_NAME_TRACE_BEFORE_OPS, 0)
    self.trace_ops_after_included = self._get_flag_int_value(
        _FLAG_NAME_TRACE_AFTER_OPS, 0)
    self.trace_stack_size = self._get_flag_int_value(
        _FLAG_NAME_TRACE_STACK_SIZE, 1)
    _, self.graph_dump_path = self.get_flag_value(
        _FLAG_DUMP_BEFORE_AFTER_GRAPHS)
    self.included_cores = self._flag_value_as_int_list(
        _FLAG_NAME_INCLUDED_CORES)
    self.include_less_interesting_ops = self.is_flag_on(
        _FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS)
    self.trace_level = self._get_flag_int_value(
        _FLAG_NAME_TRACE_LEVEL, _TT_DEFAULT_TRACE_LEVEL)
    self.summary_signatures = self._get_summary_signatures()
    self.collect_summary_per_core = self.is_flag_on(_FLAG_NAME_SUMMARY_PER_CORE)

  def _is_conditional_trace_mode(self):
    # Only trace-back-if-nan mode conditions the trace on runtime values.
    return self.trace_mode == TRACE_MODE_FULL_IF_NAN

  def _get_report_filepath(self):
    """Sets the path of the output report file."""
    found, report_file_path = self.get_flag_value(
        _FLAG_NAME_REPORT_FILE)
    # When writing into the test-undeclared-outputs dir, the report path must
    # be relative so it can be joined onto that directory.
    if found and report_file_path \
       and self.use_test_undeclared_outputs_dir():
      if os.path.isabs(report_file_path):
        raise ValueError('If use_test_undeclared_outputs_dir is set,'
                         'report_file_path cannot be an absolute path (%s)'
                         %report_file_path)
      outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
      report_file_path = os.path.join(outputs_dir, report_file_path)
    return report_file_path

  def _get_op_range(self):
    """Sets the index range of the Ops that we will consider tracing."""
    found, op_range = self.get_flag_value(_FLAG_NAME_OP_RANGE)
    if not found or not op_range:
      op_range = (-1, -1)  # this means including all ops.
      return op_range
    # The flag must look like "<begin>:<end>"; anything else means all ops.
    match = _OP_RANGE_PAT.match(op_range)
    if not match:
      op_range = (-1, -1)  # this means including all ops.
      return op_range
    op_range = (int(match.group(1)), int(match.group(2)))
    return op_range

  def _get_trace_dir(self):
    found, trace_dir = self.get_flag_value(_FLAG_NAME_TRACE_DIR)
    # --trace_dir and --use_test_undeclared_outputs_dir are mutually
    # exclusive ways of choosing the output location.
    if found and trace_dir \
       and self.use_test_undeclared_outputs_dir():
      raise ValueError('Cannot not use --%s and --%s at the same time'
                       %(_FLAG_NAME_TRACE_DIR,
                         _FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
    if self.use_test_undeclared_outputs_dir():
      trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
    return trace_dir

  def _get_trace_mode(self):
    """Checks if the given trace mode is valid."""
    found, trace_mode = self.get_flag_value(_FLAG_NAME_TRACE_MODE)
    if not found or not trace_mode:
      trace_mode = TRACE_MODE_NORM  # default trace mode
    valid_trace_modes = [
        TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR,
        TRACE_MODE_NORM, TRACE_MODE_MAX_ABS, TRACE_MODE_FULL_IF_NAN,
        TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY
    ]
    if trace_mode not in valid_trace_modes:
      raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer.'
                       'Valid trace modes are: %s'%(trace_mode,
                                                    valid_trace_modes))
    return trace_mode

  def is_brief_mode(self):
    return self.submode == _SUBMODE_BRIEF

  def _get_submode(self):
    """Checks if the given submode is valid."""
    found, submode = self.get_flag_value(_FLAG_NAME_SUBMODE)
    if not found or not submode:
      submode = _SUBMODE_DETAILED  # default submode
    # NOTE(review): this check can never trigger -- submode was defaulted to
    # a truthy value just above.
    if not submode:
      return
    valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
    if submode not in valid_submodes:
      raise ValueError('Invalid submode "%s" given to the Tensor_Tracer.'
                       'Valid submodes are: %s'%(submode,
                                                 valid_submodes))
    return submode

  @staticmethod
  def match_next_flag(flags, pos):
    """Returns the match for the next TensorTracer flag.

    Args:
       flags: a string that contains the flags.
       pos: where in flags to start the search.

    Returns:
       A pair where the first element is the regular-expression
       match found and the second element indicates if the match
       has a value.
    """
    # Quoted forms are tried first so that quoted values containing spaces
    # are not truncated by the no-quote pattern.
    match = _FLAG_DOUBLE_QUOTE_PAT.match(flags, pos)
    if match:
      return match, True
    match = _FLAG_SINGLE_QUOTE_PAT.match(flags, pos)
    if match:
      return match, True
    match = _FLAG_NO_QUOTE_PAT.match(flags, pos)
    if match:
      return match, True
    match = _FLAG_NO_EQUAL_PAT.match(flags, pos)
    if match:
      # The flag is found but is not given a value.
      return match, False
    # The flag is not found.
    return None, False

  def _validate_flag_names(self):
    """Validates if the TensorTrace flags passed are valid."""
    valid_flag_names = [
        _FLAG_NAME_ENABLE, _FLAG_NAME_TRACE_MODE, _FLAG_NAME_USE_COMPACT_TRACE,
        _FLAG_NAME_TRACE_SCALAR_OPS, _FLAG_NAME_TRACE_BEFORE_OPS,
        _FLAG_NAME_TRACE_AFTER_OPS, _FLAG_NAME_TRACE_STACK_SIZE,
        _FLAG_NAME_SUBMODE, _FLAG_NAME_EXCLUDED_OPNAMES,
        _FLAG_NAME_EXCLUDED_OPTYPES, _FLAG_NAME_INCLUDED_OPNAMES,
        _FLAG_NAME_INCLUDED_OPTYPES, _FLAG_NAME_TRACE_DIR,
        _FLAG_NAME_INCLUDED_CORES, _FLAG_NAME_REPORT_FILE,
        _FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
        _FLAG_NAME_INCLUDE_LESS_INTERESTING_OPS, _FLAG_NAME_OP_RANGE,
        _FLAG_DUMP_BEFORE_AFTER_GRAPHS, _FLAG_NAME_TRACE_LEVEL,
        _FLAG_SUMMARY_SIGNATURES, _FLAG_NAME_SUMMARY_PER_CORE
    ]
    tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
    if not tensor_tracer_flags:
      return
    # Walk the flag string left to right, rejecting any unknown flag name.
    pos = 0
    while True:
      match, _ = TTParameters.match_next_flag(tensor_tracer_flags, pos)
      if not match:
        break
      flag_name = match.group(1)
      if flag_name not in valid_flag_names:
        raise ValueError(
            'The flag name "%s" passed via the environment variable "%s" '
            'is invalid. Valid flag names are:'
            '\n%s'%(flag_name, _FLAGS_ENV_VAR, valid_flag_names))
      pos = match.end()

  def _get_summary_signatures(self):
    """Verifies and returns the summary signatures.

    Returns:
      A dictionary of the signature identifiers {signature: index} that will be
      computed when trace_mode is summary.
    """
    signatures = self._flag_value_as_list(_FLAG_SUMMARY_SIGNATURES)
    tt_signatures = []
    for signature in signatures:
      # Accept both the bare name ('norm') and the prefixed form
      # ('tensor_tracer_norm').
      signature_with_prefix = '%s_%s' % (_TT_PREFIX, signature)
      if signature in TT_SUMMARY_SIGNATURES:
        tt_signatures.append(signature)
      elif signature_with_prefix in TT_SUMMARY_SIGNATURES:
        tt_signatures.append(signature_with_prefix)
      else:
        logging.warning('Unknown signature:%s. Supported signatures: %s' % (
            signature, TT_SUMMARY_SIGNATURES))
    if not tt_signatures:
      # Default case collects norm and max only.
      return {TT_SUMMARY_MAX: 0, TT_SUMMARY_NORM: 1}
    else:
      return {signature: idx for idx, signature in enumerate(tt_signatures)}

  def get_signature_to_agg_fn_map(self):
    """Returns a map that contains the aggragate function for each signature."""
    return {TT_SUMMARY_NORM: linalg_ops.norm,
            TT_SUMMARY_MAX: math_ops.reduce_max,
            TT_SUMMARY_MIN: math_ops.reduce_min,
            TT_SUMMARY_MEAN: math_ops.reduce_mean,
            TT_SUMMARY_VAR: math_ops.reduce_max,  # Simply reduce max variance.
            TT_SUMMARY_SIZE: math_ops.reduce_sum}

  def _flag_value_as_list(self, wanted_flag_name):
    """Returns the string list of a TensorTracer flag.

    Args:
      wanted_flag_name: the name of the flag we are looking for.

    Returns:
      The list value of the flag.
    """
    string_value_list = []
    found, flag_value = self.get_flag_value(wanted_flag_name)
    if found:
      # Flag lists are comma-separated.
      string_value_list = flag_value.split(',')
    return string_value_list

  def _flag_value_as_int_list(self, wanted_flag_name):
    """Returns the integer list of a TensorTracer flag.

    Args:
      wanted_flag_name: the name of the flag we are looking for.

    Returns:
      the value of the flag.
    Raises:
      RuntimeError: If supposedly deadcode is reached.
    """
    int_list = []
    found, flag_value = self.get_flag_value(wanted_flag_name)
    if found:
      try:
        integer_values = flag_value.split(',')
        int_list = [int(int_val) for int_val in integer_values]
      except ValueError:
        # Non-numeric entries leave int_list as whatever was parsed so far.
        logging.warning('Cannot convert %s to int for flag %s', int_list,
                        wanted_flag_name)
    return int_list

  def _get_flag_int_value(self, wanted_flag_name, default_value):
    """Returns the int value of a TensorTracer flag.

    Args:
      wanted_flag_name: the name of the flag we are looking for.
      default_value: the default value for the flag, if not provided.
    Returns:
      the value of the flag.
    Raises:
      RuntimeError: If supposedly deadcode is reached.
    """
    flag_int_value = default_value
    found, flag_value = self.get_flag_value(wanted_flag_name)
    if found:
      try:
        flag_int_value = int(flag_value)
      except ValueError:
        # Fall back to the default on a malformed value.
        logging.warning('Cannot convert %s to int for flag %s' % (
            flag_int_value, wanted_flag_name))
    return flag_int_value

  def get_flag_value(self, wanted_flag_name):
    """Returns the value of a TensorTracer flags.

    Args:
      wanted_flag_name: the name of the flag we are looking for.

    Returns:
      A pair where the first element indicates if the flag is
      found and the second element is the value of the flag.

    Raises:
      RuntimeError: If supposedly deadcode is reached.
    """
    tensor_tracer_flags = self._env.get(_FLAGS_ENV_VAR)
    if not tensor_tracer_flags:
      return False, None
    # Scan the flag string until the wanted flag is found or input runs out.
    pos = 0
    while True:
      match, has_value = TTParameters.match_next_flag(
          tensor_tracer_flags, pos)
      if not match:
        return False, None
      flag_name = match.group(1)
      if has_value:
        flag_value = match.group(2)
      else:
        flag_value = None
      if flag_name == wanted_flag_name:
        return True, flag_value
      pos = match.end()
    # NOTE(review): unreachable -- the loop above always returns.
    raise RuntimeError('Should not reach here.')

  def _flag_value_to_re_list(self, flag_name):
    """Converts list of strings to compiled RE."""
    re_list = []
    found, flag_value = self.get_flag_value(flag_name)
    if not found or not flag_value:
      return re_list
    # Patterns are whitespace-separated within the flag value.
    list_of_values = flag_value.split()
    for v in list_of_values:
      r = re.compile(v)
      re_list.append(r)
    return re_list

  def is_flag_on(self, flag_name):
    """Returns True if the given flag is on."""
    found, flag_value = self.get_flag_value(flag_name)
    if not found:
      return False
    if flag_value is None:
      # Bare "--flag" with no value counts as enabled.
      return True
    # Depends on the flag value.
    flag_value = flag_value.lower()
    enabled = flag_value in ['1', 't', 'true', 'y', 'yes']
    return enabled

  def is_enabled(self):
    """Returns True if TensorTracer is enabled."""
    if self.is_flag_on(_FLAG_NAME_ENABLE):
      logging.info('Tensor Tracer is enabled with flags %s.' %
                   self._env.get(_FLAGS_ENV_VAR))
      return True
    else:
      return False

  def use_test_undeclared_outputs_dir(self):
    """Decides the output directory of the report and trace files.

    Args:
       None.

    Returns:
       True if the output files should be written to the
       test-undeclared-outputs-directory defined via an
       env variable.
    """
    return self.is_flag_on(_FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
|
|
# -*- coding: utf-8; -*-
# Details about the API are given at
# http://ccg.doc.gold.ac.uk/research/flowr/flowrweb/ (Click through
# "Editor" to switch to Admin mode, and then click "API".)
# See API.org in this directory for a summary of commands.
# Here is the simple sample Unix command to show the basic idea of how
# it works:
# curl --data "api_token=3pkrxjrduMASh4ZAIUbpWOmLLDkYNz9p&api_email=MYEMAIL&c=test_access" http://ccg.doc.gold.ac.uk/research/flowr/flowrweb/
# The example python call corresponding to the above is:
# python -c 'import flowr_web; print ( flowr_web.test_access() )'
# Note: before committing this code, run the hide-token.sh sed script
# (or a variant) to remove the tokens. Before running the code, run the
# show-token.sh script, or nt.sh NEWTOKEN
from hammock import Hammock as FloWrWeb
import json
import urllib
import sys
import base64
import filecmp
import re
from pprint import pprint
# Root API endpoint; every command below is issued as a POST to this URL.
flowr = FloWrWeb('http://ccg.doc.gold.ac.uk/research/flowr/flowrweb/')
user = 'holtzermann17@gmail.com' # to be replaced before/after committing!
# NOTE(review): hardcoded API credential -- the hide-token.sh script mentioned
# in the header is expected to scrub this before committing.
token = 'YwlF6aiZNiStiZlMVvQHSqJUbiPhFsuR' #
# Headers sent with every request (form-encoded UTF-8 payloads).
standard_headers = {'Accept-Charset': "UTF-8",
                    'Content-Type': "application/x-www-form-urlencoded;charset=UTF-8"}
## Relevant functions
def test_access():
    """Check that the API token and email grant access."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "test_access"})
    # This command answers with a plain string rather than JSON.
    return resp._content
def list_all_nodes():
    """Get a list of available node types."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "list_all_nodes"})
    # The response body is JSON; decode it for the caller.
    return json.loads(resp._content)
# Initially returns an empty list
def list_user_charts():
    """Meta-data for all owned charts."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "list_user_charts"})
    # JSON list of chart descriptions (empty until charts are created).
    return json.loads(resp._content)
# For some reason the numbering starts at 2.
# After charts have been deleted, new charts are renumbered from the beginning.
def new_chart():
    """Creates a new chart."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "new_chart"})
    # Plain-string id of the freshly created chart.
    return resp._content
def delete_chart(cid):
    """Deletes an unlocked user chart."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "delete_chart",
                            "cid": cid})
    # "ok" on success, "error" otherwise.
    return resp._content
def add_node(cid, node_type):
    """Adds a node of the given type to the chart `cid`."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "add_node",
                            "cid": cid,
                            "type": node_type})
    # Returns the node type with a name-and-number suffix, e.g.
    # text.retrievers.Dictionary.Dictionary_0 for the first Dictionary added.
    # This *string* is the node's "nid" or "nodeID".
    return resp._content
def delete_node(cid, nid):
    """Deletes a node from a chart."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "delete_node",
                            "cid": cid,
                            "nid": nid})
    # JSON of the updated chart; an *empty* chart with everything deleted
    # looks like {u'arrows': [], u'boxes': []}.
    return json.loads(resp._content)
def get_chart(cid):
    """Get current chart structure."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_chart",
                            "cid": cid})
    # JSON chart description; a key feature is each node's nodeID.
    return json.loads(resp._content)
# JAC: I'm not sure what this does, presumably the chart needs to be run first.
def clear_output(cid):
    """Clear the chart's stored output (presumably after a run -- TODO confirm)."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "clear_output",
                            "cid": cid})
    # No JSON this time; just returns 'OK'.
    return resp._content
def run_chart(cid):
    """Run the chart."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "run_chart",
                            "cid": cid})
    # URL at which the chart's run status can be polled.
    return resp._content
def run_status(cid):
    """Poll the run status of the chart."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "run_status",
                            "cid": cid})
    # Current status as JSON, e.g. {"status": "idle"}.
    return json.loads(resp._content)
def get_parameters(cid, nid):
    """Get details of all the node's parameters."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_parameters",
                            "cid": cid,
                            "nid": nid})
    # Parameter descriptions as JSON.
    return json.loads(resp._content)
# Hm, what function to we use to learn which parameters there are to be set?
# I just wrote some corresponding functions in the Clojure integration layer...
def set_parameter(cid, nid, pname, pvalue):
    """Set the value of a node parameter."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "set_parameter",
                            "cid": cid,
                            "nid": nid,
                            "pname": pname,
                            "pvalue": pvalue})
    # "saved" on success, "error" otherwise.
    return resp._content
def new_variable(cid, nid):
    """Creates a new output variable for the node."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "new_variable",
                            "cid": cid,
                            "nid": nid})
    # Something like "#newVar0", or "error" if the named node does not exist.
    return resp._content
def rename_variable(cid, nid, vname, nname):
    """Renames an existing variable."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "rename_variable",
                            "cid": cid,
                            "nid": nid,
                            "vname": vname,
                            "nname": nname})
    # JSON list of the variables associated with the node, e.g.
    # [{u'simpletype': u'TwitterOutput',
    #   u'type': u'ccg.flow.processnodes.text.retrievers.Twitter.TwitterOutput',
    #   u'name': u'#fooBar', u'defn': u''}]
    # It's not totally clear how one would get this without renaming, although
    # renaming a variable to its current name works as a read-only query.
    return json.loads(resp._content)
def delete_variable(cid, nid, vname):
    """Deletes an existing variable."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "delete_variable",
                            "cid": cid,
                            "nid": nid,
                            "vname": vname})
    # JSON list of the variables still associated with the node.
    return json.loads(resp._content)
def get_variables(cid, nid):
    """Gets a list of the node's output variables."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_variables",
                            "cid": cid,
                            "nid": nid})
    # JSON list of the output variables associated with the node.
    return json.loads(resp._content)
# JAC - it will return something like {text: answers[*], type: ArrayList<String>}.
# then, if you apply a definition like *answers[f1]* in a subsequent mapping,
# you will produce something of String type...
#
# f1 - is the first 1 element
# r10 - is some random 10 elements
# m3 - would be the middle 3 elements
#
# And anything where the number > 1 will be of list type.
def get_output_tree(cid, nid):
    """Get details of all the possible outputs."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_output_tree",
                            "cid": cid,
                            "nid": nid})
    # Output information as JSON.
    return json.loads(resp._content)
# JAC - this is how variables get connected together with arrows
def set_variable_definition(cid, nid, vname, vdef):
    """Changes a variable definition (this is how arrows get connected)."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "set_variable_definition",
                            "cid": cid,
                            "nid": nid,
                            "vname": vname,
                            "vdef": vdef})
    # JSON list of the variables associated with the node.
    return json.loads(resp._content)
def get_node_output(cid, nid):
    """Get the latest node output."""
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_node_output",
                            "cid": cid,
                            "nid": nid})
    # JSON-formatted output, returned as the raw string.
    return resp._content
# JAC - Unlike to the other variable-related functions, this doesn't take an nid
# I wonder why, is that intentional?
def get_variable_output(cid, vname):
    """Get the latest output of a named variable.

    Unlike the other variable-related commands this one takes no nid;
    it is unclear whether that is intentional in the API.
    """
    resp = flowr.POST(headers=standard_headers,
                      data={"api_token": token,
                            "api_email": user,
                            "c": "get_variable_output",
                            "cid": cid,
                            "vname": vname})
    # Decoded JSON output.
    return json.loads(resp._content)
# flowr_web.py ends here
|
|
"""
image.py
This module provides a simple interface to create a window, load an image and experiment
with image-based algorithms, many of which require pixel-by-pixel manipulation. This
is an educational module; it's not intended to replace the excellent Python Imaging Library, in fact
it uses PIL.
The module and its interface and some of the code were inspired/copied by/from John Zelle's graphics.py
which serves a similar purpose in the graphics primitive world.
"""
# Release Notes:
# Version 1.0 Fall 2005
#
# Brad Miller, Luther College
#
# Version 1.1 December 7, 2005
# Changes:
# Modify class name for base image to be AbstractImage This way we don't have a lower case
# class name running around. We still don't expect people to create an AbstractImage but
# rather create an image through FileImage, ListImage, or EmptyImage.
# Add ability to convert an image to a list
# Add save function to write an image back to disk.
#
# Version 1.2 November 2007
# Changes:
# Modify the setPosition function to position the image by the top left corner rather than
# the center.
# Add the exitOnClick method to ImageWin. Use this as the last call in the program to
# avoid early exits when running from the command line, and nasty hangs when running
# from within IDLE
#
# Version 1.3 May 2008
# Changes:
# Modify all code to be Python 3.0 ready. -- still runs under 2.x
# Modify all code so that if PIL is not available then image.py will still
# function using Tkimages. N.B. Tk restricts image types to gif or ppm
#
# Fix: narrowed the two bare `except:` clauses to `except ImportError:` so
# unrelated errors (KeyboardInterrupt, SystemExit, bugs inside the imported
# modules) are no longer silently swallowed.
try:
    import tkinter
except ImportError:
    # Python 2 fallback: the module was named Tkinter.
    import Tkinter as tkinter

# True when PIL (Pillow) is importable; without it, Tk restricts image
# types to gif or ppm (see header notes).
pilAvailable = True
try:
    from PIL import Image as PIL_Image
    from PIL import ImageTk
except ImportError:
    pilAvailable = False
#import exceptions
# Borrow some ideas from Zelle
# create an invisible global main root for all windows
tk = tkinter
_imroot = tk.Tk()
_imroot.withdraw()
def formatPixel(data):
    """Render an RGB triple or a Pixel as a Tk color string '{#rrggbb}'."""
    template = '{#%02x%02x%02x}'
    if type(data) == tuple:
        return template % data
    if isinstance(data, Pixel):
        return template % data.getColorTuple()
    # Unsupported types fall through and yield None, as before.
class ImageWin(tk.Canvas):
    """
    ImageWin: Make a frame to display one or more images.
    """
    def __init__(self,title="image window",width=640,height=640):
        """
        Create a window with a title, width and height.
        """
        # Each ImageWin is a Canvas inside its own Toplevel hanging off the
        # shared hidden root window (_imroot).
        master = tk.Toplevel(_imroot)
        master.protocol("WM_DELETE_WINDOW", self._close)
        #super(ImageWin, self).__init__(master, width=width, height=height)
        tk.Canvas.__init__(self, master, width=width, height=height)
        self.master.title(title)
        self.pack()
        master.resizable(0,0)
        self.foreground = "black"
        self.items = []
        # Last observed click position; None until the first click.
        self.mouseX = None
        self.mouseY = None
        self.bind("<Button-1>", self._onClick)
        self.height = height
        self.width = width
        # Optional user callback invoked on every left-click.
        self._mouseCallback = None
        self.trans = None
        _imroot.update()
    def _close(self):
        """Close the window"""
        self.master.destroy()
        self.quit()
        _imroot.update()
    def getMouse(self):
        """Wait for mouse click and return a tuple with x,y position in screen coordinates after
        the click"""
        self.mouseX = None
        self.mouseY = None
        # Spin the Tk event loop until _onClick records a click position.
        while self.mouseX is None or self.mouseY is None:
            self.update()
        return ((self.mouseX,self.mouseY))
    def setMouseHandler(self, func):
        # Register func(x, y) to be called on every left-click.
        self._mouseCallback = func
    def _onClick(self, e):
        self.mouseX = e.x
        self.mouseY = e.y
        if self._mouseCallback:
            self._mouseCallback(e.x, e.y)
    def exitOnClick(self):
        """When the Mouse is clicked close the window and exit"""
        self.getMouse()
        self._close()
    def exitonclick(self):
        # Lower-case alias for exitOnClick (turtle-style naming).
        self.exitOnClick()
class Pixel(object):
    """This simple class abstracts the RGB pixel values.

    Channels are validated against ``self.max`` (255 by default, or 1.0
    after ``setRange(1.0)``).
    """
    def __init__(self, red, green, blue):
        super(Pixel, self).__init__()
        self.__red = red
        self.__green = green
        self.__blue = blue
        # Upper bound used by the setters; see setRange.
        self.max = 255
    def getRed(self):
        """Return the red component of the pixel"""
        return self.__red
    def getGreen(self):
        """Return the green component of the pixel"""
        return self.__green
    def getBlue(self):
        """Return the blue component of the pixel"""
        return self.__blue
    def getColorTuple(self):
        """Return all color information as a tuple"""
        return (self.__red, self.__green, self.__blue)
    def setRed(self, red):
        """Modify the red component; raises ValueError outside [0, max]."""
        if self.max >= red >= 0:
            self.__red = red
        else:
            raise ValueError("Error: pixel value %d is out of range" % red)
    def setGreen(self, green):
        """Modify the green component; raises ValueError outside [0, max]."""
        if self.max >= green >= 0:
            self.__green = green
        else:
            raise ValueError("Error: pixel value %d is out of range" % green)
    def setBlue(self, blue):
        """Modify the blue component; raises ValueError outside [0, max]."""
        if self.max >= blue >= 0:
            self.__blue = blue
        else:
            raise ValueError("Error: pixel value %d is out of range" % blue)
    def __getitem__(self, key):
        """Allow new style pixel class to act like a color tuple:
        0 --> red
        1 --> green
        2 --> blue
        """
        if key == 0:
            return self.__red
        elif key == 1:
            return self.__green
        elif key == 2:
            return self.__blue
        else:
            # ValueError (not IndexError) kept for backward compatibility.
            raise ValueError("Error %d Index out of range" % key)
    def setRange(self, pmax):
        """Select the channel scale: 1.0 for float pixels, 255 for ints."""
        if pmax == 1.0:
            self.max = 1.0
        elif pmax == 255:
            self.max = 255
        else:
            # BUG FIX: the message used to say "256" although the accepted
            # value is 255 (as checked above).
            raise ValueError("Error range must be 1.0 or 255")
    def __str__(self):
        return str(self.getColorTuple())
    def __repr__(self):
        """Same representation as __str__: the (r, g, b) tuple."""
        return str(self.getColorTuple())
    red = property(getRed, setRed, None, "I'm the red property.")
    green = property(getGreen, setGreen, None, "I'm the green property.")
    blue = property(getBlue, setBlue, None, "I'm the blue property.")
class AbstractImage(object):
    """
    Create an image. The image may be created in one of four ways:
    1. From an image file such as gif, jpg, png, ppm for example: i = image('fname.jpg')
    2. From a list of lists
    3. From another image object
    4. By specifying the height and width to create a blank image.
    """
    imageCache = {}  # tk photoimages go here to avoid GC while drawn
    imageId = 1

    def __init__(self, fname=None, data=None, imobj=None, height=0, width=0):
        """
        An image can be created using any of the following keyword parameters. When image creation is
        complete the image will be an rgb image.
        fname: A filename containing an image. Can be jpg, gif, and others
        data: a list of lists representing the image. This might be something you construct by
        reading an ascii format ppm file, or an ascii art file and translate into rgb yourself.
        imobj: Make a copy of another image.
        height:
        width: Create a blank image of a particular height and width.

        Note: ``data`` previously defaulted to a shared mutable ``[]``; the
        ``None`` default is behaviorally identical because the ``elif data:``
        truthiness test treats both as false.
        """
        super(AbstractImage, self).__init__()
        # If PIL is available then use the PIL functions, otherwise fall back to Tk.
        if pilAvailable:
            self.loadImage = self.loadPILImage
            self.createBlankImage = self.createBlankPILImage
            self.setPixel = self.setPILPixel
            self.getPixel = self.getPILPixel
            self.save = self.savePIL
        else:
            self.loadImage = self.loadTkImage
            self.createBlankImage = self.createBlankTkImage
            self.setPixel = self.setTkPixel
            self.getPixel = self.getTkPixel
            self.save = self.saveTk
        if fname:
            self.loadImage(fname)
            self.imFileName = fname
        elif data:
            height = len(data)
            width = len(data[0])
            self.createBlankImage(height, width)
            for row in range(height):
                for col in range(width):
                    # BUG FIX: the original passed the raw list entry straight
                    # to Pixel(), which takes three separate r, g, b arguments
                    # and therefore always raised a TypeError. Accept either a
                    # ready-made Pixel or an (r, g, b) sequence.
                    value = data[row][col]
                    if not isinstance(value, Pixel):
                        value = Pixel(*value)
                    self.setPixel(col, row, value)
        elif height > 0 and width > 0:
            self.createBlankImage(height, width)
        elif imobj:
            self.im = imobj.copy()
        if pilAvailable:
            self.width, self.height = self.im.size
        else:
            self.width = self.im.width()
            self.height = self.im.height()
        self.centerX = self.width / 2 + 3  # +3 accounts for the ~3 pixel border in Tk windows
        self.centerY = self.height / 2 + 3
        self.id = None

    def loadPILImage(self, fname):
        """Load fname from disk with PIL and normalize it to RGB."""
        self.im = PIL_Image.open(fname)
        ni = self.im.convert("RGB")
        self.im = ni

    def loadTkImage(self, fname):
        """Load fname with Tk; without PIL only .gif and .ppm are supported."""
        sufstart = fname.rfind('.')
        if sufstart < 0:
            suffix = ""
        else:
            suffix = fname[sufstart:]
        if suffix not in ['.gif', '.ppm']:
            raise ValueError("Bad Image Type: %s : Without PIL, only .gif or .ppm files are allowed" % suffix)
        self.im = tkinter.PhotoImage(file=fname)

    def createBlankPILImage(self, height, width):
        """Create a black RGB image of the given size with PIL."""
        self.im = PIL_Image.new("RGB", (width, height))
        ni = self.im.convert("RGB")
        self.im = ni

    def createBlankTkImage(self, height, width):
        """Create a blank Tk PhotoImage of the given size."""
        self.im = tkinter.PhotoImage(height=height, width=width)

    def copy(self):
        """Return a copy of this image"""
        newI = AbstractImage(imobj=self.im)
        return newI

    def clone(self):
        """Return a copy of this image (synonym for copy)."""
        return self.copy()

    def getHeight(self):
        """Return the height of the image"""
        return self.height

    def getWidth(self):
        """Return the width of the image"""
        return self.width

    def getTkPixel(self, x, y):
        """Get a pixel at the given x,y coordinate. The pixel is returned as an rgb color tuple
        for example foo.getPixel(10,10) --> (10,200,156) """
        p = [int(j) for j in self.im.get(x, y).split()]
        return Pixel(p[0], p[1], p[2])

    def setTkPixel(self, x, y, pixel):
        """Set the color of a pixel at position x,y. The color must be specified as an rgb tuple (r,g,b) where
        the rgb values are between 0 and 255."""
        if x < self.getWidth() and y < self.getHeight():
            self.im.put(formatPixel(pixel.getColorTuple()), (x, y))
        else:
            raise ValueError("Pixel index out of range.")

    def getPILPixel(self, x, y):
        """Return the Pixel at x,y using PIL."""
        p = self.im.getpixel((x, y))
        return Pixel(p[0], p[1], p[2])

    def setPILPixel(self, x, y, pixel):
        """Set the pixel at x,y using PIL; raises ValueError when out of range."""
        if x < self.getWidth() and y < self.getHeight():
            self.im.putpixel((x, y), pixel.getColorTuple())
        else:
            raise ValueError("Pixel index out of range")

    def setPosition(self, x, y):
        """Set the position in the window where the top left corner of the window should be."""
        self.top = y
        self.left = x
        self.centerX = x + (self.width / 2) + 3
        self.centerY = y + (self.height / 2) + 3

    def getImage(self):
        """Return a Tk-drawable version of this image."""
        if pilAvailable:
            return ImageTk.PhotoImage(self.im)
        else:
            return self.im

    def draw(self, win):
        """Draw this image in the ImageWin window."""
        ig = self.getImage()
        self.imageCache[self.imageId] = ig  # save a reference else Tk loses it...
        AbstractImage.imageId = AbstractImage.imageId + 1
        self.canvas = win
        self.id = self.canvas.create_image(self.centerX, self.centerY, image=ig)
        _imroot.update()

    def saveTk(self, fname=None, ftype='gif'):
        """Write the image back to disk (gif or ppm only without PIL)."""
        if fname is None:
            fname = self.imFileName
        sufstart = fname.rfind('.')
        if sufstart < 0:
            suffix = ""
        else:
            suffix = fname[sufstart:]
        if suffix == "":
            suffix = "." + ftype
            fname = fname + suffix
        if suffix not in ['.gif', '.ppm']:
            raise ValueError("Without PIL, only .gif or .ppm files are allowed")
        try:
            self.im.write(fname, format=ftype)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            print("Error saving, Could Not open ", fname, " to write.")

    def savePIL(self, fname=None, ftype='jpg'):
        """Write the image back to disk using PIL."""
        if fname is None:
            fname = self.imFileName
        sufstart = fname.rfind('.')
        if sufstart < 0:
            suffix = ""
        else:
            suffix = fname[sufstart:]
        if suffix == "":
            suffix = "." + ftype
            fname = fname + suffix
        try:
            self.im.save(fname)
        except Exception:
            # Narrowed from a bare ``except:`` (see saveTk).
            print("Error saving, Could Not open ", fname, " to write.")

    def toList(self):
        """
        Convert the image to a List of Lists representation
        """
        res = []
        for i in range(self.height):
            res.append([])
            for j in range(self.width):
                res[i].append(self.getPixel(j, i))
        return res
class FileImage(AbstractImage):
    """An image loaded from a file on disk."""

    def __init__(self, thefile):
        # Delegate straight to AbstractImage's file-loading path.
        super(FileImage, self).__init__(fname=thefile)
class Image(FileImage):
    """Alias of FileImage kept for backward compatibility."""
    pass
class EmptyImage(AbstractImage):
    """A blank image of the given dimensions (note the cols, rows order)."""
    def __init__(self,cols,rows):
        super(EmptyImage, self).__init__(height = rows, width = cols)
class ListImage(AbstractImage):
    """An image constructed from a list-of-lists of pixel data."""
    def __init__(self,thelist):
        super(ListImage, self).__init__(data=thelist)
# Example program: Read in an image and calculate the negative.
if __name__ == '__main__':
    win = ImageWin("My Window",480,640)
    oImage = FileImage('example.png')
    print(oImage.getWidth(), oImage.getHeight())
    oImage.draw(win)
    myImage = oImage.copy()
    # Invert each channel of every pixel to build the photographic negative.
    for row in range(myImage.getHeight()):
        for col in range(myImage.getWidth()):
            v = myImage.getPixel(col,row)
            v.red = 255 - v.red
            v.green = 255 - v.green
            v.blue = 255 - v.blue
            # x = map(lambda x: 255-x, v)
            myImage.setPixel(col,row,v)
    # Display the negative to the right of the original.
    myImage.setPosition(myImage.getWidth()+1,0)
    myImage.draw(win)
    print(win.getMouse())
    myImage.save('testfoo.png')
    #~ print(myImage.toList())
    win.exitOnClick()
|
|
from __future__ import division
from datetime import datetime
import config
import logging
import os
import pickle
import re
import simplejson as json
logger = logging.getLogger("ghc.repos")
class Repo:
    """A GitHub repository record parsed from repos.txt.

    Fork lineage (``ancestors``/``descendants``) and per-language line
    counts are filled in later by _set_lineage and _set_languages.
    """
    def __init__(self, id, user, name, date, fork=None):
        self.id = int(id)
        self.user = user
        self.name = name
        # Dates in the source data are formatted 'YYYY-MM-DD'.
        self.created = datetime.strptime(date, '%Y-%m-%d').date()
        # Id of the repo this one was forked from, or None.
        self.fork = int(fork) if fork is not None else None
        # References to Repo objects that this is a direct
        # or indirect fork of.
        self.ancestors = []
        # References to Repo objects that directly or indirectly
        # fork this project
        self.descendants = []
        # An array of tuples that contains (language, raw lines, %lines)
        self.languages = []
    def __eq__(self, other):
        # Comparing against a non-Repo used to raise AttributeError;
        # returning NotImplemented defers to the other operand instead.
        if not isinstance(other, Repo):
            return NotImplemented
        return self.id == other.id
    def __str__(self):
        return "({0.id}, '{0.user}', '{0.name}', '{1}', {0.fork})".format(self, self.created.strftime('%Y-%m-%d'))
    def __repr__(self):
        return "Repo({0.id}, '{0.user}', '{0.name}', '{1}', {0.fork})".format(self, self.created.strftime('%Y-%m-%d'))
    def is_forked(self):
        """Return True if this repo was forked from another repo."""
        return self.fork is not None
    def __lt__(self, other):
        return self.id < other.id
    def __gt__(self, other):
        return self.id > other.id
    def __hash__(self):
        return self.id
    def to_json(self):
        """Serialize this repo (lineage as id lists) to a JSON string."""
        # Use the underscore on certain attributes
        # to force more desirable ordering.
        return json.dumps({
            '_id': self.id,
            '__user': self.user,
            '__name': self.name,
            '__created': self.created.strftime('%Y-%m-%d'),
            '_fork': self.fork,
            'ancestors': [ancestor.id for ancestor in self.ancestors],
            'descendants': [descendant.id for descendant in self.descendants],
            'languages': self.languages
        }, sort_keys=True, indent=2)
def get_repos():
    """
    Pull the data out of repos.txt into Repo objects.
    Hashes them by repo id.

    Returns a dict mapping repo id -> Repo. A pickled cache is used
    when available (see _load_repos/_store_repos).
    """
    repos = _load_repos()
    if repos:
        return repos
    logger.debug("Building repos")
    repos = {}
    # Close the data file deterministically instead of relying on GC.
    with open(os.path.join(config.SRC_DATA_PATH, 'repos.txt')) as repo_file:
        for line in repo_file:
            repo = _read_repo(line)
            repos[repo.id] = repo
    _set_lineage(repos)
    _set_languages(repos)
    _store_repos(repos, debug=True)
    return repos
def _get_ancestry_cmp(repo_map):
"""
Sort by created at. Time resolution is
not in milliseconds and there are frequent
ties. Use repo id as a tiebreaker.
This method must ensure that ancestors
show up before their dependents. There are
cases where this is tricky, such as a repo
forked from another repo that was itself
forked, and all this happened on the same
day. In that case, use the parent repos
as the parameters and recurse until a
definitive tie-breaking condition is found.
"""
def repo_sort(x,y):
ret = cmp(x.created, y.created)
if ret != 0: return ret
if not x.is_forked() and y.is_forked(): return -1
if x.is_forked() and not y.is_forked(): return 1
if x.is_forked() and y.is_forked(): return repo_sort(repo_map[x.fork], repo_map[y.fork])
return cmp(x.id, y.id)
return repo_sort
def _read_repo(line):
    """Parse one line of repos.txt into a Repo.

    Expected format: "<id>:<user>/<name>,<date>[,<fork_id>]".
    """
    fields = line.rstrip().split(',')
    repo_id, full_name = fields[0].split(':')
    user, name = full_name.split('/')
    # The optional third field is the id of the repo this was forked from.
    fork = fields[2] if len(fields) == 3 else None
    return Repo(repo_id, user, name, fields[1], fork)
def _set_lineage(repos):
    """
    Build the lineage data between repos
    """
    # The timestamps do not have a high enough resolution
    # for true sorting. This sorting algorithm will put
    # the repos in order by date, with the guarantee
    # that a given repo always comes after the repo
    # it was forked from.
    #
    # Note that the data from github contained a few
    # anomalies where the forked repo had a timestamp
    # that was later than its child. The assert
    # statement later in this method catches these
    # cases. They were fixed by hand.
    #
    # sorted(cmp=...) only exists on Python 2; cmp_to_key keeps the
    # exact same comparator working on Python 2.7 and 3.
    from functools import cmp_to_key
    sorted_repos = sorted(repos.values(), key=cmp_to_key(_get_ancestry_cmp(repos)))
    for repo in sorted_repos:
        if repo.is_forked():
            forked = repos[repo.fork]
            repo.ancestors.append(forked)
            forked.descendants.append(repo)
            # Use a local function to recurse through each
            # ancestor
            def update_ancestry_chain(r, f):
                assert len(f.ancestors) > 0, "Forked project {0} from {1} has no ancestors".format(f, r)
                # Add the current project as a descendant of its ancestors
                # Add each project as an ancestor of the current project
                for ancestor in f.ancestors:
                    if ancestor not in r.ancestors:
                        r.ancestors.append(ancestor)
                    if r not in ancestor.descendants:
                        ancestor.descendants.append(r)
                    # Recurse
                    if ancestor.is_forked():
                        update_ancestry_chain(r, ancestor)
            if forked.is_forked():
                update_ancestry_chain(repo, forked)
def _set_languages(repos):
    """
    Given a map of repo.id -> repo, add the language data in
    the lang.txt file.

    Each line looks like "<repo_id>:<lang>;<lines>,<lang>;<lines>,...".
    """
    # Close the file deterministically instead of relying on GC.
    with open(os.path.join(config.SRC_DATA_PATH, 'lang.txt')) as lang_file:
        for line in lang_file:
            repo_id, languages = line.split(':')
            total_lines = sum(int(count) for count in re.findall(r';(\d+),?', languages))
            if total_lines == 0:
                continue
            if int(repo_id) not in repos:
                logger.debug("Could not find repo {0} while setting lang".format(repo_id))
                continue
            repo = repos[int(repo_id)]
            for pair in languages.split(','):
                lang, lines = pair.split(';')
                # Store (language, raw line count, fraction of total lines).
                repo.languages.append((lang, int(lines), int(lines) / total_lines))
def _store_repos(repo_map, debug=False, overwrite=False):
    """
    Stores the repos hash to a pickled file.
    If debug is true, also generates a readable
    JSON text file.
    """
    path = os.path.join(config.CALC_DATA_PATH, 'repos.pickle')
    if os.path.exists(path) and not overwrite:
        return
    logger.debug("Dumping pickle file {0}".format(path))
    # Pickle streams are binary; opening in text mode ("w") corrupts them
    # on Python 3 and on Windows. Use "wb" and close the handle promptly.
    with open(path, "wb") as pickle_file:
        pickle.dump(repo_map, pickle_file)
    logger.debug("Dumped pickle file {0}".format(path))
    if debug:
        json_path = os.path.join(config.CALC_DATA_PATH, 'repos.json')
        with open(json_path, "w") as json_file:
            logger.debug("Dumping json file {0}".format(json_file.name))
            for repo in sorted(repo_map.values(), key=lambda x: x.id):
                json_file.write("{0}\n".format(repo.to_json()))
            logger.debug("Dumped json file {0}".format(json_file.name))
def _load_repos():
    """
    Loads a dict of repos from a pickled file.

    Returns None when no cache file exists yet.
    """
    path = os.path.join(config.CALC_DATA_PATH, 'repos.pickle')
    if not os.path.exists(path):
        return None
    logger.debug("Loading pickle file {0}".format(path))
    # Match _store_repos: pickles are binary, so read with "rb" and make
    # sure the handle is closed.
    with open(path, "rb") as pickle_file:
        return pickle.load(pickle_file)
|
|
# coding=utf-8
# Copyright 2020 The Real-World RL Suite Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for real-world environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import operator
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import realworldrl_suite.environments as rwrl
from realworldrl_suite.environments import realworld_env
NUM_DUMMY = 5
class EnvTest(parameterized.TestCase):
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testLoadEnv(self, domain_name, task_name):
"""Ensure it is possible to load the environment."""
env = rwrl.load(domain_name=domain_name, task_name=task_name)
env.reset()
self.assertIsNotNone(env)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyConstraintsPresent(self, domain_name, task_name):
"""Ensure observations contain 'constraints' when safety is specified."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={'enable': True})
env.reset()
step = env.step(0)
self.assertIn('constraints', step.observation.keys())
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testSafetyCoeff(self, domain_name, task_name):
    """Ensure a valid safety_coeff loads and out-of-range values are rejected."""
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        safety_spec={'enable': True, 'safety_coeff': 0.1})
    env.reset()
    step = env.step(0)
    self.assertIn('constraints', step.observation.keys())
    # Coefficients outside [0, 1] must be rejected at load time.
    for c in [2, -1]:
      with self.assertRaises(ValueError):
        env = rwrl.load(
            domain_name=domain_name,
            task_name=task_name,
            safety_spec={'enable': True, 'safety_coeff': c})
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testSafetyObservationsDisabled(self, domain_name, task_name):
"""Ensure safety observations can be disabled."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
safety_spec={
'enable': True,
'observations': False
})
env.reset()
step = env.step(0)
self.assertNotIn('constraints', step.observation.keys())
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testDelayActionsNoDelay(self, domain_name, task_name):
    """Ensure there is no action delay if not specified."""
    env = rwrl.load(domain_name=domain_name, task_name=task_name)
    env.reset()
    action_spec = env.action_spec()
    # Send zero action and make sure it is immediately executed.
    zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
    # copy.copy guards against the environment holding on to / mutating
    # the array we pass in.
    env.step(copy.copy(zero_action))
    np.testing.assert_array_equal(env.physics.control(), zero_action)
    # Send one action and make sure it is immediately executed.
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    # Clip to the spec's maximum where the spec is bounded.
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    env.step(copy.copy(one_action))
    np.testing.assert_array_equal(env.physics.control(), one_action)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testDelayActionsDelay(self, domain_name, task_name):
    """Ensure there is action delay as specified."""
    actions_delay = np.random.randint(low=1, high=10)
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        delay_spec={
            'enable': True,
            'actions': actions_delay
        })
    env.reset()
    action_spec = env.action_spec()
    zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    # Clip to the spec's maximum where the spec is bounded.
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    # Perform first action that fills up the buffer.
    env.step(copy.copy(zero_action))
    # Send one action and make sure zero action is still executed.
    for _ in range(actions_delay):
      env.step(copy.copy(one_action))
      np.testing.assert_array_equal(env.physics.control(), zero_action)
    # Make sure we finally perform the delayed one action.
    env.step(copy.copy(zero_action))
    np.testing.assert_array_equal(env.physics.control(), one_action)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testDelayObservationsNoDelay(self, domain_name, task_name):
    """Ensure there is no observation delay if not specified."""
    env = rwrl.load(domain_name=domain_name, task_name=task_name)
    env.reset()
    action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    obs1 = env._task.get_observation(env._physics)
    env.step(copy.copy(one_action))
    obs2 = env._task.get_observation(env._physics)
    # Make sure subsequent observations are different.
    array_equality = []
    for key in obs1:
      array_equality.append((obs1[key] == obs2[key]).all())
    # At least one observation entry must have changed after stepping.
    self.assertIn(False, array_equality)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testDelayObservationsDelay(self, domain_name, task_name):
    """Ensure there is observation delay as specified."""
    observations_delay = np.random.randint(low=1, high=10)
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        delay_spec={
            'enable': True,
            'observations': observations_delay
        })
    # [3] picks the observation field out of the returned step tuple.
    obs1 = env.reset()[3]
    action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    # Make sure subsequent observations are the same (clearing the buffer).
    for _ in range(observations_delay):
      obs2 = env.step(copy.copy(one_action))[3]
      for key in obs1:
        np.testing.assert_array_equal(obs1[key], obs2[key])
    # Make sure we finally observe a different observation.
    obs2 = env.step(copy.copy(one_action))[3]
    array_equality = []
    for key in obs1:
      array_equality.append((obs1[key] == obs2[key]).all())
    self.assertIn(False, array_equality)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseGaussianActions(self, domain_name, task_name):
    """Ensure there is an additive Gaussian noise to the action."""
    noise = 0.5
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={'gaussian': {
            'enable': True,
            'actions': noise
        }})
    env.reset()
    action_spec = env.action_spec()
    zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
    # Perform zero action.
    env.step(copy.copy(zero_action))
    # Verify that a non-zero action was actually performed: with a zero
    # input, any non-zero control must come from the added noise.
    np.testing.assert_array_compare(operator.__ne__, env.physics.control(),
                                    zero_action)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testAddedDummyObservations(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
dimensionality_spec={
'enable': True,
'num_random_state_observations': 5,
})
env.reset()
# Get observation from realworld task.
obs = env._task.get_observation(env._physics)
for i in range(5):
self.assertIn('dummy-{}'.format(i), obs.keys())
for i in range(6, 10):
self.assertNotIn('dummy-{}'.format(i), obs.keys())
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testAddedDummyObservationsFlattened(self, domain_name, task_name):
    """Ensure dummy observations enlarge the flattened observation vector."""
    base_env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        safety_spec={'enable': True},
        environment_kwargs=dict(flat_observation=True))
    base_env.reset()
    mod_env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        dimensionality_spec={
            'enable': True,
            'num_random_state_observations': NUM_DUMMY,
        },
        safety_spec={'enable': True},
        environment_kwargs=dict(flat_observation=True))
    mod_env.reset()
    # Get observation from realworld task.
    base_obs = base_env.step(0)
    mod_obs = mod_env.step(0)
    # The flattened size must grow by exactly the number of dummy entries.
    self.assertEqual(mod_obs.observation.shape[0],
                     base_obs.observation.shape[0] + NUM_DUMMY)
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseGaussianObservationsFlattening(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
noise = 0.5
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={'gaussian': {
'enable': True,
'observations': noise
}},
environment_kwargs={'flat_observation': True})
env.reset()
env.step(0)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseGaussianObservations(self, domain_name, task_name):
    """Ensure there is an additive Gaussian noise to the observation."""
    noise = 0.5
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={'gaussian': {
            'enable': True,
            'observations': noise
        }})
    env.reset()
    # Get observation from realworld cartpole.
    obs1 = env._task.get_observation(env._physics)
    # Get observation from underlying cartpole.
    # Rebuild the raw (noise-free) observation dict straight from physics,
    # per domain, to compare against the noisy task observation.
    obs2 = collections.OrderedDict()
    if domain_name == 'cartpole':
      obs2['position'] = env.physics.bounded_position()
      obs2['velocity'] = env.physics.velocity()
    elif domain_name == 'humanoid':
      obs2['joint_angles'] = env.physics.joint_angles()
      obs2['head_height'] = env.physics.head_height()
      obs2['extremities'] = env.physics.extremities()
      obs2['torso_vertical'] = env.physics.torso_vertical_orientation()
      obs2['com_velocity'] = env.physics.center_of_mass_velocity()
      obs2['velocity'] = env.physics.velocity()
    elif domain_name == 'manipulator':
      arm_joints = [
          'arm_root', 'arm_shoulder', 'arm_elbow', 'arm_wrist', 'finger',
          'fingertip', 'thumb', 'thumbtip'
      ]
      obs2['arm_pos'] = env.physics.bounded_joint_pos(arm_joints)
      obs2['arm_vel'] = env.physics.joint_vel(arm_joints)
      obs2['touch'] = env.physics.touch()
      obs2['hand_pos'] = env.physics.body_2d_pose('hand')
      obs2['object_pos'] = env.physics.body_2d_pose(env._task._object)
      obs2['object_vel'] = env.physics.joint_vel(env._task._object_joints)
      obs2['target_pos'] = env.physics.body_2d_pose(env._task._target)
    elif domain_name == 'quadruped':
      obs2['egocentric_state'] = env.physics.egocentric_state()
      obs2['torso_velocity'] = env.physics.torso_velocity()
      obs2['torso_upright'] = env.physics.torso_upright()
      obs2['imu'] = env.physics.imu()
      obs2['force_torque'] = env.physics.force_torque()
    elif domain_name == 'walker':
      obs2['orientations'] = env.physics.orientations()
      obs2['height'] = env.physics.torso_height()
      obs2['velocity'] = env.physics.velocity()
    else:
      raise ValueError('Unknown environment name: %s' % domain_name)
    # Verify that the observations are different (noise added).
    for key in obs1:
      np.testing.assert_array_compare(operator.__ne__, obs1[key], obs2[key])
@parameterized.named_parameters(*rwrl.ALL_TASKS)
def testNoiseDroppedObservationsFlattening(self, domain_name, task_name):
"""Ensure there is an additive Gaussian noise to the observation."""
prob = 1.
steps = np.random.randint(low=3, high=10)
env = rwrl.load(
domain_name=domain_name,
task_name=task_name,
noise_spec={
'dropped': {
'enable': True,
'observations_prob': prob,
'observations_steps': steps,
}
},
environment_kwargs={'flat_observation': True}
)
env.reset()
env.step(np.array(0)) # Scalar actions aren't tolerated with noise.
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseDroppedObservationsValues(self, domain_name, task_name):
    """Ensure observations drop values."""
    steps = np.random.randint(low=3, high=10)
    prob = 1.
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={
            'dropped': {
                'enable': True,
                'observations_prob': prob,
                'observations_steps': steps,
            }
        })
    action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    for step in range(steps):
      # Verify that values are dropping for the first `steps` steps.
      if step == 1:
        # Cancel the dropped values after the first sequence.
        env._task._noise_dropped_obs_steps = 0.
      obs = env.step(copy.copy(one_action))[3]
      for key in obs:
        if isinstance(obs[key], np.ndarray):
          np.testing.assert_array_equal(obs[key], np.zeros(obs[key].shape))
        else:
          np.testing.assert_array_equal(obs[key], 0.)
    obs = env.step(copy.copy(one_action))[3]
    # Ensure observation is not filled with zeros.
    for key in obs:
      obs[key] += np.random.normal()
    # Pass observation through the base class that is in charge of dropping values.
    obs = realworld_env.Base.get_observation(env._task, env._physics, obs)
    for key in obs:
      # Verify that values have stopped dropping.
      if isinstance(obs[key], np.ndarray):
        np.testing.assert_array_compare(operator.__ne__, obs[key],
                                        np.zeros(obs[key].shape))
      else:
        np.testing.assert_array_compare(operator.__ne__, obs[key], 0.)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseDroppedActionsValues(self, domain_name, task_name):
    """Ensure actions drop values."""
    steps = np.random.randint(low=3, high=10)
    prob = 1.
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={
            'dropped': {
                'enable': True,
                'actions_prob': prob,
                'actions_steps': steps,
            }
        })
    env.reset()
    action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    for step in range(steps):
      # Verify that values are dropping for the first `steps` steps.
      if step == 1:
        # Cancel the dropped values after the first sequence.
        env._task._noise_dropped_action_steps = 0.
      _ = env.step(copy.copy(one_action))
      action = env.physics.control()
      if isinstance(action, np.ndarray):
        np.testing.assert_array_equal(action, np.zeros(action.shape))
      else:
        np.testing.assert_array_equal(action, 0.)
    # Ensure values are no longer dropping.
    _ = env.step(copy.copy(one_action))
    action = env.physics.control()
    if isinstance(action, np.ndarray):
      np.testing.assert_array_compare(operator.__ne__, action,
                                      np.zeros(action.shape))
    else:
      np.testing.assert_array_compare(operator.__ne__, action, 0.)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseStuckObservationsValues(self, domain_name, task_name):
    """Ensure observations have stuck values."""
    steps = np.random.randint(low=3, high=10)
    prob = 1.
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={
            'stuck': {
                'enable': True,
                'observations_prob': prob,
                'observations_steps': steps,
            }
        })
    action_spec = env.action_spec()
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    prev_obs = None
    for step in range(steps):
      # Verify that values are stuck for the first `steps` steps.
      if step == 1:
        # Cancel the stuck values after the first sequence.
        env._task._noise_stuck_obs_steps = 0.
      obs = env.step(copy.copy(one_action))[3]
      if not prev_obs:
        prev_obs = copy.deepcopy(obs)
      for key in obs:
        np.testing.assert_array_equal(obs[key], prev_obs[key])
      prev_obs = copy.deepcopy(obs)
    # Perturb observation.
    for key in obs:
      obs[key] += np.random.normal()
    # Pass observation through the base class that is in charge of stuck values.
    obs = realworld_env.Base.get_observation(env._task, env._physics, obs)
    for key in obs:
      # Verify that values have stopped getting stuck.
      np.testing.assert_array_compare(operator.__ne__, obs[key], prev_obs[key])
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseStuckActionsValues(self, domain_name, task_name):
    """Ensure actions have stuck values."""
    steps = np.random.randint(low=3, high=10)
    prob = 1.
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={
            'stuck': {
                'enable': True,
                'actions_prob': prob,
                'actions_steps': steps,
            }
        })
    env.reset()
    action_spec = env.action_spec()
    zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    # Get the action stuck for the next `steps` steps.
    env.step(copy.copy(one_action))
    # Cancel the stuck values after the first sequence.
    env._task._noise_stuck_action_steps = 0.
    for _ in range(steps):
      # Verify that values are stuck for the first `steps` steps.
      np.testing.assert_array_equal(env.physics.control(), one_action)
      # Apply a different action.
      env.step(copy.copy(zero_action))
    # Verify that zero_action executed after action becoming un-stuck.
    np.testing.assert_array_equal(env.physics.control(), zero_action)
    for step in range(17):
      # Alternate actions and make sure they don't get stuck.
      action = zero_action if step % 2 == 0 else one_action
      env.step(copy.copy(action))
      np.testing.assert_array_equal(env.physics.control(), action)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testNoiseRepetitionActions(self, domain_name, task_name):
    """Ensure actions are being repeated."""
    steps = np.random.randint(low=3, high=10)
    prob = 1.
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        noise_spec={
            'repetition': {
                'enable': True,
                'actions_prob': prob,
                'actions_steps': steps,
            }
        })
    action_spec = env.action_spec()
    zero_action = np.zeros(shape=action_spec.shape, dtype=action_spec.dtype)
    one_action = np.ones(shape=action_spec.shape, dtype=action_spec.dtype)
    if hasattr(action_spec, 'minimum'):
      one_action = np.minimum(action_spec.maximum, one_action)
    env.reset()
    env.step(copy.copy(zero_action))
    # Verify that all the actions are zero_action for 'steps' time steps.
    for _ in range(steps):
      np.testing.assert_array_equal(env.physics.control(), zero_action)
      env.step(copy.copy(one_action))
    # Verify that all the actions are one_action for 'steps' time steps.
    for _ in range(steps):
      np.testing.assert_array_equal(env.physics.control(), one_action)
      env.step(copy.copy(zero_action))
    # Once repetition lapses, the newly supplied action takes effect.
    np.testing.assert_array_equal(env.physics.control(), zero_action)
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testPerturbRandomWalk(self, domain_name, task_name):
    """Ensure parameter is perturbed once every `period` resets.

    Uses the 'drift_pos' scheduler and checks the perturbed physics
    parameter changes exactly on period boundaries and stays constant
    in between.
    """
    period = 3
    perturb_scheduler = 'drift_pos'
    # Pick a domain-appropriate physics parameter to perturb.
    if domain_name == 'cartpole':
      perturb_param = 'pole_length'
    elif domain_name == 'walker':
      perturb_param = 'thigh_length'
    elif domain_name == 'humanoid':
      perturb_param = 'contact_friction'
    elif domain_name == 'quadruped':
      perturb_param = 'shin_length'
    elif domain_name == 'manipulator':
      perturb_param = 'lower_arm_length'
    else:
      raise ValueError('Unknown environment name: %s' % domain_name)
    env = rwrl.load(
        domain_name=domain_name,
        task_name=task_name,
        perturb_spec={
            'enable': True,
            'period': period,
            'param': perturb_param,
            'scheduler': perturb_scheduler
        })
    def get_param_val():
      # Read back the perturbed value from the named MuJoCo model arrays.
      if domain_name == 'cartpole':
        return env._physics.named.model.geom_size['pole_1', 1]
      elif domain_name == 'walker':
        return env._physics.named.model.geom_size['right_thigh', 1]
      elif domain_name == 'humanoid':
        return env._physics.named.model.geom_friction['right_right_foot', 0]
      elif domain_name == 'quadruped':
        return env._physics.named.model.geom_size['shin_front_left', 1]
      elif domain_name == 'manipulator':
        return env._physics.named.model.geom_size['lower_arm', 1]
      else:
        pass
    # Verify that first reset changes value.
    val_old = get_param_val()
    env.reset()
    val_new = get_param_val()
    self.assertNotEqual(val_old, val_new)
    # Verify that the parameter changes only each `period` number of times.
    val_old = val_new
    for unused_count1 in range(1):
      for unused_count2 in range(period - 1):
        # Within a period the parameter must stay fixed.
        env.reset()
        val_new = get_param_val()
        self.assertEqual(val_old, val_new)
      # The reset that completes the period must change it.
      env.reset()
      val_new = get_param_val()
      self.assertNotEqual(val_old, val_new)
      val_old = val_new
  @parameterized.named_parameters(*rwrl.ALL_TASKS)
  def testCombinedChallenges(self, domain_name, task_name):
    """Ensure the combined challenges are properly defined.

    Checks (a) unknown challenge names raise, (b) individual specs cannot
    be combined with a combined challenge, and (c) each challenge level
    configures the expected delay/noise/perturbation/dimensionality
    settings on the task.
    """
    all_combined_challenges = ['easy', 'medium', 'hard']
    # Verify that a non-specified combined challenge breaks the code.
    with self.assertRaises(ValueError):
      _ = rwrl.load(
          domain_name=domain_name,
          task_name=task_name,
          combined_challenge='random_name')
    # Verify specs can't be specified if combined challenge is specified.
    for combined_challenge in all_combined_challenges:
      with self.assertRaises(ValueError):
        _ = rwrl.load(
            domain_name=domain_name,
            task_name=task_name,
            safety_spec={'enable': True},
            delay_spec={'enable': True},
            noise_spec={'enable': True},
            perturb_spec={'enable': True},
            dimensionality_spec={'enable': True},
            multiobj_spec={'enable': True},
            combined_challenge=combined_challenge)
    # Verify the combined challenges are correctly set.
    for combined_challenge in all_combined_challenges:
      env = rwrl.load(
          domain_name=domain_name,
          task_name=task_name,
          combined_challenge=combined_challenge)
      if combined_challenge == 'easy':
        # Delay.  Buffer lengths are the configured delay plus one slot
        # for the current value.
        self.assertTrue(env._task._delay_enabled)
        self.assertEqual(env._task._buffer_observations_len, 3+1)
        self.assertEqual(env._task._buffer_actions_len, 3+1)
        self.assertEqual(env._task._buffer_rewards_len, 10+1)
        # Noise.
        # NOTE(review): `_noise_guassian_enabled` spelling presumably
        # matches the task attribute name in the implementation -- confirm.
        self.assertTrue(env._task._noise_guassian_enabled)
        self.assertEqual(env._task._noise_gaussian_observations, 0.1)
        self.assertEqual(env._task._noise_gaussian_actions, 0.1)
        self.assertTrue(env._task._noise_dropped_enabled)
        self.assertEqual(env._task._noise_dropped_obs_prob, 0.01)
        self.assertEqual(env._task._noise_dropped_obs_steps, 1)
        self.assertTrue(env._task._noise_stuck_enabled)
        self.assertEqual(env._task._noise_stuck_obs_prob, 0.01)
        self.assertEqual(env._task._noise_stuck_obs_steps, 1)
        self.assertTrue(env._task._noise_repetition_enabled)
        self.assertEqual(env._task._noise_repetition_actions_prob, 1.0)
        self.assertEqual(env._task._noise_repetition_actions_steps, 1)
        # Perturbation.  Min/max/std ranges are domain-specific.
        self.assertTrue(env._task._perturb_enabled)
        self.assertEqual(env._task._perturb_period, 1)
        self.assertEqual(env._task._perturb_scheduler, 'uniform')
        if domain_name == 'cartpole':
          self.assertEqual(env._task._perturb_param, 'pole_length')
          self.assertEqual(env._task._perturb_min, 0.9)
          self.assertEqual(env._task._perturb_max, 1.1)
          self.assertEqual(env._task._perturb_std, 0.02)
        elif domain_name == 'quadruped':
          self.assertEqual(env._task._perturb_param, 'shin_length')
          self.assertEqual(env._task._perturb_min, 0.25)
          self.assertEqual(env._task._perturb_max, 0.3)
          self.assertEqual(env._task._perturb_std, 0.005)
        elif domain_name == 'walker':
          self.assertEqual(env._task._perturb_param, 'thigh_length')
          self.assertEqual(env._task._perturb_min, 0.225)
          self.assertEqual(env._task._perturb_max, 0.25)
          self.assertEqual(env._task._perturb_std, 0.002)
        elif domain_name == 'humanoid':
          self.assertEqual(env._task._perturb_param, 'contact_friction')
          self.assertEqual(env._task._perturb_min, 0.6)
          self.assertEqual(env._task._perturb_max, 0.8)
          self.assertEqual(env._task._perturb_std, 0.02)
        # Dimensionality.
        self.assertTrue(env._task._dimensionality_enabled)
        self.assertEqual(env._task._num_random_state_observations, 10)
        # Safety.
        self.assertFalse(env._task._safety_enabled)
        # Multi-objective.
        self.assertFalse(env._task._multiobj_enabled)
      elif combined_challenge == 'medium':
        # Delay.
        self.assertTrue(env._task._delay_enabled)
        self.assertEqual(env._task._buffer_observations_len, 6+1)
        self.assertEqual(env._task._buffer_actions_len, 6+1)
        self.assertEqual(env._task._buffer_rewards_len, 20+1)
        # Noise.
        self.assertTrue(env._task._noise_guassian_enabled)
        self.assertEqual(env._task._noise_gaussian_observations, 0.3)
        self.assertEqual(env._task._noise_gaussian_actions, 0.3)
        self.assertTrue(env._task._noise_dropped_enabled)
        self.assertEqual(env._task._noise_dropped_obs_prob, 0.05)
        self.assertEqual(env._task._noise_dropped_obs_steps, 5)
        self.assertTrue(env._task._noise_stuck_enabled)
        self.assertEqual(env._task._noise_stuck_obs_prob, 0.05)
        self.assertEqual(env._task._noise_stuck_obs_steps, 5)
        self.assertTrue(env._task._noise_repetition_enabled)
        self.assertEqual(env._task._noise_repetition_actions_prob, 1.0)
        self.assertEqual(env._task._noise_repetition_actions_steps, 2)
        # Perturbation.
        self.assertTrue(env._task._perturb_enabled)
        self.assertEqual(env._task._perturb_period, 1)
        self.assertEqual(env._task._perturb_scheduler, 'uniform')
        if domain_name == 'cartpole':
          self.assertEqual(env._task._perturb_param, 'pole_length')
          self.assertEqual(env._task._perturb_min, 0.7)
          self.assertEqual(env._task._perturb_max, 1.7)
          self.assertEqual(env._task._perturb_std, 0.1)
        elif domain_name == 'quadruped':
          self.assertEqual(env._task._perturb_param, 'shin_length')
          self.assertEqual(env._task._perturb_min, 0.25)
          self.assertEqual(env._task._perturb_max, 0.8)
          self.assertEqual(env._task._perturb_std, 0.05)
        elif domain_name == 'walker':
          self.assertEqual(env._task._perturb_param, 'thigh_length')
          self.assertEqual(env._task._perturb_min, 0.225)
          self.assertEqual(env._task._perturb_max, 0.4)
          self.assertEqual(env._task._perturb_std, 0.015)
        elif domain_name == 'humanoid':
          self.assertEqual(env._task._perturb_param, 'contact_friction')
          self.assertEqual(env._task._perturb_min, 0.5)
          self.assertEqual(env._task._perturb_max, 0.9)
          self.assertEqual(env._task._perturb_std, 0.04)
        # Dimensionality.
        self.assertTrue(env._task._dimensionality_enabled)
        self.assertEqual(env._task._num_random_state_observations, 20)
        # Safety.
        self.assertFalse(env._task._safety_enabled)
        # Multi-objective.
        self.assertFalse(env._task._multiobj_enabled)
      elif combined_challenge == 'hard':
        # Delay.
        self.assertTrue(env._task._delay_enabled)
        self.assertEqual(env._task._buffer_observations_len, 9+1)
        self.assertEqual(env._task._buffer_actions_len, 9+1)
        self.assertEqual(env._task._buffer_rewards_len, 40+1)
        # Noise.
        self.assertTrue(env._task._noise_guassian_enabled)
        self.assertEqual(env._task._noise_gaussian_observations, 1.0)
        self.assertEqual(env._task._noise_gaussian_actions, 1.0)
        self.assertTrue(env._task._noise_dropped_enabled)
        self.assertEqual(env._task._noise_dropped_obs_prob, 0.1)
        self.assertEqual(env._task._noise_dropped_obs_steps, 10)
        self.assertTrue(env._task._noise_stuck_enabled)
        self.assertEqual(env._task._noise_stuck_obs_prob, 0.1)
        self.assertEqual(env._task._noise_stuck_obs_steps, 10)
        self.assertTrue(env._task._noise_repetition_enabled)
        self.assertEqual(env._task._noise_repetition_actions_prob, 1.0)
        self.assertEqual(env._task._noise_repetition_actions_steps, 3)
        # Perturbation.
        self.assertTrue(env._task._perturb_enabled)
        self.assertEqual(env._task._perturb_period, 1)
        self.assertEqual(env._task._perturb_scheduler, 'uniform')
        if domain_name == 'cartpole':
          self.assertEqual(env._task._perturb_param, 'pole_length')
          self.assertEqual(env._task._perturb_min, 0.5)
          self.assertEqual(env._task._perturb_max, 2.3)
          self.assertEqual(env._task._perturb_std, 0.15)
        elif domain_name == 'quadruped':
          self.assertEqual(env._task._perturb_param, 'shin_length')
          self.assertEqual(env._task._perturb_min, 0.25)
          self.assertEqual(env._task._perturb_max, 1.4)
          self.assertEqual(env._task._perturb_std, 0.1)
        elif domain_name == 'walker':
          self.assertEqual(env._task._perturb_param, 'thigh_length')
          self.assertEqual(env._task._perturb_min, 0.225)
          self.assertEqual(env._task._perturb_max, 0.55)
          self.assertEqual(env._task._perturb_std, 0.04)
        elif domain_name == 'humanoid':
          self.assertEqual(env._task._perturb_param, 'contact_friction')
          self.assertEqual(env._task._perturb_min, 0.4)
          self.assertEqual(env._task._perturb_max, 1.0)
          self.assertEqual(env._task._perturb_std, 0.06)
        # Dimensionality.
        self.assertTrue(env._task._dimensionality_enabled)
        self.assertEqual(env._task._num_random_state_observations, 50)
        # Safety.
        self.assertFalse(env._task._safety_enabled)
        # Multi-objective.
        self.assertFalse(env._task._multiobj_enabled)
# Script entry point: run the abseil test runner.
if __name__ == '__main__':
  absltest.main()
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
from itertools import izip, islice
from random import shuffle
from contextlib import contextmanager
from collections import Counter
from operator import itemgetter
import argparse
import subprocess
import tempfile
import os
import logging
import sys
import shlex
import shutil
help_msg = """\
Apply any number of those pre-processing steps to given corpus:
Tokenization, lowercasing, shuffling, filtering of lines according to length,
splitting into train/dev/test, punctuation and digit normalization.
"""
# Names of temporary files created with delete=False; they are removed in
# the main script's `finally` block.
temporary_files = []
@contextmanager
def open_files(names, mode='r'):
    """Context manager yielding one open file object per name.

    Files are opened incrementally, so every file successfully opened is
    closed on exit even if a later `open` (or the managed block) raises.
    """
    handles = []
    try:
        for name in names:
            handles.append(open(name, mode=mode))
        yield handles
    finally:
        for handle in handles:
            handle.close()
@contextmanager
def open_temp_files(num=1, mode='w', delete=False):
    """Context manager yielding `num` named temporary files.

    All handles are closed on exit.  When `delete` is False, the file
    names are recorded in the module-level `temporary_files` list so the
    main script can remove them later.
    """
    handles = []
    try:
        for _ in range(num):
            handle = tempfile.NamedTemporaryFile(mode=mode, delete=delete)
            handles.append(handle)
            if not delete:
                temporary_files.append(handle.name)
        yield handles
    finally:
        for handle in handles:
            handle.close()
def process_file(corpus, id_, args):
    """Run the configured pre-processing pipeline on one corpus file.

    Builds a chain of shell commands (punctuation normalization,
    tokenization, lowercasing, digit normalization) over
    `<corpus>.<extension>` and writes the result to a temporary file,
    whose name is returned.
    """
    filename = '{}.{}'.format(corpus, args.extensions[id_])
    logging.info('processing ' + filename)
    lang = args.lang[id_]
    with open_temp_files(num=1) as output_, open(filename) as input_:
        output_ = output_[0]
        def path_to(script_name):
            # Resolve helper scripts relative to --scripts, or rely on $PATH.
            if args.scripts is None:
                return script_name
            else:
                return os.path.join(args.scripts, script_name)
        processes = [['cat']]  # just copy file if there is no other operation
        if args.normalize_punk:
            processes.append([path_to('normalize-punctuation.perl'), '-l',
                              lang])
        # replace html entities FIXME (doesn't seem to work)
        # processes.append(shlex.split("perl -MHTML::Entities -pe 'decode_entities($_);'"))
        if args.tokenize:
            processes.append([path_to('tokenizer.perl'), '-l', lang,
                              '-threads', str(args.threads)])
        if args.lowercase:
            processes.append([path_to('lowercase.perl')])
        if args.normalize_digits:
            processes.append(['sed', 's/[[:digit:]]/0/g'])
        ps = None
        # Chain the commands: the first reads the input file, each later
        # one reads the previous command's stdout, the last one writes to
        # the temporary output file.
        # NOTE(review): the /dev/null handles are never closed explicitly
        # and only the last process is waited on -- acceptable for a
        # one-shot script, but worth confirming.
        for i, process in enumerate(processes):
            stdout = output_ if i == len(processes) - 1 else subprocess.PIPE
            stdin = input_ if i == 0 else ps.stdout
            ps = subprocess.Popen(process, stdin=stdin, stdout=stdout,
                                  stderr=open('/dev/null', 'w'))
        ps.wait()
        return output_.name
def process_corpus(corpus, args):
    """Pre-process all language files of `corpus` jointly.

    Filters out sentence tuples whose token count falls outside
    [args.min, args.max] in any language, optionally shuffles them in
    parallel, and writes each language to its own temporary file.
    Returns the list of temporary file names (one per extension).
    """
    input_filenames = [process_file(corpus, i, args)
                       for i in range(len(args.extensions))]
    with open_files(input_filenames) as input_files,\
            open_temp_files(len(input_filenames)) as output_files:
        # (lazy) sequence of sentence tuples
        all_lines = (lines for lines in izip(*input_files) if
                     all(args.min <= len(line.split()) <= args.max
                         for line in lines))
        if args.shuffle:
            all_lines = list(all_lines)  # not lazy anymore
            shuffle(all_lines)
        for lines in all_lines:  # keeps it lazy if no shuffle
            for line, output_file in zip(lines, output_files):
                output_file.write(line)
        return [f.name for f in output_files]
def split_corpus(filenames, sizes, args):
    """Split parallel files into consecutive chunks of `sizes` lines.

    A size of 0 produces a None placeholder (no such split); a size of
    None consumes the rest of the files, which is why the training split
    is requested last.  Returns one entry per size: either None or the
    list of chunk file names (one per extension).
    """
    with open_files(filenames) as input_files:
        output_filenames = []
        for size in sizes:
            if size == 0:
                output_filenames.append(None)
                continue
            with open_temp_files(num=len(args.extensions)) as output_files:
                for input_file, output_file in zip(input_files, output_files):
                    # If size is None, this will read the whole file.
                    # That's why we put train last.
                    output_file.writelines(islice(input_file, size))
                output_filenames.append([f.name for f in output_files])
        return output_filenames
def get_vocab(filename, args):
    """Build the vocabulary of `filename`.

    Counts whitespace-separated tokens, keeps those occurring at least
    `args.min_count` times, and if `args.max_vocab_size` > 0 keeps only
    the most frequent `args.max_vocab_size` of them.

    Returns a set of words.
    """
    with open(filename) as file_:
        counts = Counter(word for line in file_ for word in line.split())
    # `items()` instead of the Python 2-only `iteritems()`: equivalent
    # results, and keeps this helper portable to Python 3.
    words = [(w, c) for w, c in counts.items() if c >= args.min_count]
    max_vocab_size = args.max_vocab_size
    if 0 < max_vocab_size < len(words):
        words = sorted(words, key=itemgetter(1), reverse=True)[:max_vocab_size]
    return set(w for w, _ in words)
def move_and_filter(filenames, output_corpus, args, vocabs=None):
    """Move processed files to `<output_corpus>.<ext>`, replacing OOV words.

    Without `vocabs`, files are simply moved into place.  With `vocabs`
    (one vocabulary per extension), every word not in the corresponding
    vocabulary is replaced by `args.unk_symbol` while copying.
    """
    destinations = ['{}.{}'.format(output_corpus, ext)
                    for ext in args.extensions]
    if not vocabs:
        for src, dst in zip(filenames, destinations):
            shutil.move(src, dst)
        return
    for src, dst, vocab in zip(filenames, destinations, vocabs):
        with open(src) as input_file,\
             open(dst, 'w') as output_file:
            for line in input_file:
                filtered = (w if w in vocab else args.unk_symbol
                            for w in line.split())
                output_file.write(' '.join(filtered) + '\n')
# Command-line entry point: parse options, pre-process each corpus,
# optionally split off dev/test sets, build vocabularies and move the
# results into place.  Temporary files are always cleaned up.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description=help_msg,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('corpus', help='training corpus')
    parser.add_argument('output_corpus', help='destination corpus')
    parser.add_argument('extensions', nargs='+', help='list of extensions')
    parser.add_argument('--dev-corpus', help='development corpus')
    parser.add_argument('--test-corpus', help='test corpus')
    parser.add_argument('--scripts', help='path to script directory '
                        '(None if in $PATH)', default='scripts')
    parser.add_argument('--dev-size', type=int,
                        help='size of development corpus', default=0)
    parser.add_argument('--test-size', type=int,
                        help='size of test corpus', default=0)
    parser.add_argument('--train-size', type=int,
                        help='size of training corpus (default: maximum)')
    parser.add_argument('--lang', nargs='+', help='optional list of language '
                        'codes (when different than file extensions)')
    parser.add_argument('--normalize-punk', help='normalize punctuation',
                        action='store_true')
    parser.add_argument('--normalize-digits', help='normalize digits '
                        '(replace all digits with 0)', action='store_true')
    parser.add_argument('--lowercase', help='put everything to lowercase',
                        action='store_true')
    parser.add_argument('--shuffle', help='shuffle the corpus',
                        action='store_true')
    parser.add_argument('--tokenize', dest='tokenize',
                        help='tokenize the corpus', action='store_true')
    parser.add_argument('-v', '--verbose', help='verbose mode',
                        action='store_true')
    parser.add_argument('--min', type=int, default=1,
                        help='min number of tokens per line')
    parser.add_argument('--max', type=int, default=0,
                        help='max number of tokens per line (0 for no limit)')
    parser.add_argument('--threads', type=int, default=16,
                        help='number of threads for tokenizer')
    parser.add_argument('--min-count', type=int, default=0)
    parser.add_argument('--max-vocab-size', type=int, default=0)
    parser.add_argument('--unk-symbol', default='<UNK>')
    args = parser.parse_args()
    # 0 means "no limit" on the command line; use +inf internally.
    args.max = args.max if args.max > 0 else float('inf')
    if args.lang is None:
        args.lang = args.extensions
    elif len(args.lang) != len(args.extensions):
        sys.exit('wrong number of values for parameter --lang')
    if args.verbose:
        logging.basicConfig(format='%(message)s', level=logging.INFO)
    output_dir = os.path.dirname(args.output_corpus)
    if output_dir and not os.path.exists(output_dir):
        logging.info('creating directory')
        os.makedirs(output_dir)
    try:
        input_corpora = (args.dev_corpus, args.test_corpus, args.corpus)
        output_corpora = (args.output_corpus + '.dev' ,
                          args.output_corpus + '.test',
                          args.output_corpus)
        # list of temporary files for each corpus (dev, test, train)
        # a value of None means no such corpus
        filenames = [None, None, None]
        for i, corpus in enumerate(input_corpora):
            if corpus is not None:
                filenames[i] = process_corpus(corpus, args)
        # split files
        # Dev/test splits are only carved out of the training corpus when
        # no dedicated dev/test corpus was given.
        sizes = [
            args.dev_size if not args.dev_corpus else 0,
            args.test_size if not args.test_corpus else 0,
            args.train_size
        ]
        if any(sizes):
            logging.info('splitting files')
            # returns a list in the same format as `filenames`
            split_filenames = split_corpus(filenames[-1], sizes, args)
            # union of `filenames` and `split_filenames`
            for i, filenames_ in enumerate(split_filenames):
                if filenames_ is not None:
                    filenames[i] = filenames_
        vocabs = None
        if args.max_vocab_size or args.min_count:
            # vocabularies are created from training corpus
            vocabs = [get_vocab(filename, args) for filename in filenames[-1]]
        # move temporary files to their destination
        for filenames_, output_corpus in zip(filenames, output_corpora):
            if filenames_ is not None:
                move_and_filter(filenames_, output_corpus, args, vocabs)
    finally:
        # Always remove intermediate files, even if pre-processing failed.
        logging.info('removing temporary files')
        for name in temporary_files:
            try:
                os.remove(name)
            except OSError:
                pass
|
|
#! /usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import argparse
import logging
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.io
import tqdm
import pbio.utils.bed_utils as bed_utils
import pbio.misc.parallel as parallel
import pbio.misc.logging_utils as logging_utils
import pbio.misc.shell_utils as shell_utils
import pbio.misc.utils as utils
import pbio.ribo.ribo_filenames as filenames
logger = logging.getLogger(__name__)
# Defaults for the command-line options defined in main().
default_image_type = 'eps'
default_title = ""
default_min_profile = 5
default_max_orfs = 10000
def get_windows(profile):
    """Normalize `profile` by its maximum and slice it into 21-bp windows.

    Returns a tuple (first_window, middle_windows, last_window): the
    first and last 21 positions, plus a list of complete 21-bp windows
    from the interior (the trailing interior window is always dropped).
    Profiles shorter than 42 positions yield only the first window, with
    None for the other two entries.
    """
    normalized = profile / np.max(profile)
    length = len(normalized)
    if length < 42:
        # Too short for distinct first/middle/last regions.
        return (normalized[:21], None, None)
    first, middle, last = np.split(normalized, [21, length - 21])
    # Chop the interior into consecutive 21-bp windows, dropping the last
    # (possibly partial) one.
    cut_points = np.arange(21, len(middle), 21)
    middles = np.split(middle, cut_points)[:-1]
    return first, middles, last
def get_profile(orf, profiles, min_profile):
    """Extract the dense profile for one ORF row.

    Returns None when the ORF is shorter than one 21-bp window or when
    the total signal in its profile is below `min_profile`.
    """
    length = orf['orf_len']
    if length < 21:
        return None
    dense_profile = utils.to_dense(profiles, orf['orf_num'], length=length)
    if sum(dense_profile) < min_profile:
        return None
    return dense_profile
def plot_windows(windows, title, out):
    """Plot mean/variance bars for the first, middle and last 21-bp windows.

    `windows` is a list of (first_window, middle_windows, last_window)
    tuples as produced by get_windows; entries may be None for short
    ORFs.  The figure is written to `out`; nothing is plotted when no
    usable windows exist.
    """
    if len(windows) == 0:
        msg = "Did not find any windows for: {}".format(title)
        logger.warning(msg)
        return
    # NOTE(review): this relies on np.array building a 2-d object array
    # from the 3-tuples so that column indexing below works -- confirm
    # this holds for all window shapes.
    windows_np = np.array(windows)
    first_windows = windows_np[:,0]
    last_windows = windows_np[:,2]
    # Short ORFs (< 42 bp) contribute no middle or last windows.
    last_windows = np.array([lw for lw in last_windows if lw is not None])
    middle_windows = windows_np[:,1]
    middle_windows = [mw for mw in middle_windows if mw is not None]
    # Each ORF contributes a *list* of middle windows; pool them all.
    middle_windows = utils.flatten_lists(middle_windows)
    middle_windows = np.array(middle_windows)
    if (len(last_windows) == 0) or (len(middle_windows) == 0):
        msg = "No long ORFs for: {}".format(title)
        logger.warning(msg)
        return
    ind = np.arange(21)  # the x locations for the groups
    width = 0.5  # the width of the bars
    fig, axes = plt.subplots(ncols=3, sharey=True, sharex=True, figsize=(10,5))
    # the first window
    first_means = np.mean(first_windows, axis=0)
    first_var = np.var(first_windows, axis=0)
    rects_first = axes[0].bar(ind, first_means, width, color='g', yerr=first_var)
    # the middle windows
    middle_means = np.mean(middle_windows, axis=0)
    middle_var = np.var(middle_windows, axis=0)
    rects_middle = axes[1].bar(ind, middle_means, width, color='g', yerr=middle_var)
    # the last window
    last_means = np.mean(last_windows, axis=0)
    last_var = np.var(last_windows, axis=0)
    rects_last = axes[2].bar(ind, last_means, width, color='g', yerr=last_var)
    axes[0].set_xlim((-width, 21))
    axes[0].set_ylim((0, 1.05))
    axes[0].set_title('First 21-bp window')
    axes[1].set_title('All 21-bp windows in middle')
    axes[2].set_title('Last 21-bp window')
    fig.suptitle(title)
    msg = "Saving figure to: {}".format(out)
    logger.debug(msg)
    fig.savefig(out, bbox_inches='tight')
def extract_profiles_and_plot_strand(g, profiles, orf_type, strand, args):
    """Extract usable profiles for one strand of an ORF-type group and plot.

    Candidate ORFs are pre-filtered with a vectorized sum against
    args.min_profile; at most args.max_orfs dense profiles are then
    extracted and sliced into 21-bp windows for plotting.
    """
    m_strand = g['strand'] == strand
    msg = "Extracting profiles"
    logger.debug(msg)
    # we will manually build up the list so we can quit as soon as possible
    df = g[m_strand]
    # quickly figure out which profiles have a sum we can use
    orf_nums = np.array(df['orf_num'])
    # we may have only calculated profiles for a few ORFs (using --num-orfs)
    # so do not keep indices which are too large
    num_profiles = profiles.shape[0]
    m_orfs = orf_nums < num_profiles
    orf_nums = orf_nums[m_orfs]
    g_profiles = profiles[orf_nums]
    # Sparse row sums come back as a (k, 1) matrix; [:,0] flattens them.
    s = g_profiles.sum(axis=1)
    m_sum = np.array(s > args.min_profile)[:,0]
    # we need to extend m_sum so it is the correct length
    # NOTE(review): the False-padding assumes the ORFs dropped by the
    # index bound above are the *last* rows of `df` -- confirm that
    # orf_num ordering matches the row order of df.
    num_orfs = len(orf_nums)
    diff = len(m_orfs) - num_orfs
    m_orfs = np.concatenate([m_sum, np.array([False]*diff)]).astype(bool)
    g_profiles = []
    df_rows = tqdm.tqdm(df[m_orfs].iterrows(), total=len(df[m_orfs]),
        leave=True, file=sys.stdout)
    for row in df_rows:
        profile = get_profile(row[1], profiles, args.min_profile)
        if profile is not None:
            g_profiles.append(profile)
        # Stop early once we have enough ORFs of this type.
        if len(g_profiles) >= args.max_orfs:
            break
    msg = "Slicing the profiles into windows"
    logger.debug(msg)
    windows = parallel.apply_iter_simple(g_profiles, get_windows, progress_bar=True)
    msg = "Plotting the profile statistics"
    logger.debug(msg)
    out = filenames.get_orf_type_profile_image(args.out, orf_type, strand, args.image_type)
    title = "{}: {}, strand: {} ({})".format(args.title, orf_type, strand, len(df[m_orfs]))
    plot_windows(windows, title, out)
def extract_profiles_and_plot(g, profiles, args):
    """Create the per-strand metagene profile plots for one ORF-type group."""
    orf_type = g['orf_type'].iloc[0]
    logger.info("ORF type: {}".format(orf_type))
    logger.info("Strand: -")
    extract_profiles_and_plot_strand(g, profiles, orf_type, "-", args)
    logger.info("Strand: +")
    extract_profiles_and_plot_strand(g, profiles, orf_type, "+", args)
def main():
    """Parse arguments, load ORFs and profiles, and plot per-type images."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="This script visualizes the metagene profiles for each ORF type "
        "present in a given BED12+ file. It visualizes the mean and variance of normalized "
        "profiles in the first 21-bp, last 21-bp, and across all other 21-bp windows.")
    parser.add_argument('orfs', help="The BED12+ file containing the ORFs")
    parser.add_argument('profiles', help="The (mtx) file containing the ORF profiles")
    parser.add_argument('out', help="The base output name. The output filenames will be of "
        "the form: <out>.<orf-type>.<image-type>.")
    parser.add_argument('--min-profile', help="The minimum value of the sum over the profile "
        "to include it in the analysis", type=float, default=default_min_profile)
    # NOTE(review): this help text promises random sampling, but
    # extract_profiles_and_plot_strand keeps the *first* max_orfs that pass
    # the filter -- confirm which behavior is intended.
    parser.add_argument('--max-orfs', help="At most this many ORFs of each type will be "
        "used to create the figures. They will be sampled randomly from among those "
        "which meet the min-profile constraint.", type=int, default=default_max_orfs)
    parser.add_argument('--title', help="The prefix to use for the title of the plots",
        default=default_title)
    parser.add_argument('--image-type', help="The type of image files to create. The type "
        "must be recognized by matplotlib.", default=default_image_type)
    logging_utils.add_logging_options(parser)
    args = parser.parse_args()
    logging_utils.update_logging(args)
    msg = "Reading ORFs"
    logger.info(msg)
    orfs = bed_utils.read_bed(args.orfs)
    msg = "Reading profiles"
    logger.info(msg)
    # CSR format gives fast row slicing for per-ORF profile extraction.
    profiles = scipy.io.mmread(args.profiles).tocsr()
    msg = "Extracting the metagene profiles and creating the images"
    logger.info(msg)
    orf_type_groups = orfs.groupby('orf_type')
    orf_type_groups.apply(extract_profiles_and_plot, profiles, args)
    msg = "Finished"
    logger.info(msg)
# Script entry point.
if __name__ == '__main__':
    main()
|
|
import warnings
from wtforms import fields, validators
from sqlalchemy import Boolean, Column
from flask_admin import form
from flask_admin.model.form import (converts, ModelConverterBase,
InlineModelConverterBase, FieldPlaceholder)
from flask_admin.model.fields import AjaxSelectField, AjaxSelectMultipleField
from flask_admin.model.helpers import prettify_name
from flask_admin._backwards import get_property
from flask_admin._compat import iteritems
from .validators import Unique
from .fields import (QuerySelectField, QuerySelectMultipleField,
InlineModelFormList, InlineHstoreList, HstoreForm)
from flask_admin.model.fields import InlineFormField
from .tools import (has_multiple_pks, filter_foreign_columns,
get_field_with_path)
from .ajax import create_ajax_loader
class AdminModelConverter(ModelConverterBase):
    """
        SQLAlchemy model to form converter.

        Translates model properties (column properties and relationships)
        into WTForms fields, honoring the view's overrides, labels,
        descriptions, choices and AJAX loaders.
    """
    def __init__(self, session, view):
        """
            :param session:
                SQLAlchemy session, used for uniqueness validators and
                relation query factories
            :param view:
                Model view this converter belongs to
        """
        super(AdminModelConverter, self).__init__()
        self.session = session
        self.view = view
def _get_label(self, name, field_args):
"""
Label for field name. If it is not specified explicitly,
then the views prettify_name method is used to find it.
:param field_args:
Dictionary with additional field arguments
"""
if 'label' in field_args:
return field_args['label']
column_labels = get_property(self.view, 'column_labels', 'rename_columns')
if column_labels:
return column_labels.get(name)
prettify_override = getattr(self.view, 'prettify_name', None)
if prettify_override:
return prettify_override(name)
return prettify_name(name)
def _get_description(self, name, field_args):
if 'description' in field_args:
return field_args['description']
column_descriptions = getattr(self.view, 'column_descriptions', None)
if column_descriptions:
return column_descriptions.get(name)
def _get_field_override(self, name):
form_overrides = getattr(self.view, 'form_overrides', None)
if form_overrides:
return form_overrides.get(name)
return None
def _model_select_field(self, prop, multiple, remote_model, **kwargs):
loader = getattr(self.view, '_form_ajax_refs', {}).get(prop.key)
if loader:
if multiple:
return AjaxSelectMultipleField(loader, **kwargs)
else:
return AjaxSelectField(loader, **kwargs)
if 'query_factory' not in kwargs:
kwargs['query_factory'] = lambda: self.session.query(remote_model)
if multiple:
return QuerySelectMultipleField(**kwargs)
else:
return QuerySelectField(**kwargs)
    def _convert_relation(self, prop, kwargs):
        """
            Convert a relationship property into a form field, or return
            None when the relation is excluded from the form.
        """
        # Check if relation is specified
        form_columns = getattr(self.view, 'form_columns', None)
        if form_columns and prop.key not in form_columns:
            return None
        remote_model = prop.mapper.class_
        column = prop.local_remote_pairs[0][0]
        # If this relation points to local column that's not foreign key, assume
        # that it is backref and use remote column data
        if not column.foreign_keys:
            column = prop.local_remote_pairs[0][1]
        kwargs['label'] = self._get_label(prop.key, kwargs)
        kwargs['description'] = self._get_description(prop.key, kwargs)
        # determine optional/required, or respect existing
        requirement_options = (validators.Optional, validators.InputRequired)
        if not any(isinstance(v, requirement_options) for v in kwargs['validators']):
            # Only non-nullable many-to-one relations are required.
            if column.nullable or prop.direction.name != 'MANYTOONE':
                kwargs['validators'].append(validators.Optional())
            else:
                kwargs['validators'].append(validators.InputRequired())
        # Contribute model-related parameters
        if 'allow_blank' not in kwargs:
            kwargs['allow_blank'] = column.nullable
        # Override field type if necessary
        override = self._get_field_override(prop.key)
        if override:
            return override(**kwargs)
        # To-one relations get a single select; to-many a multi-select.
        if prop.direction.name == 'MANYTOONE' or not prop.uselist:
            return self._model_select_field(prop, False, remote_model, **kwargs)
        elif prop.direction.name == 'ONETOMANY':
            return self._model_select_field(prop, True, remote_model, **kwargs)
        elif prop.direction.name == 'MANYTOMANY':
            return self._model_select_field(prop, True, remote_model, **kwargs)
    def convert(self, model, mapper, prop, field_args, hidden_pk):
        """
            Convert a single model property into a WTForms field.

            :param model:
                Model class
            :param mapper:
                Model mapper
            :param prop:
                Property to convert: a column property, a relationship, or
                a FieldPlaceholder for forced fields
            :param field_args:
                Additional field arguments supplied by the view
            :param hidden_pk:
                If True, primary key columns become a HiddenField
            :return:
                A field instance, or None when the property should not
                appear on the form
        """
        # Properly handle forced fields
        if isinstance(prop, FieldPlaceholder):
            return form.recreate_field(prop.field)
        kwargs = {
            'validators': [],
            'filters': []
        }
        if field_args:
            kwargs.update(field_args)
        if kwargs['validators']:
            # Create a copy of the list since we will be modifying it.
            kwargs['validators'] = list(kwargs['validators'])
        # Check if it is relation or property
        if hasattr(prop, 'direction'):
            return self._convert_relation(prop, kwargs)
        elif hasattr(prop, 'columns'):  # Ignore pk/fk
            # Check if more than one column mapped to the property
            if len(prop.columns) > 1:
                columns = filter_foreign_columns(model.__table__, prop.columns)
                if len(columns) > 1:
                    warnings.warn('Can not convert multiple-column properties (%s.%s)' % (model, prop.key))
                    return None
                column = columns[0]
            else:
                # Grab column
                column = prop.columns[0]
            form_columns = getattr(self.view, 'form_columns', None) or ()
            # Do not display foreign keys - use relations, except when explicitly instructed
            if column.foreign_keys and prop.key not in form_columns:
                return None
            # Only display "real" columns
            if not isinstance(column, Column):
                return None
            unique = False
            if column.primary_key:
                if hidden_pk:
                    # If requested to add hidden field, show it
                    return fields.HiddenField()
                else:
                    # By default, don't show primary keys either
                    # If PK is not explicitly allowed, ignore it
                    if prop.key not in form_columns:
                        return None
                    # Current Unique Validator does not work with multicolumns-pks
                    if not has_multiple_pks(model):
                        kwargs['validators'].append(Unique(self.session,
                                                           model,
                                                           column))
                        unique = True
            # If field is unique, validate it
            if column.unique and not unique:
                kwargs['validators'].append(Unique(self.session,
                                                   model,
                                                   column))
            optional_types = getattr(self.view, 'form_optional_types', (Boolean,))
            # Require a value unless the column is nullable, boolean-like,
            # or has a (server-side) default the database can fill in.
            if (
                not column.nullable
                and not isinstance(column.type, optional_types)
                and not column.default
                and not column.server_default
            ):
                kwargs['validators'].append(validators.InputRequired())
            # Apply label and description if it isn't inline form field
            if self.view.model == mapper.class_:
                kwargs['label'] = self._get_label(prop.key, kwargs)
                kwargs['description'] = self._get_description(prop.key, kwargs)
            # Figure out default value
            default = getattr(column, 'default', None)
            value = None
            if default is not None:
                value = getattr(default, 'arg', None)
                if value is not None:
                    if getattr(default, 'is_callable', False):
                        # Defer callable defaults until form render time.
                        value = lambda: default.arg(None)
                    else:
                        if not getattr(default, 'is_scalar', True):
                            value = None
            if value is not None:
                kwargs['default'] = value
            # Check nullable
            if column.nullable:
                kwargs['validators'].append(validators.Optional())
            # Override field type if necessary
            override = self._get_field_override(prop.key)
            if override:
                return override(**kwargs)
            # Check choices
            form_choices = getattr(self.view, 'form_choices', None)
            if mapper.class_ == self.view.model and form_choices:
                choices = form_choices.get(column.key)
                if choices:
                    return form.Select2Field(
                        choices=choices,
                        allow_blank=column.nullable,
                        **kwargs
                    )
            # Run converter
            converter = self.get_converter(column)
            if converter is None:
                return None
            return converter(model=model, mapper=mapper, prop=prop,
                             column=column, field_args=kwargs)
        return None
@classmethod
def _string_common(cls, column, field_args, **extra):
    """Attach a max-length validator when the column declares a length."""
    max_len = column.type.length
    if isinstance(max_len, int) and max_len:
        field_args['validators'].append(validators.Length(max=max_len))
@converts('String')  # includes VARCHAR, CHAR, and Unicode
def conv_String(self, column, field_args, **extra):
    """Convert string-like columns; enum-backed strings become selects."""
    if hasattr(column.type, 'enums'):
        # Enum-backed string: restrict input to the declared values
        # (plus None when the column is nullable).
        accepted = list(column.type.enums)
        field_args['choices'] = [(value, value) for value in accepted]
        if column.nullable:
            field_args['allow_blank'] = column.nullable
            accepted.append(None)
        field_args['validators'].append(validators.AnyOf(accepted))
        return form.Select2Field(**field_args)
    if column.nullable:
        # Coerce empty strings to None so nullable columns stay NULL.
        field_args.setdefault('filters', []).append(lambda value: value or None)
    self._string_common(column=column, field_args=field_args, **extra)
    return fields.StringField(**field_args)
@converts('Text', 'LargeBinary', 'Binary')  # includes UnicodeText
def conv_Text(self, field_args, **extra):
    """Render large text/binary columns as a textarea field.

    The column arrives through ``**extra`` and is forwarded to
    ``_string_common`` so a declared length still adds a Length validator.
    """
    self._string_common(field_args=field_args, **extra)
    return fields.TextAreaField(**field_args)
@converts('Boolean', 'sqlalchemy.dialects.mssql.base.BIT')
def conv_Boolean(self, field_args, **extra):
    """Render boolean (and MSSQL BIT) columns as a checkbox field."""
    return fields.BooleanField(**field_args)
@converts('Date')
def convert_date(self, field_args, **extra):
    """Date columns: DateField rendered with the admin date-picker widget."""
    field_args['widget'] = form.DatePickerWidget()
    return fields.DateField(**field_args)
@converts('DateTime')  # includes TIMESTAMP
def convert_datetime(self, field_args, **extra):
    """DateTime/TIMESTAMP columns use the admin's DateTimeField."""
    return form.DateTimeField(**field_args)
@converts('Time')
def convert_time(self, field_args, **extra):
    """Time columns use the admin's TimeField."""
    return form.TimeField(**field_args)
@converts('Integer')  # includes BigInteger and SmallInteger
def handle_integer_types(self, column, field_args, **extra):
    """Integer columns; unsigned column types also get a min=0 range check."""
    if getattr(column.type, 'unsigned', False):
        field_args['validators'].append(validators.NumberRange(min=0))
    return fields.IntegerField(**field_args)
@converts('Numeric')  # includes DECIMAL, Float/FLOAT, REAL, and DOUBLE
def handle_decimal_types(self, column, field_args, **extra):
    """Numeric columns become DecimalField."""
    # override default decimal places limit, use database defaults instead
    field_args.setdefault('places', None)
    return fields.DecimalField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.INET')
def conv_PGInet(self, field_args, **extra):
    """PostgreSQL INET: text field validated as an IP address."""
    field_args.setdefault('label', u'IP Address')
    field_args['validators'].append(validators.IPAddress())
    return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.MACADDR')
def conv_PGMacaddr(self, field_args, **extra):
    """PostgreSQL MACADDR: text field validated as a MAC address."""
    field_args.setdefault('label', u'MAC Address')
    field_args['validators'].append(validators.MacAddress())
    return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.UUID')
def conv_PGUuid(self, field_args, **extra):
    """PostgreSQL UUID: text field validated as a UUID."""
    field_args.setdefault('label', u'UUID')
    field_args['validators'].append(validators.UUID())
    return fields.StringField(**field_args)
@converts('sqlalchemy.dialects.postgresql.base.ARRAY')
def conv_ARRAY(self, field_args, **extra):
    """PostgreSQL ARRAY: tag-style select whose value is saved as a list."""
    return form.Select2TagsField(save_as_list=True, **field_args)
@converts('HSTORE')
def conv_HSTORE(self, field_args, **extra):
    """HSTORE: inline list of key/value sub-forms.

    The sub-form class can be overridden via ``field_args['form']``;
    it defaults to HstoreForm.
    """
    inner_form = field_args.pop('form', HstoreForm)
    return InlineHstoreList(InlineFormField(inner_form), **field_args)
@converts('JSON')
def convert_JSON(self, field_args, **extra):
    """JSON columns use the admin's JSONField."""
    return form.JSONField(**field_args)
def _resolve_prop(prop):
"""
Resolve proxied property
:param prop:
Property to resolve
"""
# Try to see if it is proxied property
if hasattr(prop, '_proxied_property'):
return prop._proxied_property
return prop
# Get list of fields and generate form
def get_form(model, converter,
             base_class=form.BaseForm,
             only=None,
             exclude=None,
             field_args=None,
             hidden_pk=False,
             ignore_hidden=True,
             extra_fields=None):
    """
    Generate form from the model.

    :param model:
        Model to generate form from
    :param converter:
        Converter class to use
    :param base_class:
        Base form class
    :param only:
        Include fields
    :param exclude:
        Exclude fields
    :param field_args:
        Dictionary with additional field arguments
    :param hidden_pk:
        Generate hidden field with model primary key or not
    :param ignore_hidden:
        If set to True (default), will ignore properties that start with underscore
    :param extra_fields:
        Dictionary of extra form fields. With `only`, an extra field of the
        same name takes priority over the model property; otherwise extra
        fields are appended after the converted model properties.
    """
    # TODO: Support new 0.8 API
    if not hasattr(model, '_sa_class_manager'):
        raise TypeError('model must be a sqlalchemy mapped model')
    mapper = model._sa_class_manager.mapper
    field_args = field_args or {}
    properties = ((p.key, p) for p in mapper.iterate_properties)
    if only:
        def find(name):
            # If field is in extra_fields, it has higher priority
            if extra_fields and name in extra_fields:
                return name, FieldPlaceholder(extra_fields[name])
            column, path = get_field_with_path(model, name)
            # Reject columns reached through a non-relationship path in
            # another table; those require inline_models instead.
            if path and not hasattr(column.prop, 'direction'):
                raise Exception("form column is located in another table and "
                                "requires inline_models: {0}".format(name))
            # NOTE(review): if get_field_with_path ever returned None here,
            # `column.key` would raise before the None check below — verify.
            name = column.key
            if column is not None and hasattr(column, 'property'):
                return name, column.property
            raise ValueError('Invalid model property name %s.%s' % (model, name))
        # Filter properties while maintaining property order in 'only' list
        properties = (find(x) for x in only)
    elif exclude:
        properties = (x for x in properties if x[0] not in exclude)
    field_dict = {}
    for name, p in properties:
        # Ignore protected properties
        if ignore_hidden and name.startswith('_'):
            continue
        prop = _resolve_prop(p)
        field = converter.convert(model, mapper, prop, field_args.get(name), hidden_pk)
        if field is not None:
            field_dict[name] = field
    # Contribute extra fields
    if not only and extra_fields:
        for name, field in iteritems(extra_fields):
            field_dict[name] = form.recreate_field(field)
    # Build a new form class carrying the generated fields.
    return type(model.__name__ + 'Form', (base_class, ), field_dict)
class InlineModelConverter(InlineModelConverterBase):
    """
    Inline model form helper.
    """
    inline_field_list_type = InlineModelFormList
    """
    Used field list type.

    If you want to do some custom rendering of inline field lists,
    you can create your own wtforms field and use it instead
    """

    def __init__(self, session, view, model_converter):
        """
        Constructor.

        :param session:
            SQLAlchemy session
        :param view:
            Flask-Admin view object
        :param model_converter:
            Model converter class. Will be automatically instantiated with
            appropriate `InlineFormAdmin` instance.
        """
        super(InlineModelConverter, self).__init__(view)
        self.session = session
        self.model_converter = model_converter

    def get_info(self, p):
        """
        Resolve `p` into a form-admin settings object.

        Accepts whatever the base class understands, a SQLAlchemy-mapped
        class (detected via `_sa_class_manager`), or any object exposing a
        `model` attribute whose remaining public attributes become
        form-admin options.
        """
        info = super(InlineModelConverter, self).get_info(p)

        # Special case for model instances
        if info is None:
            if hasattr(p, '_sa_class_manager'):
                return self.form_admin_class(p)
            else:
                model = getattr(p, 'model', None)
                if model is None:
                    raise Exception('Unknown inline model admin: %s' % repr(p))
                attrs = dict()
                for attr in dir(p):
                    if not attr.startswith('_') and attr != 'model':
                        attrs[attr] = getattr(p, attr)
                # Bug fix: this used to `return` here, which made the AJAX
                # reference resolution below unreachable for this path.
                info = self.form_admin_class(model, **attrs)

        # Resolve AJAX FKs
        info._form_ajax_refs = self.process_ajax_refs(info)

        return info

    def process_ajax_refs(self, info):
        """
        Instantiate AJAX loaders declared in ``info.form_ajax_refs`` and
        register each on the view under a model-prefixed name.
        """
        refs = getattr(info, 'form_ajax_refs', None)

        result = {}

        if refs:
            for name, opts in iteritems(refs):
                new_name = '%s-%s' % (info.model.__name__.lower(), name)

                if isinstance(opts, dict):
                    loader = create_ajax_loader(info.model, self.session, new_name, name, opts)
                else:
                    # Already a loader instance
                    loader = opts

                result[name] = loader
                self.view._form_ajax_refs[new_name] = loader

        return result

    def contribute(self, model, form_class, inline_model):
        """
        Generate form fields for inline forms and contribute them to
        the `form_class`

        :param model:
            Model class
        :param form_class:
            Form to add properties to
        :param inline_model:
            Inline model. Can be one of:

             - ``tuple``, first value is related model instance,
               second is dictionary with options
             - ``InlineFormAdmin`` instance
             - Model class

        :return:
            Form class
        """
        mapper = model._sa_class_manager.mapper
        info = self.get_info(inline_model)

        # Find property from target model to current model
        # Use the base mapper to support inheritance
        target_mapper = info.model._sa_class_manager.mapper.base_mapper

        reverse_prop = None
        for prop in target_mapper.iterate_properties:
            if hasattr(prop, 'direction') and prop.direction.name in ('MANYTOONE', 'MANYTOMANY'):
                if issubclass(model, prop.mapper.class_):
                    reverse_prop = prop
                    break
        else:
            raise Exception('Cannot find reverse relation for model %s' % info.model)

        # Find forward property (the for/else above guarantees reverse_prop
        # is set when we reach this point)
        forward_prop = None
        if reverse_prop.direction.name == 'MANYTOONE':
            candidate = 'ONETOMANY'
        else:
            candidate = 'MANYTOMANY'

        for prop in mapper.iterate_properties:
            if hasattr(prop, 'direction') and prop.direction.name == candidate:
                if prop.mapper.class_ == target_mapper.class_:
                    forward_prop = prop
                    break
        else:
            raise Exception('Cannot find forward relation for model %s' % info.model)

        # Remove reverse property from the list
        ignore = [reverse_prop.key]
        if info.form_excluded_columns:
            exclude = ignore + list(info.form_excluded_columns)
        else:
            exclude = ignore

        # Create converter
        converter = self.model_converter(self.session, info)

        # Create form
        child_form = info.get_form()
        if child_form is None:
            child_form = get_form(info.model,
                                  converter,
                                  base_class=info.form_base_class or form.BaseForm,
                                  only=info.form_columns,
                                  exclude=exclude,
                                  field_args=info.form_args,
                                  hidden_pk=True,
                                  extra_fields=info.form_extra_fields)

        # Post-process form
        child_form = info.postprocess_form(child_form)

        kwargs = dict()

        label = self.get_label(info, forward_prop.key)
        if label:
            kwargs['label'] = label

        if self.view.form_args:
            field_args = self.view.form_args.get(forward_prop.key, {})
            kwargs.update(**field_args)

        # Contribute field
        setattr(form_class,
                forward_prop.key,
                self.inline_field_list_type(child_form,
                                            self.session,
                                            info.model,
                                            reverse_prop.key,
                                            info,
                                            **kwargs))

        return form_class
|
|
__author__ = 'Thomas Rueckstiess and Tom Schaul'
from scipy import pi, dot, array, ones, exp
from scipy.linalg import norm
from pybrain.rl.environments.cartpole.nonmarkovpole import NonMarkovPoleEnvironment
from pybrain.rl.environments.cartpole.doublepole import DoublePoleEnvironment
from pybrain.rl.environments import EpisodicTask
from cartpole import CartPoleEnvironment
from pybrain.utilities import crossproduct
class BalanceTask(EpisodicTask):
    """ The task of balancing some pole(s) on a cart """

    def __init__(self, env=None, maxsteps=1000, desiredValue=0):
        """
        :key env: (optional) an instance of a CartPoleEnvironment (or a subclass thereof)
        :key maxsteps: maximal number of steps (default: 1000)
        """
        self.desiredValue = desiredValue
        if env is None:
            env = CartPoleEnvironment()
        EpisodicTask.__init__(self, env)
        self.N = maxsteps
        self.t = 0

        # scale position and angle, don't scale velocities (unknown maximum)
        self.sensor_limits = [(-3, 3)]
        for i in range(1, self.outdim):
            # In the non-Markov setting every second sensor is left unscaled.
            if isinstance(self.env, NonMarkovPoleEnvironment) and i % 2 == 0:
                self.sensor_limits.append(None)
            else:
                self.sensor_limits.append((-pi, pi))

        # actor between -10 and 10 Newton
        self.actor_limits = [(-50, 50)]

    def reset(self):
        EpisodicTask.reset(self)
        self.t = 0

    def performAction(self, action):
        """Count the step, then delegate to the episodic task."""
        self.t += 1
        EpisodicTask.performAction(self, action)

    def isFinished(self):
        """Episode ends on pole fall, cart leaving bounds, or timeout."""
        if max(abs(a) for a in self.env.getPoleAngles()) > 0.7:
            # pole has fallen
            return True
        elif abs(self.env.getCartPosition()) > 2.4:
            # cart is out of its border conditions
            return True
        elif self.t >= self.N:
            # maximal timesteps
            return True
        return False

    def getReward(self):
        # Materialize as a list: the angles are inspected twice below, and a
        # bare map() iterator would be exhausted after min() on Python 3.
        angles = [abs(a) for a in self.env.getPoleAngles()]
        s = abs(self.env.getCartPosition())
        if min(angles) < 0.05 and abs(s) < 0.05:
            # balanced near the centre: no penalty
            reward = 0
        elif max(angles) > 0.7 or abs(s) > 2.4:
            # failure: penalize every step that was not played out
            reward = -2 * (self.N - self.t)
        else:
            reward = -1
        return reward

    def setMaxLength(self, n):
        self.N = n
class JustBalanceTask(BalanceTask):
    """ this task does not require the cart to be moved to the middle. """

    def getReward(self):
        # List, not map(): angles are consumed by both min() and max(),
        # which would fail with an exhausted iterator on Python 3.
        angles = [abs(a) for a in self.env.getPoleAngles()]
        s = abs(self.env.getCartPosition())
        if min(angles) < 0.05:
            reward = 0
        elif max(angles) > 0.7 or abs(s) > 2.4:
            reward = -2 * (self.N - self.t)
        else:
            reward = -1
        return reward
class EasyBalanceTask(BalanceTask):
    """ this task is a bit easier to learn because it gives gradual feedback
    about the distance to the centre. """

    def getReward(self):
        # List, not map(): angles are consumed by both min() and max(),
        # which would fail with an exhausted iterator on Python 3.
        angles = [abs(a) for a in self.env.getPoleAngles()]
        s = abs(self.env.getCartPosition())
        if min(angles) < 0.05 and abs(s) < 0.05:
            reward = 0
        elif max(angles) > 0.7 or abs(s) > 2.4:
            reward = -2 * (self.N - self.t)
        else:
            # gradual feedback: penalty grows with distance from the centre
            reward = -abs(s) / 2
        return reward
class DiscreteBalanceTask(BalanceTask):
    """ here there are 3 discrete actions, left, right, nothing. """

    numActions = 3

    def __init__(self, env=None, maxsteps=1000):
        """
        :key env: (optional) an instance of a CartPoleEnvironment (or a subclass thereof)
        :key maxsteps: maximal number of steps (default: 1000)
        """
        if env is None:
            env = CartPoleEnvironment()
        EpisodicTask.__init__(self, env)
        self.N = maxsteps
        self.t = 0

        # no scaling of sensors
        self.sensor_limits = [None] * self.env.outdim

        # scale actor
        self.actor_limits = [(-50, 50)]

    def getObservation(self):
        """ a filtered mapping to getSample of the underlying environment. """
        sensors = self.env.getSensors()
        if self.sensor_limits:
            sensors = self.normalize(sensors)
        return sensors

    def performAction(self, action):
        # Map the discrete action index {0..numActions-1} onto a symmetric
        # range centred on zero before passing it on.
        action = action - (self.numActions - 1) / 2.
        BalanceTask.performAction(self, action)

    def getReward(self):
        # List, not map(): angles are consumed by both min() and max(),
        # which would fail with an exhausted iterator on Python 3.
        angles = [abs(a) for a in self.env.getPoleAngles()]
        s = abs(self.env.getCartPosition())
        if min(angles) < 0.05:  # and abs(s) < 0.05:
            reward = 1.0
        elif max(angles) > 0.7 or abs(s) > 2.4:
            reward = -1. * (self.N - self.t)
        else:
            reward = 0
        return reward
class DiscreteNoHelpTask(DiscreteBalanceTask):
    """ Discrete task that only penalizes failure; no positive reward
    for keeping the pole up. """

    def getReward(self):
        # List comprehension for consistency with the sibling tasks and
        # Python 3 safety (map() returns a one-shot iterator there).
        angles = [abs(a) for a in self.env.getPoleAngles()]
        s = abs(self.env.getCartPosition())
        if max(angles) > 0.7 or abs(s) > 2.4:
            reward = -1. * (self.N - self.t)
        else:
            reward = 0.0
        return reward
class DiscretePOMDPTask(DiscreteBalanceTask):
    """ Partially observable variant: only sensors 0 and 2 of the
    environment are exposed to the agent. """

    def __init__(self, env=None, maxsteps=1000):
        """
        :key env: (optional) an instance of a CartPoleEnvironment (or a subclass thereof)
        :key maxsteps: maximal number of steps (default: 1000)
        """
        if env is None:
            env = CartPoleEnvironment()
        EpisodicTask.__init__(self, env)
        self.N = maxsteps
        self.t = 0

        # no scaling of sensors
        self.sensor_limits = [None] * 2

        # scale actor
        self.actor_limits = [(-50, 50)]

    @property
    def outdim(self):
        return 2

    def getObservation(self):
        """ a filtered mapping to getSample of the underlying environment. """
        # Fetch the sensor vector once instead of calling getSensors() twice.
        all_sensors = self.env.getSensors()
        sensors = [all_sensors[0], all_sensors[2]]
        if self.sensor_limits:
            sensors = self.normalize(sensors)
        return sensors
class LinearizedBalanceTask(BalanceTask):
    """ Here we follow the setup in
    Peters J, Vijayakumar S, Schaal S (2003) Reinforcement learning for humanoid robotics.
    TODO: This stuff is not yet compatible to any other cartpole environment. """

    # Quadratic weights applied to the squared sensor values in getReward.
    Q = array([12., 0.25, 1.25, 1.0])

    def getReward(self):
        """Quadratic state cost plus a small penalty on the applied action."""
        state_cost = dot(self.env.sensors ** 2, self.Q)
        action_cost = self.env.action[0] ** 2 * 0.01
        return state_cost + action_cost

    def isFinished(self):
        """Episode ends on pole fall (> pi/6), cart out of range, or timeout."""
        if abs(self.env.getPoleAngles()[0]) > 0.5235988:  # pi/6
            # pole has fallen
            return True
        if abs(self.env.getCartPosition()) > 1.5:
            # cart is out of its border conditions
            return True
        # maximal timesteps reached?
        return self.t >= self.N
class DiscreteBalanceTaskRBF(DiscreteBalanceTask):
    """ From Lagoudakis & Parr, 2003:
    With RBF features to generate a 10-dimensional observation (including bias),
    also no cart-restrictions, no helpful rewards, and a single pole. """

    CENTERS = array(crossproduct([[-pi / 4, 0, pi / 4], [1, 0, -1]]))

    def getReward(self):
        # Only outright failure (pole beyond ~1.6 rad) is penalized.
        angles = [abs(a) for a in self.env.getPoleAngles()]
        if max(angles) > 1.6:
            reward = -1.
        else:
            reward = 0.0
        return reward

    def isFinished(self):
        if max(abs(a) for a in self.env.getPoleAngles()) > 1.6:
            return True
        elif self.t >= self.N:
            return True
        return False

    def getObservation(self):
        # res[0] is the constant bias feature; the rest are Gaussian RBF
        # activations of the sensors around the fixed CENTERS.
        res = ones(1 + len(self.CENTERS))
        sensors = self.env.getSensors()[:-2]
        # List comprehension instead of map(): array(map(...)) produces a
        # useless 0-d object array on Python 3, where map() is an iterator.
        dists = array([norm(c) for c in self.CENTERS - sensors])
        res[1:] = exp(-dists ** 2 / 2)
        return res

    @property
    def outdim(self):
        return 1 + len(self.CENTERS)
class DiscreteDoubleBalanceTaskRBF(DiscreteBalanceTaskRBF):
    """ Same idea, but two poles. """

    CENTERS = array(crossproduct([[-pi / 4, 0, pi / 4], [1, 0, -1]] * 2))

    def __init__(self, env=None, maxsteps=1000):
        if env is None:
            env = DoublePoleEnvironment()
        DiscreteBalanceTask.__init__(self, env, maxsteps)
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import requests
import uuid
from tempest import clients
from tempest.common import rest_client
from tempest import config
from tempest import exceptions
import tempest.test
CONF = config.CONF
class MuranoClient(rest_client.RestClient):
    """Thin REST client for the murano application-catalog v1 API.

    GET/POST/PUT/PATCH helpers return ``(response, parsed_json_body)``;
    DELETE helpers return the raw rest_client result.
    """

    def __init__(self, auth_provider):
        super(MuranoClient, self).__init__(auth_provider)
        self.service = 'application_catalog'
        self.endpoint_url = 'publicURL'

    def get_environments_list(self):
        resp, body = self.get('v1/environments')
        return resp, json.loads(body)

    def create_environment(self, name):
        # json.dumps escapes quotes/backslashes in the name, unlike the
        # previous hand-built '%s' string interpolation.
        post_body = json.dumps({'name': name})
        resp, body = self.post('v1/environments', post_body)
        return resp, json.loads(body)

    def delete_environment(self, environment_id):
        return self.delete('v1/environments/{0}'.format(environment_id))

    def update_environment(self, environment_id):
        post_body = json.dumps({'name': 'changed-environment-name'})
        resp, body = self.put('v1/environments/{0}'.format(environment_id),
                              post_body)
        return resp, json.loads(body)

    def get_environment(self, environment_id):
        resp, body = self.get('v1/environments/{0}'.format(environment_id))
        return resp, json.loads(body)

    def create_session(self, environment_id):
        post_body = None
        resp, body = self.post(
            'v1/environments/{0}/configure'.format(environment_id),
            post_body
        )
        return resp, json.loads(body)

    def delete_session(self, environment_id, session_id):
        return self.delete(
            'v1/environments/{0}/sessions/{1}'.format(environment_id,
                                                      session_id))

    def get_session(self, environment_id, session_id):
        resp, body = self.get(
            'v1/environments/{0}/sessions/{1}'.format(environment_id,
                                                      session_id))
        return resp, json.loads(body)

    def create_service(self, environment_id, session_id, post_body):
        post_body = json.dumps(post_body)
        # Session scoping is carried in the X-Configuration-Session header.
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        resp, body = self.post(
            'v1/environments/{0}/services'.format(environment_id),
            post_body,
            headers
        )
        return resp, json.loads(body)

    def delete_service(self, environment_id, session_id, service_id):
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        return self.delete(
            'v1/environments/{0}/services/{1}'.format(environment_id,
                                                      service_id),
            headers
        )

    def get_services_list(self, environment_id, session_id):
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        resp, body = self.get(
            'v1/environments/{0}/services'.format(environment_id),
            headers
        )
        return resp, json.loads(body)

    def get_service(self, environment_id, session_id, service_id):
        headers = self.get_headers()
        headers.update(
            {'X-Configuration-Session': session_id}
        )
        resp, body = self.get(
            'v1/environments/{0}/services/{1}'.format(environment_id,
                                                      service_id),
            headers
        )
        return resp, json.loads(body)

    def get_list_packages(self):
        resp, body = self.get('v1/catalog/packages')
        return resp, json.loads(body)

    def get_package(self, id):
        resp, body = self.get('v1/catalog/packages/{0}'.format(id))
        return resp, json.loads(body)

    def upload_package(self, package_name, body):
        """Upload the bundled DummyTestApp.zip as a package via raw requests.

        Uses ``requests`` directly because the multipart upload is not
        supported by the base rest_client.
        """
        __location__ = os.path.realpath(os.path.join(
            os.getcwd(), os.path.dirname(__file__)))
        headers = {'X-Auth-Token': self.auth_provider.get_token()}
        post_body = {'JsonString': json.dumps(body)}
        request_url = '{endpoint}{url}'.format(endpoint=self.base_url,
                                               url='/v1/catalog/packages')
        # Bug fix: close the archive deterministically instead of leaking
        # the file handle opened for the multipart upload.
        with open(os.path.join(__location__, 'v1/DummyTestApp.zip'),
                  'rb') as package:
            files = {'%s' % package_name: package}
            resp = requests.post(request_url, files=files, data=post_body,
                                 headers=headers)
        return resp

    def update_package(self, id, post_body):
        headers = {
            'X-Auth-Token': self.auth_provider.get_token(),
            'content-type': 'application/murano-packages-json-patch'
        }
        resp, body = self.patch('v1/catalog/packages/{0}'.format(id),
                                json.dumps(post_body), headers=headers)
        return resp, json.loads(body)

    def delete_package(self, id):
        return self.delete('v1/catalog/packages/{0}'.format(id))

    def download_package(self, id):
        return self.get('v1/catalog/packages/{0}/download'.format(id))

    def get_ui_definition(self, id):
        return self.get('v1/catalog/packages/{0}/ui'.format(id))

    def get_logo(self, id):
        return self.get('v1/catalog/packages/{0}/logo'.format(id))

    def list_categories(self):
        resp, body = self.get('v1/catalog/packages/categories')
        return resp, json.loads(body)
class TestCase(tempest.test.BaseTestCase):
    """Base murano API test case: owns a client and cleans up environments."""

    @classmethod
    def setUpClass(cls):
        super(TestCase, cls).setUpClass()
        # If no credentials are provided, the Manager will use those
        # in CONF.identity and generate an auth_provider from them
        manager = clients.Manager()
        cls.client = MuranoClient(manager.auth_provider)

    def setUp(self):
        super(TestCase, self).setUp()
        # Environments created during the test; deleted again in tearDown.
        self.environments = []

    def tearDown(self):
        super(TestCase, self).tearDown()
        for env in self.environments:
            try:
                self.client.delete_environment(env['id'])
            except exceptions.NotFound:
                # Already gone -- cleanup is best effort.
                pass

    def create_environment(self, name):
        """Create an environment and register it for automatic cleanup."""
        created = self.client.create_environment(name)[1]
        self.environments.append(created)
        return created

    def create_demo_service(self, environment_id, session_id):
        """Add a canned demo service definition to the given session."""
        service_definition = {
            "?": {
                "id": uuid.uuid4().hex,
                "type": "io.murano.tests.demoService"
            },
            "availabilityZone": "nova",
            "name": "demo",
            "unitNamingPattern": "host",
            "osImage": {
                "type": "cirros.demo",
                "name": "demo",
                "title": "Demo"
            },
            "units": [{}],
            "flavor": "m1.small",
            "configuration": "standalone"
        }
        return self.client.create_service(environment_id,
                                          session_id,
                                          service_definition)
class NegativeTestCase(TestCase):
    """Test case variant carrying a second (alternate) murano client."""

    @classmethod
    def setUpClass(cls):
        super(NegativeTestCase, cls).setUpClass()
        # If no credentials are provided, the Manager will use those
        # in CONF.identity and generate an auth_provider from them
        manager = clients.Manager()
        cls.alt_client = MuranoClient(manager.auth_provider)
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from dataclasses import _MISSING_TYPE, dataclass, field
from typing import Any, List, Optional
import torch
from omegaconf import II, MISSING
from fairseq.dataclass.constants import (
DATASET_IMPL_CHOICES,
DDP_BACKEND_CHOICES,
DDP_COMM_HOOK_CHOICES,
GENERATION_CONSTRAINTS_CHOICES,
GENERATION_DECODING_FORMAT_CHOICES,
LOG_FORMAT_CHOICES,
PIPELINE_CHECKPOINT_CHOICES,
PRINT_ALIGNMENT_CHOICES,
ZERO_SHARDING_CHOICES,
)
@dataclass
class FairseqDataclass:
    """fairseq base dataclass that supported fetching attributes and metas"""

    _name: Optional[str] = None

    @staticmethod
    def name():
        return None

    def _get_all_attributes(self) -> List[str]:
        # All declared dataclass field names, in declaration order.
        return list(self.__dataclass_fields__.keys())

    def _get_meta(
        self, attribute_name: str, meta: str, default: Optional[Any] = None
    ) -> Any:
        return self.__dataclass_fields__[attribute_name].metadata.get(meta, default)

    def _get_name(self, attribute_name: str) -> str:
        return self.__dataclass_fields__[attribute_name].name

    def _get_default(self, attribute_name: str) -> Any:
        """Return the effective default for *attribute_name*.

        Interpolation strings ("${...}") — whether in the current value or
        the declared default — are returned as strings; an explicitly
        overridden value wins over the declared default; otherwise the
        field's default (or default_factory result) is returned.
        """
        fld = self.__dataclass_fields__[attribute_name]
        if hasattr(self, attribute_name):
            current = getattr(self, attribute_name)
            if str(current).startswith("${"):
                return str(current)
            if str(fld.default).startswith("${"):
                return str(fld.default)
            if current != fld.default:
                return current
        if not isinstance(fld.default_factory, _MISSING_TYPE):
            return fld.default_factory()
        return fld.default

    def _get_type(self, attribute_name: str) -> Any:
        return self.__dataclass_fields__[attribute_name].type

    def _get_help(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, "help")

    def _get_argparse_const(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, "argparse_const")

    def _get_argparse_alias(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, "argparse_alias")

    def _get_choices(self, attribute_name: str) -> Any:
        return self._get_meta(attribute_name, "choices")

    @classmethod
    def from_namespace(cls, args):
        """Build a config from an argparse-style namespace; pass through
        unchanged when *args* is already an instance of *cls*."""
        if isinstance(args, cls):
            return args
        config = cls()
        for field_name in config.__dataclass_fields__.keys():
            if field_name.startswith("_"):
                # private member, skip
                continue
            if hasattr(args, field_name):
                setattr(config, field_name, getattr(args, field_name))
        return config
@dataclass
class CommonConfig(FairseqDataclass):
    """Common parameters shared by all fairseq jobs.

    This is the core dataclass including common parameters shared by all
    different jobs. Please append your params to other dataclasses if they
    were used for a particular purpose or task, such as those dedicated for
    `distributed training`, `optimization`, etc.
    """

    # --- progress & logging -------------------------------------------------
    no_progress_bar: bool = field(
        default=False, metadata={"help": "disable progress bar"}
    )
    log_interval: int = field(
        default=100,
        metadata={
            "help": "log progress every N batches (when progress bar is disabled)"
        },
    )
    log_format: Optional[LOG_FORMAT_CHOICES] = field(
        default=None, metadata={"help": "log format to use"}
    )
    log_file: Optional[str] = field(
        default=None, metadata={"help": "log file to copy metrics to."}
    )
    tensorboard_logdir: Optional[str] = field(
        default=None,
        metadata={
            "help": "path to save logs for tensorboard, should match --logdir "
            "of running tensorboard (default: no tensorboard logging)"
        },
    )
    wandb_project: Optional[str] = field(
        default=None,
        metadata={"help": "Weights and Biases project name to use for logging"},
    )
    azureml_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Log scalars to AzureML context"},
    )

    # --- reproducibility & device selection ---------------------------------
    seed: int = field(
        default=1, metadata={"help": "pseudo random number generator seed"}
    )
    cpu: bool = field(default=False, metadata={"help": "use CPU instead of CUDA"})
    tpu: bool = field(default=False, metadata={"help": "use TPU instead of CUDA"})

    # --- reduced / mixed precision (bf16, fp16, amp) ------------------------
    bf16: bool = field(default=False, metadata={"help": "use bfloat16; implies --tpu"})
    memory_efficient_bf16: bool = field(
        default=False,
        metadata={
            "help": "use a memory-efficient version of BF16 training; implies --bf16"
        },
    )
    fp16: bool = field(default=False, metadata={"help": "use FP16"})
    memory_efficient_fp16: bool = field(
        default=False,
        metadata={
            "help": "use a memory-efficient version of FP16 training; implies --fp16"
        },
    )
    fp16_no_flatten_grads: bool = field(
        default=False, metadata={"help": "don't flatten FP16 grads tensor"}
    )
    fp16_init_scale: int = field(
        default=2**7, metadata={"help": "default FP16 loss scale"}
    )
    fp16_scale_window: Optional[int] = field(
        default=None,
        metadata={"help": "number of updates before increasing loss scale"},
    )
    fp16_scale_tolerance: float = field(
        default=0.0,
        metadata={
            "help": "pct of updates that can overflow before decreasing the loss scale"
        },
    )
    on_cpu_convert_precision: bool = field(
        default=False,
        metadata={
            "help": "if set, the floating point conversion to fp16/bf16 runs on CPU. "
            "This reduces bus transfer time and GPU memory usage."
        },
    )
    min_loss_scale: float = field(
        default=1e-4,
        metadata={
            "help": "minimum FP16/AMP loss scale, after which training is stopped"
        },
    )
    threshold_loss_scale: Optional[float] = field(
        default=None, metadata={"help": "threshold FP16 loss scale from below"}
    )
    amp: bool = field(default=False, metadata={"help": "use automatic mixed precision"})
    amp_batch_retries: int = field(
        default=2,
        metadata={
            "help": "number of retries of same batch after reducing loss scale with AMP"
        },
    )
    amp_init_scale: int = field(
        default=2**7, metadata={"help": "default AMP loss scale"}
    )
    amp_scale_window: Optional[int] = field(
        default=None,
        metadata={"help": "number of updates before increasing AMP loss scale"},
    )

    # --- extensions & miscellaneous -----------------------------------------
    user_dir: Optional[str] = field(
        default=None,
        metadata={
            "help": "path to a python module containing custom extensions (tasks and/or architectures)"
        },
    )
    empty_cache_freq: int = field(
        default=0,
        metadata={"help": "how often to clear the PyTorch CUDA cache (0 to disable)"},
    )
    all_gather_list_size: int = field(
        default=16384,
        metadata={"help": "number of bytes reserved for gathering stats from workers"},
    )
    model_parallel_size: int = field(
        default=1, metadata={"help": "total number of GPUs to parallelize model over"}
    )
    quantization_config_path: Optional[str] = field(
        default=None, metadata={"help": "path to quantization config file"}
    )
    profile: bool = field(
        default=False, metadata={"help": "enable autograd profiler emit_nvtx"}
    )
    reset_logging: bool = field(
        default=False,
        metadata={
            "help": "when using Hydra, reset the logging at the beginning of training"
        },
    )
    suppress_crashes: bool = field(
        default=False,
        metadata={
            "help": "suppress crashes when training with the hydra_train entry point so that the "
            "main method can return a value (useful for sweeps)"
        },
    )
    use_plasma_view: bool = field(
        default=False, metadata={"help": "Store indices and sizes in shared memory"}
    )
    plasma_path: Optional[str] = field(
        default="/tmp/plasma",
        metadata={
            "help": "path to run plasma_store, defaults to /tmp/plasma. Paths outside /tmp tend to fail."
        },
    )
@dataclass
class DistributedTrainingConfig(FairseqDataclass):
    """Options controlling distributed / multi-GPU training: world size and
    rank, the DDP backend and its tuning knobs, SlowMo/localsgd settings,
    pipeline model parallelism, and fully-sharded (FSDP) options."""
    distributed_world_size: int = field(
        default=max(1, torch.cuda.device_count()),
        metadata={
            "help": "total number of GPUs across all nodes (default: all visible GPUs)"
        },
    )
    distributed_num_procs: Optional[int] = field(
        default=max(1, torch.cuda.device_count()),
        metadata={
            "help": "total number of processes to fork (default: all visible GPUs)"
        },
    )
    distributed_rank: Optional[int] = field(
        default=0, metadata={"help": "rank of the current worker"}
    )
    distributed_backend: str = field(
        default="nccl", metadata={"help": "distributed backend"}
    )
    distributed_init_method: Optional[str] = field(
        default=None,
        metadata={
            "help": "typically tcp://hostname:port that will be used to "
            "establish initial connetion"
        },
    )
    distributed_port: int = field(
        default=-1,
        metadata={
            "help": "port number (not required if using --distributed-init-method)"
        },
    )
    device_id: int = field(
        default=0,
        metadata={
            "help": "which GPU to use (usually configured automatically)",
            "argparse_alias": "--local_rank",
        },
    )
    distributed_no_spawn: bool = field(
        default=False,
        metadata={
            "help": "do not spawn multiple processes even if multiple GPUs are visible"
        },
    )
    ddp_backend: DDP_BACKEND_CHOICES = field(
        default="pytorch_ddp", metadata={"help": "DistributedDataParallel backend"}
    )
    ddp_comm_hook: DDP_COMM_HOOK_CHOICES = field(
        default="none", metadata={"help": "communication hook"}
    )
    bucket_cap_mb: int = field(
        default=25, metadata={"help": "bucket size for reduction"}
    )
    fix_batches_to_gpus: bool = field(
        default=False,
        metadata={
            "help": "don't shuffle batches between GPUs; this reduces overall "
            "randomness and may affect precision but avoids the cost of re-reading the data"
        },
    )
    find_unused_parameters: bool = field(
        default=False,
        metadata={
            "help": "disable unused parameter detection (not applicable to "
            "--ddp-backend=legacy_ddp)"
        },
    )
    gradient_as_bucket_view: bool = field(
        default=False,
        metadata={
            "help": "when set to True, gradients will be views pointing to different offsets of allreduce communication buckets. This can reduce peak memory usage, where the saved memory size will be equal to the total gradients size. "
            "--gradient-as-bucket-view=gradient_as_bucket_view)"
        },
    )
    fast_stat_sync: bool = field(
        default=False,
        metadata={"help": "[deprecated] this is now defined per Criterion"},
    )
    heartbeat_timeout: int = field(
        default=-1,
        metadata={
            "help": "kill the job if no progress is made in N seconds; "
            "set to -1 to disable"
        },
    )
    broadcast_buffers: bool = field(
        default=False,
        metadata={
            "help": "Copy non-trainable parameters between GPUs, such as "
            "batchnorm population statistics"
        },
    )
    slowmo_momentum: Optional[float] = field(
        default=None,
        metadata={
            "help": "SlowMo momentum term; by default use 0.0 for 16 GPUs, "
            "0.2 for 32 GPUs; 0.5 for 64 GPUs, 0.6 for > 64 GPUs"
        },
    )
    slowmo_base_algorithm: str = field(
        default="localsgd",
        metadata={
            "help": "Base algorithm. Either 'localsgd' or 'sgp'. Please refer "
            "to the documentation of 'slowmo_base_algorithm' parameter in "
            "https://fairscale.readthedocs.io/en/latest/api/experimental/nn/slowmo_ddp.html "
            "for more details"
        },
    )
    localsgd_frequency: int = field(
        default=3, metadata={"help": "Local SGD allreduce frequency"}
    )
    nprocs_per_node: int = field(
        default=max(1, torch.cuda.device_count()),
        metadata={
            "help": "number of GPUs in each node. An allreduce operation across GPUs in "
            "a node is very fast. Hence, we do allreduce across GPUs in a node, "
            "and gossip across different nodes"
        },
    )
    # --- pipeline model parallelism ---
    pipeline_model_parallel: bool = field(
        default=False,
        metadata={"help": "if set, use pipeline model parallelism across GPUs"},
    )
    pipeline_balance: Optional[str] = field(
        default=None,
        metadata={
            "help": "partition the model into N_K pieces, where each piece "
            "contains N_i layers. The sum(args.pipeline_balance) "
            "should equal the total number of layers in the model"
        },
    )
    pipeline_devices: Optional[str] = field(
        default=None,
        metadata={
            "help": "a list of device indices indicating which device to place "
            "each of the N_K partitions. The length of this list should "
            "equal the length of the --pipeline-balance argument"
        },
    )
    pipeline_chunks: Optional[int] = field(
        default=0, metadata={"help": "microbatch count for pipeline model parallelism"}
    )
    pipeline_encoder_balance: Optional[str] = field(
        default=None,
        metadata={
            "help": "partition the pipeline parallel encoder into N_K pieces, where each piece "
            "contains N_i layers. The sum(args.pipeline_encoder_balance) "
            "should equal the total number of encoder layers in the model"
        },
    )
    pipeline_encoder_devices: Optional[str] = field(
        default=None,
        metadata={
            "help": "a list of device indices indicating which device to place "
            "each of the N_K partitions. The length of this list should "
            "equal the length of the --pipeline-encoder-balance argument"
        },
    )
    pipeline_decoder_balance: Optional[str] = field(
        default=None,
        metadata={
            "help": "partition the pipeline parallel decoder into N_K pieces, where each piece "
            "contains N_i layers. The sum(args.pipeline_decoder_balance) "
            "should equal the total number of decoder layers in the model"
        },
    )
    pipeline_decoder_devices: Optional[str] = field(
        default=None,
        metadata={
            "help": "a list of device indices indicating which device to place "
            "each of the N_K partitions. The length of this list should "
            "equal the length of the --pipeline-decoder-balance argument"
        },
    )
    pipeline_checkpoint: PIPELINE_CHECKPOINT_CHOICES = field(
        default="never",
        metadata={"help": "checkpointing mode for pipeline model parallelism"},
    )
    zero_sharding: ZERO_SHARDING_CHOICES = field(
        default="none", metadata={"help": "ZeRO sharding"}
    )
    # mirrored from the common config via OmegaConf interpolation
    fp16: bool = II("common.fp16")
    memory_efficient_fp16: bool = II("common.memory_efficient_fp16")
    tpu: bool = II("common.tpu")
    # configuration for --ddp-backend=fully_sharded
    no_reshard_after_forward: bool = field(
        default=False,
        metadata={"help": "don't reshard parameters after forward pass"},
    )
    fp32_reduce_scatter: bool = field(
        default=False,
        metadata={"help": "reduce-scatter grads in FP32"},
    )
    cpu_offload: bool = field(
        default=False, metadata={"help": "offload FP32 params to CPU"}
    )
    use_sharded_state: bool = field(
        default=False,
        metadata={"help": "use sharded checkpoint files"},
    )
    not_fsdp_flatten_parameters: bool = field(
        default=False,
        metadata={"help": "not flatten parameter param for fsdp"},
    )
@dataclass
class DatasetConfig(FairseqDataclass):
    """Options for data loading and batching: worker count, batch sizing
    (tokens / sentences), train/valid/test subset selection, validation
    scheduling, and sharded generation."""
    num_workers: int = field(
        default=1, metadata={"help": "how many subprocesses to use for data loading"}
    )
    skip_invalid_size_inputs_valid_test: bool = field(
        default=False,
        metadata={"help": "ignore too long or too short lines in valid and test set"},
    )
    max_tokens: Optional[int] = field(
        default=None, metadata={"help": "maximum number of tokens in a batch"}
    )
    batch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": "number of examples in a batch",
            "argparse_alias": "--max-sentences",
        },
    )
    required_batch_size_multiple: int = field(
        default=8, metadata={"help": "batch size will be a multiplier of this value"}
    )
    required_seq_len_multiple: int = field(
        default=1,
        metadata={
            "help": "maximum sequence length in batch will be a multiplier of this value"
        },
    )
    dataset_impl: Optional[DATASET_IMPL_CHOICES] = field(
        default=None, metadata={"help": "output dataset implementation"}
    )
    data_buffer_size: int = field(
        default=10, metadata={"help": "Number of batches to preload"}
    )
    train_subset: str = field(
        default="train",
        metadata={"help": "data subset to use for training (e.g. train, valid, test)"},
    )
    valid_subset: str = field(
        default="valid",
        metadata={
            "help": "comma separated list of data subsets to use for validation"
            " (e.g. train, valid, test)"
        },
    )
    combine_valid_subsets: Optional[bool] = field(
        default=None,
        metadata={
            "help": "comma separated list of data subsets to use for validation"
            " (e.g. train, valid, test)",
            "argparse_alias": "--combine-val",
        },
    )
    ignore_unused_valid_subsets: Optional[bool] = field(
        default=False,
        metadata={"help": "do not raise error if valid subsets are ignored"},
    )
    validate_interval: int = field(
        default=1, metadata={"help": "validate every N epochs"}
    )
    validate_interval_updates: int = field(
        default=0, metadata={"help": "validate every N updates"}
    )
    validate_after_updates: int = field(
        default=0, metadata={"help": "dont validate until reaching this many updates"}
    )
    fixed_validation_seed: Optional[int] = field(
        default=None, metadata={"help": "specified random seed for validation"}
    )
    disable_validation: bool = field(
        default=False, metadata={"help": "disable validation"}
    )
    max_tokens_valid: Optional[int] = field(
        default=II("dataset.max_tokens"),
        metadata={
            "help": "maximum number of tokens in a validation batch"
            " (defaults to --max-tokens)"
        },
    )
    batch_size_valid: Optional[int] = field(
        default=II("dataset.batch_size"),
        metadata={
            "help": "batch size of the validation batch (defaults to --batch-size)",
            "argparse_alias": "--max-sentences-valid",
        },
    )
    max_valid_steps: Optional[int] = field(
        default=None,
        metadata={"help": "How many batches to evaluate", "argparse_alias": "--nval"},
    )
    curriculum: int = field(
        default=0, metadata={"help": "don't shuffle batches for first N epochs"}
    )
    gen_subset: str = field(
        default="test",
        metadata={"help": "data subset to generate (train, valid, test)"},
    )
    num_shards: int = field(
        default=1, metadata={"help": "shard generation over N shards"}
    )
    shard_id: int = field(
        default=0, metadata={"help": "id of the shard to generate (id < num_shards)"}
    )
    grouped_shuffling: bool = field(
        default=False,
        metadata={
            "help": "shuffle batches in groups of num_shards to enable similar sequence lengths on each GPU worker when batches are sorted by length",
        },
    )
    update_epoch_batch_itr: bool = field(
        default=II("dataset.grouped_shuffling"),
        metadata={
            "help": "if true then prevents the reuse the epoch batch iterator by setting can_reuse_epoch_itr to false, defaults to --grouped-shuffling )",
        },
    )
    update_ordered_indices_seed: bool = field(
        default=False,
        metadata={
            "help": "if true then increment seed with epoch for getting batch iterators, defautls to False.",
        },
    )
@dataclass
class OptimizationConfig(FairseqDataclass):
    """Options for the optimization loop: stopping criteria, gradient
    clipping/normalization, learning-rate schedule inputs, and update
    accumulation."""
    max_epoch: int = field(
        default=0, metadata={"help": "force stop training at specified epoch"}
    )
    max_update: int = field(
        default=0, metadata={"help": "force stop training at specified update"}
    )
    stop_time_hours: float = field(
        default=0,
        metadata={
            "help": "force stop training after specified cumulative time (if >0)"
        },
    )
    clip_norm: float = field(
        default=0.0, metadata={"help": "clip threshold of gradients"}
    )
    sentence_avg: bool = field(
        default=False,
        metadata={
            "help": "normalize gradients by the number of sentences in a batch"
            " (default is to normalize by number of tokens)"
        },
    )
    update_freq: List[int] = field(
        default_factory=lambda: [1],
        metadata={"help": "update parameters every N_i batches, when in epoch i"},
    )
    lr: List[float] = field(
        default_factory=lambda: [0.25],
        metadata={
            "help": "learning rate for the first N epochs; all epochs >N using LR_N"
            " (note: this may be interpreted differently depending on --lr-scheduler)"
        },
    )
    stop_min_lr: float = field(
        default=-1.0,
        metadata={"help": "stop training when the learning rate reaches this minimum"},
    )
    use_bmuf: bool = field(
        default=False,
        metadata={
            "help": "specify global optimizer for syncing models on different GPUs/shards"
        },
    )
    skip_remainder_batch: Optional[bool] = field(
        default=False,
        metadata={
            "help": "if set, include the last (partial) batch of each epoch in training"
            " (default is to skip it)."
        },
    )
@dataclass
class CheckpointConfig(FairseqDataclass):
    """Options for saving and restoring checkpoints: paths, what state to
    reset on load, save/keep cadence, best-metric tracking, early stopping
    patience, and sharded/async writing."""
    save_dir: str = field(
        default="checkpoints", metadata={"help": "path to save checkpoints"}
    )
    restore_file: str = field(
        default="checkpoint_last.pt",
        metadata={
            "help": "filename from which to load checkpoint "
            "(default: <save-dir>/checkpoint_last.pt"
        },
    )
    continue_once: Optional[str] = field(
        default=None,
        metadata={
            "help": "continues from this checkpoint, unless a checkpoint indicated in 'restore_file' option is present"
        },
    )
    finetune_from_model: Optional[str] = field(
        default=None,
        metadata={
            "help": "finetune from a pretrained model; note that meters and lr scheduler will be reset"
        },
    )
    reset_dataloader: bool = field(
        default=False,
        metadata={
            "help": "if set, does not reload dataloader state from the checkpoint"
        },
    )
    reset_lr_scheduler: bool = field(
        default=False,
        metadata={
            "help": "if set, does not load lr scheduler state from the checkpoint"
        },
    )
    reset_meters: bool = field(
        default=False,
        metadata={"help": "if set, does not load meters from the checkpoint"},
    )
    reset_optimizer: bool = field(
        default=False,
        metadata={"help": "if set, does not load optimizer state from the checkpoint"},
    )
    optimizer_overrides: str = field(
        default="{}",
        metadata={
            "help": "a dictionary used to override optimizer args when loading a checkpoint"
        },
    )
    save_interval: int = field(
        default=1, metadata={"help": "save a checkpoint every N epochs"}
    )
    save_interval_updates: int = field(
        default=0, metadata={"help": "save a checkpoint (and validate) every N updates"}
    )
    keep_interval_updates: int = field(
        default=-1,
        metadata={
            "help": "keep the last N checkpoints saved with --save-interval-updates"
        },
    )
    keep_interval_updates_pattern: int = field(
        default=-1,
        metadata={
            "help": "when used with --keep-interval-updates, skips deleting "
            "any checkpoints with update X where "
            "X %% keep_interval_updates_pattern == 0"
        },
    )
    keep_last_epochs: int = field(
        default=-1, metadata={"help": "keep last N epoch checkpoints"}
    )
    keep_best_checkpoints: int = field(
        default=-1, metadata={"help": "keep best N checkpoints based on scores"}
    )
    no_save: bool = field(
        default=False, metadata={"help": "don't save models or checkpoints"}
    )
    no_epoch_checkpoints: bool = field(
        default=False, metadata={"help": "only store last and best checkpoints"}
    )
    no_last_checkpoints: bool = field(
        default=False, metadata={"help": "don't store last checkpoints"}
    )
    no_save_optimizer_state: bool = field(
        default=False,
        metadata={"help": "don't save optimizer-state as part of checkpoint"},
    )
    best_checkpoint_metric: str = field(
        default="loss", metadata={"help": 'metric to use for saving "best" checkpoints'}
    )
    maximize_best_checkpoint_metric: bool = field(
        default=False,
        metadata={
            "help": 'select the largest metric value for saving "best" checkpoints'
        },
    )
    patience: int = field(
        default=-1,
        metadata={
            "help": (
                "early stop training if valid performance doesn't "
                "improve for N consecutive validation runs; note "
                "that this is influenced by --validate-interval"
            )
        },
    )
    checkpoint_suffix: str = field(
        default="", metadata={"help": "suffix to add to the checkpoint file name"}
    )
    checkpoint_shard_count: int = field(
        default=1,
        metadata={
            "help": "Number of shards containing the checkpoint - "
            "if the checkpoint is over 300GB, it is preferable "
            "to split it into shards to prevent OOM on CPU while loading "
            "the checkpoint"
        },
    )
    load_checkpoint_on_all_dp_ranks: bool = field(
        default=False,
        metadata={
            "help": "load checkpoints on all data parallel devices "
            "(default: only load on rank 0 and broadcast to other devices)"
        },
    )
    write_checkpoints_asynchronously: bool = field(
        default=False,
        metadata={
            "help": (
                "Write checkpoints asynchronously in a separate "
                "thread. NOTE: This feature is currently being tested."
            ),
            "argparse_alias": "--save-async",
        },
    )
    # mirrored from the common config via OmegaConf interpolation
    model_parallel_size: int = II("common.model_parallel_size")
@dataclass
class FairseqBMUFConfig(FairseqDataclass):
    """Options for Block-wise Model Update Filtering (BMUF): block learning
    rate/momentum, sync cadence, warmup, and Nesterov variants."""
    block_lr: float = field(
        default=1, metadata={"help": "block learning rate for bmuf"}
    )
    block_momentum: float = field(
        default=0.875, metadata={"help": "block momentum for bmuf"}
    )
    global_sync_iter: int = field(
        default=50, metadata={"help": "Iteration for syncing global model"}
    )
    warmup_iterations: int = field(
        default=500, metadata={"help": "warmup iterations for model to broadcast"}
    )
    use_nbm: bool = field(
        default=False,
        metadata={"help": "Specify whether you want to use classical BM / Nesterov BM"},
    )
    average_sync: bool = field(
        default=False,
        metadata={
            "help": "Specify whether you want to average the local momentum after each sync"
        },
    )
    # mirrored from the distributed-training config via OmegaConf interpolation
    distributed_world_size: int = II("distributed_training.distributed_world_size")
@dataclass
class GenerationConfig(FairseqDataclass):
    """Options for sequence generation: beam search and sampling parameters,
    length control and penalties, diverse decoding, LM fusion, and the
    iterative-refinement generator."""
    beam: int = field(
        default=5,
        metadata={"help": "beam size"},
    )
    nbest: int = field(
        default=1,
        metadata={"help": "number of hypotheses to output"},
    )
    max_len_a: float = field(
        default=0,
        metadata={
            "help": "generate sequences of maximum length ax + b, where x is the source length"
        },
    )
    max_len_b: int = field(
        default=200,
        metadata={
            "help": "generate sequences of maximum length ax + b, where x is the source length"
        },
    )
    min_len: int = field(
        default=1,
        metadata={"help": "minimum generation length"},
    )
    match_source_len: bool = field(
        default=False,
        metadata={"help": "generations should match the source length"},
    )
    unnormalized: bool = field(
        default=False,
        metadata={"help": "compare unnormalized hypothesis scores"},
    )
    no_early_stop: bool = field(
        default=False,
        metadata={"help": "deprecated"},
    )
    no_beamable_mm: bool = field(
        default=False,
        metadata={"help": "don't use BeamableMM in attention layers"},
    )
    lenpen: float = field(
        default=1,
        metadata={
            "help": "length penalty: <1.0 favors shorter, >1.0 favors longer sentences"
        },
    )
    unkpen: float = field(
        default=0,
        metadata={
            "help": "unknown word penalty: <0 produces more unks, >0 produces fewer"
        },
    )
    replace_unk: Optional[str] = field(
        default=None,
        metadata={
            "help": "perform unknown replacement (optionally with alignment dictionary)",
            "argparse_const": "@@ ",
        },
    )
    sacrebleu: bool = field(
        default=False,
        metadata={"help": "score with sacrebleu"},
    )
    score_reference: bool = field(
        default=False,
        metadata={"help": "just score the reference translation"},
    )
    prefix_size: int = field(
        default=0,
        metadata={"help": "initialize generation by target prefix of given length"},
    )
    no_repeat_ngram_size: int = field(
        default=0,
        metadata={
            "help": "ngram blocking such that this size ngram cannot be repeated in the generation"
        },
    )
    sampling: bool = field(
        default=False,
        metadata={"help": "sample hypotheses instead of using beam search"},
    )
    sampling_topk: int = field(
        default=-1,
        metadata={"help": "sample from top K likely next words instead of all words"},
    )
    sampling_topp: float = field(
        default=-1.0,
        metadata={
            "help": "sample from the smallest set whose cumulative probability mass exceeds p for next words"
        },
    )
    constraints: Optional[GENERATION_CONSTRAINTS_CHOICES] = field(
        default=None,
        metadata={
            "help": "enables lexically constrained decoding",
            "argparse_const": "ordered",
        },
    )
    temperature: float = field(
        default=1.0,
        metadata={"help": "temperature for generation"},
    )
    diverse_beam_groups: int = field(
        default=-1,
        metadata={"help": "number of groups for Diverse Beam Search"},
    )
    diverse_beam_strength: float = field(
        default=0.5,
        metadata={"help": "strength of diversity penalty for Diverse Beam Search"},
    )
    diversity_rate: float = field(
        default=-1.0,
        metadata={"help": "strength of diversity penalty for Diverse Siblings Search"},
    )
    print_alignment: Optional[PRINT_ALIGNMENT_CHOICES] = field(
        default=None,
        metadata={
            "help": "if set, uses attention feedback to compute and print alignment to source tokens "
            "(valid options are: hard, soft, otherwise treated as hard alignment)",
            "argparse_const": "hard",
        },
    )
    print_step: bool = field(
        default=False,
        metadata={"help": "print steps"},
    )
    lm_path: Optional[str] = field(
        default=None,
        metadata={"help": "path to lm checkpoint for lm fusion"},
    )
    lm_weight: float = field(
        default=0.0,
        metadata={"help": "weight for lm probs for lm fusion"},
    )
    # arguments for iterative refinement generator
    iter_decode_eos_penalty: float = field(
        default=0.0,
        metadata={"help": "if > 0.0, it penalized early-stopping in decoding."},
    )
    iter_decode_max_iter: int = field(
        default=10,
        metadata={"help": "maximum iterations for iterative refinement."},
    )
    iter_decode_force_max_iter: bool = field(
        default=False,
        metadata={
            "help": "if set, run exact the maximum number of iterations without early stop"
        },
    )
    iter_decode_with_beam: int = field(
        default=1,
        metadata={
            "help": "if > 1, model will generate translations varying by the lengths."
        },
    )
    iter_decode_with_external_reranker: bool = field(
        default=False,
        metadata={
            "help": "if set, the last checkpoint are assumed to be a reranker to rescore the translations"
        },
    )
    retain_iter_history: bool = field(
        default=False,
        metadata={
            "help": "if set, decoding returns the whole history of iterative refinement"
        },
    )
    retain_dropout: bool = field(
        default=False,
        metadata={"help": "Use dropout at inference time"},
    )
    # temporarily set to Any until https://github.com/facebookresearch/hydra/issues/1117 is fixed
    # retain_dropout_modules: Optional[List[str]] = field(
    retain_dropout_modules: Any = field(
        default=None,
        metadata={
            "help": "if set, only retain dropout for the specified modules; "
            "if not set, then dropout will be retained for all modules"
        },
    )
    # special decoding format for advanced decoding.
    decoding_format: Optional[GENERATION_DECODING_FORMAT_CHOICES] = field(
        default=None,
        metadata={"help": "special decoding format for advanced decoding."},
    )
    no_seed_provided: bool = field(
        default=False,
        metadata={"help": "if set, dont use seed for initializing random generators"},
    )
@dataclass
class CommonEvalConfig(FairseqDataclass):
    """Options shared by evaluation entry points: model path(s),
    post-processing, model-arg overrides, and where to write results."""
    path: Optional[str] = field(
        default=None,
        metadata={"help": "path(s) to model file(s), colon separated"},
    )
    post_process: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "post-process text by removing BPE, letter segmentation, etc. "
                "Valid options can be found in fairseq.data.utils.post_process."
            ),
            "argparse_const": "subword_nmt",
            "argparse_alias": "--remove-bpe",
        },
    )
    quiet: bool = field(default=False, metadata={"help": "only print final scores"})
    model_overrides: str = field(
        default="{}",
        metadata={
            "help": "a dictionary used to override model args at generation that were used during model training"
        },
    )
    results_path: Optional[str] = field(
        default=None, metadata={"help": "path to save eval results (optional)"}
    )
@dataclass
class EvalLMConfig(FairseqDataclass):
    """Options for language-model evaluation: per-word output, context
    window, and softmax batching to bound memory."""
    output_word_probs: bool = field(
        default=False,
        metadata={
            "help": "if set, outputs words and their predicted log probabilities to standard output"
        },
    )
    output_word_stats: bool = field(
        default=False,
        metadata={
            "help": "if set, outputs word statistics such as word count, average probability, etc"
        },
    )
    context_window: int = field(
        default=0,
        metadata={
            "help": "ensures that every evaluated token has access to a context of at least this size, if possible"
        },
    )
    softmax_batch: int = field(
        default=sys.maxsize,
        metadata={
            "help": "if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory"
        },
    )
@dataclass
class InteractiveConfig(FairseqDataclass):
    """Options for interactive generation: input buffering and source."""
    buffer_size: int = field(
        default=0,
        metadata={
            "help": "read this many sentences into a buffer before processing them"
        },
    )
    input: str = field(
        default="-",
        metadata={"help": "file to read from; use - for stdin"},
    )
@dataclass
class EMAConfig(FairseqDataclass):
    """Options for keeping an exponential moving average (EMA) shadow copy of
    the model: whether to store it, decay rate, update schedule, seeding, and
    fp32 storage."""
    store_ema: bool = field(
        # BUG FIX: the metadata key was the bare builtin ``help`` (a function
        # object) instead of the string "help", so this option's help text was
        # stored under the wrong key and never surfaced.
        default=False,
        metadata={"help": "store exponential moving average shadow model"},
    )
    ema_decay: float = field(
        default=0.9999, metadata={"help": "decay for exponential moving average model"}
    )
    ema_start_update: int = field(
        default=0, metadata={"help": "start EMA update after this many model updates"}
    )
    ema_seed_model: Optional[str] = field(
        default=None,
        metadata={
            "help": "Seed to load EMA model from. "
            "Used to load EMA model separately from the actual model."
        },
    )
    ema_update_freq: int = field(
        default=1, metadata={"help": "Do EMA update every this many model updates"}
    )
    ema_fp32: bool = field(
        default=False,
        metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"},
    )
@dataclass
class FairseqConfig(FairseqDataclass):
    """Top-level configuration aggregating every sub-config group; the
    untyped ``Any`` slots (model, task, criterion, ...) are filled in by the
    chosen component's own config."""
    common: CommonConfig = CommonConfig()
    common_eval: CommonEvalConfig = CommonEvalConfig()
    distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
    dataset: DatasetConfig = DatasetConfig()
    optimization: OptimizationConfig = OptimizationConfig()
    checkpoint: CheckpointConfig = CheckpointConfig()
    bmuf: FairseqBMUFConfig = FairseqBMUFConfig()
    generation: GenerationConfig = GenerationConfig()
    eval_lm: EvalLMConfig = EvalLMConfig()
    interactive: InteractiveConfig = InteractiveConfig()
    model: Any = MISSING
    task: Any = None
    criterion: Any = None
    optimizer: Any = None
    lr_scheduler: Any = None
    scoring: Any = None
    bpe: Any = None
    tokenizer: Any = None
    ema: EMAConfig = EMAConfig()
|
|
#! /usr/bin/env python
"""Store collections of data fields.
"""
import types
import inspect
from .scalar_data_fields import ScalarDataFields, FieldError
class Error(Exception):
    """Base class for all errors raised by this module."""
class GroupError(Error, KeyError):
    """Raised when a requested group name does not exist."""

    def __init__(self, group):
        # Remember the missing name so the message is just the group itself.
        self._group = group

    def __str__(self):
        return self._group
class ModelDataFields(object):
"""
The ModelDataFields class holds a set of ScalarDataFields that are separated
into *groups*. A typical use for this class would be to define the groups as
being locations on a grid where the values are defined. For instance, the
groups could be *node*, *cell*, *link*, and *face*.
Most of the method functions for ModelDataFields are the same as those for
the ScalarDataFields class but with the first argument being a string that
defines the group name.
Attributes
----------
groups
See Also
--------
landlab.field.ScalarDataFields : Data fields within a *group* are
stored as :class:`landlab.field.ScalarDataFields`.
landlab.grid.ModelGrid : Inherits from ModelDataFields.
Examples
--------
Create two groups of data fields defined at *node* and *cell*. Each set can
have a different number of values.
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 12)
>>> fields.new_field_location('cell', 2)
Create some new value arrays for each of the data fields.
>>> fields.ones('node')
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> fields.zeros('cell')
array([ 0., 0.])
Create new value arrays and add them to the data fields. Because the data
fields are in different groups (node and cell), they can have the same
name.
>>> fields.add_ones('node', 'topographic__elevation')
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> fields.at_node['topographic__elevation']
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
>>> fields.add_ones('cell', 'topographic__elevation')
array([ 1., 1.])
>>> fields.at_cell['topographic__elevation']
array([ 1., 1.])
Each group acts as a `dict` so, for instance, to get the variables names
in a group use the `keys` method,
>>> list(fields.at_cell.keys())
['topographic__elevation']
"""
def __init__(self, **kwds):
self._groups = dict()
super(ModelDataFields, self).__init__(**kwds)
@property
def groups(self):
"""List of group names.
Returns
-------
set
Set of quantity names.
"""
return set(self._groups.keys())
def has_group(self, group):
"""Check if a group exists.
Parameters
----------
group: str
Name of the group.
Returns
-------
boolean
True if the field contains *group*, otherwise False.
Examples
--------
Check if the field has the groups named *node* or *cell*.
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 12)
>>> fields.has_group('node')
True
>>> fields.has_group('cell')
False
"""
return group in self._groups
def has_field(self, group, field):
"""Check if a field is in a group.
Parameters
----------
group: str
Name of the group.
field: str
Name of the field.
Returns
-------
boolean
``True`` if the group contains the field, otherwise ``False``.
Examples
--------
Check if the field named ``topographic__elevation`` is contained
in a group.
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 12)
>>> _ = fields.add_ones('node', 'topographic__elevation')
>>> fields.has_field('node', 'topographic__elevation')
True
>>> fields.has_field('cell', 'topographic__elevation')
False
"""
try:
return field in self[group]
except KeyError:
return False
def keys(self, group):
"""List of field names in a group.
Returns a list of the field names as a list of strings.
Parameters
----------
group : str
Group name.
Returns
-------
list
List of field names.
Examples
--------
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 4)
>>> list(fields.keys('node'))
[]
>>> _ = fields.add_empty('node', 'topographic__elevation')
>>> list(fields.keys('node'))
['topographic__elevation']
"""
return self[group].keys()
def size(self, group):
"""Size of the arrays stored in a group.
Parameters
----------
group : str
Group name.
Returns
-------
int
Array size.
Examples
--------
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 4)
>>> fields.size('node')
4
"""
return self[group].size
def new_field_location(self, group, size):
"""Add a new quantity to a field.
Create an empty group into which new fields can be added. The new group
is created but no memory allocated yet. The dictionary of the new group
can be through a new *at_* attribute of the class instance.
Parameters
----------
group: str
Name of the new group to add to the field.
size: int
Number of elements in the new quantity.
Raises
------
ValueError
If the field already contains the group.
Examples
--------
Create a collection of fields and add two groups, *node* and *cell*,
to it.
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 12)
>>> fields.new_field_location('cell', 2)
The group names in the collection are retrieved with the *groups*
attribute as a `set`.
>>> names = list(fields.groups)
>>> names.sort()
>>> names
['cell', 'node']
Access the new (empty) groups with the *at_* attributes.
>>> fields.at_cell, fields.at_node
({}, {})
"""
if self.has_group(group):
raise ValueError('ModelDataFields already contains %s' % group)
else:
self._groups[group] = ScalarDataFields(size)
setattr(self, 'at_' + group, self[group])
def field_values(self, group, field):
"""Get values of a field.
Given a *group* and a *field*, return a reference to the associated
data array.
Parameters
----------
group: str
Name of the group.
field: str
Name of the field withing *group*.
Returns
-------
array
The values of the field.
Raises
------
GroupError
If *group* does not exits
FieldError
If *field* does not exits
Examples
--------
Create a group of fields called *node*.
>>> from landlab.field import ModelDataFields
>>> fields = ModelDataFields()
>>> fields.new_field_location('node', 4)
Add a field, initialized to ones, called *topographic__elevation*
to the *node* group. The *field_values* method returns a reference
to the field's data.
>>> _ = fields.add_ones('node', 'topographic__elevation')
>>> fields.field_values('node', 'topographic__elevation')
array([ 1., 1., 1., 1.])
Raise FieldError if *field* does not exist in *group*.
>>> fields.field_values('node', 'planet_surface__temperature') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
FieldError: planet_surface__temperature
If *group* does not exists, Raise GroupError.
>>> fields.field_values('cell', 'topographic__elevation') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
GroupError: cell
"""
return self[group][field]
def field_units(self, group, field):
"""Get units for a field.
Returns the unit string associated with the data array in *group* and
*field*.
Parameters
----------
group: str
Name of the group.
field: str
Name of the field withing *group*.
Returns
-------
str
The units of the field.
Raises
------
KeyError
If either *field* or *group* does not exist.
"""
return self[group].units[field]
def empty(self, group, **kwds):
    """Return an uninitialized array sized for *group*.

    Create a new array of the data field size, without initializing
    entries. Keyword arguments are forwarded to the equivalent numpy
    function.

    Parameters
    ----------
    group : str
        Name of the group.

    See Also
    --------
    numpy.empty : See for a description of optional keywords.
    landlab.field.ModelDataFields.ones : Equivalent method that
        initializes the data to 1.
    landlab.field.ModelDataFields.zeros : Equivalent method that
        initializes the data to 0.

    Examples
    --------
    >>> from landlab.field import ModelDataFields
    >>> field = ModelDataFields()
    >>> field.new_field_location('node', 4)
    >>> field.empty('node') # doctest: +SKIP
    array([  2.31584178e+077,  -2.68156175e+154,   9.88131292e-324,
    ...      2.78134232e-309]) # Uninitialized memory

    Note that a new field is *not* added to the collection of fields.

    >>> list(field.keys('node'))
    []
    """
    # Delegate to the group's own allocator; the group knows its size.
    target_group = self[group]
    return target_group.empty(**kwds)
def ones(self, group, **kwds):
    """Return an array of ones sized for *group*.

    Create a new array of the data field size, filled with ones.
    Keyword arguments are forwarded to the equivalent numpy function.

    Parameters
    ----------
    group : str
        Name of the group.

    See Also
    --------
    numpy.ones : See for a description of optional keywords.
    landlab.field.ModelDataFields.empty : Equivalent method that
        does not initialize the new array.
    landlab.field.ModelDataFields.zeros : Equivalent method that
        initializes the data to 0.

    Examples
    --------
    >>> from landlab.field import ModelDataFields
    >>> field = ModelDataFields()
    >>> field.new_field_location('node', 4)
    >>> field.ones('node')
    array([ 1.,  1.,  1.,  1.])
    >>> field.ones('node', dtype=int)
    array([1, 1, 1, 1])

    Note that a new field is *not* added to the collection of fields.

    >>> list(field.keys('node'))
    []
    """
    # Delegate to the group's own allocator; the group knows its size.
    target_group = self[group]
    return target_group.ones(**kwds)
def zeros(self, group, **kwds):
    """Return an array of zeros sized for *group*.

    Create a new array of the data field size, filled with zeros.
    Keyword arguments are forwarded to the equivalent numpy function.

    Parameters
    ----------
    group : str
        Name of the group.

    See Also
    --------
    numpy.zeros : See for a description of optional keywords.
    landlab.field.ModelDataFields.empty : Equivalent method that does not
        initialize the new array.
    landlab.field.ModelDataFields.ones : Equivalent
        method that initializes the data to 1.

    Examples
    --------
    >>> from landlab.field import ModelDataFields
    >>> field = ModelDataFields()
    >>> field.new_field_location('node', 4)
    >>> field.zeros('node')
    array([ 0.,  0.,  0.,  0.])

    Note that a new field is *not* added to the collection of fields.

    >>> list(field.keys('node'))
    []
    """
    # Delegate to the group's own allocator; the group knows its size.
    target_group = self[group]
    return target_group.zeros(**kwds)
def add_empty(self, group, name, **kwds):
    """Create and add an uninitialized array of values to the field.

    Create a new array of the data field size, without initializing
    entries, and add it to the field as *name*. The *units* keyword gives
    the units of the new fields as a string. Remaining keyword arguments
    are forwarded to the equivalent numpy function.

    Parameters
    ----------
    group : str
        Name of the group.
    name : str
        Name of the new field to add.
    units : str, optional
        Optionally specify the units of the field.

    Returns
    -------
    array :
        A reference to the newly-created array.

    See Also
    --------
    numpy.empty : See for a description of optional keywords.
    landlab.field.ModelDataFields.empty : Equivalent method that
        does not initialize the new array.
    landlab.field.ModelDataFields.zeros : Equivalent method that
        initializes the data to 0.
    """
    # 'units' is consumed here so it is not forwarded to numpy.empty.
    field_units = kwds.pop('units', None)
    new_array = ModelDataFields.empty(self, group, **kwds)
    return self.add_field(group, name, new_array, units=field_units)
def add_ones(self, group, name, units=None, **kwds):
    """Create and add an array of values, initialized to 1, to the field.

    Create a new array of the data field size, filled with ones, and
    add it to the field as *name*. The *units* keyword gives the units of
    the new fields as a string. Remaining keyword arguments are the same
    as that for the equivalent numpy function.

    Parameters
    ----------
    group : str
        Name of the group.
    name : str
        Name of the new field to add.
    units : str, optional
        Optionally specify the units of the field.

    Returns
    -------
    array :
        A reference to the newly-created array.

    See Also
    --------
    numpy.ones : See for a description of optional keywords.
    landlab.field.ModelDataFields.add_empty : Equivalent method that
        does not initialize the new array.
    landlab.field.ModelDataFields.add_zeros : Equivalent method that
        initializes the data to 0.

    Examples
    --------
    Add a new, named field to a collection of fields.

    >>> from landlab.field import ModelDataFields
    >>> field = ModelDataFields()
    >>> field.new_field_location('node', 4)
    >>> field.add_ones('node', 'topographic__elevation')
    array([ 1.,  1.,  1.,  1.])
    >>> list(field.keys('node'))
    ['topographic__elevation']
    >>> field['node']['topographic__elevation']
    array([ 1.,  1.,  1.,  1.])
    >>> field.at_node['topographic__elevation']
    array([ 1.,  1.,  1.,  1.])
    """
    # BUG FIX: the original re-popped 'units' from **kwds, but *units* is
    # a named parameter here and can therefore never appear in kwds -- any
    # units the caller supplied were silently replaced with None. Forward
    # the parameter value directly instead.
    return self.add_field(group, name,
                          ModelDataFields.ones(self, group, **kwds),
                          units=units)
def add_zeros(self, group, name, units=None, **kwds):
    """Create and add an array of values, initialized to 0, to the field.

    Create a new array of the data field size, filled with zeros, and
    add it to the field as *name*. The *units* keyword gives the units of
    the new fields as a string. Remaining keyword arguments are the same
    as that for the equivalent numpy function.

    Parameters
    ----------
    group : str
        Name of the group.
    name : str
        Name of the new field to add.
    units : str, optional
        Optionally specify the units of the field.

    Returns
    -------
    array :
        A reference to the newly-created array.

    See also
    --------
    numpy.zeros : See for a description of optional keywords.
    landlab.field.ScalarDataFields.add_empty : Equivalent method that
        does not initialize the new array.
    landlab.field.ScalarDataFields.add_ones : Equivalent method that
        initializes the data to 1.
    """
    # BUG FIX: the original re-popped 'units' from **kwds, but *units* is
    # a named parameter here and can therefore never appear in kwds -- any
    # units the caller supplied were silently replaced with None. Forward
    # the parameter value directly instead.
    return self.add_field(group, name,
                          ModelDataFields.zeros(self, group, **kwds),
                          units=units)
def add_field(self, group, name, value_array, **kwds):
    """add_field(group, name, value_array, units='-', copy=False, noclobber=False)
    Add an array of values to the field.

    Add an array of data values to a collection of fields and associate
    it with the key, *name*. Use the *copy* keyword to, optionally, add a
    copy of the provided array.

    Parameters
    ----------
    group : str
        Name of the group.
    name : str
        Name of the new field to add.
    value_array : numpy.array
        Array of values to add to the field.
    units : str, optional
        Optionally specify the units of the field.
    copy : boolean, optional
        If True, add a *copy* of the array to the field. Otherwise add a
        reference to the array.
    noclobber : boolean, optional
        Raise an exception if adding to an already existing field.

    Returns
    -------
    numpy.array
        The data array added to the field. Depending on the *copy*
        keyword, this could be a copy of *value_array* or *value_array*
        itself.

    Raises
    ------
    ValueError :
        If *value_array* has a size different from the field.

    Examples
    --------
    >>> import numpy as np
    >>> from landlab.field import ModelDataFields
    >>> field = ModelDataFields()
    >>> field.new_field_location('node', 4)
    >>> values = np.ones(4, dtype=int)
    >>> field.add_field('node', 'topographic__elevation', values)
    array([1, 1, 1, 1])

    A new field is added to the collection of fields. The saved value
    array is the same as the one initially created.

    >>> field.at_node['topographic__elevation'] is values
    True

    If you want to save a copy of the array, use the *copy* keyword. In
    addition, adding values to an existing field will remove the reference
    to the previously saved array. The *noclobber* keyword changes this
    behavior to raise an exception in such a case.

    >>> field.add_field('node', 'topographic__elevation', values, copy=True)
    array([1, 1, 1, 1])
    >>> field.at_node['topographic__elevation'] is values
    False
    >>> field.add_field('node', 'topographic__elevation', values, noclobber=True) # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    FieldError: topographic__elevation
    """
    # All bookkeeping (copying, clobber checks, units) lives in the
    # per-group field collection; simply forward the call.
    group_fields = self[group]
    return group_fields.add_field(name, value_array, **kwds)
def set_units(self, group, name, units):
    """Set the units for a field of values.

    Parameters
    ----------
    group : str
        Name of the group.
    name: str
        Name of the field.
    units: str
        Units for the field

    Raises
    ------
    KeyError
        If the named field does not exist.
    """
    # Group lookup may raise; the group object owns the units mapping.
    target_group = self[group]
    target_group.set_units(name, units)
def __getitem__(self, group):
try:
return self._groups[group]
except KeyError:
raise GroupError(group)
|
|
# Copyright 2013, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from oslo.config import cfg
from webob import exc
from neutron.common import constants
from neutron.db import extraroute_db
from neutron.extensions import extraroute
from neutron.extensions import l3
from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_l3_plugin as test_l3
# Module-level logger for this test module.
LOG = logging.getLogger(__name__)
# Shorthand aliases used throughout the tests below.
_uuid = uuidutils.generate_uuid
_get_path = test_api_v2._get_path
class ExtraRouteTestExtensionManager(object):
    """Extension manager that grafts the extraroute attributes onto L3."""

    def get_resources(self):
        # Extend the router resource map with the extra-route attributes
        # before handing back the standard L3 resources.
        router_attrs = extraroute.EXTENDED_ATTRIBUTES_2_0['routers']
        l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(router_attrs)
        return l3.L3.get_resources()

    def get_actions(self):
        # No additional actions beyond what L3 provides.
        return []

    def get_request_extensions(self):
        # No additional request extensions.
        return []
# This plugin class is for tests with plugin that integrates L3.
class TestExtraRouteIntPlugin(test_l3.TestL3NatIntPlugin,
                              extraroute_db.ExtraRoute_db_mixin):
    """Core plugin with integrated L3 routing plus extra-route support."""

    supported_extension_aliases = ["external-net", "router", "extraroute"]
# A fake l3 service plugin class with extra route capability for
# plugins that delegate away L3 routing functionality
class TestExtraRouteL3NatServicePlugin(test_l3.TestL3NatServicePlugin,
                                       extraroute_db.ExtraRoute_db_mixin):
    """Standalone L3 service plugin with extra-route support."""

    supported_extension_aliases = ["router", "extraroute"]
class ExtraRouteDBTestCaseBase(object):
    """Extra-route API tests shared by the integrated and separate-L3 cases.

    This mixin expects to be combined with an L3 test base class that
    supplies the ``router``/``subnet``/``port`` context managers and the
    ``_update``/``_show``/``_list_ports``/``_router_interface_action``
    helpers.
    """

    def _routes_update_prepare(self, router_id, subnet_id,
                               port_id, routes, skip_add=False):
        # Attach the port to the router (unless the caller already did),
        # apply *routes*, and return the refreshed router body.
        if not skip_add:
            self._router_interface_action('add', router_id, subnet_id, port_id)
        self._update('routers', router_id, {'router': {'routes': routes}})
        return self._show('routers', router_id)

    def _routes_update_cleanup(self, port_id, subnet_id, router_id, routes):
        # Reset the routes and detach the router interface.
        self._update('routers', router_id, {'router': {'routes': routes}})
        self._router_interface_action('remove', router_id, subnet_id, port_id)

    def test_route_update_with_one_route(self):
        routes = [{'destination': '135.207.0.0/16', 'nexthop': '10.0.1.3'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    self.assertEqual(body['router']['routes'], routes)
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])

    def test_route_clear_routes_with_None(self):
        # Updating with routes=None must clear all extra routes.
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'},
                  {'destination': '12.0.0.0/8',
                   'nexthop': '10.0.1.4'},
                  {'destination': '141.212.0.0/16',
                   'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._routes_update_prepare(r['router']['id'],
                                                None, p['port']['id'], routes)
                    body = self._update('routers', r['router']['id'],
                                        {'router': {'routes': None}})
                    self.assertEqual(body['router']['routes'], [])
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])

    def test_router_interface_in_use_by_route(self):
        # An interface whose subnet hosts a route nexthop must not be
        # removable while the route exists.
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    self.assertEqual(body['router']['routes'], routes)
                    self._router_interface_action(
                        'remove',
                        r['router']['id'],
                        None,
                        p['port']['id'],
                        expected_code=exc.HTTPConflict.code)
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])

    def test_route_update_with_multi_routes(self):
        routes = [{'destination': '135.207.0.0/16',
                   'nexthop': '10.0.1.3'},
                  {'destination': '12.0.0.0/8',
                   'nexthop': '10.0.1.4'},
                  {'destination': '141.212.0.0/16',
                   'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes)
                    # Route ordering is not guaranteed by the API.
                    self.assertEqual(sorted(body['router']['routes']),
                                     sorted(routes))
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])

    def test_routes_update_for_multiple_routers(self):
        # Two routers on the same subnet must keep independent route tables.
        routes1 = [{'destination': '135.207.0.0/16',
                    'nexthop': '10.0.0.3'}]
        routes2 = [{'destination': '12.0.0.0/8',
                    'nexthop': '10.0.0.4'}]
        with contextlib.nested(
                self.router(),
                self.router(),
                self.subnet(cidr='10.0.0.0/24')) as (r1, r2, s):
            with contextlib.nested(
                    self.port(subnet=s, no_delete=True),
                    self.port(subnet=s, no_delete=True)) as (p1, p2):
                body = self._routes_update_prepare(r1['router']['id'],
                                                   None, p1['port']['id'],
                                                   routes1)
                self.assertEqual(body['router']['routes'], routes1)

                body = self._routes_update_prepare(r2['router']['id'],
                                                   None, p2['port']['id'],
                                                   routes2)
                self.assertEqual(body['router']['routes'], routes2)

                self._routes_update_cleanup(p1['port']['id'],
                                            None, r1['router']['id'], [])
                self._routes_update_cleanup(p2['port']['id'],
                                            None, r2['router']['id'], [])

    def test_router_update_delete_routes(self):
        # Shrinking the route list must drop only the removed entry.
        routes_orig = [{'destination': '135.207.0.0/16',
                        'nexthop': '10.0.1.3'},
                       {'destination': '12.0.0.0/8',
                        'nexthop': '10.0.1.4'},
                       {'destination': '141.212.0.0/16',
                        'nexthop': '10.0.1.5'}]
        routes_left = [{'destination': '135.207.0.0/16',
                        'nexthop': '10.0.1.3'},
                       {'destination': '141.212.0.0/16',
                        'nexthop': '10.0.1.5'}]
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes_orig)
                    self.assertEqual(sorted(body['router']['routes']),
                                     sorted(routes_orig))
                    body = self._routes_update_prepare(r['router']['id'],
                                                       None, p['port']['id'],
                                                       routes_left,
                                                       skip_add=True)
                    self.assertEqual(sorted(body['router']['routes']),
                                     sorted(routes_left))
                    self._routes_update_cleanup(p['port']['id'],
                                                None, r['router']['id'], [])

    def _test_malformed_route(self, routes):
        # Updating a router with *routes* must be rejected with 400.
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes': routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_no_destination_route(self):
        self._test_malformed_route([{'nexthop': '10.0.1.6'}])

    def test_no_nexthop_route(self):
        # BUG FIX: the route must be wrapped in a list like every other
        # payload in this class; the original passed a bare dict, which
        # exercised "routes is not a list" rather than the intended
        # "route lacks a nexthop" validation.
        self._test_malformed_route([{'destination': '135.207.0.0/16'}])

    def test_none_destination(self):
        self._test_malformed_route([{'destination': None,
                                     'nexthop': '10.0.1.3'}])

    def test_none_nexthop(self):
        self._test_malformed_route([{'destination': '135.207.0.0/16',
                                     'nexthop': None}])

    def test_nexthop_is_port_ip(self):
        # A route whose nexthop is the router interface's own IP is invalid.
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    port_ip = p['port']['fixed_ips'][0]['ip_address']
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': port_ip}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_with_too_many_routes(self):
        # Four routes exceeds the max_routes=3 configured by the concrete
        # test cases' setUp.
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'},
                              {'destination': '12.0.0.0/8',
                               'nexthop': '10.0.1.4'},
                              {'destination': '141.212.0.0/16',
                               'nexthop': '10.0.1.5'},
                              {'destination': '192.168.0.0/16',
                               'nexthop': '10.0.1.6'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_with_dup_address(self):
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    routes = [{'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'},
                              {'destination': '135.207.0.0/16',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_with_invalid_ip_address(self):
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    # Octet out of range.
                    routes = [{'destination': '512.207.0.0/16',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)

                    # Prefix length out of range for IPv4.
                    routes = [{'destination': '127.207.0.0/48',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)

                    # Not an IP address at all.
                    routes = [{'destination': 'invalid_ip_address',
                               'nexthop': '10.0.1.3'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_with_invalid_nexthop_ip(self):
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    routes = [{'destination': '127.207.0.0/16',
                               'nexthop': ' 300.10.10.4'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_with_nexthop_is_outside_port_subnet(self):
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                with self.port(subnet=s, no_delete=True) as p:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])
                    routes = [{'destination': '127.207.0.0/16',
                               'nexthop': ' 20.10.10.4'}]
                    self._update('routers', r['router']['id'],
                                 {'router': {'routes':
                                             routes}},
                                 expected_code=exc.HTTPBadRequest.code)
                    # clean-up
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  None,
                                                  p['port']['id'])

    def test_router_update_on_external_port(self):
        # Routes may be set while the router only has an external gateway
        # port (no internal interfaces).
        with self.router() as r:
            with self.subnet(cidr='10.0.1.0/24') as s:
                self._set_net_external(s['subnet']['network_id'])
                self._add_external_gateway_to_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                net_id = body['router']['external_gateway_info']['network_id']
                self.assertEqual(net_id, s['subnet']['network_id'])
                port_res = self._list_ports(
                    'json',
                    200,
                    s['subnet']['network_id'],
                    tenant_id=r['router']['tenant_id'],
                    device_own=constants.DEVICE_OWNER_ROUTER_GW)
                port_list = self.deserialize('json', port_res)
                self.assertEqual(len(port_list['ports']), 1)

                routes = [{'destination': '135.207.0.0/16',
                           'nexthop': '10.0.1.3'}]
                body = self._update('routers', r['router']['id'],
                                    {'router': {'routes':
                                                routes}})

                body = self._show('routers', r['router']['id'])
                self.assertEqual(body['router']['routes'],
                                 routes)

                self._remove_external_gateway_from_router(
                    r['router']['id'],
                    s['subnet']['network_id'])
                body = self._show('routers', r['router']['id'])
                gw_info = body['router']['external_gateway_info']
                self.assertIsNone(gw_info)

    def test_router_list_with_sort(self):
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_sort('router', (router3, router2, router1),
                                      [('name', 'desc')])

    def test_router_list_with_pagination(self):
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_pagination('router',
                                            (router1, router2, router3),
                                            ('name', 'asc'), 2, 2)

    def test_router_list_with_pagination_reverse(self):
        with contextlib.nested(self.router(name='router1'),
                               self.router(name='router2'),
                               self.router(name='router3')
                               ) as (router1, router2, router3):
            self._test_list_with_pagination_reverse('router',
                                                    (router1, router2,
                                                     router3),
                                                    ('name', 'asc'), 2, 2)
class ExtraRouteDBIntTestCase(test_l3.L3NatDBIntTestCase,
                              ExtraRouteDBTestCaseBase):
    """Run the extra-route tests against the L3-integrated core plugin."""

    def setUp(self, plugin=None, ext_mgr=None):
        plugin = plugin or ('neutron.tests.unit.test_extension_extraroute.'
                            'TestExtraRouteIntPlugin')
        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('max_routes', 3)
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ExtraRouteTestExtensionManager()
        # Deliberately bypass the immediate parent's setUp so that the
        # extension manager built above is the one installed.
        super(test_l3.L3BaseForIntTests, self).setUp(plugin=plugin,
                                                     ext_mgr=ext_mgr)
        self.setup_notification_driver()
class ExtraRouteDBIntTestCaseXML(ExtraRouteDBIntTestCase):
    # Re-run the integrated-plugin extra-route tests using the XML wire
    # format instead of the default JSON.
    fmt = 'xml'
class ExtraRouteDBSepTestCase(test_l3.L3NatDBSepTestCase,
                              ExtraRouteDBTestCaseBase):
    """Run the extra-route tests with L3 as a separate service plugin."""

    def setUp(self):
        # the plugin without L3 support
        plugin = 'neutron.tests.unit.test_l3_plugin.TestNoL3NatPlugin'
        # the L3 service plugin
        l3_plugin = ('neutron.tests.unit.test_extension_extraroute.'
                     'TestExtraRouteL3NatServicePlugin')
        service_plugins = {'l3_plugin_name': l3_plugin}

        # for these tests we need to enable overlapping ips
        cfg.CONF.set_default('max_routes', 3)
        cfg.CONF.set_default('allow_overlapping_ips', True)
        ext_mgr = ExtraRouteTestExtensionManager()
        # Deliberately bypass the immediate parent's setUp so that our
        # extension manager and service plugins are the ones installed.
        super(test_l3.L3BaseForSepTests, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)
        self.setup_notification_driver()
class ExtraRouteDBSepTestCaseXML(ExtraRouteDBSepTestCase):
    # Re-run the separate-L3-plugin extra-route tests using the XML wire
    # format instead of the default JSON.
    fmt = 'xml'
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Optional response-hook type: a callable given the pipeline response, the
# deserialized body and the response headers; its return value (if any)
# replaces the operation's normal return value.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class WorkspaceSettingsOperations:
"""WorkspaceSettingsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.security.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Store the pipeline client and (de)serializers supplied by the generated service client."""
    self._client = client
    self._config = config
    self._serialize = serializer
    self._deserialize = deserializer
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.WorkspaceSettingList"]:
    """Settings about where we should store your security data and logs. If the result is empty, it
    means that no custom-workspace configuration was set.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either WorkspaceSettingList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.models.WorkspaceSettingList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceSettingList"]
    # Map auth/404/409 statuses to typed exceptions; callers may extend
    # or override via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-08-01-preview"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the first-page request from the metadata URL template, or
        # a follow-up request when the service returned a next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link is used as-is; no query parameters are appended.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, items).
        deserialized = self._deserialize('WorkspaceSettingList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page, raising typed errors for non-200 statuses.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/workspaceSettings'}  # type: ignore
async def get(
    self,
    workspace_setting_name: str,
    **kwargs: Any
) -> "_models.WorkspaceSetting":
    """Settings about where we should store your security data and logs. If the result is empty, it
    means that no custom-workspace configuration was set.

    :param workspace_setting_name: Name of the security setting.
    :type workspace_setting_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WorkspaceSetting, or the result of cls(response)
    :rtype: ~azure.mgmt.security.models.WorkspaceSetting
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceSetting"]
    # Map auth/404/409 statuses to typed exceptions; callers may extend
    # or override via the 'error_map' keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-08-01-preview"
    accept = "application/json"

    # Expand the operation's URL template with the path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        'workspaceSettingName': self._serialize.url("workspace_setting_name", workspace_setting_name, 'str'),
    }
    url = self._client.format_url(self.get.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('WorkspaceSetting', pipeline_response)
    # Honor the optional response hook.
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/workspaceSettings/{workspaceSettingName}'}  # type: ignore
async def create(
    self,
    workspace_setting_name: str,
    workspace_setting: "_models.WorkspaceSetting",
    **kwargs: Any
) -> "_models.WorkspaceSetting":
    """creating settings about where we should store your security data and logs.

    :param workspace_setting_name: Name of the security setting.
    :type workspace_setting_name: str
    :param workspace_setting: Security data setting object.
    :type workspace_setting: ~azure.mgmt.security.models.WorkspaceSetting
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WorkspaceSetting, or the result of cls(response)
    :rtype: ~azure.mgmt.security.models.WorkspaceSetting
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceSetting"]
    # Map auth/404/409 statuses to typed exceptions; callers may extend
    # or override via the 'error_map' keyword.
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-08-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the operation's URL template with the path arguments.
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        'workspaceSettingName': self._serialize.url("workspace_setting_name", workspace_setting_name, 'str'),
    }
    url = self._client.format_url(self.create.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and issue the PUT.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content_kwargs['content'] = self._serialize.body(workspace_setting, 'WorkspaceSetting')
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('WorkspaceSetting', pipeline_response)
    # Honor the optional response hook.
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
create.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/workspaceSettings/{workspaceSettingName}'}  # type: ignore
async def update(
    self,
    workspace_setting_name: str,
    workspace_setting: "_models.WorkspaceSetting",
    **kwargs: Any
) -> "_models.WorkspaceSetting":
    """Settings about where we should store your security data and logs.

    Sends a PATCH to the ARM workspaceSettings endpoint and deserializes
    the response body into a ``WorkspaceSetting`` model.

    :param workspace_setting_name: Name of the security setting.
    :type workspace_setting_name: str
    :param workspace_setting: Security data setting object.
    :type workspace_setting: ~azure.mgmt.security.models.WorkspaceSetting
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: WorkspaceSetting, or the result of cls(response)
    :rtype: ~azure.mgmt.security.models.WorkspaceSetting
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.WorkspaceSetting"]
    # Map well-known failure status codes to specific azure-core exceptions;
    # callers may extend/override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-08-01-preview"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self.update.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        'workspaceSettingName': self._serialize.url("workspace_setting_name", workspace_setting_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers.
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    # Serialize the model into the request body and send the PATCH request.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(workspace_setting, 'WorkspaceSetting')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 200 is the only documented success status for this operation.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('WorkspaceSetting', pipeline_response)
    if cls:
        # Hand the raw response and deserialized model to the caller's hook.
        return cls(pipeline_response, deserialized, {})
    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/workspaceSettings/{workspaceSettingName}'}  # type: ignore
async def delete(
    self,
    workspace_setting_name: str,
    **kwargs: Any
) -> None:
    """Deletes the custom workspace settings for this subscription; new VMs will report to the
    default workspace.

    :param workspace_setting_name: Name of the security setting.
    :type workspace_setting_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Map well-known failure status codes to specific azure-core exceptions;
    # callers may extend/override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-08-01-preview"
    accept = "application/json"
    # Construct URL from the operation's metadata template.
    url = self.delete.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
        'workspaceSettingName': self._serialize.url("workspace_setting_name", workspace_setting_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    # Construct query parameters.
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    # Construct headers (no request body for DELETE).
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    # 204 No Content is the only documented success status for this operation.
    if response.status_code not in [204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    if cls:
        # Hand the raw response to the caller's hook; there is no body.
        return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Security/workspaceSettings/{workspaceSettingName}'}  # type: ignore
|
|
# Copyright (c) 2012-2022, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
from . import AWSObject, AWSProperty, PropsDictType
from .validators import boolean, integer
from .validators.imagebuilder import (
component_platforms,
ebsinstanceblockdevicespecification_volume_type,
imagepipeline_status,
schedule_pipelineexecutionstartcondition,
)
class Component(AWSObject):
    """
    `Component <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-component.html>`__
    """

    resource_type = "AWS::ImageBuilder::Component"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "ChangeDescription": (str, False),
        "Data": (str, False),
        "Description": (str, False),
        "KmsKeyId": (str, False),
        "Name": (str, True),
        "Platform": (component_platforms, True),
        "SupportedOsVersions": ([str], False),
        "Tags": (dict, False),
        "Uri": (str, False),
        "Version": (str, True),
    }
class ComponentParameter(AWSProperty):
    """
    `ComponentParameter <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-componentparameter.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "Name": (str, True),
        "Value": ([str], True),
    }
class ComponentConfiguration(AWSProperty):
    """
    `ComponentConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-componentconfiguration.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "ComponentArn": (str, False),
        "Parameters": ([ComponentParameter], False),
    }
class EbsInstanceBlockDeviceSpecification(AWSProperty):
    """
    `EbsInstanceBlockDeviceSpecification <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-ebsinstanceblockdevicespecification.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "DeleteOnTermination": (boolean, False),
        "Encrypted": (boolean, False),
        "Iops": (integer, False),
        "KmsKeyId": (str, False),
        "SnapshotId": (str, False),
        "Throughput": (integer, False),
        "VolumeSize": (integer, False),
        "VolumeType": (ebsinstanceblockdevicespecification_volume_type, False),
    }
class InstanceBlockDeviceMapping(AWSProperty):
    """
    `InstanceBlockDeviceMapping <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-instanceblockdevicemapping.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "DeviceName": (str, False),
        "Ebs": (EbsInstanceBlockDeviceSpecification, False),
        "NoDevice": (str, False),
        "VirtualName": (str, False),
    }
class InstanceConfiguration(AWSProperty):
    """
    `InstanceConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-instanceconfiguration.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "BlockDeviceMappings": ([InstanceBlockDeviceMapping], False),
        "Image": (str, False),
    }
class TargetContainerRepository(AWSProperty):
    """
    `TargetContainerRepository <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-containerrecipe-targetcontainerrepository.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "RepositoryName": (str, False),
        "Service": (str, False),
    }
class ContainerRecipe(AWSObject):
    """
    `ContainerRecipe <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-containerrecipe.html>`__
    """

    resource_type = "AWS::ImageBuilder::ContainerRecipe"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "Components": ([ComponentConfiguration], True),
        "ContainerType": (str, True),
        "Description": (str, False),
        "DockerfileTemplateData": (str, False),
        "DockerfileTemplateUri": (str, False),
        "ImageOsVersionOverride": (str, False),
        "InstanceConfiguration": (InstanceConfiguration, False),
        "KmsKeyId": (str, False),
        "Name": (str, True),
        "ParentImage": (str, True),
        "PlatformOverride": (str, False),
        "Tags": (dict, False),
        "TargetRepository": (TargetContainerRepository, True),
        "Version": (str, True),
        "WorkingDirectory": (str, False),
    }
class LaunchTemplateConfiguration(AWSProperty):
    """
    `LaunchTemplateConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-launchtemplateconfiguration.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "AccountId": (str, False),
        "LaunchTemplateId": (str, False),
        "SetDefaultVersion": (boolean, False),
    }
class Distribution(AWSProperty):
    """
    `Distribution <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-distributionconfiguration-distribution.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "AmiDistributionConfiguration": (dict, False),
        "ContainerDistributionConfiguration": (dict, False),
        "LaunchTemplateConfigurations": ([LaunchTemplateConfiguration], False),
        "LicenseConfigurationArns": ([str], False),
        "Region": (str, True),
    }
class DistributionConfiguration(AWSObject):
    """
    `DistributionConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-distributionconfiguration.html>`__
    """

    resource_type = "AWS::ImageBuilder::DistributionConfiguration"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "Description": (str, False),
        "Distributions": ([Distribution], True),
        "Name": (str, True),
        "Tags": (dict, False),
    }
class ImageTestsConfiguration(AWSProperty):
    """
    `ImageTestsConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-imagetestsconfiguration.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "ImageTestsEnabled": (boolean, False),
        "TimeoutMinutes": (integer, False),
    }
class Image(AWSObject):
    """
    `Image <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-image.html>`__
    """

    resource_type = "AWS::ImageBuilder::Image"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "ContainerRecipeArn": (str, False),
        "DistributionConfigurationArn": (str, False),
        "EnhancedImageMetadataEnabled": (boolean, False),
        "ImageRecipeArn": (str, False),
        "ImageTestsConfiguration": (ImageTestsConfiguration, False),
        "InfrastructureConfigurationArn": (str, True),
        "Tags": (dict, False),
    }
class Schedule(AWSProperty):
    """
    `Schedule <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagepipeline-schedule.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "PipelineExecutionStartCondition": (
            schedule_pipelineexecutionstartcondition,
            False,
        ),
        "ScheduleExpression": (str, False),
    }
class ImagePipeline(AWSObject):
    """
    `ImagePipeline <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagepipeline.html>`__
    """

    resource_type = "AWS::ImageBuilder::ImagePipeline"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "ContainerRecipeArn": (str, False),
        "Description": (str, False),
        "DistributionConfigurationArn": (str, False),
        "EnhancedImageMetadataEnabled": (boolean, False),
        "ImageRecipeArn": (str, False),
        "ImageTestsConfiguration": (ImageTestsConfiguration, False),
        "InfrastructureConfigurationArn": (str, True),
        "Name": (str, True),
        "Schedule": (Schedule, False),
        "Status": (imagepipeline_status, False),
        "Tags": (dict, False),
    }
class SystemsManagerAgent(AWSProperty):
    """
    `SystemsManagerAgent <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-systemsmanageragent.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "UninstallAfterBuild": (boolean, False),
    }
class AdditionalInstanceConfiguration(AWSProperty):
    """
    `AdditionalInstanceConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-imagerecipe-additionalinstanceconfiguration.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "SystemsManagerAgent": (SystemsManagerAgent, False),
        "UserDataOverride": (str, False),
    }
class ImageRecipe(AWSObject):
    """
    `ImageRecipe <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-imagerecipe.html>`__
    """

    resource_type = "AWS::ImageBuilder::ImageRecipe"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "AdditionalInstanceConfiguration": (AdditionalInstanceConfiguration, False),
        "BlockDeviceMappings": ([InstanceBlockDeviceMapping], False),
        "Components": ([ComponentConfiguration], True),
        "Description": (str, False),
        "Name": (str, True),
        "ParentImage": (str, True),
        "Tags": (dict, False),
        "Version": (str, True),
        "WorkingDirectory": (str, False),
    }
class InstanceMetadataOptions(AWSProperty):
    """
    `InstanceMetadataOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-instancemetadataoptions.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "HttpPutResponseHopLimit": (integer, False),
        "HttpTokens": (str, False),
    }
class S3Logs(AWSProperty):
    """
    `S3Logs <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-s3logs.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "S3BucketName": (str, False),
        "S3KeyPrefix": (str, False),
    }
class Logging(AWSProperty):
    """
    `Logging <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-imagebuilder-infrastructureconfiguration-logging.html>`__
    """

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "S3Logs": (S3Logs, False),
    }
class InfrastructureConfiguration(AWSObject):
    """
    `InfrastructureConfiguration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-imagebuilder-infrastructureconfiguration.html>`__
    """

    resource_type = "AWS::ImageBuilder::InfrastructureConfiguration"

    # Maps each CloudFormation property to (expected type/validator, required flag).
    props: PropsDictType = {
        "Description": (str, False),
        "InstanceMetadataOptions": (InstanceMetadataOptions, False),
        "InstanceProfileName": (str, True),
        "InstanceTypes": ([str], False),
        "KeyPair": (str, False),
        "Logging": (Logging, False),
        "Name": (str, True),
        "ResourceTags": (dict, False),
        "SecurityGroupIds": ([str], False),
        "SnsTopicArn": (str, False),
        "SubnetId": (str, False),
        "Tags": (dict, False),
        "TerminateInstanceOnFailure": (boolean, False),
    }
|
|
print("importing modules...")
from datetime import datetime, date
import time
print("> Flask")
from flask import Flask,session, flash, url_for, redirect, render_template, abort , g, make_response, stream_with_context, request, Response
print("> Flask-sqlalchemy")
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import asc, desc
print("> Flask-login")
from flask_login import LoginManager, login_user , logout_user , current_user , login_required
print("> Flask-uploads")
from flask_uploads import UploadSet, IMAGES, configure_uploads
from werkzeug.security import generate_password_hash, check_password_hash
import ast, re, uuid
from gevent import monkey
#import eventlet
#import eventlet.wsgi
from natural import date as ndate
from natural import number, size
from flask_socketio import SocketIO
print("initializing...")
# Flask application object and configuration.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ms.db'
# NOTE(review): hard-coded secret key -- acceptable for development only;
# load from the environment before deploying.
app.config['SECRET_KEY'] = 'thesecret'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config['UPLOADED_PHOTOS_DEST'] = 'static/uploaded'
# Extensions: ORM, login session handling, websockets, photo uploads.
db = SQLAlchemy(app)
login_manager = LoginManager()
login_manager.init_app(app)
sio = SocketIO(app)
photos = UploadSet('photos', IMAGES)
configure_uploads(app, photos)
print("forming databi...")
# FLASK-SQLALCHEMY MANY-TO-MANY RELATIONSHIP TABLE (USERS <-> OPPS) #
# Association table: which users are signed up for which opps.
relationship_table = db.Table('relationship_table',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False),
    db.Column('opps_id', db.Integer, db.ForeignKey('opps.id'), nullable=False),
    db.PrimaryKeyConstraint('user_id', 'opps_id'))
# Association table: which users marked which opps as preferred.
preferred_relationship_table = db.Table('preferred_relationship_table',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False),
    db.Column('opps_id', db.Integer, db.ForeignKey('opps.id'), nullable=False),
    db.PrimaryKeyConstraint('user_id', 'opps_id'))
# Association table: which users liked which photos.
photos_likes_table = db.Table('photos_likes_table',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False),
    db.Column('photo_id', db.Integer, db.ForeignKey('photos.id'), nullable=False),
    db.PrimaryKeyConstraint('user_id', 'photo_id'))
# Association table: which permission/role tags each user carries.
users_tags_table = db.Table('users_tags_table',
    db.Column('user_id', db.Integer, db.ForeignKey('users.id'), nullable=False),
    db.Column('tag_id', db.Integer, db.ForeignKey('tags.id'), nullable=False),
    db.PrimaryKeyConstraint('user_id', 'tag_id'))
# USERS CLASS #
class Users(db.Model):
    """A registered user account (table ``users``).

    Related to events (``opps``/``preferredOpps``), uploaded and liked
    photos, feedback, and permission tags via the association tables
    defined above. Implements the Flask-Login user protocol.
    """
    id = db.Column(db.Integer, primary_key=True)  # user's internal id for app
    fname = db.Column('fname', db.String(20))  # first name
    lname = db.Column('lname', db.String(20))  # last name
    password = db.Column('password', db.String(250))  # password *hash*, never the plain text
    editor = db.Column('editor', db.Boolean)  # legacy flag: whether user can edit events
    cansignup = db.Column('cansignup', db.Boolean)  # whether user can sign up for events
    email = db.Column('email', db.String(50), unique=True, index=True)  # user's grove city college email address
    gccid = db.Column('gccid', db.Integer, unique=True)  # user's grove city college student id
    settings = db.Column('settings', db.String)  # python dict literal stored as a string (see get_setting)
    year = db.Column('year', db.Integer)  # user graduation year
    phone = db.Column('phone', db.Integer)  # 10-digit phone number as an int; 0 means "not set"
    tags = db.relationship('Tags', secondary=users_tags_table, backref='users')
    feedback = db.relationship('Feedback', backref='user')
    photos = db.relationship('Photos', backref='user')
    opps = db.relationship('Opps', secondary=relationship_table, backref='users')  # connects user to his or her opps
    preferredOpps = db.relationship('Opps', secondary=preferred_relationship_table, backref='usersPreferred')
    likes = db.relationship('Photos', secondary=photos_likes_table, backref='likers')

    # user methods
    def __init__(self, password, email, admin):
        """Initialize a freshly-registered user with sensible defaults."""
        self.set_password(password)
        self.email = email
        # NOTE(review): there is no mapped `registered_on` column on this
        # model, so this value is not persisted -- confirm intent.
        self.registered_on = datetime.utcnow()
        self.editor = admin
        self.cansignup = True
        self.fname = ''
        self.lname = ''
        self.phone = 0
        self.settings = "{'bcc':0,'pastevents':{}}"

    def formatPhone(self):
        """Return the phone number as ``(xxx) xxx-xxxx``, or '' if unset.

        BUG FIX: the previous guard ``not phone == ''`` compared
        ``str(self.phone)`` to the empty string, which is never true for an
        integer column (``str(0)`` is ``'0'``), so unset phones rendered as
        garbage like ``(0) -``. Falsy values now mean "no phone on file".
        """
        if not self.phone:
            return ''
        digits = str(self.phone)
        return '({}) {}-{}'.format(digits[0:3], digits[3:6], digits[6:10])

    def formatYear(self):
        """Return the two-digit class year, e.g. ``'23``."""
        return "'" + str(self.year)[2:4]

    def set_password(self, password):
        """Store a salted hash of ``password``."""
        self.password = generate_password_hash(password)

    def check_password(self, password):
        """True if ``password`` matches the stored hash."""
        return check_password_hash(self.password, password)

    # --- Flask-Login user protocol ---
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # The old encode("utf-8").decode("utf-8") round-trip was a no-op;
        # Flask-Login only needs a unicode string.
        return str(self.id)

    def __repr__(self):
        return '<User %r>' % (self.fname)

    def is_editor_old(self):
        # legacy check based on the boolean `editor` column
        return self.editor

    def is_editor(self):
        """True if the user can edit events (carries the SCO or WAM tag)."""
        return any(tag.sid in ('SCO', 'WAM') for tag in self.tags)

    def is_webadmin(self):
        """True if the user is a web administrator (carries the WAM tag)."""
        return any(tag.sid == 'WAM' for tag in self.tags)

    def get_setting(self, setting):
        """Return one value from the settings dict stored in ``settings``."""
        return ast.literal_eval(self.settings)[setting]

    def chg_setting(self, name, val):
        """Set one settings value and commit the change immediately."""
        sets = ast.literal_eval(self.settings)
        sets[name] = val
        self.settings = str(sets)
        db.session.commit()
# OPPS CLASS #
class Opps(db.Model):
    """A tech opportunity / event (table ``opps``)."""
    id = db.Column(db.Integer, primary_key=True)  # opp internal id for app
    name = db.Column(db.String)  # opp event name
    date = db.Column(db.DateTime)  # when opp starts (date and time)
    enddate = db.Column(db.DateTime)  # when opp ends (date and time)
    techsneeded = db.Column(db.Integer)  # number of techs needed
    desc = db.Column(db.String)  # event location
    info = db.Column(db.String)  # event extra information
    uuid = db.Column(db.String)  # random hex identifier assigned at creation
    deleted = db.Column(db.Boolean)  # soft-delete flag
    locked = db.Column('locked', db.Boolean)  # recurring events, like chapels
    feedback = db.relationship('Feedback', backref='event')

    def __init__(self, name, desc):
        """Create an event starting now with no techs assigned."""
        self.name = name
        self.date = datetime.utcnow()
        self.enddate = datetime.utcnow()
        self.desc = desc
        self.info = ""
        self.techsneeded = 0
        self.uuid = uuid.uuid4().hex
        self.deleted = False
        self.locked = False  # was the int 0; use a real boolean for the Boolean column

    def __repr__(self):
        return '<Event \'{}\'>'.format(self.name)

    def get_timeline(self):
        """Classify the event relative to now: 0 upcoming, 1 in progress, 2 over.

        BUG FIX: the original used strict ``<``/``>`` comparisons everywhere,
        so an instant exactly equal to ``date`` or ``enddate`` matched no
        branch and returned None. The boundaries now count as "in progress".
        """
        now = datetime.now()
        if now < self.date:
            return 0  # event is upcoming
        if now <= self.enddate:
            return 1  # event is in progress
        return 2  # event is over

    def get_timesecs(self):
        """Whole seconds from now until the event starts (negative if past)."""
        delta = self.date - datetime.now()
        return delta.seconds + delta.days * 24 * 3600

    def get_natural(self, dort):
        """Human-readable time: 'd' -> time until start, 't' -> event length."""
        if dort == 'd':
            return ndate.duration(self.date)
        elif dort == 't':
            return ndate.delta(self.date, self.enddate)[0]

    def is_today(self):
        # returns 1 (truthy) when the event starts today, else None
        if self.date.date() == date.today():
            return 1

    def get_shorttime(self, beginningorend):
        """Compact clock time such as ``07P`` or ``07:30A``.

        ``beginningorend``: 0 formats the start time, anything else the end.
        Minutes are omitted when the time is on the hour.
        """
        d = self.date if beginningorend == 0 else self.enddate
        if d.strftime('%M') == '00':
            timepre = d.strftime('%I')
        else:
            timepre = d.strftime('%I:%M')
        timesuf = 'A' if d.strftime('%p').upper() == 'AM' else 'P'
        return timepre + timesuf
class Feedback(db.Model):
    """Feedback a user left about an event (table ``feedback``)."""
    def __init__(self, data):
        # user/event links are populated via the backref relationships
        self.data = data
    def __repr__(self):
        return '<Feedback for {} by {}>'.format(self.event, self.user)
    id = db.Column(db.Integer, primary_key = True)
    data = db.Column(db.String)  # the feedback text
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # author
    opp_id = db.Column(db.Integer, db.ForeignKey('opps.id'))  # event it concerns
    photos = db.relationship('Photos', backref = 'feedback')  # attached photos
class Photos(db.Model):
    """An uploaded photo, optionally attached to a feedback entry (table ``photos``)."""
    def __init__(self, path):
        self.path = path
    id = db.Column(db.Integer, primary_key = True)
    path = db.Column(db.String)  # storage path of the uploaded file
    title = db.Column(db.String)
    comment = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))  # uploader
    feedback_id = db.Column(db.Integer, db.ForeignKey('feedback.id'))  # owning feedback entry, if any
# TAGS CLASS
class Tags(db.Model):
    """A role/permission tag attachable to users (table ``tags``).

    ``sid`` values such as ``SCO`` and ``WAM`` grant edit/admin rights
    (see ``Users.is_editor`` / ``Users.is_webadmin``).
    """
    id = db.Column(db.Integer, primary_key=True)
    sid = db.Column(db.String(3))  # short three-letter tag code
    title = db.Column(db.String)  # human-readable tag name
    fontawesomeicon = db.Column(db.String)  # icon class used when rendering the tag
class Staff(db.Model):
    """A staff member record (table ``staff``).

    Note that ``id`` is supplied by the caller rather than auto-generated.
    """
    def __init__(self, id, fname, lname, email):
        self.id = id
        self.fname = fname
        self.lname = lname
        self.email = email
    id = db.Column(db.Integer, primary_key=True)
    fname = db.Column('fname', db.String)  # first name
    lname = db.Column('lname', db.String)  # last name
    email = db.Column('email', db.String)
    title = db.Column('title', db.String)  # job title (not set by __init__)
    fontawesomeicon = db.Column('fontawesomeicon', db.String)  # icon class (not set by __init__)
class Meta(db.Model):
    """Site-wide metadata (table ``meta``); currently just the welcome text."""
    def __init__(self, text):
        # BUG FIX: the old code assigned ``self.text``, but there is no
        # mapped ``text`` column (only ``welcome_text``), so the value was
        # never persisted and ``welcome_text`` stayed NULL.
        self.welcome_text = text
    id = db.Column(db.Integer, primary_key=True)
    welcome_text = db.Column(db.String)  # welcome message shown in the UI
|
|
## ###
# IP: GHIDRA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# The available pcode tests are registered here as PCodeTest instances;
# the 'name' entry identifies each test.
# Each PCodeTest(...) call registers one test configuration. The keys seen
# here describe how to build the test binary (toolchain, ccflags), how to
# run it (qemu_command), which Ghidra language to import it as
# (language_id), and which C features to exercise (has_float, has_double,
# has_longlong, small_build).

# --- ARM (32-bit) variants ---
PCodeTest({
    'name': 'ARM',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-arm',
    'toolchain': 'ARM/arm-eabi',
    'language_id': 'ARM:LE:32:v7',
    'ccflags': '-L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM_BE',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-armbe',
    'toolchain': 'ARM/armbe-eabi',
    'language_id': 'ARM:BE:32:v7',
    'ccflags': '-mbig-endian -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'proc_test': 'arm',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
})
PCodeTest({
    'name': 'ARM2',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mcpu=arm2 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARMv5',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-arm',
    'toolchain': 'ARM/arm-eabi',
    'language_id': 'ARM:LE:32:v5',
    'ccflags': '-march=armv5 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
})
PCodeTest({
    'name': 'ARM7',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mcpu=arm7 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM8',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mcpu=arm8 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM9',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mcpu=arm9 -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM10e',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-arm',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mcpu=arm10e -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM_thumb',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-arm -cpu cortex-a8',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mthumb -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s/thumb -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})
PCodeTest({
    'name': 'ARM_BE_thumb',
    'build_all': 1,
    'toolchain': 'ARM/armbe-eabi',
    'ccflags': '-mthumb -mbig-endian -L %(toolchain_dir)s/lib/gcc/armbe-eabi/%(gcc_version)s/thumb -lgcc',
    'language_id': 'ARM:BE:32:v7',
    'proc_test': 'arm',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
})
PCodeTest({
    'name': 'ARM_cortex',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-arm -cpu cortex-a8',
    'toolchain': 'ARM/arm-eabi',
    'ccflags': '-mthumb -mcpu=cortex-a8 -mfloat-abi=softfp -L %(toolchain_dir)s/lib/gcc/arm-eabi/%(gcc_version)s/thumb -lgcc',
    'language_id': 'ARM:LE:32:v7',
    'proc_test': 'arm',
})

# --- AArch64 variants ---
PCodeTest({
    'name': 'AARCH64',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-aarch64',
    'toolchain': 'ARM/aarch64-elf',
    'language_id': 'AARCH64:LE:64:v8A',
})
PCodeTest({
    'name': 'AARCH64_ILP32',
    'toolchain': 'ARM/aarch64-elf',
    'ccflags': '-mabi=ilp32',
    'language_id': 'AARCH64:LE:64:v8A',
})
PCodeTest({
    'name': 'AARCH64_BE',
    'build_all': 1,
    'toolchain': 'ARM/aarch64_be-elf',
    'language_id': 'AARCH64:BE:64:v8A',
})
PCodeTest({
    'name': 'AARCH64_BE_ILP32',
    'toolchain': 'ARM/aarch64_be-elf',
    'ccflags': '-mabi=ilp32',
    'language_id': 'AARCH64:BE:64:v8A',
})

# --- Atmel AVR variants ---
PCodeTest({
    'name': 'AVR',
    'build_all': 1,
    'toolchain': 'AVR/avr-elf',
    'ccflags': '-mmcu=avr6 -lgcc',
    'language_id': 'avr32:BE:32:default',
    'processor': 'Atmel',
    'has_float': 0,
    'has_double': 0,
})
PCodeTest({
    'name': 'AVR8_31',
    'toolchain': 'AVR/avr-elf',
    'ccflags': '-mmcu=avr31 -lgcc',
    'language_id': 'avr8:LE:16:default',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
    'small_build': 1,
})
PCodeTest({
    'name': 'AVR8_51',
    'toolchain': 'AVR/avr-elf',
    'ccflags': '-mmcu=avr51 -lgcc',
    'language_id': 'avr8:LE:16:extended',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
    'small_build': 1,
})
PCodeTest({
    'name': 'AVR8_6',
    'toolchain': 'AVR/avr-elf',
    'ccflags': '-mmcu=avr6 -lgcc',
    'language_id': 'avr8:LE:16:atmega256',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
    'small_build': 1,
})

# --- Other architectures ---
PCodeTest({
    'name': 'HCS12',
    'toolchain': 'HCS12/m6812',
    'language_id': 'HCS12:BE:16:default',
})
PCodeTest({
    'name': 'HPPA1.1',
    'build_all': 1,
    'toolchain': 'HPPA/hppa-linux',
    'ccflags': '-march=1.1 -static -mlong-calls -L %(toolchain_dir)s/lib/gcc/hppa-linux/%(gcc_version)s -lgcc',
    'language_id': 'pa-risc:BE:32:default',
    'processor': 'PA-RISC',
    'architecture_test': 'PARISC',
})
# Note that libgcc.a was built for m68020 which has a different function calling convention from pre-68020
PCodeTest({
    'name': 'm68000',
    'build_all': 1,
    'build_exe': 0,
    'qemu_command': 'qemu-m68k', # qemu: fatal: Illegal instruction
    'toolchain': 'm68k/m68k-elf',
    'ccflags': '-mcpu=68020 -m68020 -L %(toolchain_dir)s/lib/gcc/m68k-elf/%(gcc_version)s -lgcc',
    'language_id': '68000:BE:32:default',
})

# --- MIPS family ---
PCodeTest({
    'name': 'MIPS',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-mips',
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s -lgcc -mno-gpopt',
    'language_id': 'MIPS:BE:32:default',
})
PCodeTest({
    'name': 'MIPSEL',
    'build_all': 1,
    'build_exe': 1,
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/el -lgcc -mno-gpopt -mel',
    'language_id': 'MIPS:LE:32:default',
})
PCodeTest({
    'name': 'MIPS16',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-mips',
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-mno-gpopt',
    'language_id': 'MIPS:BE:32:default',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
})
PCodeTest({
    'name': 'MIPS16MIX',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-mips',
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-mno-gpopt',
    'language_id': 'MIPS:BE:32:default',
    'has_float': 0,
    'has_double': 0,
    'has_longlong': 0,
})
PCodeTest({
    'name': 'MIPSMIC',
    'build_all': 1,
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-mmicromips -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/micromips -lgcc',
    'language_id': 'MIPS:BE:32:micro',
    'architecture_test': 'MIPSMICRO',
})
PCodeTest({
    'name': 'MIPSMICMIX',
    'build_all': 1,
    'toolchain': 'MIPS/mips-elf',
    'ccflags': '-minterlink-compressed -D BODYNEW=micromips -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s/micromips -lgcc',
    'language_id': 'MIPS:BE:32:micro',
    'architecture_test': 'MIPSMICROMIX',
})
PCodeTest({
    'name': 'MIPSMIC64',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips64r5 -mmicromips -minterlink-compressed',
    'language_id': 'MIPS:BE:64:micro',
})
PCodeTest({
    'name': 'MIPS64_32addr',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips64r2',
    'language_id': 'MIPS:BE:64:64-32addr',
})
PCodeTest({
    'name': 'MIPS64_64addr',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips64r2 -mabi=64',
    'language_id': 'MIPS:BE:64:64-64addr',
})
PCodeTest({
    'name': 'MIPS64_64addrLE',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips64r2 -mabi=64 -EL',
    'language_id': 'MIPS:LE:64:64-64addr',
})
PCodeTest({
    'name': 'MIPSR6',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips32r6 -L %(toolchain_dir)s/lib/gcc/mips-mti-elf/%(gcc_version)s -lgcc',
    'language_id': 'MIPS:BE:32:R6',
})
PCodeTest({
    'name': 'MIPS64R6',
    'build_all': 1,
    'toolchain': 'MIPS/mipsr6-elf',
    'ccflags': '-mips64r6 -mabi=64',
    'language_id': 'MIPS:BE:64:R6',
})

# --- NDS32 variants ---
PCodeTest({
    'name': 'NDS32BE',
    'build_all': 1,
    'toolchain': 'NDS32/nds32be-elf',
    'ccflags': '-L %(toolchain_dir)s/lib/gcc/nds32be-linux-elf/%(gcc_version)s -lgcc',
    'language_id': 'NDS32:BE:32:default',
})
PCodeTest({
    'name': 'NDS32LE',
    'build_all': 1,
    'toolchain': 'NDS32/nds32le-elf',
    'ccflags': '-L %(toolchain_dir)s/lib/gcc/nds32le-linux-elf/%(gcc_version)s -lgcc',
    'language_id': 'NDS32:LE:32:default',
})

# --- PowerPC family ---
PCodeTest({
    'name': 'power6',
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=G5 -m32 -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:default',
})
PCodeTest({
    'name': 'powerpc32',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-ppc64abi32',
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=powerpc -m32 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:default',
    'architecture_test': 'PPC',
})
PCodeTest({
    'name': 'powerpc64',
    'build_all': 1,
    'build_exe': 1,
    'qemu_command': 'qemu-ppc64',
    'toolchain': 'PPC/powerpc64-linux',
    'ccflags': '-mabi=elfv1 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:64:default',
    'architecture_test': 'PPC64',
})
PCodeTest({
    'name': 'powerpc64v2',
    'toolchain': 'PPC/powerpc64-linux',
    'ccflags': '-mabi=elfv2 -maltivec -mno-relocatable -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:64:default',
})
PCodeTest({
    'name': 'ppcA2',
    'build_all': 1,
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=a2 -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:A2',
    'architecture_test': 'PPCA2',
})
PCodeTest({
    'name': 'ppcA2Alt',
    'build_all': 1,
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=a2 -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:A2ALT',
    'architecture_test': 'PPCA2Alt',
})
PCodeTest({
    'name': 'ppcP8Alt',
    'build_all': 1,
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=power8 -mvsx -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:A2ALT',
    'architecture_test': 'PPCP8Alt',
})
PCodeTest({
    'name': 'ppcP9Alt',
    'build_all': 1,
    'toolchain': 'PPC/powerpc-elf',
    'ccflags': '-mcpu=power9 -mvsx -maltivec -L %(toolchain_dir)s/lib/gcc/powerpc-elf/%(gcc_version)s -lgcc',
    'language_id': 'PowerPC:BE:32:A2ALT',
    'architecture_test': 'PPCP9Alt',
})
PCodeTest({
'name': 'msp430x',
'build_all': 1,
'toolchain': 'TI/msp430-elf',
'ccflags': '-g -mmcu=msp430x -mlarge -mhwmult=none -fno-builtin -Wl,-T,msp430x.ld -L %(toolchain_dir)s/lib/gcc/msp430-elf/%(gcc_version)s/large/ -lgcc -lmul_none',
'language_id': 'TI_MSP430X:LE:32:default',
'processor': 'TI',
'architecture_test': 'MSP430X',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
'skip_files': ['PointerManipulation.test', 'misc.test'],
})
PCodeTest({
'name': 'SH4',
'build_all': 1,
'build_exe': 0,
'qemu_command': 'qemu-sh4eb', # qemu gets "Invalid argument" error
'toolchain': 'SuperH4/sh4-elf',
'ccflags': '-mb -mrenesas -m4 -L %(toolchain_dir)s/lib/gcc/sh4-elf/%(gcc_version)s -lgcc',
'language_id': 'SuperH4:BE:32:default',
'architecture_test': 'SuperH4_BE',
})
PCodeTest({
'name': 'SH4_LE',
'build_all': 1,
'toolchain': 'SuperH4/sh4le-elf',
'ccflags': '-ml -mrenesas -m4 -L %(toolchain_dir)s/lib/gcc/sh4le-elf/%(gcc_version)s -lgcc',
'language_id': 'SuperH4:LE:32:default',
'architecture_test': 'SuperH4',
})
PCodeTest({
'name': 'sparcV9_32',
'build_all': 1,
'build_exe': 1,
'can_run': 0, # instruction error causes infinite loop
'qemu_command': 'qemu-sparc32plus',
'toolchain': 'SparcV9/sparc-elf',
'ccflags': '-mcpu=v9 -m32',
'language_id': 'sparc:BE:32:default',
'processor': 'Sparc',
'architecture_test': 'SparcV9_m32',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
# to suppress usage of application registers g2 and g3, add -mno-app-regs here
PCodeTest({
'name': 'sparcV9_64',
'build_all': 1,
'toolchain': 'SparcV9/sparc64-elf',
'ccflags': '-mcpu=v9 -m64',
'language_id': 'sparc:BE:64:default',
})
PCodeTest({
'name': 'pentium',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-i386',
'toolchain': 'x86/i386-elf-linux',
'ccflags': '-march=pentium -m32 -L %(toolchain_dir)s/lib/gcc/i386-elf-linux/%(gcc_version)s -lgcc',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
'architecture_test': 'X86m32',
'has_vector': 1,
})
PCodeTest({
'name': 'i386_CLANG',
'toolchain': 'LLVM/llvm',
'toolchain_type': 'llvm',
'ccflags': '--target=i386',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
})
PCodeTest({
'name': 'i686_CLANG',
'toolchain': 'LLVM/llvm',
'toolchain_type': 'llvm',
'ccflags': '--target=i686',
'objdump_option': '-M intel',
'language_id': 'x86:LE:32:default',
})
PCodeTest({
'name': 'AVX2',
'build_all': 1,
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=core-avx2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
'has_vector': 1,
})
PCodeTest({
'name': 'AVXi',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=core-avx-i',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
PCodeTest({
'name': 'bdver2',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=bdver2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
# Intel Core 2 variant. The original entry used '-march=bdver2' (an exact
# duplicate of the 'bdver2' entry directly above — a copy-paste slip); the
# ccflags now select the micro-architecture the test is named after.
PCodeTest({
'name': 'core2',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-march=core2',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
# 64-bit x86 runnable under qemu, plus an FMA variant.
PCodeTest({
'name': 'x86_m64',
'build_all': 1,
'build_exe': 1,
'qemu_command': 'qemu-x86_64',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-static -m64',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
'architecture_test': 'X86m64',
})
# NOTE(review): named 'fma4' but the ccflags enable -mfma (FMA3, Intel-style),
# not -mfma4 (AMD-style) — confirm which instruction set is actually intended.
PCodeTest({
'name': 'x86_fma4',
'toolchain': 'x86/x86_64-elf',
'ccflags': '-mfma',
'objdump_option': '-M intel',
'language_id': 'x86:LE:64:default',
})
# the PIC30 toolchain is distributed by mchp. So when making the
# toolchain, specify toolchain_type to be mchp. But it is based on
# gcc, and after it's installed, it behaves exactly like gcc. So, when
# making a pcode test, specify toolchain_type to be gcc.
PCodeTest({
'name': 'PIC30',
'build_all': 1,
'toolchain': 'PIC/xc16',
'compile_exe': 'bin/xc16-gcc',
'objdump_exe': 'bin/xc16-objdump',
'readelf_exe': 'bin/xc16-readelf',
'nm_exe': 'bin/xc16-nm',
'ccflags': '-mcpu=30F2011 -DINT4_IS_LONG -Xlinker --defsym -Xlinker _main=0x0 -L %(toolchain_dir)s/lib -lpic30 -lc -lm',
'language_id': 'dsPIC30F:LE:24:default',
'skip_files': ['misc.test'],
'variants': {'O0': '-O0'},
'small_build': 1,
})
PCodeTest({
'name': 'PIC16',
'toolchain': 'PIC/xc8',
'compile_exe': 'bin/xc8',
'objdump_exe': 'bin/dump',
'ccflags': '-chip=16C57 -DINT4_IS_LONG -DSTATIC_MAIN -L %(toolchain_dir)s/lib -lpic30 -lc -lm',
'language_id': 'dsPIC16F:LE:24:default',
'small_build': 1,
})
# SDCC-based 8/16-bit targets (HCS08, Z80); limited C type support.
PCodeTest({
'name': 'HCS08',
'toolchain': 'SDCC/s08',
'toolchain_type': 'sdcc',
'compile_exe': 'bin/sdcc',
'ccflags': '--out-fmt-elf --std-sdcc11',
'language_id': 'HCS08:BE:16:MC9S08GB60',
'variants': {'OX': ''},
'has_double': 0,
'has_longlong': 0,
})
PCodeTest({
'name': 'Z80',
'toolchain': 'SDCC/z80',
'toolchain_type': 'sdcc',
'compile_exe': 'bin/sdcc',
'ccflags': '-mz80 -V --verbose --std-sdcc11 -DINT4_IS_LONG',
'language_id': 'z80:LE:16:default',
'variants': {'OX':''},
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
'small_build': 1,
# Currently the 'omitted' option is only supported by the SDCC toolchain!
# Causes a bit of funk with tpp.py still including references to these
# tests in cunit_main.c but the compiler accepts it with a warning.
'skip_files': ['PointerManipulation.test', 'StructUnionManipulation.test'],
# These tests are omitted because the SDCC compiler doesn't properly handle
# structs in functions and requires a more strict format than ANSI C requires.
})
# NOTE(review): architecture_test 'CRC16C' looks like a transposition of
# 'CR16C' — confirm against the architecture test suite naming.
PCodeTest({
'name': 'CR16C',
'build_all': 1,
'toolchain': 'NS/cr16-elf',
'language_id': 'CR16C:LE:16:default',
'processor': 'CR16',
'architecture_test': 'CRC16C',
'ccflags': '-lgcc',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
# NOTE(review): the toolchain is riscv32-elf but the language id is big-endian
# ('RISCV:BE:32:default'); RISC-V is normally little-endian — confirm.
PCodeTest({
'name': 'RISCV',
'build_all': 1,
'toolchain': 'RISCV/riscv32-elf',
'language_id': 'RISCV:BE:32:default',
'architecture_test': 'RISCV',
'ccflags': '-lgcc',
'has_float': 0,
'has_double': 0,
'has_longlong': 0,
})
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import special_math_ops
from tensorflow.python.util.tf_export import tf_export
# Linear algebra ops.
# Thin aliases re-exporting existing kernels under the tf.linalg namespace.
# Ops coming from array_ops/linalg_ops/math_ops are already exported there;
# ops taken directly from gen_linalg_ops (generated op wrappers) need an
# explicit tf_export(...) call, applied immediately after the alias.
band_part = array_ops.matrix_band_part
cholesky = linalg_ops.cholesky
cholesky_solve = linalg_ops.cholesky_solve
det = linalg_ops.matrix_determinant
slogdet = gen_linalg_ops.log_matrix_determinant
tf_export('linalg.slogdet')(slogdet)
diag = array_ops.matrix_diag
diag_part = array_ops.matrix_diag_part
eigh = linalg_ops.self_adjoint_eig
eigvalsh = linalg_ops.self_adjoint_eigvals
einsum = special_math_ops.einsum
eye = linalg_ops.eye
inv = linalg_ops.matrix_inverse
logm = gen_linalg_ops.matrix_logarithm
# NOTE(review): `lu` is a gen op alias but, unlike slogdet/logm, gets no
# tf_export here — confirm whether it is exported elsewhere or intentionally
# module-private.
lu = gen_linalg_ops.lu
tf_export('linalg.logm')(logm)
lstsq = linalg_ops.matrix_solve_ls
norm = linalg_ops.norm
qr = linalg_ops.qr
set_diag = array_ops.matrix_set_diag
solve = linalg_ops.matrix_solve
sqrtm = linalg_ops.matrix_square_root
svd = linalg_ops.svd
tensordot = math_ops.tensordot
trace = math_ops.trace
transpose = array_ops.matrix_transpose
triangular_solve = linalg_ops.matrix_triangular_solve
@tf_export('linalg.logdet')
def logdet(matrix, name=None):
  """Computes log of the determinant of a hermitian positive definite matrix.

  ```python
  # Compute the determinant of a matrix while reducing the chance of over- or
  # underflow:
  A = ...  # shape 10 x 10
  det = tf.exp(tf.logdet(A))  # scalar
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op`. Defaults to `logdet`.

  Returns:
    The natural log of the determinant of `matrix`.

  @compatibility(numpy)
  Equivalent to numpy.linalg.slogdet, although no sign is returned since only
  hermitian positive definite matrices are supported.
  @end_compatibility
  """
  # For hermitian positive definite A with Cholesky factor C (A = C C^H),
  # log det(A) = 2 * sum(log(real(diag(C)))).
  with ops.name_scope(name, 'logdet', [matrix]):
    chol_factor = gen_linalg_ops.cholesky(matrix)
    chol_diag = array_ops.matrix_diag_part(chol_factor)
    log_diag = math_ops.log(math_ops.real(chol_diag))
    return 2.0 * math_ops.reduce_sum(log_diag, axis=[-1])
@tf_export('linalg.adjoint')
def adjoint(matrix, name=None):
  """Transposes the last two dimensions of and conjugates tensor `matrix`.

  For example:

  ```python
  x = tf.constant([[1 + 1j, 2 + 2j, 3 + 3j],
                   [4 + 4j, 5 + 5j, 6 + 6j]])
  tf.linalg.adjoint(x)  # [[1 - 1j, 4 - 4j],
                        #  [2 - 2j, 5 - 5j],
                        #  [3 - 3j, 6 - 6j]]
  ```

  Args:
    matrix: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    The adjoint (a.k.a. Hermitian transpose a.k.a. conjugate transpose) of
    matrix.
  """
  with ops.name_scope(name, 'adjoint', [matrix]):
    tensor = ops.convert_to_tensor(matrix, name='matrix')
    # matrix_transpose swaps the trailing two axes; conjugate=True makes it
    # the Hermitian transpose in a single op.
    return array_ops.matrix_transpose(tensor, conjugate=True)
# This section is ported nearly verbatim from Eigen's implementation:
# https://eigen.tuxfamily.org/dox/unsupported/MatrixExponential_8h_source.html
def _matrix_exp_pade3(matrix):
  """3rd-order Pade approximant for matrix exponential."""
  # Pade coefficients for the degree-3 approximant, cast to the input dtype.
  coeffs = [constant_op.constant(c, matrix.dtype) for c in (120.0, 60.0, 12.0)]
  dims = array_ops.shape(matrix)
  ident = linalg_ops.eye(dims[-2], batch_shape=dims[:-2], dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  # U = M * (M^2 + b1*I),  V = b2*M^2 + b0*I
  matrix_u = math_ops.matmul(matrix, m2 + coeffs[1] * ident)
  matrix_v = coeffs[2] * m2 + coeffs[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade5(matrix):
  """5th-order Pade approximant for matrix exponential."""
  coeffs = [
      constant_op.constant(c, matrix.dtype)
      for c in (30240.0, 15120.0, 3360.0, 420.0, 30.0)
  ]
  dims = array_ops.shape(matrix)
  ident = linalg_ops.eye(dims[-2], batch_shape=dims[:-2], dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  # U = M * (M^4 + b3*M^2 + b1*I),  V = b4*M^4 + b2*M^2 + b0*I
  matrix_u = math_ops.matmul(matrix, m4 + coeffs[3] * m2 + coeffs[1] * ident)
  matrix_v = coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade7(matrix):
  """7th-order Pade approximant for matrix exponential."""
  coeffs = [
      constant_op.constant(c, matrix.dtype)
      for c in (17297280.0, 8648640.0, 1995840.0, 277200.0, 25200.0, 1512.0,
                56.0)
  ]
  dims = array_ops.shape(matrix)
  ident = linalg_ops.eye(dims[-2], batch_shape=dims[:-2], dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  # U = M * (M^6 + b5*M^4 + b3*M^2 + b1*I)
  matrix_u = math_ops.matmul(
      matrix, m6 + coeffs[5] * m4 + coeffs[3] * m2 + coeffs[1] * ident)
  # V = b6*M^6 + b4*M^4 + b2*M^2 + b0*I
  matrix_v = coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 + coeffs[0] * ident
  return matrix_u, matrix_v
def _matrix_exp_pade9(matrix):
  """9th-order Pade approximant for matrix exponential."""
  coeffs = [
      constant_op.constant(c, matrix.dtype)
      for c in (17643225600.0, 8821612800.0, 2075673600.0, 302702400.0,
                30270240.0, 2162160.0, 110880.0, 3960.0, 90.0)
  ]
  dims = array_ops.shape(matrix)
  ident = linalg_ops.eye(dims[-2], batch_shape=dims[:-2], dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  m8 = math_ops.matmul(m6, m2)
  # U = M * (M^8 + b7*M^6 + b5*M^4 + b3*M^2 + b1*I)
  poly_u = (
      m8 + coeffs[7] * m6 + coeffs[5] * m4 + coeffs[3] * m2 +
      coeffs[1] * ident)
  matrix_u = math_ops.matmul(matrix, poly_u)
  # V = b8*M^8 + b6*M^6 + b4*M^4 + b2*M^2 + b0*I
  matrix_v = (
      coeffs[8] * m8 + coeffs[6] * m6 + coeffs[4] * m4 + coeffs[2] * m2 +
      coeffs[0] * ident)
  return matrix_u, matrix_v
def _matrix_exp_pade13(matrix):
  """13th-order Pade approximant for matrix exponential."""
  coeffs = [
      constant_op.constant(c, matrix.dtype) for c in (
          64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
          1187353796428800.0, 129060195264000.0, 10559470521600.0,
          670442572800.0, 33522128640.0, 1323241920.0, 40840800.0, 960960.0,
          16380.0, 182.0)
  ]
  dims = array_ops.shape(matrix)
  ident = linalg_ops.eye(dims[-2], batch_shape=dims[:-2], dtype=matrix.dtype)
  m2 = math_ops.matmul(matrix, matrix)
  m4 = math_ops.matmul(m2, m2)
  m6 = math_ops.matmul(m4, m2)
  # U = M * (M^6*(M^6 + b11*M^4 + b9*M^2) + b7*M^6 + b5*M^4 + b3*M^2 + b1*I)
  inner_u = (
      math_ops.matmul(m6, m6 + coeffs[11] * m4 + coeffs[9] * m2) +
      coeffs[7] * m6 + coeffs[5] * m4 + coeffs[3] * m2 + coeffs[1] * ident)
  matrix_u = math_ops.matmul(matrix, inner_u)
  # V = M^6*(b12*M^6 + b10*M^4 + b8*M^2) + b6*M^6 + b4*M^4 + b2*M^2 + b0*I
  inner_v = coeffs[12] * m6 + coeffs[10] * m4 + coeffs[8] * m2
  matrix_v = (
      math_ops.matmul(m6, inner_v) + coeffs[6] * m6 + coeffs[4] * m4 +
      coeffs[2] * m2 + coeffs[0] * ident)
  return matrix_u, matrix_v
@tf_export('linalg.expm')
def matrix_exponential(input, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the matrix exponential of one or more square matrices.

  exp(A) = \sum_{n=0}^\infty A^n/n!

  The exponential is computed using a combination of the scaling and squaring
  method and the Pade approximation. Details can be found in:
  Nicholas J. Higham, "The scaling and squaring method for the matrix
  exponential revisited," SIAM J. Matrix Anal. Applic., 26:1179-1193, 2005.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the exponential for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be `float16`, `float32`, `float64`, `complex64`,
      or `complex128` with shape `[..., M, M]`.
    name: A name to give this `Op` (optional).

  Returns:
    the matrix exponential of the input.

  Raises:
    ValueError: An unsupported type is provided as input.

  @compatibility(scipy)
  Equivalent to scipy.linalg.expm
  @end_compatibility
  """
  with ops.name_scope(name, 'matrix_exponential', [input]):
    matrix = ops.convert_to_tensor(input, name='input')
    # exp of an empty (0x0) matrix is itself.
    if matrix.shape[-2:] == [0, 0]:
      return matrix
    # Remember the (possibly dynamic) batch shape so the result can be
    # reshaped back at the end.
    batch_shape = matrix.shape[:-2]
    if not batch_shape.is_fully_defined():
      batch_shape = array_ops.shape(matrix)[:-2]
    # reshaping the batch makes the where statements work better
    matrix = array_ops.reshape(
        matrix, array_ops.concat(([-1], array_ops.shape(matrix)[-2:]), axis=0))
    # Induced 1-norm per batch member: max over columns of the column sums of
    # |A| (reduce_sum over the row axis, reduce_max over the column axis).
    l1_norm = math_ops.reduce_max(
        math_ops.reduce_sum(math_ops.abs(matrix),
                            axis=array_ops.size(array_ops.shape(matrix)) - 2),
        axis=-1)
    const = lambda x: constant_op.constant(x, l1_norm.dtype)
    # Builds a nested where() that selects, per batch member, the lowest-order
    # Pade result whose norm threshold `vals[i]` the member stays under.
    def _nest_where(vals, cases):
      assert len(vals) == len(cases) - 1
      if len(vals) == 1:
        return array_ops.where(
            math_ops.less(l1_norm, const(vals[0])), cases[0], cases[1])
      else:
        return array_ops.where(
            math_ops.less(l1_norm, const(vals[0])), cases[0],
            _nest_where(vals[1:], cases[1:]))
    if matrix.dtype in [dtypes.float16, dtypes.float32, dtypes.complex64]:
      # Number of halvings (scaling steps) needed to bring the norm below
      # maxnorm; zero when the norm is already small enough.
      maxnorm = const(3.925724783138660)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      # Only the highest-order approximant gets the scaled matrix: whenever
      # squarings > 0 the norm exceeds maxnorm, which exceeds every entry of
      # `conds`, so _nest_where selects exactly that case.
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(
          matrix / math_ops.pow(
              constant_op.constant(2.0, dtype=matrix.dtype),
              math_ops.cast(squarings, matrix.dtype))[...,
                                                      array_ops.newaxis,
                                                      array_ops.newaxis])
      conds = (4.258730016922831e-001, 1.880152677804762e+000)
      u = _nest_where(conds, (u3, u5, u7))
      v = _nest_where(conds, (v3, v5, v7))
    elif matrix.dtype in [dtypes.float64, dtypes.complex128]:
      maxnorm = const(5.371920351148152)
      squarings = math_ops.maximum(
          math_ops.floor(
              math_ops.log(l1_norm / maxnorm) / math_ops.log(const(2.0))), 0)
      u3, v3 = _matrix_exp_pade3(matrix)
      u5, v5 = _matrix_exp_pade5(matrix)
      u7, v7 = _matrix_exp_pade7(matrix)
      u9, v9 = _matrix_exp_pade9(matrix)
      u13, v13 = _matrix_exp_pade13(
          matrix / math_ops.pow(
              constant_op.constant(2.0, dtype=matrix.dtype),
              math_ops.cast(squarings, matrix.dtype))[...,
                                                      array_ops.newaxis,
                                                      array_ops.newaxis])
      conds = (1.495585217958292e-002,
               2.539398330063230e-001,
               9.504178996162932e-001,
               2.097847961257068e+000)
      u = _nest_where(conds, (u3, u5, u7, u9, u13))
      v = _nest_where(conds, (v3, v5, v7, v9, v13))
    else:
      raise ValueError(
          'tf.linalg.expm does not support matrices of type %s' % matrix.dtype)
    # Pade approximant: exp(A) ~= (V - U)^-1 (V + U).
    numer = u + v
    denom = -u + v
    result = linalg_ops.matrix_solve(denom, numer)
    # Undo the scaling by repeated squaring; each batch member is squared
    # `squarings[i]` times, so loop to the batch-wide maximum and mask with
    # where().
    max_squarings = math_ops.reduce_max(squarings)
    i = const(0.0)
    c = lambda i, r: math_ops.less(i, max_squarings)
    def b(i, r):
      return i+1, array_ops.where(math_ops.less(i, squarings),
                                  math_ops.matmul(r, r), r)
    _, result = control_flow_ops.while_loop(c, b, [i, result])
    # Restore the original batch shape (dynamic path when it was not fully
    # defined at graph-construction time).
    if not matrix.shape.is_fully_defined():
      return array_ops.reshape(
          result,
          array_ops.concat((batch_shape, array_ops.shape(result)[-2:]), axis=0))
    return array_ops.reshape(result, batch_shape.concatenate(result.shape[-2:]))
@tf_export('linalg.tridiagonal_solve')
def tridiagonal_solve(diagonals,
                      rhs,
                      diagonals_format='compact',
                      transpose_rhs=False,
                      conjugate_rhs=False,
                      name=None):
  r"""Solves tridiagonal systems of equations.

  Solution is computed via Gaussian elemination with partial pivoting.

  The input can be supplied in various formats: `matrix`, `sequence` and
  `compact`, specified by the `diagonals_format` arg.

  In `matrix` format, `diagonals` must be a tensor of shape `[..., M, M]`, with
  two inner-most dimensions representing the square tridiagonal matrices.
  Elements outside of the three diagonals will be ignored.

  In `sequence` format, `diagonals` are supplied as a tuple or list of three
  tensors of shapes `[..., N]`, `[..., M]`, `[..., N]` representing
  superdiagonals, diagonals, and subdiagonals, respectively. `N` can be either
  `M-1` or `M`; in the latter case, the last element of superdiagonal and the
  first element of subdiagonal will be ignored.

  In `compact` format the three diagonals are brought together into one tensor
  of shape `[..., 3, M]`, with last two dimensions containing superdiagonals,
  diagonals, and subdiagonals, in order. Similarly to `sequence` format,
  elements `diagonals[..., 0, M-1]` and `diagonals[..., 2, 0]` are ignored.

  The `compact` format is recommended as the one with best performance. In case
  you need to cast a tensor into a compact format manually, use `tf.gather_nd`.
  An example for a tensor of shape [m, m]:

  ```python
  rhs = tf.constant([...])
  matrix = tf.constant([[...]])
  m = matrix.shape[0]
  dummy_idx = [0, 0]  # An arbitrary element to use as a dummy
  indices = [[[i, i + 1] for i in range(m - 1)] + [dummy_idx],  # Superdiagonal
             [[i, i] for i in range(m)],                        # Diagonal
             [dummy_idx] + [[i + 1, i] for i in range(m - 1)]]  # Subdiagonal
  diagonals=tf.gather_nd(matrix, indices)
  x = tf.linalg.tridiagonal_solve(diagonals, rhs)
  ```

  Regardless of the `diagonals_format`, `rhs` is a tensor of shape `[..., M]` or
  `[..., M, K]`. The latter allows to simultaneously solve K systems with the
  same left-hand sides and K different right-hand sides. If `transpose_rhs`
  is set to `True` the expected shape is `[..., M]` or `[..., K, M]`.

  The batch dimensions, denoted as `...`, must be the same in `diagonals` and
  `rhs`.

  The output is a tensor of the same shape as `rhs`: either `[..., M]` or
  `[..., M, K]`.

  The op isn't guaranteed to raise an error if the input matrix is not
  invertible. `tf.debugging.check_numerics` can be applied to the output to
  detect invertibility problems.

  Args:
    diagonals: A `Tensor` or tuple of `Tensor`s describing left-hand sides. The
      shape depends of `diagonals_format`, see description above. Must be
      `float32`, `float64`, `complex64`, or `complex128`.
    rhs: A `Tensor` of shape [..., M] or [..., M, K] and with the same dtype as
      `diagonals`.
    diagonals_format: one of `matrix`, `sequence`, or `compact`. Default is
      `compact`.
    transpose_rhs: If `True`, `rhs` is transposed before solving (has no effect
      if the shape of rhs is [..., M]).
    conjugate_rhs: If `True`, `rhs` is conjugated before solving.
    name:  A name to give this `Op` (optional).

  Returns:
    A `Tensor` of shape [..., M] or [..., M, K] containing the solutions.

  Raises:
    ValueError: An unsupported type is provided as input, or when the input
      tensors have incorrect shapes.
  """
  # 'compact' is the native layout of the underlying kernel: pass through.
  if diagonals_format == 'compact':
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, name)
  # 'sequence': validate the three diagonals, pad the M-1-length forms to
  # length M, then stack them into the compact [..., 3, M] layout.
  if diagonals_format == 'sequence':
    if not isinstance(diagonals, (tuple, list)) or len(diagonals) != 3:
      raise ValueError('Expected diagonals to be a sequence of length 3.')
    superdiag, maindiag, subdiag = diagonals
    if (not subdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1]) or
        not superdiag.shape[:-1].is_compatible_with(maindiag.shape[:-1])):
      raise ValueError(
          'Tensors representing the three diagonals must have the same shape,'
          'except for the last dimension, got {}, {}, {}'.format(
              subdiag.shape, maindiag.shape, superdiag.shape))
    m = tensor_shape.dimension_value(maindiag.shape[-1])
    # Accepts a diagonal of length M (returned unchanged) or M-1 (padded with
    # one ignored element on the side given by last_dim_padding).
    # NOTE(review): the error message below has a typo ('to be have length');
    # it is a runtime string, so it is left unchanged here.
    def pad_if_necessary(t, name, last_dim_padding):
      n = tensor_shape.dimension_value(t.shape[-1])
      if not n or n == m:
        return t
      if n == m - 1:
        paddings = (
            [[0, 0] for _ in range(len(t.shape) - 1)] + [last_dim_padding])
        return array_ops.pad(t, paddings)
      raise ValueError('Expected {} to be have length {} or {}, got {}.'.format(
          name, m, m - 1, n))
    subdiag = pad_if_necessary(subdiag, 'subdiagonal', [1, 0])
    superdiag = pad_if_necessary(superdiag, 'superdiagonal', [0, 1])
    diagonals = array_ops.stack((superdiag, maindiag, subdiag), axis=-2)
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, name)
  # 'matrix': gather the three diagonals out of the dense [..., M, M] input
  # into the compact layout; requires a statically known matrix size.
  if diagonals_format == 'matrix':
    m1 = tensor_shape.dimension_value(diagonals.shape[-1])
    m2 = tensor_shape.dimension_value(diagonals.shape[-2])
    if m1 and m2 and m1 != m2:
      raise ValueError(
          'Expected last two dimensions of diagonals to be same, got {} and {}'
          .format(m1, m2))
    m = m1 or m2
    if not m:
      raise ValueError('The size of the matrix needs to be known for '
                       'diagonals_format="matrix"')
    # Extract diagonals; use input[..., 0, 0] as "dummy" m-th elements of sub-
    # and superdiagonal.
    # gather_nd slices into first indices, whereas we need to slice into the
    # last two, so transposing back and forth is necessary.
    dummy_idx = [0, 0]
    indices = ([[[1, 0], [0, 0], dummy_idx]] + [
        [[i + 1, i], [i, i], [i - 1, i]] for i in range(1, m - 1)
    ] + [[dummy_idx, [m - 1, m - 1], [m - 2, m - 1]]])
    diagonals = array_ops.transpose(
        array_ops.gather_nd(array_ops.transpose(diagonals), indices))
    return _tridiagonal_solve_compact_format(diagonals, rhs, transpose_rhs,
                                             conjugate_rhs, name)
  raise ValueError('Unrecognized diagonals_format: {}'.format(diagonals_format))
def _tridiagonal_solve_compact_format(diagonals,
                                      rhs,
                                      transpose_rhs=False,
                                      conjugate_rhs=False,
                                      name=None):
  """Helper function used after the input has been cast to compact form.

  Expects `diagonals` of shape [..., 3, M] and `rhs` of shape [..., M] or
  [..., M, K]; validates shapes and delegates to
  `linalg_ops.tridiagonal_solve`.
  """
  diags_rank, rhs_rank = len(diagonals.shape), len(rhs.shape)
  if diags_rank < 2:
    raise ValueError(
        'Expected diagonals to have rank at least 2, got {}'.format(diags_rank))
  # rhs rank equals diags_rank for matrix rhs ([..., M, K]) or diags_rank - 1
  # for vector rhs ([..., M]).
  if rhs_rank != diags_rank and rhs_rank != diags_rank - 1:
    raise ValueError('Expected the rank of rhs to be {} or {}, got {}'.format(
        diags_rank - 1, diags_rank, rhs_rank))
  if diagonals.shape[-2] != 3:
    raise ValueError('Expected 3 diagonals got {}'.format(diagonals.shape[-2]))
  if not diagonals.shape[:-2].is_compatible_with(rhs.shape[:diags_rank - 2]):
    raise ValueError('Batch shapes {} and {} are incompatible'.format(
        diagonals.shape[:-2], rhs.shape[:diags_rank - 2]))
  def check_num_lhs_matches_num_rhs():
    if diagonals.shape[-1] != rhs.shape[-2]:
      raise ValueError('Expected number of left-hand sided and right-hand '
                       'sides to be equal, got {} and {}'.format(
                           diagonals.shape[-1], rhs.shape[-2]))
  if rhs_rank == diags_rank - 1:
    # Rhs provided as a vector, ignoring transpose_rhs
    if conjugate_rhs:
      rhs = math_ops.conj(rhs)
    # Promote to a single-column matrix, solve, then squeeze the column back.
    rhs = array_ops.expand_dims(rhs, -1)
    check_num_lhs_matches_num_rhs()
    return array_ops.squeeze(
        linalg_ops.tridiagonal_solve(diagonals, rhs, name), -1)
  if transpose_rhs:
    # matrix_transpose conjugates in the same pass when requested.
    rhs = array_ops.matrix_transpose(rhs, conjugate=conjugate_rhs)
  elif conjugate_rhs:
    rhs = math_ops.conj(rhs)
  check_num_lhs_matches_num_rhs()
  result = linalg_ops.tridiagonal_solve(diagonals, rhs, name)
  # Undo the rhs transposition so the output shape matches the caller's rhs.
  return array_ops.matrix_transpose(result) if transpose_rhs else result
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=
"""Dataset container."""
# Public API of this module. ImageRecordDataset and ImageFolderDataset are
# defined further down in the file.
__all__ = ['MNIST', 'FashionMNIST', 'CIFAR10', 'CIFAR100',
           'ImageRecordDataset', 'ImageFolderDataset']
import os
import gzip
import tarfile
import struct
import warnings
import numpy as np
from .. import dataset
from ...utils import download, check_sha1, _get_repo_file_url
from .... import nd, image, recordio, base
class MNIST(dataset._DownloadedDataset):
    """MNIST handwritten digits dataset from http://yann.lecun.com/exdb/mnist

    Each sample is an image (in 3D NDArray) with shape (28, 28, 1).

    Parameters
    ----------
    root : str, default $MXNET_HOME/datasets/mnist
        Path to temp folder for storing data.
    train : bool, default True
        Whether to load the training or testing set.
    transform : function, default None
        A user defined callback that transforms each sample. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root=os.path.join(base.data_dir(), 'datasets', 'mnist'),
                 train=True, transform=None):
        self._train = train
        # (filename, sha1) pairs for the gzipped IDX image/label files.
        self._train_data = ('train-images-idx3-ubyte.gz',
                            '6c95f4b05d2bf285e1bfb0e7960c31bd3b3f8a7d')
        self._train_label = ('train-labels-idx1-ubyte.gz',
                             '2a80914081dc54586dbdf242f9805a6b8d2a15fc')
        self._test_data = ('t10k-images-idx3-ubyte.gz',
                           'c3a25af1f52dad7f726cce8cacb138654b760d48')
        self._test_label = ('t10k-labels-idx1-ubyte.gz',
                            '763e7fa3757d93b0cdec073cef058b2004252c17')
        self._namespace = 'mnist'
        super(MNIST, self).__init__(root, transform)

    def _get_data(self):
        # Download (sha1-verified) and parse the IDX files for the selected
        # split, storing images in self._data and labels in self._label.
        if self._train:
            data, label = self._train_data, self._train_label
        else:
            data, label = self._test_data, self._test_label
        namespace = 'gluon/dataset/'+self._namespace
        data_file = download(_get_repo_file_url(namespace, data[0]),
                             path=self._root,
                             sha1_hash=data[1])
        label_file = download(_get_repo_file_url(namespace, label[0]),
                              path=self._root,
                              sha1_hash=label[1])
        with gzip.open(label_file, 'rb') as fin:
            # Skip the 8-byte IDX header (magic number, item count).
            struct.unpack(">II", fin.read(8))
            label = np.frombuffer(fin.read(), dtype=np.uint8).astype(np.int32)
        with gzip.open(data_file, 'rb') as fin:
            # Skip the 16-byte IDX header (magic, count, rows, cols).
            struct.unpack(">IIII", fin.read(16))
            data = np.frombuffer(fin.read(), dtype=np.uint8)
            # One trailing channel axis so samples are (28, 28, 1).
            data = data.reshape(len(label), 28, 28, 1)
        # Kept as uint8; users rescale via the `transform` callback.
        self._data = nd.array(data, dtype=data.dtype)
        self._label = label
class FashionMNIST(MNIST):
    """A dataset of Zalando's article images consisting of fashion products,
    a drop-in replacement of the original MNIST dataset from
    https://github.com/zalandoresearch/fashion-mnist

    Each sample is an image (in 3D NDArray) with shape (28, 28, 1).

    Parameters
    ----------
    root : str, default $MXNET_HOME/datasets/fashion-mnist'
        Path to temp folder for storing data.
    train : bool, default True
        Whether to load the training or testing set.
    transform : function, default None
        A user defined callback that transforms each sample. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root=os.path.join(base.data_dir(), 'datasets', 'fashion-mnist'),
                 train=True, transform=None):
        self._train = train
        # Same IDX file layout as MNIST, different file contents/hashes.
        self._train_data = ('train-images-idx3-ubyte.gz',
                            '0cf37b0d40ed5169c6b3aba31069a9770ac9043d')
        self._train_label = ('train-labels-idx1-ubyte.gz',
                             '236021d52f1e40852b06a4c3008d8de8aef1e40b')
        self._test_data = ('t10k-images-idx3-ubyte.gz',
                           '626ed6a7c06dd17c0eec72fa3be1740f146a2863')
        self._test_label = ('t10k-labels-idx1-ubyte.gz',
                            '17f9ab60e7257a1620f4ad76bbbaf857c3920701')
        self._namespace = 'fashion-mnist'
        # Deliberately skips MNIST.__init__ (which would overwrite the file
        # tuples set above with MNIST's own) and calls
        # dataset._DownloadedDataset.__init__ directly.
        super(MNIST, self).__init__(root, transform) # pylint: disable=bad-super-call
class CIFAR10(dataset._DownloadedDataset):
    """CIFAR10 image classification dataset from https://www.cs.toronto.edu/~kriz/cifar.html

    Each sample is an image (in 3D NDArray) with shape (32, 32, 3).

    Parameters
    ----------
    root : str, default $MXNET_HOME/datasets/cifar10
        Path to temp folder for storing data.
    train : bool, default True
        Whether to load the training or testing set.
    transform : function, default None
        A user defined callback that transforms each sample. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root=os.path.join(base.data_dir(), 'datasets', 'cifar10'),
                 train=True, transform=None):
        self._train = train
        # The archive plus the (filename, sha1) pairs of the batches inside it.
        self._archive_file = ('cifar-10-binary.tar.gz', 'fab780a1e191a7eda0f345501ccd62d20f7ed891')
        self._train_data = [('data_batch_1.bin', 'aadd24acce27caa71bf4b10992e9e7b2d74c2540'),
                            ('data_batch_2.bin', 'c0ba65cce70568cd57b4e03e9ac8d2a5367c1795'),
                            ('data_batch_3.bin', '1dd00a74ab1d17a6e7d73e185b69dbf31242f295'),
                            ('data_batch_4.bin', 'aab85764eb3584312d3c7f65fd2fd016e36a258e'),
                            ('data_batch_5.bin', '26e2849e66a845b7f1e4614ae70f4889ae604628')]
        self._test_data = [('test_batch.bin', '67eb016db431130d61cd03c7ad570b013799c88c')]
        self._namespace = 'cifar10'
        super(CIFAR10, self).__init__(root, transform)

    def _read_batch(self, filename):
        # Each record is 1 label byte + 3072 image bytes (3x32x32, CHW);
        # returns images as (N, 32, 32, 3) uint8 and labels as int32.
        with open(filename, 'rb') as fin:
            data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3072+1)
        return data[:, 1:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1), \
               data[:, 0].astype(np.int32)

    def _get_data(self):
        # Download and extract the archive only if any batch file is missing
        # or fails its sha1 check.
        if any(not os.path.exists(path) or not check_sha1(path, sha1)
               for path, sha1 in ((os.path.join(self._root, name), sha1)
                                  for name, sha1 in self._train_data + self._test_data)):
            namespace = 'gluon/dataset/'+self._namespace
            filename = download(_get_repo_file_url(namespace, self._archive_file[0]),
                                path=self._root,
                                sha1_hash=self._archive_file[1])
            with tarfile.open(filename) as tar:
                tar.extractall(self._root)
        if self._train:
            data_files = self._train_data
        else:
            data_files = self._test_data
        # Concatenate all batches of the selected split.
        data, label = zip(*(self._read_batch(os.path.join(self._root, name))
                            for name, _ in data_files))
        data = np.concatenate(data)
        label = np.concatenate(label)
        self._data = nd.array(data, dtype=data.dtype)
        self._label = label
class CIFAR100(CIFAR10):
    """CIFAR100 image classification dataset from https://www.cs.toronto.edu/~kriz/cifar.html

    Each sample is an image (in 3D NDArray) with shape (32, 32, 3).

    Parameters
    ----------
    root : str, default $MXNET_HOME/datasets/cifar100
        Path to temp folder for storing data.
    fine_label : bool, default False
        Whether to load the fine-grained (100 classes) or coarse-grained (20 super-classes) labels.
    train : bool, default True
        Whether to load the training or testing set.
    transform : function, default None
        A user defined callback that transforms each sample. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, root=os.path.join(base.data_dir(), 'datasets', 'cifar100'),
                 fine_label=False, train=True, transform=None):
        self._train = train
        self._archive_file = ('cifar-100-binary.tar.gz', 'a0bb982c76b83111308126cc779a992fa506b90b')
        self._train_data = [('train.bin', 'e207cd2e05b73b1393c74c7f5e7bea451d63e08e')]
        self._test_data = [('test.bin', '8fb6623e830365ff53cf14adec797474f5478006')]
        self._fine_label = fine_label
        self._namespace = 'cifar100'
        # deliberately skip CIFAR10.__init__ (its file lists don't apply) and
        # call its parent initializer directly, which triggers download/load
        super(CIFAR10, self).__init__(root, transform) # pylint: disable=bad-super-call
    def _read_batch(self, filename):
        """Read the CIFAR100 binary file.

        Each record is <coarse label byte><fine label byte><3072 image bytes>;
        ``0 + self._fine_label`` selects coarse (0) or fine (1) labels.
        """
        with open(filename, 'rb') as fin:
            data = np.frombuffer(fin.read(), dtype=np.uint8).reshape(-1, 3072+2)
        return data[:, 2:].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1), \
               data[:, 0+self._fine_label].astype(np.int32)
class ImageRecordDataset(dataset.RecordFileDataset):
    """A dataset wrapping over a RecordIO file containing images.

    Each sample is an image and its corresponding label.

    Parameters
    ----------
    filename : str
        Path to rec file.
    flag : {0, 1}, default 1
        If 0, always convert images to greyscale.
        If 1, always convert images to colored (RGB).
    transform : function, default None
        A user defined callback that transforms each sample. For example::

            transform=lambda data, label: (data.astype(np.float32)/255, label)
    """
    def __init__(self, filename, flag=1, transform=None):
        super(ImageRecordDataset, self).__init__(filename)
        self._flag = flag
        self._transform = transform

    def __getitem__(self, idx):
        # fetch the raw record, then split it into header (carries the label)
        # and the encoded image bytes
        raw_record = super(ImageRecordDataset, self).__getitem__(idx)
        header, img_bytes = recordio.unpack(raw_record)
        decoded = image.imdecode(img_bytes, self._flag)
        if self._transform is None:
            return decoded, header.label
        return self._transform(decoded, header.label)
class ImageFolderDataset(dataset.Dataset):
    """A dataset for loading image files stored in a folder structure like::

        root/car/0001.jpg
        root/car/xxxa.jpg
        root/car/yyyb.jpg
        root/bus/123.jpg
        root/bus/023.jpg
        root/bus/wwww.jpg

    Parameters
    ----------
    root : str
        Path to root directory.
    flag : {0, 1}, default 1
        If 0, always convert loaded images to greyscale (1 channel).
        If 1, always convert loaded images to colored (3 channels).
    transform : callable, default None
        A function that takes data and label and transforms them::

            transform = lambda data, label: (data.astype(np.float32)/255, label)

    Attributes
    ----------
    synsets : list
        List of class names. `synsets[i]` is the name for the integer label `i`
    items : list of tuples
        List of all images in (filename, label) pairs.
    """
    def __init__(self, root, flag=1, transform=None):
        self._root = os.path.expanduser(root)
        self._flag = flag
        self._transform = transform
        # recognized image extensions (compared lower-cased)
        self._exts = ['.jpg', '.jpeg', '.png']
        self._list_images(self._root)

    def _list_images(self, root):
        # One integer label per sub-directory, assigned in sorted order.
        self.synsets = []
        self.items = []
        for entry in sorted(os.listdir(root)):
            folder_path = os.path.join(root, entry)
            if not os.path.isdir(folder_path):
                warnings.warn('Ignoring %s, which is not a directory.'%folder_path, stacklevel=3)
                continue
            label = len(self.synsets)
            self.synsets.append(entry)
            for fname in sorted(os.listdir(folder_path)):
                filename = os.path.join(folder_path, fname)
                ext = os.path.splitext(filename)[1]
                if ext.lower() not in self._exts:
                    warnings.warn('Ignoring %s of type %s. Only support %s'%(
                        filename, ext, ', '.join(self._exts)))
                    continue
                self.items.append((filename, label))

    def __getitem__(self, idx):
        # decode lazily, one image per access
        filename, label = self.items[idx]
        img = image.imread(filename, self._flag)
        if self._transform is None:
            return img, label
        return self._transform(img, label)

    def __len__(self):
        return len(self.items)
|
|
# -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities.
Authors
-------
* Fernando Perez.
* Brian Granger
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2009 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
            'gtk': 'GTKAgg',
            'wx': 'WXAgg',
            'qt': 'Qt4Agg',  # qt3 not supported
            'qt4': 'Qt4Agg',
            'osx': 'MacOSX',
            'inline': 'module://IPython.kernel.zmq.pylab.backend_inline'}

# Reverse mapping: for a given matplotlib backend, which GUI support should
# be activated. Mostly just inverts `backends`; a handful of extra valid
# matplotlib backends that share a GUI are added by hand below.
backend2gui = {backend: gui for gui, backend in backends.items()}
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
backend2gui['GTK'] = 'gtk'
backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
    """Get a list of matplotlib figures by figure numbers.

    If no arguments are given, all available figures are returned. If the
    argument list contains references to invalid figures, a warning is
    printed but the remaining valid figures are still returned.

    Parameters
    ----------
    fig_nums : tuple
        A tuple of ints giving the figure numbers of the figures to return.
    """
    from matplotlib._pylab_helpers import Gcf
    if not fig_nums:
        # no explicit request: return every live figure
        return [manager.canvas.figure for manager in Gcf.get_all_fig_managers()]
    figs = []
    for num in fig_nums:
        manager = Gcf.figs.get(num)
        if manager is None:
            print('Warning: figure %s not available.' % num)
        else:
            figs.append(manager.canvas.figure)
    return figs
def figsize(sizex, sizey):
    """Set the default figure size to be [sizex, sizey].

    This is just an easy to remember, convenience wrapper that sets::

        matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
    """
    from matplotlib import rcParams
    rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png'):
    """Convert a figure to svg or png bytes for inline display.

    Returns None for an empty figure (no axes and no lines) so the
    frontend does not render a big blank area.
    """
    from matplotlib import rcParams
    if not fig.axes and not fig.lines:
        return
    dpi = rcParams['savefig.dpi']
    if fmt == 'retina':
        # retina is just PNG rendered at double resolution
        dpi, fmt = dpi * 2, 'png'
    buf = BytesIO()
    fig.canvas.print_figure(buf, format=fmt, bbox_inches='tight',
                            facecolor=fig.get_facecolor(),
                            edgecolor=fig.get_edgecolor(), dpi=dpi)
    return buf.getvalue()
def retina_figure(fig):
    """Format a figure as a pixel-doubled (retina) PNG.

    Returns the PNG bytes plus metadata halving the logical width/height so
    the frontend displays it at normal size with doubled pixel density.
    """
    pngdata = print_figure(fig, fmt='retina')
    width, height = _pngxy(pngdata)
    return pngdata, {'width': width // 2, 'height': height // 2}
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
    """Factory to return a matplotlib-enabled runner for %run.

    Parameters
    ----------
    safe_execfile : function
        This must be a function with the same interface as the
        :meth:`safe_execfile` method of IPython.

    Returns
    -------
    A function suitable for use as the ``runner`` argument of the %run magic
    function.
    """
    def mpl_execfile(fname, *where, **kw):
        """matplotlib-aware wrapper around safe_execfile.

        Its interface is identical to that of the :func:`execfile` builtin.

        This is ultimately a call to execfile(), but wrapped in safeties to
        properly handle interactive rendering."""
        import matplotlib
        import matplotlib.pylab as pylab
        # turn off rendering until end of script
        is_interactive = matplotlib.rcParams['interactive']
        matplotlib.interactive(False)
        try:
            safe_execfile(fname, *where, **kw)
        finally:
            # restore the interactive state even if the script raised;
            # otherwise a failing %run would leave rendering disabled
            matplotlib.interactive(is_interactive)
        # make rendering call now, if the user tried to do it
        if pylab.draw_if_interactive.called:
            pylab.draw()
            pylab.draw_if_interactive.called = False
    return mpl_execfile
def select_figure_format(shell, fmt):
    """Select figure format for inline backend: 'png', 'retina', or 'svg'.

    Using this method ensures only one figure format is active at a time.
    """
    from matplotlib.figure import Figure
    from IPython.kernel.zmq.pylab import backend_inline

    formatters = shell.display_formatter.formatters
    svg_formatter = formatters['image/svg+xml']
    png_formatter = formatters['image/png']

    # register the printer for the requested format and unregister the other
    if fmt == 'png':
        svg_formatter.type_printers.pop(Figure, None)
        png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png'))
    elif fmt in ('png2x', 'retina'):
        svg_formatter.type_printers.pop(Figure, None)
        png_formatter.for_type(Figure, retina_figure)
    elif fmt == 'svg':
        png_formatter.type_printers.pop(Figure, None)
        svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg'))
    else:
        raise ValueError("supported formats are: 'png', 'retina', 'svg', not %r" % fmt)

    # set the format to be used in the backend()
    backend_inline._figure_format = fmt
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
    """Given a gui string return the gui and mpl backend.

    Parameters
    ----------
    gui : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
    gui_select : str
        Can be one of ('tk','gtk','wx','qt','qt4','inline').
        This is any gui already selected by the shell.

    Returns
    -------
    A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
    'WXAgg','Qt4Agg','module://IPython.kernel.zmq.pylab.backend_inline').
    """
    import matplotlib

    if gui and gui != 'auto':
        # an explicit gui request dictates the backend
        backend = backends[gui]
    else:
        # fall back on the user's matplotlib rc default and infer the
        # matching gui from it (so we can activate inputhook accordingly)
        backend = matplotlib.rcParams['backend']
        gui = backend2gui.get(backend, None)

    # if a gui was already active in the shell, only it (and inline) are
    # allowed, so it overrides whatever we inferred above
    if gui_select and gui != gui_select:
        gui = gui_select
        backend = backends[gui]

    return gui, backend
def activate_matplotlib(backend):
    """Activate the given backend and set interactive to True.

    The statement order below is load-bearing: the rcParam must be set
    before switch_backend, and pylab must be imported only after the
    backend/interactivity choices are made.
    """
    import matplotlib
    matplotlib.interactive(True)
    # Matplotlib had a bug where even switch_backend could not force
    # the rcParam to update. This needs to be set *before* the module
    # magic of switch_backend().
    matplotlib.rcParams['backend'] = backend
    import matplotlib.pyplot
    matplotlib.pyplot.switch_backend(backend)
    # This must be imported last in the matplotlib series, after
    # backend/interactivity choices have been made
    import matplotlib.pylab as pylab
    # prevent show() from trying to start a blocking mainloop
    pylab.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # For this, we wrap it into a decorator which adds a 'called' flag.
    pylab.draw_if_interactive = flag_calls(pylab.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
    """Populate the namespace with pylab-related values.

    Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
    Also imports a few names from IPython (figsize, display, getfigs).

    Parameters
    ----------
    user_ns : dict
        Namespace (e.g. an interactive shell's user namespace) to populate.
    import_all : bool
        If True, also run ``from matplotlib.pylab import *`` and
        ``from numpy import *`` in ``user_ns``.
    """
    # Import numpy as np/pyplot as plt are conventions we're trying to
    # somewhat standardize on. Making them available to users by default
    # will greatly help this.
    s = ("import numpy\n"
         "import matplotlib\n"
         "from matplotlib import pylab, mlab, pyplot\n"
         "np = numpy\n"
         "plt = pyplot\n"
         )
    # exec(code, ns) is valid on both Python 2 and 3; the statement form
    # `exec s in user_ns` is a SyntaxError on Python 3.
    exec(s, user_ns)

    if import_all:
        s = ("from matplotlib.pylab import *\n"
             "from numpy import *\n")
        exec(s, user_ns)

    # IPython symbols to add
    user_ns['figsize'] = figsize
    from IPython.core.display import display
    # Add display and getfigs to the user's namespace
    user_ns['display'] = display
    user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
    """Configure an IPython shell object for matplotlib use.

    Parameters
    ----------
    shell : InteractiveShell instance
    backend : matplotlib backend
    """
    # If using our svg payload backend, register the post-execution
    # function that will pick up the results for display.  This can only be
    # done with access to the real shell object.

    # Note: if we can't load the inline backend, then there's no point
    # continuing (such as in terminal-only shells in environments without
    # zeromq available).
    try:
        from IPython.kernel.zmq.pylab.backend_inline import InlineBackend
    except ImportError:
        return
    from matplotlib import pyplot

    cfg = InlineBackend.instance(parent=shell)
    cfg.shell = shell
    if cfg not in shell.configurables:
        shell.configurables.append(cfg)

    if backend == backends['inline']:
        from IPython.kernel.zmq.pylab.backend_inline import flush_figures
        shell.register_post_execute(flush_figures)

        # Save rcParams that will be overwritten so they can be restored
        # if the user later switches away from the inline backend
        shell._saved_rcParams = dict()
        for k in cfg.rc:
            shell._saved_rcParams[k] = pyplot.rcParams[k]
        # load inline_rc
        pyplot.rcParams.update(cfg.rc)
    else:
        # switching away from inline: unregister the flush hook and restore
        # whatever rcParams the inline backend had overwritten
        from IPython.kernel.zmq.pylab.backend_inline import flush_figures
        if flush_figures in shell._post_execute:
            shell._post_execute.pop(flush_figures)
        if hasattr(shell, '_saved_rcParams'):
            pyplot.rcParams.update(shell._saved_rcParams)
            del shell._saved_rcParams

    # Setup the default figure format
    select_figure_format(shell, cfg.figure_format)
|
|
"""Interact with the OSM APIs."""
import datetime as dt
import json
import logging as lg
import re
import socket
import time
from collections import OrderedDict
from hashlib import sha1
from pathlib import Path
from urllib.parse import urlparse
import numpy as np
import requests
from dateutil import parser as date_parser
from . import projection
from . import settings
from . import utils
from . import utils_geo
from ._errors import CacheOnlyModeInterrupt
# capture getaddrinfo function to use original later after mutating it
_original_getaddrinfo = socket.getaddrinfo
def _get_osm_filter(network_type):
    """
    Create a filter to query OSM for the specified network type.

    Parameters
    ----------
    network_type : string {"all_private", "all", "bike", "drive", "drive_service", "walk"}
        what type of street network to get

    Returns
    -------
    string
    """
    # Built-in queries to send to the API. Specifying way["highway"] means
    # all returned ways must have a highway tag; each filter then removes
    # ways by tag/value.
    filters = {
        # driving: filter out un-drivable roads, service roads, private ways,
        # and anything specifying motor=no. also filter out any non-service
        # roads that are tagged as providing certain services
        "drive": (
            f'["highway"]["area"!~"yes"]{settings.default_access}'
            f'["highway"!~"abandoned|bridleway|bus_guideway|construction|corridor|cycleway|elevator|'
            f"escalator|footway|path|pedestrian|planned|platform|proposed|raceway|service|"
            f'steps|track"]'
            f'["motor_vehicle"!~"no"]["motorcar"!~"no"]'
            f'["service"!~"alley|driveway|emergency_access|parking|parking_aisle|private"]'
        ),
        # drive+service: allow ways tagged 'service' but filter out certain types
        "drive_service": (
            f'["highway"]["area"!~"yes"]{settings.default_access}'
            f'["highway"!~"abandoned|bridleway|bus_guideway|construction|corridor|cycleway|elevator|'
            f'escalator|footway|path|pedestrian|planned|platform|proposed|raceway|steps|track"]'
            f'["motor_vehicle"!~"no"]["motorcar"!~"no"]'
            f'["service"!~"emergency_access|parking|parking_aisle|private"]'
        ),
        # walking: filter out cycle ways, motor ways, private ways, and
        # anything specifying foot=no. allow service roads (parking lot
        # lanes, alleys, etc). some cycleways may allow pedestrians, but
        # this filter ignores such cycleways.
        "walk": (
            f'["highway"]["area"!~"yes"]{settings.default_access}'
            f'["highway"!~"abandoned|bus_guideway|construction|cycleway|motor|planned|platform|'
            f'proposed|raceway"]'
            f'["foot"!~"no"]["service"!~"private"]'
        ),
        # biking: filter out foot ways, motor ways, private ways, and
        # anything specifying biking=no
        "bike": (
            f'["highway"]["area"!~"yes"]{settings.default_access}'
            f'["highway"!~"abandoned|bus_guideway|construction|corridor|elevator|escalator|footway|'
            f'motor|planned|platform|proposed|raceway|steps"]'
            f'["bicycle"!~"no"]["service"!~"private"]'
        ),
        # all ways currently in use that are not private-access only
        "all": (
            f'["highway"]["area"!~"yes"]{settings.default_access}'
            f'["highway"!~"abandoned|construction|planned|platform|proposed|raceway"]'
            f'["service"!~"private"]'
        ),
        # all ways currently in use, including private-access ones
        "all_private": (
            '["highway"]["area"!~"yes"]'
            '["highway"!~"abandoned|construction|planned|platform|proposed|raceway"]'
        ),
    }

    if network_type not in filters:  # pragma: no cover
        raise ValueError(f'Unrecognized network_type "{network_type}"')
    return filters[network_type]
def _save_to_cache(url, response_json, sc):
    """
    Save a HTTP response JSON object to a file in the cache folder.

    The cache file's name is the SHA1 checksum of the url. If the request was
    sent to the server via POST instead of GET, then the URL should be a
    GET-style representation of the request. The response is only saved if
    settings.use_cache is True, response_json is not None, and sc == 200.

    Users should always pass OrderedDicts instead of dicts of parameters
    into request functions, so the parameters remain in the same order each
    time, producing the same URL string, and thus the same hash. Otherwise
    the cache will eventually contain multiple saved responses for the same
    request because the URL's parameters appeared in a different order each
    time.

    Parameters
    ----------
    url : string
        the URL of the request
    response_json : dict
        the JSON response
    sc : int
        the response's HTTP status code

    Returns
    -------
    None
    """
    if not settings.use_cache:
        return
    if sc != 200:
        utils.log(f"Did not save to cache because status code is {sc}")
        return
    if response_json is None:
        utils.log("Did not save to cache because response_json is None")
        return

    # create the folder on the disk if it doesn't already exist
    cache_folder = Path(settings.cache_folder)
    cache_folder.mkdir(parents=True, exist_ok=True)

    # hash the url to make the filename succinct but unique
    # sha1 digest is 160 bits = 20 bytes = 40 hexadecimal characters
    cache_filepath = cache_folder / (sha1(url.encode("utf-8")).hexdigest() + ".json")

    # dump to json, and save to file
    cache_filepath.write_text(json.dumps(response_json), encoding="utf-8")
    utils.log(f'Saved response to cache file "{cache_filepath}"')
def _url_in_cache(url):
    """
    Determine if a URL's response exists in the cache.

    Calculates the SHA1 checksum of url to determine the cache file's name.

    Parameters
    ----------
    url : string
        the URL to look for in the cache

    Returns
    -------
    filepath : pathlib.Path
        path to cached response for url if it exists, otherwise None
    """
    # the cache filename is the hex SHA1 digest of the url
    digest = sha1(url.encode("utf-8")).hexdigest()
    filepath = Path(settings.cache_folder) / f"{digest}.json"
    if filepath.is_file():
        return filepath
    return None
def _retrieve_from_cache(url, check_remark=False):
    """
    Retrieve a HTTP response JSON object from the cache, if it exists.

    Parameters
    ----------
    url : string
        the URL of the request
    check_remark : string
        if True, only return the cached response if it does not have a
        remark key indicating a server warning

    Returns
    -------
    response_json : dict
        cached response for url if it exists in the cache, otherwise None
    """
    if not settings.use_cache:
        return None
    cache_filepath = _url_in_cache(url)
    if cache_filepath is None:
        return None
    response_json = json.loads(cache_filepath.read_text(encoding="utf-8"))
    # optionally treat a server "remark" warning as a cache miss
    if check_remark and "remark" in response_json:
        utils.log(f'Found remark, so ignoring cache file "{cache_filepath}"')
        return None
    utils.log(f'Retrieved response from cache file "{cache_filepath}"')
    return response_json
def _get_http_headers(user_agent=None, referer=None, accept_language=None):
    """
    Update the default requests HTTP headers with OSMnx info.

    Parameters
    ----------
    user_agent : string
        the user agent string, if None will set with OSMnx default
    referer : string
        the referer string, if None will set with OSMnx default
    accept_language : string
        make accept-language explicit e.g. for consistent nominatim result
        sorting

    Returns
    -------
    headers : dict
    """
    # fall back on package-level defaults for any unspecified value
    user_agent = settings.default_user_agent if user_agent is None else user_agent
    referer = settings.default_referer if referer is None else referer
    accept_language = (
        settings.default_accept_language if accept_language is None else accept_language
    )

    headers = requests.utils.default_headers()
    headers.update(
        {"User-Agent": user_agent, "referer": referer, "Accept-Language": accept_language}
    )
    return headers
def _get_host_by_name(host):
    """
    Resolve IP address from host using Google's public API for DNS over HTTPS.

    Necessary fallback as socket.gethostbyname will not always work when using
    a proxy. See https://developers.google.com/speed/public-dns/docs/doh/json

    Parameters
    ----------
    host : string
        the host to consistently resolve the IP address of

    Returns
    -------
    ip_address : string
        resolved IP address
    """
    response = requests.get(f"https://dns.google/resolve?name={host}")
    data = response.json()

    # status = 0 means NOERROR: standard DNS response code
    if response.ok and data["Status"] == 0:
        ip_address = data["Answer"][0]["data"]
        utils.log(f"Google resolved '{host}' to '{ip_address}'")
        return ip_address

    # if host could not be resolved, return the host itself
    utils.log(f"Google could not resolve '{host}'. Response status: {data['Status']}")
    return host
def _config_dns(url):
    """
    Force socket.getaddrinfo to use IP address instead of host.

    Resolves the URL's domain to an IP address so that we use the same server
    for both 1) checking the necessary pause duration and 2) sending the query
    itself even if there is round-robin redirecting among multiple server
    machines on the server-side. Mutates the getaddrinfo function so it uses
    the same IP address everytime it finds the host name in the URL.

    For example, the domain overpass-api.de just redirects to one of its
    subdomains (currently z.overpass-api.de and lz4.overpass-api.de). So if we
    check the status endpoint of overpass-api.de, we may see results for
    subdomain z, but when we submit the query itself it gets redirected to
    subdomain lz4. This could result in violating server lz4's slot management
    timing.

    Parameters
    ----------
    url : string
        the URL to consistently resolve the IP address of

    Returns
    -------
    None
    """
    host = urlparse(url).netloc.split(":")[0]
    try:
        ip = socket.gethostbyname(host)
    except socket.gaierror:  # pragma: no cover
        # this error occurs sometimes when using a proxy. instead, get the IP
        # address using Google's public JSON API for DNS over HTTPS.
        # BUGFIX: _get_host_by_name returns the full IP string (or the host
        # itself on failure) -- previously this indexed [0], which yielded
        # only the first character of the string.
        ip = _get_host_by_name(host)

    def _getaddrinfo(*args):
        # pin this host to the resolved IP; defer everything else to the
        # original implementation
        if args[0] == host:
            utils.log(f"Resolved {host} to {ip}")
            return _original_getaddrinfo(ip, *args[1:])
        else:
            return _original_getaddrinfo(*args)

    socket.getaddrinfo = _getaddrinfo
def _get_pause(base_endpoint, recursive_delay=5, default_duration=60):
    """
    Get a pause duration from the Overpass API status endpoint.

    Check the Overpass API status endpoint to determine how long to wait until
    the next slot is available. You can disable this via the `ox.config`
    function's `overpass_rate_limit` argument.

    Parameters
    ----------
    base_endpoint : string
        base Overpass API endpoint (without "/status" at the end)
    recursive_delay : int
        how long to wait between recursive calls if the server is currently
        running a query
    default_duration : int
        if fatal error, fall back on returning this value

    Returns
    -------
    pause : int
    """
    if not settings.overpass_rate_limit: # pragma: no cover
        # if overpass rate limiting is False, then there is zero pause
        return 0
    sc = None
    try:
        url = base_endpoint.rstrip("/") + "/status"
        response = requests.get(url, headers=_get_http_headers(), **settings.requests_kwargs)
        sc = response.status_code
        # the fourth line of the status page describes slot availability;
        # its first token determines which case we are in below
        status = response.text.split("\n")[3]
        status_first_token = status.split(" ")[0]
    except Exception: # pragma: no cover
        # if we cannot reach the status endpoint or parse its output, log an
        # error and return default duration
        utils.log(f"Unable to query {url}, got status {sc}", level=lg.ERROR)
        return default_duration
    try:
        # if first token is numeric, it's how many slots you have available,
        # no wait required
        _ = int(status_first_token) # number of available slots
        pause = 0
    except Exception: # pragma: no cover
        # if first token is 'Slot', it tells you when your slot will be free
        if status_first_token == "Slot":
            utc_time_str = status.split(" ")[3]
            utc_time = date_parser.parse(utc_time_str).replace(tzinfo=None)
            # wait until that time (rounded up), but at least 1 second
            pause = int(np.ceil((utc_time - dt.datetime.utcnow()).total_seconds()))
            pause = max(pause, 1)
        # if first token is 'Currently', it is currently running a query so
        # check back in recursive_delay seconds
        elif status_first_token == "Currently":
            time.sleep(recursive_delay)
            pause = _get_pause(base_endpoint)
        # any other status is unrecognized: log error, return default duration
        else:
            utils.log(f'Unrecognized server status: "{status}"', level=lg.ERROR)
            return default_duration
    return pause
def _make_overpass_settings():
    """
    Make settings string to send in Overpass query.

    Returns
    -------
    string
    """
    # include a [maxsize:...] directive only if a memory limit is configured
    maxsize = "" if settings.memory is None else f"[maxsize:{settings.memory}]"
    return settings.overpass_settings.format(timeout=settings.timeout, maxsize=maxsize)
def _make_overpass_polygon_coord_strs(polygon):
    """
    Subdivide query polygon and return list of coordinate strings.

    Project to UTM, divide the polygon into sub-polygons if its area exceeds
    a max size (in meters), project back to lat-lng, then collect each
    polygon's exterior coordinates.

    Parameters
    ----------
    polygon : shapely.geometry.Polygon or shapely.geometry.MultiPolygon
        geographic boundaries to fetch the OSM geometries within

    Returns
    -------
    polygon_coord_strs : list
        list of exterior coordinate strings for smaller sub-divided polygons
    """
    projected, crs_proj = projection.project_geometry(polygon)
    subdivided = utils_geo._consolidate_subdivide_geometry(projected)
    geometry_latlong, _ = projection.project_geometry(subdivided, crs=crs_proj, to_latlong=True)
    coord_strs = utils_geo._get_polygons_coordinates(geometry_latlong)
    utils.log(f"Requesting data within polygon from API in {len(coord_strs)} request(s)")
    return coord_strs
def _create_overpass_query(polygon_coord_str, tags):
    """
    Create an overpass query string based on passed tags.

    Parameters
    ----------
    polygon_coord_str : list
        list of lat lng coordinates
    tags : dict
        dict of tags used for finding elements in the selected area

    Returns
    -------
    query : string
    """
    # create overpass settings string
    overpass_settings = _make_overpass_settings()

    # validate and normalize: every value must be bool, str, or list of str;
    # bare strings are wrapped into single-element lists
    error_msg = "tags must be a dict with values of bool, str, or list of str"
    if not isinstance(tags, dict):  # pragma: no cover
        raise TypeError(error_msg)
    tags_dict = {}
    for key, value in tags.items():
        if isinstance(value, bool):
            tags_dict[key] = value
        elif isinstance(value, str):
            tags_dict[key] = [value]
        elif isinstance(value, list) and all(isinstance(s, str) for s in value):
            tags_dict[key] = value
        else:  # pragma: no cover
            raise TypeError(error_msg)

    # flatten into a list of single-pair {tag: value} dicts
    tags_list = []
    for key, value in tags_dict.items():
        if isinstance(value, bool):
            tags_list.append({key: value})
        else:
            tags_list.extend({key: item} for item in value)

    # add node/way/relation query components one at a time
    components = []
    for tag in tags_list:
        for key, value in tag.items():
            if isinstance(value, bool):
                # if bool (ie, True) just pass the key, no value
                tag_str = f"['{key}'](poly:'{polygon_coord_str}');(._;>;);"
            else:
                # otherwise, pass "key"="value"
                tag_str = f"['{key}'='{value}'](poly:'{polygon_coord_str}');(._;>;);"
            for kind in ("node", "way", "relation"):
                components.append(f"({kind}{tag_str});")

    # finalize query and return
    return f"{overpass_settings};({''.join(components)});out;"
def _osm_network_download(polygon, network_type, custom_filter):
    """
    Retrieve networked ways and nodes within boundary from the Overpass API.

    Parameters
    ----------
    polygon : shapely.geometry.Polygon or shapely.geometry.MultiPolygon
        boundary to fetch the network ways/nodes within
    network_type : string
        what type of street network to get if custom_filter is None
    custom_filter : string
        a custom ways filter to be used instead of the network_type presets

    Returns
    -------
    response_jsons : list
        list of JSON responses from the Overpass server
    """
    # a custom ways filter, if provided, wins over the network_type presets
    osm_filter = custom_filter if custom_filter is not None else _get_osm_filter(network_type)
    overpass_settings = _make_overpass_settings()

    # subdivide query polygon to get list of sub-divided polygon coord strings
    polygon_coord_strs = _make_overpass_polygon_coord_strs(polygon)

    # pass each polygon's exterior coordinates to the API, one at a time.
    # the '>' makes it recurse so we get ways and the ways' nodes.
    response_jsons = []
    for coord_str in polygon_coord_strs:
        query_str = f"{overpass_settings};(way{osm_filter}(poly:'{coord_str}');>;);out;"
        response_jsons.append(overpass_request(data={"data": query_str}))
    utils.log(
        f"Got all network data within polygon from API in {len(polygon_coord_strs)} request(s)"
    )

    if settings.cache_only_mode:  # pragma: no cover
        raise CacheOnlyModeInterrupt("settings.cache_only_mode=True")

    return response_jsons
def _osm_geometries_download(polygon, tags):
    """
    Retrieve non-networked elements within boundary from the Overpass API.

    Parameters
    ----------
    polygon : shapely.geometry.Polygon
        boundaries to fetch elements within
    tags : dict
        dict of tags used for finding elements in the selected area

    Returns
    -------
    response_jsons : list
        list of JSON responses from the Overpass server
    """
    # subdivide the query polygon into coordinate strings, then issue one
    # API request per sub-polygon exterior, collecting each JSON response
    coord_strs = _make_overpass_polygon_coord_strs(polygon)
    response_jsons = [
        overpass_request(data={"data": _create_overpass_query(coord_str, tags)})
        for coord_str in coord_strs
    ]
    utils.log(
        f"Got all geometries data within polygon from API in {len(coord_strs)} request(s)"
    )
    return response_jsons
def _osm_place_download(query, by_osmid=False, limit=1, polygon_geojson=1):
    """
    Retrieve a place from the Nominatim API.

    Parameters
    ----------
    query : string or dict
        query string or structured query dict
    by_osmid : bool
        if True, handle query as an OSM ID for lookup rather than text search
    limit : int
        max number of results to return
    polygon_geojson : int
        retrieve the place's geometry from the API, 0=no, 1=yes

    Returns
    -------
    response_json : dict
        JSON response from the Nominatim server
    """
    # common parameters for both endpoints
    params = OrderedDict()
    params["format"] = "json"
    params["polygon_geojson"] = polygon_geojson

    if by_osmid:
        # querying by OSM ID: use the lookup endpoint
        params["osm_ids"] = query
        return nominatim_request(params=params, request_type="lookup")

    # otherwise use the search endpoint. prevent OSM from deduping results
    # so we get the precise number of results requested.
    params["dedupe"] = 0
    params["limit"] = limit
    if isinstance(query, str):
        params["q"] = query
    elif isinstance(query, dict):
        # add query keys in alphabetical order so the URL is the same string
        # each time, for caching purposes
        for key in sorted(query):
            params[key] = query[key]
    else:  # pragma: no cover
        raise TypeError("query must be a dict or a string")

    # request the URL, return the JSON
    return nominatim_request(params=params, request_type="search")
def nominatim_request(params, request_type="search", pause=1, error_pause=60):
    """
    Send a HTTP GET request to the Nominatim API and return JSON response.

    Parameters
    ----------
    params : OrderedDict
        key-value pairs of parameters
    request_type : string {"search", "reverse", "lookup"}
        which Nominatim API endpoint to query
    pause : int
        how long to pause before request, in seconds. per the nominatim usage
        policy: "an absolute maximum of 1 request per second" is allowed
    error_pause : int
        how long to pause in seconds before re-trying request if error

    Returns
    -------
    response_json : dict
    """
    if request_type not in {"search", "reverse", "lookup"}:  # pragma: no cover
        raise ValueError('Nominatim request_type must be "search", "reverse", or "lookup"')

    # resolve url to same IP even if there is server round-robin redirecting
    endpoint = settings.nominatim_endpoint.rstrip("/")
    _config_dns(endpoint)

    # prepare the Nominatim API URL and hash it to check the cache. note the
    # API key is appended only AFTER the cache key has been computed, so the
    # key never participates in cache lookups.
    url = endpoint + "/" + request_type
    prepared_url = requests.Request("GET", url, params=params).prepare().url
    cached_response_json = _retrieve_from_cache(prepared_url)
    if settings.nominatim_key:
        params["key"] = settings.nominatim_key

    if cached_response_json is not None:
        # cache hit: return it instead of calling the server
        return cached_response_json

    # cache miss: pause (per the usage policy), then request it
    utils.log(f"Pausing {pause} seconds before making HTTP GET request")
    time.sleep(pause)

    # transmit the HTTP GET request
    utils.log(f"Get {prepared_url} with timeout={settings.timeout}")
    response = requests.get(
        url,
        params=params,
        timeout=settings.timeout,
        headers=_get_http_headers(),
        **settings.requests_kwargs,
    )
    sc = response.status_code

    # log the response size and domain
    domain = re.findall(r"(?s)//(.*?)/", url)[0]
    utils.log(f"Downloaded {len(response.content) / 1000:,.1f}kB from {domain}")

    try:
        response_json = response.json()
    except Exception:  # pragma: no cover
        if sc in {429, 504}:
            # 429 is 'too many requests' and 504 is 'gateway timeout' from
            # server overload: handle these by pausing then recursively
            # re-trying until we get a valid response from the server
            utils.log(f"{domain} returned {sc}: retry in {error_pause} secs", level=lg.WARNING)
            time.sleep(error_pause)
            response_json = nominatim_request(params, request_type, pause, error_pause)
        else:
            # else, this was an unhandled status code, throw an exception
            utils.log(f"{domain} returned {sc}", level=lg.ERROR)
            raise Exception(f"Server returned:\n{response} {response.reason}\n{response.text}")

    _save_to_cache(prepared_url, response_json, sc)
    return response_json
def overpass_request(data, pause=None, error_pause=60):
    """
    Send a HTTP POST request to the Overpass API and return JSON response.

    Parameters
    ----------
    data : OrderedDict
        key-value pairs of parameters
    pause : int
        how long to pause in seconds before request, if None, will query API
        status endpoint to find when next slot is available
    error_pause : int
        how long to pause in seconds (in addition to `pause`) before re-trying
        request if error

    Returns
    -------
    response_json : dict
    """
    base_endpoint = settings.overpass_endpoint

    # resolve url to same IP even if there is server round-robin redirecting
    _config_dns(base_endpoint)

    # define the Overpass API URL, then construct a GET-style URL as a string
    # to hash to look up/save to cache
    url = base_endpoint.rstrip("/") + "/interpreter"
    prepared_url = requests.Request("GET", url, params=data).prepare().url
    cached_response_json = _retrieve_from_cache(prepared_url, check_remark=True)
    if cached_response_json is not None:
        # found response in the cache, return it instead of calling server
        return cached_response_json

    # if this URL is not already in the cache, pause, then request it.
    # BUG FIX: honor a caller-supplied pause. previously this_pause was only
    # assigned when pause was None, so passing a non-None pause raised
    # NameError on the log/sleep lines below.
    this_pause = _get_pause(base_endpoint) if pause is None else pause
    utils.log(f"Pausing {this_pause} seconds before making HTTP POST request")
    time.sleep(this_pause)

    # transmit the HTTP POST request
    utils.log(f"Post {prepared_url} with timeout={settings.timeout}")
    headers = _get_http_headers()
    response = requests.post(
        url, data=data, timeout=settings.timeout, headers=headers, **settings.requests_kwargs
    )
    sc = response.status_code

    # log the response size and domain
    size_kb = len(response.content) / 1000
    domain = re.findall(r"(?s)//(.*?)/", url)[0]
    utils.log(f"Downloaded {size_kb:,.1f}kB from {domain}")

    try:
        response_json = response.json()
        if "remark" in response_json:
            # server processed the query but attached a warning/note
            utils.log(f'Server remark: "{response_json["remark"]}"', level=lg.WARNING)
    except Exception:  # pragma: no cover
        if sc in {429, 504}:
            # 429 is 'too many requests' and 504 is 'gateway timeout' from
            # server overload: handle these by pausing then recursively
            # re-trying until we get a valid response from the server
            this_pause = error_pause + _get_pause(base_endpoint)
            utils.log(f"{domain} returned {sc}: retry in {this_pause} secs", level=lg.WARNING)
            time.sleep(this_pause)
            response_json = overpass_request(data, pause, error_pause)
        else:
            # else, this was an unhandled status code, throw an exception
            utils.log(f"{domain} returned {sc}", level=lg.ERROR)
            raise Exception(f"Server returned\n{response} {response.reason}\n{response.text}")

    _save_to_cache(prepared_url, response_json, sc)
    return response_json
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'CiscoIetfPwMplsMib.Cpwvcmplsobjects' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsobjects',
False,
[
_MetaInfoClassMember('cpwVcMplsInboundIndexNext', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object contains an appropriate value to
be used for cpwVcMplsInboundIndex when creating
entries in the cpwVcMplsInboundTable. The value
0 indicates that no unassigned entries are
available. To obtain the cpwVcMplsInboundIndex
value for a new entry, the manager issues a
management protocol retrieval operation to obtain
the current value of this object. After each
retrieval, the agent should modify the value to
the next unassigned index, however the agent MUST
NOT assume such retrieval will be done for each
row created.
''',
'cpwvcmplsinboundindexnext',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundIndexNext', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object contains an appropriate value to
be used for cpwVcMplsOutboundIndex when creating
entries in the cpwVcMplsOutboundTable. The value
0 indicates that no unassigned entries are
available. To obtain the cpwVcMplsOutboundIndex
value for a new entry, the manager issues a
management protocol retrieval operation to obtain
the current value of this object. After each
retrieval, the agent should modify the value to
the next unassigned index, however the agent MUST
NOT assume such retrieval will be done for each
row created.
''',
'cpwvcmplsoutboundindexnext',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsObjects',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry.CpwvcmplsexpbitsmodeEnum' : _MetaInfoEnum('CpwvcmplsexpbitsmodeEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB',
{
'outerTunnel':'outerTunnel',
'specifiedValue':'specifiedValue',
'serviceDependant':'serviceDependant',
}, 'CISCO-IETF-PW-MPLS-MIB', _yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB']),
'CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry',
False,
[
_MetaInfoClassMember('cpwVcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'cpwvcindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsExpBits', ATTRIBUTE, 'int' , None, None,
[('0', '7')], [],
''' Set by the operator to indicate the MPLS EXP bits to be
used on the VC shim label if cpwVcMplsExpBitsMode is
specifiedValue(2), zero otherwise.
''',
'cpwvcmplsexpbits',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsExpBitsMode', REFERENCE_ENUM_CLASS, 'CpwvcmplsexpbitsmodeEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry.CpwvcmplsexpbitsmodeEnum',
[], [],
''' Set by the operator to indicate the way the VC shim label
EXP bits are to be determined. The value of outerTunnel(1)
is used where there is an outer tunnel - cpwVcMplsMplsType
is mplsTe or mplsNonTe. Note that in this case there is no
need to mark the VC label with the EXP bits since the VC
label is not visible to the intermediate nodes.
If there is no outer tunnel, specifiedValue(2) indicate
that the value is specified by cpwVcMplsExpBits, and
serviceDependant(3) indicate that the EXP bits are setup
based on a rule specified in the emulated service specific
tables, for example when the EXP bits are a function of
802.1p marking for Ethernet emulated service.
''',
'cpwvcmplsexpbitsmode',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsLocalLdpEntityID', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The local LDP Entity index of the LDP entity to be used
for this VC on the local node. Should be set to all zeros
if not used.
''',
'cpwvcmplslocalldpentityid',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsLocalLdpID', ATTRIBUTE, 'str' , None, None,
[], [],
''' The local LDP identifier of the LDP entity creating
this VC in the local node. As the VC labels are always
set from the per platform label space, the last two octets
in the LDP ID MUST be always both zeros.
''',
'cpwvcmplslocalldpid',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsMplsType', REFERENCE_BITS, 'Cpwvcmplsmplstype' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry.Cpwvcmplsmplstype',
[], [],
''' Set by the operator to indicate the outer tunnel types, if
exists. mplsTe is used if the outer tunnel was set-up by
MPLS-TE, and mplsNonTe is used the outer tunnel was set up
by LDP or manually. Combination of mplsTe and mplsNonTe
may exist in case of outer tunnel protection.
vcOnly is used if there is no outer tunnel label. vcOnly
cannot be combined with mplsNonTe or mplsTe.
''',
'cpwvcmplsmplstype',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsPeerLdpID', ATTRIBUTE, 'str' , None, None,
[], [],
''' The peer LDP identifier as identified from the LDP
session. Should be zero if not relevant or not known yet.
''',
'cpwvcmplspeerldpid',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsStorageType', REFERENCE_ENUM_CLASS, 'StoragetypeEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'StoragetypeEnum',
[], [],
''' This variable indicates the storage type for this row.
''',
'cpwvcmplsstoragetype',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsTtl', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Set by the operator to indicate the VC TTL bits to be used
on the VC shim label.
''',
'cpwvcmplsttl',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsEntry',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplstable' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplstable',
False,
[
_MetaInfoClassMember('cpwVcMplsEntry', REFERENCE_LIST, 'Cpwvcmplsentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry',
[], [],
''' A row in this table represents parameters specific to MPLS
PSN for a pseudo wire connection (VC). The row is created
automatically by the local agent if the cpwVcPsnType is
MPLS. It is indexed by cpwVcIndex, which uniquely
identifying a singular connection.
''',
'cpwvcmplsentry',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsTable',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable.Cpwvcmplsoutboundentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable.Cpwvcmplsoutboundentry',
False,
[
_MetaInfoClassMember('cpwVcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'cpwvcindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsOutboundIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Arbitrary index for enabling multiple rows per VC in
this table. Next available free index can be retrieved
using cpwVcMplsOutboundIndexNext.
''',
'cpwvcmplsoutboundindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsOutboundIfIndex', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' In case of VC only (no outer tunnel), this object holds
the ifIndex of the outbound port, otherwise set to zero.
''',
'cpwvcmplsoutboundifindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundLsrXcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' This object will be set by the operator. If the outer
label is defined in the MPL-LSR-MIB, i.e. set by LDP
or manually, this object points to the XC index
of the outer tunnel. Otherwise, it is set to zero.
''',
'cpwvcmplsoutboundlsrxcindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundRowStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' For creating, modifying, and deleting this row.
''',
'cpwvcmplsoutboundrowstatus',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundStorageType', REFERENCE_ENUM_CLASS, 'StoragetypeEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'StoragetypeEnum',
[], [],
''' This variable indicates the storage type for this object.
''',
'cpwvcmplsoutboundstoragetype',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundTunnelIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsoutboundtunnelindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundTunnelInstance', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsoutboundtunnelinstance',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundTunnelLclLSR', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsoutboundtunnellcllsr',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundTunnelPeerLSR', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsoutboundtunnelpeerlsr',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsOutboundEntry',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable',
False,
[
_MetaInfoClassMember('cpwVcMplsOutboundEntry', REFERENCE_LIST, 'Cpwvcmplsoutboundentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable.Cpwvcmplsoutboundentry',
[], [],
''' A row in this table represents a link between PW VC (that
require MPLS tunnels) and MPLS tunnel toward the PSN.
In the case of VC only, it associate the VC with the
interface that shall carry the VC.
This table is indexed by the pwVcIndex and an additional
index enabling multiple rows for the same VC index.
At least one entry is created in this table by the operator
for each PW VC that requires MPLS PSN. Note that the first
entry for each VC can be indexed by cpwVcMplsOutboundIndex
equal zero without a need for retrieval of
cpwVcMplsOutboundIndexNext.
This table points to the appropriate MPLS MIB. In the case
of MPLS-TE, the 4 variables relevant to the indexing of
a TE MPLS tunnel are set as in Srinivasan, et al, <draft-
ietf-mpls-te-mib>.
In case of Non-TE MPLS (an outer tunnel label assigned by
LDP or manually) the table points to the XC entry in the
LSR MIB as in Srinivasan, et al, <draft-ietf-mpls-lsr-mib>.
In case of VC only (no outer tunnel) the ifIndex of the
port to carry the VC is configured.
Each VC may have multiple rows in this tables if protection
is available at the outer tunnel level, each row may be of
different type except for VC only, on which only rows with
ifIndex of the port are allowed.
''',
'cpwvcmplsoutboundentry',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsOutboundTable',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsinboundtable.Cpwvcmplsinboundentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsinboundtable.Cpwvcmplsinboundentry',
False,
[
_MetaInfoClassMember('cpwVcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' ''',
'cpwvcindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsInboundIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Arbitrary index for enabling multiple rows per VC in
this table. Next available free index can be retrieved
using cpwVcMplsInboundIndexNext.
''',
'cpwvcmplsinboundindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsInboundIfIndex', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' In case of VC only (no outer tunnel), this object holds the
ifIndex of the inbound port, otherwise set to zero.
''',
'cpwvcmplsinboundifindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundLsrXcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' If the outer label is defined in the MPL-LSR-MIB, i.e. set
by LDP or manually, this object points to the XC index
of the outer tunnel. Otherwise, it is set to zero.
''',
'cpwvcmplsinboundlsrxcindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundRowStatus', REFERENCE_ENUM_CLASS, 'RowstatusEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'RowstatusEnum',
[], [],
''' For creating, modifying, and deleting this row.
''',
'cpwvcmplsinboundrowstatus',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundStorageType', REFERENCE_ENUM_CLASS, 'StoragetypeEnum' , 'ydk.models.cisco_ios_xe.SNMPv2_TC', 'StoragetypeEnum',
[], [],
''' This variable indicates the storage type for this row.
''',
'cpwvcmplsinboundstoragetype',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundTunnelIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsinboundtunnelindex',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundTunnelInstance', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsinboundtunnelinstance',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundTunnelLclLSR', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsinboundtunnellcllsr',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsInboundTunnelPeerLSR', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Part of set of indexes for outbound tunnel in the case of
MPLS-TE outer tunnel, otherwise set to zero.
''',
'cpwvcmplsinboundtunnelpeerlsr',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsInboundEntry',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsinboundtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsinboundtable',
False,
[
_MetaInfoClassMember('cpwVcMplsInboundEntry', REFERENCE_LIST, 'Cpwvcmplsinboundentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsinboundtable.Cpwvcmplsinboundentry',
[], [],
''' A row in this table represents a link between PW VCs (that
require MPLS tunnels) and MPLS tunnel for packets arriving
from the PSN.
This table is indexed by the set of indexes used to
identify the VC - cpwVcIndex and an additional
index enabling multiple rows for the same VC index.
Note that the first entry for each VC can be indexed by
cpwVcMplsOutboundIndex equal zero without a need for
retrieval of cpwVcMplsInboundIndexNext.
An entry is created in this table either automatically by
the local agent or created manually by the operator in
cases that strict mode is required.
Note that the control messages contain VC ID and VC type,
which together with the remote IP address identify the
cpwVcIndex in the local node.
This table points to the appropriate MPLS MIB. In the case
of MPLS-TE, the 4 variables relevant to the indexing of a
TE MPLS tunnel are set as in Srinivasan, et al, <draft-
ietf-mpls-te-mib>.
In case of non-TE MPLS tunnel (an outer tunnel label
assigned by LDP or manually) the table points to the XC
entry in the MPLS-LSR-MIB as in Srinivasan, et al, <draft-
ietf-mpls-lsr-mib>.
Each VC may have multiple rows in this tables if protection
is available at the outer tunnel level, each row may be of
different type except for VC only, on which only rows with
ifIndex of the port are allowed.
''',
'cpwvcmplsinboundentry',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsInboundTable',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry.CpwvcmplsnontemappingtunneldirectionEnum' : _MetaInfoEnum('CpwvcmplsnontemappingtunneldirectionEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB',
{
'outbound':'outbound',
'inbound':'inbound',
}, 'CISCO-IETF-PW-MPLS-MIB', _yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB']),
'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry',
False,
[
_MetaInfoClassMember('cpwVcMplsNonTeMappingTunnelDirection', REFERENCE_ENUM_CLASS, 'CpwvcmplsnontemappingtunneldirectionEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry.CpwvcmplsnontemappingtunneldirectionEnum',
[], [],
''' Identifies if the row represent an outbound or inbound
mapping.
''',
'cpwvcmplsnontemappingtunneldirection',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsNonTeMappingXcTunnelIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Index for the conceptual XC row identifying Tunnel to VC
mappings when the outer tunnel is created by the MPLS-LSR-
MIB, Zero otherwise.
''',
'cpwvcmplsnontemappingxctunnelindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsNonTeMappingIfIndex', ATTRIBUTE, 'int' , None, None,
[('0', '2147483647')], [],
''' Identify the port on which the VC is carried for VC only
case.
''',
'cpwvcmplsnontemappingifindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsNonTeMappingVcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value that represent the VC in the cpwVcTable.
''',
'cpwvcmplsnontemappingvcindex',
'CISCO-IETF-PW-MPLS-MIB', True),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsNonTeMappingEntry',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable',
False,
[
_MetaInfoClassMember('cpwVcMplsNonTeMappingEntry', REFERENCE_LIST, 'Cpwvcmplsnontemappingentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry',
[], [],
''' A row in this table represents the association
between the PW VC and it's non TE MPLS outer Tunnel
it's physical interface if there is no outer tunnel
(VC only).
An application can use this table to quickly retrieve the
PW carried over specific non-TE MPLS outer tunnel or
physical interface.
The table in indexed by the XC index for MPLS Non-TE
tunnel, or ifIndex of the port in VC only case, the
direction of the VC in the specific entry and the VCIndex.
The same table is used in both inbound and outbound
directions, but in a different row for each direction. If
the inbound association is not known, no rows should exist
for it.
Rows are created by the local agent when all the
association data is available for display.
''',
'cpwvcmplsnontemappingentry',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsNonTeMappingTable',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry.CpwvcmplstemappingtunneldirectionEnum' : _MetaInfoEnum('CpwvcmplstemappingtunneldirectionEnum', 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB',
{
'outbound':'outbound',
'inbound':'inbound',
}, 'CISCO-IETF-PW-MPLS-MIB', _yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB']),
'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry',
False,
[
_MetaInfoClassMember('cpwVcMplsTeMappingTunnelDirection', REFERENCE_ENUM_CLASS, 'CpwvcmplstemappingtunneldirectionEnum' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry.CpwvcmplstemappingtunneldirectionEnum',
[], [],
''' Identifies if the row represent an outbound or inbound
mapping.
''',
'cpwvcmplstemappingtunneldirection',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsTeMappingTunnelIndex', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Primary index for the conceptual row identifying the
MPLS-TE tunnel.
''',
'cpwvcmplstemappingtunnelindex',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsTeMappingTunnelInstance', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Identifies an instance of the MPLS-TE tunnel.
''',
'cpwvcmplstemappingtunnelinstance',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsTeMappingTunnelPeerLsrID', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Identifies an Peer LSR when the outer tunnel is MPLS-TE
based.
''',
'cpwvcmplstemappingtunnelpeerlsrid',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsTeMappingTunnelLocalLsrID', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' Identifies the local LSR.
''',
'cpwvcmplstemappingtunnellocallsrid',
'CISCO-IETF-PW-MPLS-MIB', True),
_MetaInfoClassMember('cpwVcMplsTeMappingVcIndex', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value that represent the VC in the cpwVcTable.
''',
'cpwvcmplstemappingvcindex',
'CISCO-IETF-PW-MPLS-MIB', True),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsTeMappingEntry',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib.Cpwvcmplstemappingtable',
False,
[
_MetaInfoClassMember('cpwVcMplsTeMappingEntry', REFERENCE_LIST, 'Cpwvcmplstemappingentry' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry',
[], [],
''' A row in this table represents the association
between a PW VC and it's MPLS-TE outer Tunnel.
An application can use this table to quickly retrieve the
PW carried over specific TE MPLS outer tunnel.
The table in indexed by the 4 indexes of a TE tunnel,
the direction of the VC specific entry and the VcIndex.
The same table is used in both inbound and outbound
directions, a different row for each direction. If the
inbound association is not known, no rows should exist for
it.
Rows are created by the local agent when all the
association data is available for display.
''',
'cpwvcmplstemappingentry',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'cpwVcMplsTeMappingTable',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
'CiscoIetfPwMplsMib' : {
'meta_info' : _MetaInfoClass('CiscoIetfPwMplsMib',
False,
[
_MetaInfoClassMember('cpwVcMplsInboundTable', REFERENCE_CLASS, 'Cpwvcmplsinboundtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsinboundtable',
[], [],
''' This table associates VCs using MPLS PSN with the inbound
MPLS tunnels (i.e. for packets coming from the PSN),
if such association is desired (mainly for security
reasons).
''',
'cpwvcmplsinboundtable',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsNonTeMappingTable', REFERENCE_CLASS, 'Cpwvcmplsnontemappingtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable',
[], [],
''' This table maps an inbound/outbound Tunnel to a VC in non-
TE applications.
''',
'cpwvcmplsnontemappingtable',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsObjects', REFERENCE_CLASS, 'Cpwvcmplsobjects' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsobjects',
[], [],
''' ''',
'cpwvcmplsobjects',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsOutboundTable', REFERENCE_CLASS, 'Cpwvcmplsoutboundtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable',
[], [],
''' This table associates VCs using MPLS PSN with the outbound
MPLS tunnels (i.e. toward the PSN) or the physical
interface in case of VC only.
''',
'cpwvcmplsoutboundtable',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsTable', REFERENCE_CLASS, 'Cpwvcmplstable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstable',
[], [],
''' This table specifies information for VC to be carried over
MPLS PSN.
''',
'cpwvcmplstable',
'CISCO-IETF-PW-MPLS-MIB', False),
_MetaInfoClassMember('cpwVcMplsTeMappingTable', REFERENCE_CLASS, 'Cpwvcmplstemappingtable' , 'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB', 'CiscoIetfPwMplsMib.Cpwvcmplstemappingtable',
[], [],
''' This table maps an inbound/outbound Tunnel to a VC in
MPLS-TE applications.
''',
'cpwvcmplstemappingtable',
'CISCO-IETF-PW-MPLS-MIB', False),
],
'CISCO-IETF-PW-MPLS-MIB',
'CISCO-IETF-PW-MPLS-MIB',
_yang_ns._namespaces['CISCO-IETF-PW-MPLS-MIB'],
'ydk.models.cisco_ios_xe.CISCO_IETF_PW_MPLS_MIB'
),
},
}
# Wire up parent links so each generated _MetaInfoClass knows its enclosing
# container, mirroring the MIB containment tree: table entries point at their
# tables, and tables/objects point at the CiscoIetfPwMplsMib module root.
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstable.Cpwvcmplsentry']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstable']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable.Cpwvcmplsoutboundentry']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsinboundtable.Cpwvcmplsinboundentry']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsinboundtable']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable.Cpwvcmplsnontemappingentry']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstemappingtable.Cpwvcmplstemappingentry']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstemappingtable']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsobjects']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstable']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsoutboundtable']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsinboundtable']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplsnontemappingtable']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
_meta_table['CiscoIetfPwMplsMib.Cpwvcmplstemappingtable']['meta_info'].parent =_meta_table['CiscoIetfPwMplsMib']['meta_info']
|
|
import datetime
import os
from django.conf import settings
from django.db.models.fields import Field
from django.core.files.base import File, ContentFile
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile, get_image_dimensions
from django.core.files.uploadedfile import UploadedFile
from django.utils.functional import curry
from django.db.models import signals
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext_lazy, ugettext as _
from django import forms
from django.db.models.loading import cache
class FieldFile(File):
    """File wrapper bound to a model instance and its FileField.

    Only the file's name is stored; the actual file is reached lazily
    through the field's storage backend, so no I/O happens until one of
    the accessors below is used.
    """
    def __init__(self, instance, field, name):
        self.instance = instance
        self.field = field
        self.storage = field.storage
        # Normalize a missing name to the empty string so boolean tests work.
        self._name = name or u''
        self._closed = False
    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatibility.
        # NOTE(review): no matching __ne__/__hash__ override here — confirm
        # callers never rely on != or hashing of FieldFile instances.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other
    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.
    def _require_file(self):
        # Guard used by every accessor below: fail loudly when no file is set.
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
    def _get_file(self):
        # Lazily open the file from storage and cache the handle in _file.
        self._require_file()
        if not hasattr(self, '_file'):
            self._file = self.storage.open(self.name, 'rb')
        return self._file
    file = property(_get_file)
    def _get_path(self):
        # Filesystem path as reported by the storage backend.
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)
    def _get_url(self):
        # Public URL as reported by the storage backend.
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)
    def _get_size(self):
        # Size in bytes as reported by the storage backend.
        self._require_file()
        return self.storage.size(self.name)
    size = property(_get_size)
    def open(self, mode='rb'):
        self._require_file()
        return super(FieldFile, self).open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True
    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.
    def save(self, name, content, save=True):
        """Store ``content`` under ``name`` and update the model field.

        The storage backend may alter the final name (e.g. to avoid
        collisions); the name it returns is written back to the instance.
        """
        name = self.field.generate_filename(self.instance, name)
        self._name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)
        # Update the filesize cache
        self._size = len(content)
        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True
    def delete(self, save=True):
        """Delete the file from storage and clear the model field."""
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self._file
        self.storage.delete(self.name)
        self._name = None
        setattr(self.instance, self.field.name, self.name)
        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size
        if save:
            self.instance.save()
    delete.alters_data = True
    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'_name': self.name, '_closed': False}
class FileDescriptor(object):
    """Descriptor installed on model classes by FileField.contribute_to_class.

    On attribute access it wraps the raw stored value (usually a string
    from the database) in the field's attr_class (a FieldFile) so the
    model attribute always exposes the file API.
    """
    def __init__(self, field):
        self.field = field
    def __get__(self, instance=None, owner=None):
        if instance is None:
            # Class-level access has no per-instance value to return.
            # Bug fix: the original used the Python-2-only `raise X, msg`
            # form, called self.field.name as a function, and referenced a
            # nonexistent self.owner, so this raise itself crashed.
            raise AttributeError(
                "%s can only be accessed from %s instances." % (
                    self.field.name, owner.__name__))
        value = instance.__dict__[self.field.name]
        if not isinstance(value, FieldFile):
            # Raw value (e.g. a name string loaded from the DB): wrap it.
            instance.__dict__[self.field.name] = self.field.attr_class(
                instance, self.field, value)
        elif not hasattr(value, 'field'):
            # The FieldFile was pickled, so some attributes need to be reset.
            value.instance = instance
            value.field = self.field
            value.storage = self.field.storage
        return instance.__dict__[self.field.name]
    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """Model field that persists an uploaded file through a storage
    backend and stores the file's name in the database."""
    # Wrapper class used for values exposed on model instances.
    attr_class = FieldFile
    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # A file column can be neither a primary key nor unique.
        for forbidden in ('primary_key', 'unique'):
            if forbidden in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (forbidden, self.__class__))
        self.storage = storage or default_storage
        self.upload_to = upload_to
        # A callable upload_to takes over filename generation entirely.
        if callable(upload_to):
            self.generate_filename = upload_to
        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)
    def get_internal_type(self):
        return "FileField"
    def get_db_prep_lookup(self, lookup_type, value):
        # Allow lookups against File-like objects by comparing their names.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_db_prep_lookup(lookup_type, value)
    def get_db_prep_value(self, value):
        """Returns field's value prepared for saving into a database."""
        # Form-provided File objects must become text before insertion.
        if value is None:
            return None
        return unicode(value)
    def contribute_to_class(self, cls, name):
        super(FileField, self).contribute_to_class(cls, name)
        # Install the descriptor and clean files up when rows are deleted.
        setattr(cls, self.name, FileDescriptor(self))
        signals.post_delete.connect(self.delete_file, sender=cls)
    def delete_file(self, instance, sender, **kwargs):
        """post_delete handler: drop the row's file from storage when safe."""
        stored = getattr(instance, self.attname)
        if not stored:
            return
        # Delete from the backend only when the file is not the field
        # default and no surviving row still references it; otherwise just
        # close it so it doesn't tie up resources.
        if stored.name != self.default and \
           not sender._default_manager.filter(**{self.name: stored.name}):
            stored.delete(save=False)
        else:
            stored.close()
    def get_directory_name(self):
        # Expand strftime patterns in upload_to into a normalized path.
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))
    def get_filename(self, filename):
        # Sanitize the basename through the storage backend.
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))
    def save_form_data(self, instance, data):
        # Only freshly uploaded files need saving; plain strings are kept.
        if data and isinstance(data, UploadedFile):
            getattr(instance, self.name).save(data.name, data, save=False)
    def formfield(self, **kwargs):
        options = {'form_class': forms.FileField}
        # An existing file means the form need not require a new one.
        if 'initial' in kwargs:
            options['required'] = False
        options.update(kwargs)
        return super(FileField, self).formfield(**options)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile for images that keeps denormalized width/height model
    fields in sync with the stored file."""
    def save(self, name, content, save=True):
        # Repopulate the image dimension cache.
        self._dimensions_cache = get_image_dimensions(content)
        # Update width/height fields, if needed. self.width/self.height come
        # from ImageFile and read the cache populated just above.
        if self.field.width_field:
            setattr(self.instance, self.field.width_field, self.width)
        if self.field.height_field:
            setattr(self.instance, self.field.height_field, self.height)
        super(ImageFieldFile, self).save(name, content, save)
    def delete(self, save=True):
        # Clear the image dimensions cache
        if hasattr(self, '_dimensions_cache'):
            del self._dimensions_cache
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """FileField specialized for images; can mirror the image's
    dimensions onto the given width/height model fields."""
    attr_class = ImageFieldFile
    def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
        self.width_field = width_field
        self.height_field = height_field
        FileField.__init__(self, verbose_name, name, **kwargs)
    def formfield(self, **kwargs):
        options = {'form_class': forms.ImageField}
        options.update(kwargs)
        return super(ImageField, self).formfield(**options)
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to load CIFAR dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from data import augment as augment_lib
from data import data_util
import numpy as np
import tensorflow as tf
# Dataset roots, resolved from the ML_DATA environment variable.
# NOTE(review): os.getenv('ML_DATA') returns None when the variable is unset,
# which makes os.path.join raise at import time — confirm ML_DATA is always
# exported in the runtime environment.
CIFAR_DIR = os.path.join(os.getenv('ML_DATA'), 'cifar')
CIFAR_LT_DIR = os.path.join(os.getenv('ML_DATA'), 'cifar-lt')
CIFAR_DARP_DIR = os.path.join(os.getenv('ML_DATA'), 'cifar-darp')
class CIFAR10(object):
  """CIFAR10 dataloader.

  Loads pre-generated TFRecords and builds labeled / unlabeled / test
  tf.data pipelines.
  """
  def __init__(self):
    self.load_raw_data()
  def load_raw_data(self):
    """Loads CIFAR10 raw data from pre-generated TFRecord files."""
    self.data_name = 'cifar10'
    (x_train, y_train) = data_util.load_tfrecord(
        os.path.join(CIFAR_DIR, 'cifar10-train.tfrecord'))
    (x_test, y_test) = data_util.load_tfrecord(
        os.path.join(CIFAR_DIR, 'cifar10-test.tfrecord'))
    self.x_train = x_train
    self.y_train = y_train
    self.x_test = x_test
    self.y_test = y_test
    # The full training split doubles as the unlabeled pool.
    self.x_unlab = x_train
    self.y_unlab = y_train
    self.num_class = 10
  def get_prefix(self,
                 fold=1,
                 num_labeled_per_class=10,
                 augment=None,
                 is_balanced=False):
    """Gets prefix for file name.

    Args:
      fold: split fold / RNG seed identifier.
      num_labeled_per_class: labeled examples per class; a non-positive
        value denotes the fully labeled setting.
      augment: list of augmentation-name lists encoded into the prefix.
      is_balanced: whether the labeled split is class-balanced.
    """
    if num_labeled_per_class > 0:
      self.fname = '{}.{}.{}@{}'.format(
          self.data_name, '.'.join(['_'.join(aug) for aug in augment]), fold,
          num_labeled_per_class * self.num_class)
      if not is_balanced:
        self.fname = '{}.{}'.format(self.fname, 'imbalance')
    else:
      self.fname = '{}.{}@full'.format(self.data_name, augment[0][0])
  def get_split(self, fold=1, num_labeled_per_class=10, is_balanced=True):
    """Subsamples the labeled training split in place.

    Args:
      fold: seed for the subsampling RNG.
      num_labeled_per_class: number of labeled examples kept per class.
      is_balanced: if True, sample exactly num_labeled_per_class per
        class; otherwise sample the same total uniformly at random.
    """
    np.random.seed(fold)
    if is_balanced:
      # Group training indices by label, then keep the first
      # num_labeled_per_class of each shuffled group.
      class_id = {}
      for i, y in enumerate(self.y_train):
        if y not in class_id:
          class_id[y] = []
        class_id[y].append(i)
      labeled_idx = []
      for c in sorted(class_id):
        np.random.shuffle(class_id[c])
        labeled_idx += class_id[c][:num_labeled_per_class]
      self.x_train = self.x_train[labeled_idx]
      self.y_train = self.y_train[labeled_idx]
    else:
      # Uniform sampling without class balancing.
      # Bug fix: this branch referenced self.n_class, which is never
      # defined (the attribute is num_class), so it always raised
      # AttributeError.
      num_labeled = num_labeled_per_class * self.num_class
      perm_idx = np.random.permutation(len(self.y_train))
      self.x_train = self.x_train[perm_idx][:num_labeled]
      self.y_train = self.y_train[perm_idx][:num_labeled]
  def load_dataset(self,
                   fold=1,
                   num_labeled_per_class=10,
                   is_balanced=True,
                   input_shape=(32, 32, 3),
                   augment=None,
                   batch_size=64,
                   batch_size_unlab=0,
                   num_workers=4,
                   strategy=None,
                   **kwargs):
    """Loads dataset.

    Returns:
      [train_loader, None, test_loader]; in the semi-supervised setting
      (batch_size_unlab > 0) the first entry is the train loader zipped
      with the unlabeled loader.
    """
    del kwargs
    # Generate labeled data.
    if num_labeled_per_class > 0:
      self.get_split(
          fold=fold,
          num_labeled_per_class=num_labeled_per_class,
          is_balanced=is_balanced)
    # Construct dataset; the third component is a per-example index.
    train_data = (self.x_train, self.y_train,
                  np.expand_dims(np.arange(len(self.y_train)), axis=1))
    test_data = (self.x_test, self.y_test,
                 np.expand_dims(np.arange(len(self.y_test)), axis=1))
    if len(train_data[0]) < batch_size:
      # If the number of examples is less than the batch size, replicate
      # the data until a full batch can always be drawn.
      # (np.ceil replaces the np.math alias removed in NumPy 2.0.)
      multiple = int(2 * (np.ceil(batch_size / len(train_data[0]))))
      train_data = (np.concatenate([train_data[0] for _ in range(multiple)],
                                   axis=0),
                    np.concatenate([train_data[1] for _ in range(multiple)],
                                   axis=0),
                    np.concatenate([train_data[2] for _ in range(multiple)],
                                   axis=0))
    train_set = data_util.ImageFromMemory(
        data=train_data, input_shape=input_shape)
    test_set = data_util.ImageFromMemory(
        data=test_data, input_shape=input_shape)
    aug_args = {'size': input_shape[0]}
    augs, augs_for_prefix = [], []
    # Each augment entry is (aug_name_list, num_aug); expand or truncate
    # so that len(aug) == num_aug before building the pipelines.
    for aug in augment:
      aug, num_aug = aug
      if num_aug == 0:
        continue
      if len(aug) == num_aug:
        augs_for_prefix.append(aug)
      elif len(aug) < num_aug:
        # A single aug type repeated num_aug times.
        assert len(aug) == 1, (
            'cannot have multiple aug types if num_aug is larger than the '
            'number of aug types')
        augs_for_prefix.append(['{}{}'.format(num_aug, a) for a in aug])
        aug *= num_aug
      else:
        augs_for_prefix.append(aug)
        num_aug = len(aug)
      augs.append(augment_lib.retrieve_augment(aug, **aug_args))
    self.get_prefix(
        fold=fold,
        num_labeled_per_class=num_labeled_per_class,
        augment=augs_for_prefix,
        is_balanced=is_balanced)
    train_loader = train_set.input_fn(
        is_training=True,
        batch_size=batch_size,
        aug_list=augs[0][:-1],
        dtype=tf.float32,
        num_cores=num_workers,
        strategy=strategy)
    test_loader = test_set.input_fn(
        is_training=False,
        batch_size=100,
        aug_list=augs[0][-1],
        dtype=tf.float32,
        num_cores=max(num_workers // 4, 1),
        strategy=strategy)
    # Semi-supervised setting: add an unlabeled loader.
    if batch_size_unlab > 0:
      unlab_data = (self.x_unlab, self.y_unlab,
                    np.expand_dims(np.arange(len(self.y_unlab)), axis=1))
      unlab_set = data_util.ImageFromMemory(
          data=unlab_data, input_shape=input_shape)
      # Unlabeled augmentations: every pipeline after the first, minus
      # each pipeline's trailing eval transform.
      augs_unlab = [item for sublist in augs[1:] for item in sublist[:-1]]
      unlab_loader = unlab_set.input_fn(
          is_training=True,
          batch_size=batch_size_unlab,
          aug_list=augs_unlab,
          dtype=tf.float32,
          num_cores=num_workers,
          strategy=strategy)
      return [
          tf.data.Dataset.zip((train_loader, unlab_loader)), None, test_loader
      ]
    return [train_loader, None, test_loader]
class CIFAR100(CIFAR10):
  """CIFAR100 dataset."""
  def load_raw_data(self):
    """Loads CIFAR100 raw data."""
    self.data_name = 'cifar100'
    train_path = os.path.join(CIFAR_DIR, 'cifar100-train.tfrecord')
    test_path = os.path.join(CIFAR_DIR, 'cifar100-test.tfrecord')
    self.x_train, self.y_train = data_util.load_tfrecord(train_path)
    self.x_test, self.y_test = data_util.load_tfrecord(test_path)
    # The full training split doubles as the unlabeled pool.
    self.x_unlab, self.y_unlab = self.x_train, self.y_train
    self.num_class = 100
class CIFAR10LT(object):
  """CIFAR10 long-tail data loader.

  Loads pre-generated long-tailed CIFAR10 TFRecords and builds labeled,
  unlabeled and test tf.data pipelines. The labeled/unlabeled split is a
  per-class percentage rather than a fixed count.
  """
  def __init__(self, class_im_ratio):
    # class_im_ratio: N_K / N_1, minor-to-major class size ratio.
    self.load_raw_data(class_im_ratio)
  def load_raw_data(self, class_im_ratio):
    """Loads CIFAR10 long-tail raw data."""
    self.data_name = 'cifar10lt@{}'.format(class_im_ratio)
    dir_path = os.path.join(CIFAR_LT_DIR,
                            'cifar-10-data-im-{}'.format(class_im_ratio))
    data_shape = self.load_raw_data_shape()
    (x_train, y_train) = data_util.load_tfrecord(
        os.path.join(dir_path, 'train.tfrecords'), is_raw=True, **data_shape)
    (x_test, y_test) = data_util.load_tfrecord(
        os.path.join(dir_path, 'eval.tfrecords'), is_raw=True, **data_shape)
    self.x_train = x_train
    self.y_train = y_train
    self.x_test = x_test
    self.y_test = y_test
    # The full training split doubles as the unlabeled pool; the
    # *_unlab_test copies are narrowed to the unlabeled indices later by
    # get_split().
    self.x_unlab = x_train
    self.y_unlab = y_train
    self.x_unlab_test = x_train
    self.y_unlab_test = y_train
    self.class_im_ratio = class_im_ratio
    self.num_class = 10
    # The class distribution is long-tailed. For K classes, the most major class
    # has N_1 samples, and the most minor class has N_K = N_1 * class_im_ratio
    # samples. The k-th class has N_k = N_1 * class_im_ratio^((k - 1) / (K - 1))
    # samples. Reference: DARP (https://arxiv.org/pdf/2007.08844.pdf)
    e = tf.cast(tf.range(self.num_class), tf.float32) / (self.num_class - 1)
    base = tf.ones_like(e) * self.class_im_ratio
    self.gt_p_data = tf.pow(base, e)
    self.gt_p_data /= tf.math.reduce_sum(self.gt_p_data)
    print('ground truth classs discribution: {}'.format(self.gt_p_data))
  def load_raw_data_shape(self):
    # Raw-record decode shape passed to the TFRecord parser.
    return {'depth': 3, 'height': 32, 'width': 32}
  def get_prefix(self,
                 fold=1,
                 augment=None,
                 percent_labeled_per_class=0.1,
                 update_mode='distribution',
                 alpha=3):
    """Gets prefix for file name.

    Raises:
      NotImplementedError: if update_mode is not 'distribution' or 'all'.
    """
    self.fname = '{}.{}.{}@{}'.format(
        self.data_name, '.'.join(['_'.join(aug) for aug in augment]), fold,
        percent_labeled_per_class)
    if update_mode == 'distribution':
      self.fname = '{}_{}_{}'.format(self.fname, update_mode, int(alpha))
    elif update_mode == 'all':
      self.fname = '{}_{}'.format(self.fname, update_mode)
    else:
      raise NotImplementedError
  def get_split(self,
                fold=1,
                percent_labeled_per_class=0.1,
                update_mode='distribution',
                alpha=3,
                pseudo_label_list=None):
    """Gets labeled and unlabeled data split."""
    np.random.seed(fold)
    # Group training indices by class label.
    class_id = {}
    for i, y in enumerate(self.y_train):
      if y not in class_id:
        class_id[y] = []
      class_id[y].append(i)
    labeled_idx = []
    unlabeled_idx = []
    for c in sorted(class_id):
      np.random.shuffle(class_id[c])
      # Keep a fixed percentage of each class as labeled data.
      num_labeled_this_class = int(
          np.ceil(len(class_id[c]) * percent_labeled_per_class))
      print('class {} has {} images, {} labels'.format(c, len(class_id[c]),
                                                       num_labeled_this_class))
      labeled_idx += class_id[c][:num_labeled_this_class]
      unlabeled_idx += class_id[c][num_labeled_this_class:]
    if pseudo_label_list:
      # Extend the labeled set with pseudo-labeled unlabeled examples.
      x_picked = []
      y_picked = []
      if update_mode == 'distribution':
        # Per-class keep rate follows the reversed long-tail prior,
        # tempered by 1/alpha.
        sample_rate = self.gt_p_data[::-1] / self.gt_p_data[0]
        for c in range(self.num_class):
          num_picked = int(
              len(pseudo_label_list[c]) *
              np.math.pow(sample_rate[c], 1 / alpha))
          idx_picked = pseudo_label_list[c][:num_picked]
          # pseudo_label_list entries index into the unlabeled split.
          idx_picked = [unlabeled_idx[idx] for idx in idx_picked]
          x_picked.append(self.x_train[idx_picked])
          y_picked.append(np.ones_like(self.y_train[idx_picked]) * c)
          print('class {} is added {} pseudo images'.format(c, len(idx_picked)))
      elif update_mode == 'all':
        # Keep every provided pseudo label.
        for c in range(self.num_class):
          num_picked = len(pseudo_label_list[c])
          idx_picked = pseudo_label_list[c][:num_picked]
          idx_picked = [unlabeled_idx[idx] for idx in idx_picked]
          x_picked.append(self.x_train[idx_picked])
          y_picked.append(np.ones_like(self.y_train[idx_picked]) * c)
          print('class {} is added {} pseudo images'.format(c, len(idx_picked)))
      else:
        raise NotImplementedError
      x_picked.append(self.x_train[labeled_idx])
      y_picked.append(self.y_train[labeled_idx])
      self.x_train = np.concatenate(x_picked, axis=0)
      self.y_train = np.concatenate(y_picked, axis=0)
      print('update training set with mode {}'.format(update_mode))
    else:
      self.x_train = self.x_train[labeled_idx]
      self.y_train = self.y_train[labeled_idx]
      print('not update')
    print('{} train set images in total'.format(len(self.x_train)))
    # Narrow the unlabeled evaluation copies to the unlabeled indices.
    self.x_unlab_test = self.x_unlab_test[unlabeled_idx]
    self.y_unlab_test = self.y_unlab_test[unlabeled_idx]
  def load_dataset(self,
                   fold=1,
                   num_labeled_per_class=10,
                   input_shape=(32, 32, 3),
                   augment=None,
                   batch_size=64,
                   batch_size_unlab=0,
                   num_workers=4,
                   strategy=None,
                   **kwargs):
    """Loads dataset.

    Returns:
      [train_loader, unlab_test_loader, test_loader]; in the
      semi-supervised setting (batch_size_unlab > 0) the first entry is
      the train loader zipped with the unlabeled loader.
    """
    percent_labeled_per_class = kwargs.get('percent_labeled_per_class', 0.1)
    update_mode = kwargs.get('update_mode', 'all')
    alpha = kwargs.get('alpha', 3)
    pseudo_label_list = kwargs.get('pseudo_label_list', None)
    # Generate labeled data.
    if num_labeled_per_class > 0:
      self.get_split(
          fold=fold,
          percent_labeled_per_class=percent_labeled_per_class,
          update_mode=update_mode,
          alpha=alpha,
          pseudo_label_list=pseudo_label_list)
    # Construct dataset.
    train_data = (self.x_train, self.y_train,
                  np.expand_dims(np.arange(len(self.y_train)), axis=1))
    test_data = (self.x_test, self.y_test,
                 np.expand_dims(np.arange(len(self.y_test)), axis=1))
    unlab_test_data = (self.x_unlab_test, self.y_unlab_test,
                       np.expand_dims(
                           np.arange(len(self.y_unlab_test)), axis=1))
    if len(train_data[0]) < batch_size:
      # if number of examples is less than batch size,
      # we increase the number by replicating.
      multiple = int(2 * (np.math.ceil(batch_size / len(train_data[0]))))
      train_data = (np.concatenate([train_data[0] for _ in range(multiple)],
                                   axis=0),
                    np.concatenate([train_data[1] for _ in range(multiple)],
                                   axis=0),
                    np.concatenate([train_data[2] for _ in range(multiple)],
                                   axis=0))
    train_set = data_util.ImageFromMemory(
        data=train_data, input_shape=input_shape)
    test_set = data_util.ImageFromMemory(
        data=test_data, input_shape=input_shape)
    unlab_test_set = data_util.ImageFromMemory(
        data=unlab_test_data, input_shape=input_shape)
    aug_args = {'size': input_shape[0]}
    augs, augs_for_prefix = [], []
    # Each augment entry is (aug_name_list, num_aug); expand or truncate
    # so that len(aug) == num_aug before building the pipelines.
    for aug in augment:
      aug, num_aug = aug
      if num_aug == 0:
        continue
      if len(aug) == num_aug:
        augs_for_prefix.append(aug)
      elif len(aug) < num_aug:
        assert len(aug) == 1, (
            'cannot have multiple aug types if num_aug is larger than the '
            'number of aug types')
        augs_for_prefix.append(['{}{}'.format(num_aug, a) for a in aug])
        aug *= num_aug
      else:
        augs_for_prefix.append(aug)
        num_aug = len(aug)
      augs.append(augment_lib.retrieve_augment(aug, **aug_args))
    self.get_prefix(
        fold=fold,
        augment=augs_for_prefix,
        percent_labeled_per_class=percent_labeled_per_class,
        update_mode=update_mode,
        alpha=alpha)
    train_loader = train_set.input_fn(
        is_training=True,
        batch_size=batch_size,
        aug_list=augs[0][:-1],
        dtype=tf.float32,
        num_cores=num_workers,
        strategy=strategy)
    test_loader = test_set.input_fn(
        is_training=False,
        batch_size=100,
        aug_list=augs[0][-1],
        dtype=tf.float32,
        num_cores=max(num_workers // 4, 1),
        strategy=strategy)
    unlab_test_loader = unlab_test_set.input_fn(
        is_training=False,
        batch_size=100,
        aug_list=augs[0][-1],
        dtype=tf.float32,
        num_cores=max(num_workers // 4, 1),
        strategy=strategy)
    # Semi-supervised setting.
    if batch_size_unlab > 0:
      unlab_data = (self.x_unlab, self.y_unlab,
                    np.expand_dims(np.arange(len(self.y_unlab)), axis=1))
      unlab_set = data_util.ImageFromMemory(
          data=unlab_data, input_shape=input_shape)
      augs_unlab = []
      for sublist in augs[1:]:
        for item in sublist[:-1]:
          augs_unlab.append(item)
      unlab_loader = unlab_set.input_fn(
          is_training=True,
          batch_size=batch_size_unlab,
          aug_list=augs_unlab,
          dtype=tf.float32,
          num_cores=num_workers,
          strategy=strategy)
      return [
          tf.data.Dataset.zip((train_loader, unlab_loader)), unlab_test_loader,
          test_loader
      ]
    return [train_loader, unlab_test_loader, test_loader]
class CIFAR100LT(CIFAR10LT):
  """CIFAR100 long-tail data loader."""
  def load_raw_data(self, class_im_ratio):
    """Loads CIFAR100 long-tail raw data."""
    self.data_name = 'cifar100lt@{}'.format(class_im_ratio)
    dir_path = os.path.join(CIFAR_LT_DIR,
                            'cifar-100-data-im-{}'.format(class_im_ratio))
    shape_kwargs = {'depth': 3, 'height': 32, 'width': 32}
    self.x_train, self.y_train = data_util.load_tfrecord(
        os.path.join(dir_path, 'train.tfrecords'), is_raw=True, **shape_kwargs)
    self.x_test, self.y_test = data_util.load_tfrecord(
        os.path.join(dir_path, 'eval.tfrecords'), is_raw=True, **shape_kwargs)
    # The full training split doubles as the unlabeled pool and its
    # evaluation copy.
    self.x_unlab, self.y_unlab = self.x_train, self.y_train
    self.x_unlab_test, self.y_unlab_test = self.x_train, self.y_train
    self.num_class = 100
    self.class_im_ratio = class_im_ratio
    # Long-tail prior: N_k proportional to class_im_ratio^((k-1)/(K-1)),
    # normalized to a probability distribution.
    exponent = tf.cast(tf.range(self.num_class), tf.float32) / (
        self.num_class - 1)
    prior = tf.pow(tf.ones_like(exponent) * self.class_im_ratio, exponent)
    self.gt_p_data = prior / tf.math.reduce_sum(prior)
    print('ground truth classs discribution: {}'.format(self.gt_p_data))
class CIFAR10LTDARP(CIFAR10LT):
  """CIFAR10 long-tail data loader following DARP setting.

  Jaehyung Kim, Youngbum Hur, Sejun Park, Eunho Yang,
  Sung Ju Hwang, and Jinwoo Shin.
  Distribution Aligning Refinery of Pseudo-label for
  Imbalanced Semi-supervised Learning. (https://arxiv.org/abs/2007.08844)
  """
  def load_raw_data(self, class_im_ratio):
    """Loads the pre-split DARP CIFAR10 data.

    Args:
      class_im_ratio: imbalance ratio used in the DARP file names
        (N_1 / N_K); the stored self.class_im_ratio is its reciprocal.
    """
    self.data_name = 'cifar10ltdarp{}'.format(class_im_ratio)
    data_shape = {'depth': 3, 'height': 32, 'width': 32}
    (x_train, y_train) = data_util.load_tfrecord(
        os.path.join(
            CIFAR_DARP_DIR,
            'cifar10-{}-train-labeled.tfrecords'.format(int(class_im_ratio))),
        is_raw=True,
        **data_shape)
    (x_unlab_test, y_unlab_test) = data_util.load_tfrecord(
        os.path.join(
            CIFAR_DARP_DIR,
            'cifar10-{}-train-unlabeled.tfrecords'.format(int(class_im_ratio))),
        is_raw=True,
        **data_shape)
    (x_test, y_test) = data_util.load_tfrecord(
        os.path.join(CIFAR_DARP_DIR, 'cifar10-test.tfrecords'),
        is_raw=True,
        **data_shape)
    self.x_train = x_train
    self.y_train = y_train
    self.x_test = x_test
    self.y_test = y_test
    self.x_unlab_test = x_unlab_test
    self.y_unlab_test = y_unlab_test
    # The unlabeled pool is the union of labeled and unlabeled splits.
    self.x_unlab = np.concatenate([x_train, x_unlab_test], axis=0)
    self.y_unlab = np.concatenate([y_train, y_unlab_test], axis=0)
    self.num_class = 10
    # gt_p_data below expects the decreasing ratio N_K / N_1.
    self.class_im_ratio = 1 / class_im_ratio
    pow_value = tf.cast(tf.range(self.num_class), tf.float32) / (
        self.num_class - 1)
    base = tf.ones_like(pow_value) * self.class_im_ratio
    self.gt_p_data = tf.pow(base, pow_value)
    self.gt_p_data /= tf.math.reduce_sum(self.gt_p_data)
    print('ground truth classs discribution: {}'.format(self.gt_p_data))
  def get_split(self,
                fold=1,
                percent_labeled_per_class=0.1,
                update_mode='distribution',
                alpha=3,
                pseudo_label_list=None):
    """Extends the labeled set with class-wise sampled pseudo labels.

    Args:
      fold: unused here (the DARP split is pre-generated).
      percent_labeled_per_class: unused here; kept for interface parity
        with CIFAR10LT.get_split.
      update_mode: only 'distribution' is supported.
      alpha: tempering exponent for the per-class keep rate.
      pseudo_label_list: per-class lists of indices into the unlabeled
        split; None leaves the training set unchanged.

    Raises:
      NotImplementedError: for any update_mode other than 'distribution'
        when pseudo labels are provided.
    """
    if pseudo_label_list is not None:
      x_picked = []
      y_picked = []
      if update_mode == 'distribution':
        # Per-class keep rate follows the long-tail prior, tempered by
        # 1/alpha. Generalized: the exponent range was hard-coded as 9;
        # num_class - 1 is identical for the 10-class default but also
        # correct for subclasses with a different class count.
        mu = np.math.pow(self.class_im_ratio, 1 / (self.num_class - 1))
        for c in range(self.num_class):
          num_picked = int(
              len(pseudo_label_list[c]) *
              np.math.pow(np.math.pow(mu, self.num_class - 1 - c), 1 / alpha))
          idx_picked = pseudo_label_list[c][:num_picked]
          x_picked.append(self.x_unlab_test[idx_picked])
          y_picked.append(np.ones_like(self.y_unlab_test[idx_picked]) * c)
          print('class {} is added {} pseudo labels'.format(c, num_picked))
        x_picked.append(self.x_train)
        y_picked.append(self.y_train)
        self.x_train = np.concatenate(x_picked, axis=0)
        self.y_train = np.concatenate(y_picked, axis=0)
      else:
        raise NotImplementedError
    else:
      print('not update')
    print('{} train set images in total'.format(len(self.x_train)))
|
|
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Scheduler specific Functionality for the
stats framework
"""
import numpy as np
from trappy.stats.Trigger import Trigger
# Sched-out gaps shorter than this (in the trace's time units) are treated
# as noise by csum(filter_gaps=True).
WINDOW_SIZE = 0.0001
# Trigger Values
SCHED_SWITCH_IN = 1
SCHED_SWITCH_OUT = -1
NO_EVENT = 0
# Field Names
CPU_FIELD = "__cpu"
NEXT_PID_FIELD = "next_pid"
PREV_PID_FIELD = "prev_pid"
# Cumulative-sum states: 1 while the task is on a CPU, 0 otherwise.
TASK_RUNNING = 1
TASK_NOT_RUNNING = 0
# Sentinel timestamp returned when a requested event is absent.
TIME_INVAL = -1
# Attribute set on a series once sanitize_asymmetry() has processed it.
SERIES_SANTIZED = "_sched_sanitized"
def sanitize_asymmetry(series, window=None):
    """Sanitize the cases when a SWITCH_OUT
    happens before a SWITCH_IN (the task was already running when the
    trace started) or a SWITCH_IN has no matching SWITCH_OUT (the task
    was still running when the trace stopped).

    Mutates ``series`` in place and marks it with SERIES_SANTIZED so the
    work is done only once per series.

    Args:
        series (pandas.Series): event series with values in
            {SCHED_SWITCH_IN, NO_EVENT, SCHED_SWITCH_OUT}
        window (tuple): optional (start, stop) used to clamp the
            first/last event timestamps

    Returns:
        pandas.Series: the (possibly modified) input series
    """
    if not hasattr(series, SERIES_SANTIZED):
        # The non-zero entries are the actual switch events.
        events = series[series != 0]
        if len(series) >= 2 and len(events):
            # Leading SWITCH_OUT: zero it out when it is the very first
            # sample, otherwise turn the first sample into a SWITCH_IN at
            # the window start so the pair balances.
            if series.values[0] == SCHED_SWITCH_OUT:
                series.values[0] = TASK_NOT_RUNNING
            elif events.values[0] == SCHED_SWITCH_OUT:
                series.values[0] = SCHED_SWITCH_IN
                if window:
                    series.index.values[0] = window[0]
            # Trailing SWITCH_IN: mirror image of the leading case.
            if series.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = TASK_NOT_RUNNING
            elif events.values[-1] == SCHED_SWITCH_IN:
                series.values[-1] = SCHED_SWITCH_OUT
                if window:
                    series.index.values[-1] = window[1]
        # No point if the series just has one value and
        # one event. We do not have sufficient data points
        # for any calculation. We should Ideally never reach
        # here.
        elif len(series) == 1:
            series.values[0] = 0
        # Remember that this series has already been sanitized.
        setattr(series, SERIES_SANTIZED, True)
    return series
def csum(series, window=None, filter_gaps=False):
    """Cumulatively sum a sched event series.

    When ``filter_gaps`` is True, sched-out gaps shorter than
    WINDOW_SIZE are first removed via filter_small_gaps(); the
    cumulative sum is then restricted to ``window``.

    Args:
        series (pandas.Series)
        window (tuple)
        filter_gaps (boolean)

    Returns:
        pandas.Series
    """
    source = filter_small_gaps(series) if filter_gaps else series
    return select_window(source.cumsum(), window)
def filter_small_gaps(series):
    """Zero out sched-out gaps shorter than WINDOW_SIZE.

    A SWITCH_OUT followed by a SWITCH_IN less than WINDOW_SIZE later is
    considered noise: both events are replaced by NO_EVENT so the task
    appears continuously running across the gap. The input series is
    modified in place and returned.

    Args:
        series (pandas.Series)

    Returns:
        pandas.Series
    """
    start = None
    for index, value in series.iteritems():
        if value == SCHED_SWITCH_IN:
            # Idiom fix: None comparison uses `is`, not `==`.
            if start is None:
                continue
            if index - start < WINDOW_SIZE:
                series[start] = NO_EVENT
                series[index] = NO_EVENT
            start = None
        if value == SCHED_SWITCH_OUT:
            start = index
    return series
def first_cpu(series, window=None):
    """Return the timestamp of the first SWITCH_IN event.

    The result is a unit-length list so it can be aggregated and
    reduced across nodes to find the first cpu of a task; [inf] is
    returned when the windowed series has no SWITCH_IN event.
    """
    windowed = select_window(series, window)
    switch_ins = windowed[windowed == SCHED_SWITCH_IN]
    if not len(switch_ins):
        return [float("inf")]
    return [switch_ins.index.values[0]]
def select_window(series, window):
    """Library function to slice a pandas time series.

    Returns the portion of ``series`` whose index lies within ``window``
    (both bounds inclusive), or the series itself when ``window`` is
    falsy.
    """
    if not window:
        return series
    begin, end = window
    in_range = (series.index >= begin) & (series.index <= end)
    return series[in_range]
def residency_sum(series, window=None):
    """The input series is processed for
    intervals between a 1 and -1 in order
    to track additive residency of a task.

    Args:
        series (pandas.Series)
        window (start, stop): A start stop
            tuple to process only a section of the
            series

    Returns:
        float (scalar)
    """
    if not len(series):
        return 0.0
    org_series = series
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)
    s_in = series[series == SCHED_SWITCH_IN]
    s_out = series[series == SCHED_SWITCH_OUT]
    if not (len(s_in) and len(s_out)):
        # No in/out pairs inside the window. If the task was running
        # across the whole window, the residency is the window length.
        # This is best-effort: any failure falls through to the pairing
        # check below.
        try:
            org_series = sanitize_asymmetry(org_series)
            running = select_window(org_series.cumsum(), window)
            if running.values[0] == TASK_RUNNING and running.values[-1] == TASK_RUNNING:
                return window[1] - window[0]
        # Bug fix: `except Exception, e` is Python-2-only syntax (a
        # SyntaxError on Python 3) and `e` was unused.
        except Exception:
            pass
    if len(s_in) != len(s_out):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(s_in),
                len(s_out)))
    else:
        # Paired events: residency is the sum of (out - in) intervals.
        return np.sum(s_out.index.values - s_in.index.values)
def first_time(series, value, window=None):
    """Return [t] where t is the first index at which the windowed
    series equals ``value``; [inf] when there is no such index.
    """
    hits = select_window(series, window)
    hits = hits[hits == value]
    if len(hits):
        return [hits.index.values[0]]
    return [float("inf")]
def period(series, align="start", window=None):
    """Return (as a unit-length list) a tuple of the summed duration
    between SWITCH_IN (align=start) or SWITCH_OUT (align=end) events
    and the number of deltas measured.
    """
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    if align == "start":
        series = series[series == SCHED_SWITCH_IN]
    elif align == "end":
        series = series[series == SCHED_SWITCH_OUT]

    # NOTE(review): an even-length series is truncated to its first
    # element (``[:1]``), which yields zero deltas below — confirm this
    # is intentional and not meant to be ``[:-1]``.
    if len(series) % 2 == 0:
        series = series[:1]

    if not len(series):
        return [(0, 0)]

    deltas = np.diff(series.index.values)
    return [(np.sum(deltas), len(deltas))]
def last_time(series, value, window=None):
    """Return the last index at which ``series == value``.

    If no such index is found, ``[TIME_INVAL]`` is returned.  The
    result is a unit-length list so it can be aggregated across nodes.
    """
    matches = select_window(series, window)
    matches = matches[matches == value]
    if not len(matches):
        return [TIME_INVAL]
    return [matches.index.values[-1]]
def binary_correlate(series_x, series_y):
    """Correlate two equal-length binary series.

    Returns (agreements - disagreements) / N, i.e. 1.0 for identical
    series and -1.0 for fully opposite ones.
    """
    if len(series_x) != len(series_y):
        raise ValueError("Cannot compute binary correlation for \
unequal vectors")

    num_agreements = len(series_x[series_x == series_y])
    num_disagreements = len(series_x[series_x != series_y])
    return (num_agreements - num_disagreements) / float(len(series_x))
def get_pids_for_process(run, execname, cls=None):
    """Return the unique pids for a given process name.

    Args:
        run (trappy.Run): A trappy.Run object with a sched_switch
            event
        execname (str): The name of the process (prefix match on
            ``next_comm``)
        cls (trappy.Base): The SchedSwitch event class

    Returns:
        The list of pids (unique) for the execname
    """
    if cls:
        event = getattr(run, cls.name)
        df = event.data_frame
    else:
        try:
            df = run.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in run")

    mask = df["next_comm"].apply(lambda comm: comm.startswith(execname))
    return list(np.unique(df[mask]["next_pid"].values))
def get_task_name(run, pid, cls=None):
    """Return the execname for a pid.

    Args:
        run (trappy.Run): A trappy.Run object with a sched_switch
            event
        pid (str): The pid of the process
        cls (trappy.Base): The SchedSwitch event class

    Returns:
        The execname for the PID, or the empty string when the pid is
        not present in the trace.
    """
    if cls:
        df = getattr(run, cls.name).data_frame
    else:
        try:
            df = run.sched_switch.data_frame
        except AttributeError:
            raise ValueError("SchedSwitch event not found in run")

    matching = df[df["__pid"] == pid]
    if not len(matching):
        return ""
    return matching["__comm"].values[0]
def sched_triggers(run, pid, sched_switch_class):
    """Return the pair of sched_switch triggers for a task.

    Args:
        run (trappy.Run): A run object with SchedSwitch event
        pid (int): pid of the associated task
        sched_switch_class (trappy.Base): The SchedSwitch class

    Returns:
        List of triggers:
        [0] = switch_in_trigger
        [1] = switch_out_trigger
    """
    if not hasattr(run, "sched_switch"):
        raise ValueError("SchedSwitch event not found in run")

    return [
        sched_switch_in_trigger(run, pid, sched_switch_class),
        sched_switch_out_trigger(run, pid, sched_switch_class),
    ]
def sched_switch_in_trigger(run, pid, sched_switch_class):
    """Build a Trigger that fires on SchedSwitch: IN for ``pid``.

    Args:
        run (trappy.Run): A run object with SchedSwitch event
        pid (int): pid of the associated task
        sched_switch_class (trappy.Base): The SchedSwitch class

    Returns:
        Trigger on the SchedSwitch: IN
    """
    return Trigger(
        run,
        sched_switch_class,        # trappy Event Class
        {NEXT_PID_FIELD: pid},     # Filter Dictionary
        SCHED_SWITCH_IN,           # Trigger Value
        CPU_FIELD)                 # Primary Pivot
def sched_switch_out_trigger(run, pid, sched_switch_class):
    """Build a Trigger that fires on SchedSwitch: OUT for ``pid``.

    Args:
        run (trappy.Run): A run object with SchedSwitch event
        pid (int): pid of the associated task
        sched_switch_class (trappy.Base): The SchedSwitch class

    Returns:
        Trigger on the SchedSwitch: OUT
    """
    return Trigger(
        run,
        sched_switch_class,        # trappy Event Class
        {PREV_PID_FIELD: pid},     # Filter Dictionary
        SCHED_SWITCH_OUT,          # Trigger Value
        CPU_FIELD)                 # Primary Pivot
def trace_event(series, window=None):
    """Aggregator returning a list of (start, end) event intervals.

    Each row of the returned array is a pair::

        [<start_time>, <end_time>]

    An empty list is returned when the window contains no switch-in
    events.
    """
    series = select_window(series, window)
    series = sanitize_asymmetry(series, window)

    s_in = series[series == SCHED_SWITCH_IN]
    s_out = series[series == SCHED_SWITCH_OUT]

    if not len(s_in):
        return []

    if len(s_in) != len(s_out):
        raise RuntimeError(
            "Unexpected Lengths: s_in={}, s_out={}".format(
                len(s_in),
                len(s_out)))

    return np.column_stack((s_in.index.values, s_out.index.values))
|
|
import json
import pytest
from django.test.client import Client as DjangoClient
from oauthost.models import AuthorizationCode, Token, Client, RedirectionEndpoint, Scope
from oauthost.toolbox import register_client
from oauthost.exceptions import OauthostException
# OAuth endpoints exercised by the tests below.
URL_TOKEN = '/token/'
URL_AUTHORIZE = '/auth/'
class OAuthostClient(DjangoClient):
    """Django test client that JSON-decodes token-endpoint responses.

    Responses from ``URL_TOKEN`` gain a ``content_json`` attribute with
    the decoded payload, so tests can assert on the token fields.
    """

    def post(self, path, data=None, **extra):
        response = super(OAuthostClient, self).post(path, data=data, **extra)
        if path == URL_TOKEN:
            # Token endpoint always responds with a JSON body.
            response.content_json = json.loads(response.content.decode('utf-8'))
        return response
def parse_location_header(response, use_uri_fragment=False):
    """Parse a response's ``Location`` header into a dict of params.

    :param response: response object exposing the ``Location`` header
        via item access
    :param use_uri_fragment: when ``True``, parse the URI fragment
        (after ``#``) instead of the query string (after ``?``) — used
        for the implicit grant flow
    :return: dict mapping parameter names to their raw string values
    """
    delimiter = '#' if use_uri_fragment else '?'
    query = response['Location'].split(delimiter)[1]
    parsed = {}
    for part in query.split('&'):
        # BUGFIX: split only on the first '=' so values that themselves
        # contain '=' (e.g. base64-encoded tokens ending in '==') do not
        # raise ValueError on unpacking.
        key, value = part.split('=', 1)
        parsed[key] = value
    return parsed
@pytest.fixture
def client():
    # Use the JSON-aware client so tests can read ``content_json``.
    return OAuthostClient()
class TestToolbox:
    """Tests for the ``oauthost.toolbox`` helpers."""

    def test_register_client(self, user):
        """``register_client()`` creates a Client with redirection URIs
        and scopes, honouring ``register_unknown_scopes``."""
        scope1 = Scope(identifier='scope1')
        scope1.save()
        cl = register_client('client1_title', 'client1', 'http://client1url.com/client1/', user)
        assert cl.identifier == 'client1'
        assert cl.title == 'client1_title'
        assert cl.user == user
        uris = cl.redirection_uris.all()
        assert len(uris) == 1
        assert uris[0].uri == 'http://client1url.com/client1/'
        # Unknown scope 'scope2' with register_unknown_scopes=False must fail.
        with pytest.raises(OauthostException):
            register_client(
                'client2_title', 'client2', 'http://client2url.com/client2/', user,
                scopes_list=[scope1, 'scope2'],
                register_unknown_scopes=False)
        # Same call without the restriction registers the unknown scope.
        cl = register_client(
            'client2_title', 'client2', 'http://client2url.com/client2/', user,
            scopes_list=[scope1, 'scope2'], token_lifetime=300, public=False,
            client_params={'description': 'client2_decr'})
        assert cl.identifier == 'client2'
        assert cl.title == 'client2_title'
        assert cl.token_lifetime == 300
        assert cl.user == user
        assert cl.description == 'client2_decr'
        assert cl.type != Client.TYPE_PUBLIC
        assert len(cl.scopes.all()) == 2
        uris = cl.redirection_uris.all()
        assert len(uris) == 1
        assert uris[0].uri == 'http://client2url.com/client2/'
class TestEndpointToken:
    """Tests for the token endpoint (``URL_TOKEN``)."""

    def test_grant_authorization_code(self, settings, client, user):
        """Walk the authorization_code grant through its error paths and
        a successful token issue, then verify code invalidation."""
        # Secure connection check
        with settings(DEBUG=False):
            resp = client.get(URL_TOKEN, {})
            assert resp.status_code == 403
        settings.DEBUG = True
        resp = client.post(URL_TOKEN, {'grant_type': 'a'})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'unsupported_grant_type'
        client_1 = Client(user=user, title='OClient')
        client_1.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        code_1 = AuthorizationCode(user=user, client=client_1, uri=redirect_1.uri)
        code_1.save()
        Scope(identifier='scope1').save()
        # Missing client authentication data.
        resp = client.post(URL_TOKEN, {'grant_type': 'authorization_code', 'scope': 'scope1'})
        assert resp.status_code == 401
        assert resp.content_json['error'] == 'invalid_client'
        # Missing all required params.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1',
             'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_request'
        # Missing redirect URI.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': 'wrong_code',
             'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_request'
        # Missing code.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1',
             'redirect_uri': 'http://wrong-url.com', 'client_id': client_1.identifier,
             'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_request'
        # Wrong code.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': 'invalid',
             'redirect_uri': 'http://localhost:8000/abc/',
             'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
        # Wrong URI.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': code_1.code,
             'redirect_uri': 'http://wrong-url.com/', 'client_id': client_1.identifier,
             'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
        # Valid call for a token.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': code_1.code,
             'redirect_uri': redirect_1.uri, 'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json
        # An additional call for code issues token and code invalidation.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': '1234567',
             'redirect_uri': 'http://localhost:8000/abc/',
             'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
class TestEndpointAuthorize:
    """Tests for the authorization endpoint (``URL_AUTHORIZE``)."""

    def test_auth(self, settings, client, user):
        """Exercise error paths, the authorization-code flow and the
        implicit grant flow of the authorize endpoint."""
        def login():
            return client.login(username=user.username, password='password')
        # User is not logged in, redirect to login page
        resp = client.get(URL_AUTHORIZE, {'client_id': '100'})
        assert resp.status_code == 302
        # Logging the user in.
        assert login()
        # Secure connection check
        resp = client.get(URL_AUTHORIZE, {})
        assert resp.status_code == 403
        settings.DEBUG = True
        client_1 = Client(user=user, title='OClient', identifier='cl012345')
        client_1.save()
        client_2 = Client(user=user, title='OGOClient')
        client_2.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        redirect_2 = RedirectionEndpoint(client=client_2, uri='http://redirect-test1.com')
        redirect_2.save()
        redirect_3 = RedirectionEndpoint(client=client_2, uri='http://redirect-test2.com')
        redirect_3.save()
        Scope(identifier='scope1').save()
        # Missing client id.
        login()
        resp = client.get(URL_AUTHORIZE, {'response_type': 'code', 'scope': 'scope1'})
        assert resp.status_code == 400
        # Invalid client id.
        login()
        resp = client.get(URL_AUTHORIZE, {'response_type': 'code', 'scope': 'scope1', 'client_id': 'invalid'})
        assert resp.status_code == 400
        # Client 2 - No redirect URI in request.
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1', 'client_id': client_2.identifier})
        assert resp.status_code == 400
        # Client 2 - Unknown URI in request.
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1',
             'redirect_uri': 'http://noitisnot.com', 'client_id': client_2.identifier})
        assert resp.status_code == 400
        # Missing response type.
        login()
        resp = client.get(URL_AUTHORIZE, {'client_id': client_1.identifier, 'state': 'abc', 'scope': 'scope1'})
        assert resp.status_code == 302
        assert parse_location_header(resp)['error'] == 'unsupported_response_type'
        assert parse_location_header(resp)['state'] == 'abc'
        # Wrong response type
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'client_id': client_1.identifier, 'response_type': 'habrahabr', 'state': 'abc', 'scope': 'scope1'})
        assert resp.status_code == 302
        assert parse_location_header(resp)['error'] == 'unsupported_response_type'
        assert parse_location_header(resp)['state'] == 'abc'
        # Valid code request.
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1', 'state': 'somestate', 'client_id': client_1.identifier})
        assert resp.status_code == 200
        # User declines auth.
        resp = client.post(URL_AUTHORIZE, {'auth_decision': 'is_made'})
        assert resp.status_code == 302
        assert parse_location_header(resp)['error'] == 'access_denied'
        # Again Valid code request.
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1', 'state': 'somestatetwo', 'client_id': client_1.identifier})
        assert resp.status_code == 200
        # User confirms auth.
        resp = client.post(URL_AUTHORIZE, {'auth_decision': 'is_made', 'confirmed': 'yes'})
        assert resp.status_code == 302
        assert 'code' in parse_location_header(resp)
        assert parse_location_header(resp)['state'] == 'somestatetwo'
        # ============= Implicit grant tests.
        # Valid token request.
        login()
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'token', 'scope': 'scope1',
             'state': 'some_state_three', 'client_id': client_1.identifier})
        assert resp.status_code == 200
        # User confirms token grant.
        resp = client.post(URL_AUTHORIZE, {'auth_decision': 'is_made', 'confirmed': 'yes'})
        assert resp.status_code == 302
        # Implicit grant returns params in the URI fragment, not the query.
        params = parse_location_header(resp, True)
        assert 'access_token' in params
        assert 'token_type' in params
        assert params['state'] == 'some_state_three'
class TestGrants:
    """End-to-end tests for the individual OAuth grant types."""

    def test_authorization_code_unsafe(self, settings, client, user):
        """Full authorization-code flow with client credentials passed
        in the POST body."""
        def login():
            return client.login(username=user.username, password='password')
        settings.DEBUG = True  # Bypass https requirement
        client_1 = Client(user=user, title='OClient')
        client_1.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        Scope(identifier='scope1').save()
        # Logging the user in.
        assert login()
        # Valid code request.
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1', 'client_id': client_1.identifier})
        assert resp.status_code == 200
        # User confirms auth.
        resp = client.post(URL_AUTHORIZE, {'auth_decision': 'is_made', 'confirmed': 'yes'})
        assert resp.status_code == 302
        params = parse_location_header(resp)
        assert 'code' in params
        # Auth code given.
        code = params['code']
        # Valid token by code request.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': code,
             'redirect_uri': redirect_1.uri, 'client_id': client_1.identifier, 'client_secret': client_1.password})
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json

    def test_authorization_code_http_basic(self, settings, client, user):
        """Authorization-code flow authenticating the client with an
        HTTP Basic ``Authorization`` header."""
        def login():
            return client.login(username=user.username, password='password')
        settings.DEBUG = True  # Bypass https requirement
        client_1 = Client(user=user, title='OClient', identifier='OClient', password='cl012345')
        client_1.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        # Logging the user in.
        login()
        Scope(identifier='scope1').save()
        # Valid code request.
        resp = client.get(
            URL_AUTHORIZE,
            {'response_type': 'code', 'scope': 'scope1', 'client_id': client_1.identifier})
        assert resp.status_code == 200
        # User confirms auth.
        resp = client.post(URL_AUTHORIZE, {'auth_decision': 'is_made', 'confirmed': 'yes'})
        assert resp.status_code == 302
        params = parse_location_header(resp)
        assert 'code' in params
        # Auth code given.
        code = params['code']
        # Invalid token by code request.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'code': code, 'redirect_uri': redirect_1.uri},
            Authorization='Basic Tqrqwer==')
        assert resp.status_code == 401
        headers = getattr(resp, 'headers', None)
        if headers is None:
            # pre Django 3.2
            header_auth = resp._headers['www-authenticate'][1]
        else:
            header_auth = headers['www-authenticate']
        assert header_auth == 'Basic'
        # Valid token by code request.
        # HTTP Basic data - OClient:cl012345 --> T0NsaWVudDpjbDAxMjM0NQ==
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'authorization_code', 'scope': 'scope1', 'code': code, 'redirect_uri': redirect_1.uri},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json

    def test_scope(self, settings, client, user):
        """Scope validation: missing, unknown, disabled and
        client-restricted scopes must all be rejected."""
        settings.DEBUG = True
        username = user.username
        password = 'password'
        client_1 = Client(user=user, title='OClient1', identifier='OClient', password='cl012345')
        client_1.save()
        # Scope is missing.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'username': username, 'password': password},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_scope'
        # No scope supported by server.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'username': username, 'password': password, 'scope': 'my scope'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_scope'
        scope1 = Scope(identifier='scope1')
        scope1.save()
        scope2 = Scope(identifier='scope2')
        scope2.save()
        scope3 = Scope(identifier='scope3', status=Scope.STATUS_DISABLED)
        scope3.save()
        client_2 = Client(user=user, title='OClien2', identifier='OClient2', password='cl012345')
        client_2.save()
        client_2.scopes.add(scope2)
        # Unsupported (or disabled) client scope request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'username': username, 'password': password, 'scope': 'scope1 scope2'},
            Authorization='Basic T0NsaWVudDI6Y2wwMTIzNDU=')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_scope'
        # Unsupported (or disabled) server scope request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'username': username, 'password': password, 'scope': 'scope1 scope3'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_scope'
        # Unsupported scope request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'username': username, 'password': password, 'scope': 'scope1'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json
        assert resp.content_json['scope'] == 'scope1'

    def test_token_by_user_credentials(self, settings, client, user):
        """Resource-owner password-credentials grant."""
        settings.DEBUG = True
        client_1 = Client(user=user, title='OClient', identifier='OClient', password='cl012345', token_lifetime=15)
        client_1.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        Scope(identifier='scope1').save()
        # Missing params.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'scope': 'scope1'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_request'
        # Invalid params.
        resp = client.post(
            URL_TOKEN,
            {'grant_type': 'password', 'scope': 'scope1', 'username': 'FalseUser', 'password': 'FalsePassword'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
        # Valid token by password request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'password', 'scope': 'scope1', 'username': user.username, 'password': 'password'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json
        assert 'expires_in' in resp.content_json

    def test_token_by_client_credentials(self, settings, client, user):
        """Client-credentials grant: no refresh token is issued."""
        settings.DEBUG = True
        client_1 = Client(user=user, title='OClient', identifier='OClient', password='cl012345')
        client_1.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        Scope(identifier='scope1').save()
        # Valid token by client credentials request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'client_credentials', 'scope': 'scope1'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' not in resp.content_json
        assert 'token_type' in resp.content_json
        access_token = resp.content_json['access_token']
        token = Token.objects.get(access_token=access_token)
        assert user == token.user

    def test_refresh_token_http_basic(self, settings, client, user):
        """Refresh-token grant: rejects foreign/invalid tokens and
        rotates both tokens on success."""
        settings.DEBUG = True
        client_1 = Client(user=user, title='OClient', identifier='OClient', password='cl012345')
        client_1.save()
        client_2 = Client(user=user, title='OGOClient', identifier='OGOClient', password='cl543210')
        client_2.save()
        redirect_1 = RedirectionEndpoint(client=client_1, uri='http://redirect-test.com')
        redirect_1.save()
        token_1 = Token(client=client_1, user=user)
        token_1.save()
        token_2 = Token(client=client_2, user=user)
        token_2.save()
        date_issued = token_1.date_issued
        access_token = token_1.access_token
        refresh_token = token_1.refresh_token
        refresh_token_wrong_client = token_2.refresh_token
        # Missing required params.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'refresh_token'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_request'
        # Invalid refresh token supplied.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'refresh_token', 'refresh_token': 'invalid'},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
        # Refresh token from another client is supplied.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'refresh_token', 'refresh_token': refresh_token_wrong_client},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 400
        assert resp.content_json['error'] == 'invalid_grant'
        # Valid request.
        resp = client.post(
            URL_TOKEN, {'grant_type': 'refresh_token', 'refresh_token': refresh_token},
            Authorization='Basic T0NsaWVudDpjbDAxMjM0NQ==')
        assert resp.status_code == 200
        assert 'access_token' in resp.content_json
        assert 'refresh_token' in resp.content_json
        assert 'token_type' in resp.content_json
        assert 'expires_in' not in resp.content_json
        assert access_token != resp.content_json['access_token']
        assert refresh_token != resp.content_json['refresh_token']
        token_updated = Token.objects.get(access_token=resp.content_json['access_token'])
        assert date_issued != token_updated.date_issued
|
|
# Copyright (c) 2020, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import shutil
import tempfile
import logging
import pkg_resources
import atexit
from distutils.dir_util import copy_tree, remove_tree, mkpath
from distutils.file_util import copy_file
from distutils.command.build import build as dist_build
from setuptools.command.build_py import build_py as _build_py
__author__ = "Giuseppe Natale"
__copyright__ = "Copyright 2020, Xilinx"
__email__ = "pynq_support@xilinx.com"
_function_text = """
import json
def _default_repr(obj):
return repr(obj)
def _resolve_global(name):
g = globals()
return g[name] if name in g else None
"""
class _ExtensionsManager:
    """Utility class to manage a list of available extensions registered for
    discovery.

    Parameters
    ----------
    package_name: str
        Name of the package to inspect for extensions
    """
    def __init__(self, package_name):
        self.package_name = package_name
        # Materialize the entry-point iterator so the list can be
        # inspected repeatedly.
        self.list = [ext for ext in
                     pkg_resources.iter_entry_points(self.package_name)]
        # Clean up any resources pkg_resources extracted, at exit.
        atexit.register(pkg_resources.cleanup_resources, force=True)

    @staticmethod
    def extension_path(extension_name):
        """Return the source path of the given extension name."""
        # Define monkey patch for `pkg_resources.NullProvider.__init__` to use
        # `module.__path__` instead of `module.__file__`, as the latter does
        # not exist for namespace packages.
        # Workaround for https://github.com/pypa/setuptools/issues/1407
        def init(self, module):
            self.loader = getattr(module, "__loader__", None)
            module_path = [p for p in getattr(module, "__path__", "")][0]
            self.module_path = module_path
        # Temporarily apply monkey patch to
        # `pkg_resources.NullProvider.__init__`
        init_backup = pkg_resources.NullProvider.__init__
        pkg_resources.NullProvider.__init__ = init
        src_path = pkg_resources.resource_filename(extension_name, "")
        # Restore original `pkg_resources.NullProvider.__init__`
        # (the patch must not leak to other pkg_resources users).
        pkg_resources.NullProvider.__init__ = init_backup
        return src_path

    @property
    def printable(self):
        """Return a list of extension names and related parent packages
        for printing.
        """
        return ["{} (source: {})".format(e.name, e.module_name.split(".")[0])
                for e in self.list]

    @property
    def paths(self):
        """Return a list of paths from the discovered extensions.
        """
        return [self.extension_path(e.module_name) for e in self.list]
class _PynqLoggingFormatter(logging.Formatter):
FORMATS = {
logging.ERROR: "ERROR: %(msg)s",
logging.WARNING: "WARNING: %(msg)s",
logging.DEBUG: "DEBUG: %(module)s: %(lineno)d: %(msg)s",
"DEFAULT": "%(msg)s",
}
def format(self, record):
log_fmt = self.FORMATS.get(record.levelno, self.FORMATS["DEFAULT"])
formatter = logging.Formatter(log_fmt)
return formatter.format(record)
def get_logger(level=logging.INFO, force_lvl=False):
    """Returns an instance of the pynq.utils logger.

    Parameters
    ----------
    level: str or int
        String or integer constant representing the logging level following
        Python's logging standard levels. By default, the level is not
        updated if the current level is higher, unless `force_lvl` is set
        to `True`.
    force_lvl: bool
        If `True`, sets the logging level to `level` in any case.
    """
    levels = {
        "critical": logging.CRITICAL,
        "error": logging.ERROR,
        "warning": logging.WARNING,
        "info": logging.INFO,
        "debug": logging.DEBUG
    }
    logger = logging.getLogger(__name__)
    if not logger.handlers:
        # First call: attach a stream handler with the custom formatter.
        ch = logging.StreamHandler()
        ch.setFormatter(_PynqLoggingFormatter())
        logger.addHandler(ch)
    logger_lvl = logger.getEffectiveLevel()
    # BUGFIX(idiom): use isinstance() instead of `type(level) is str` so
    # str subclasses are handled too.
    if isinstance(level, str):
        level = levels[level.lower()]
    if level > logger_lvl or force_lvl:
        logger.setLevel(level)
    return logger
def _detect_devices(active_only=False):
    """Return a list containing all the detected devices names.

    Raises ``RuntimeError`` when no device is present.

    NOTE(review): with ``active_only=True`` this returns a single name
    (str), not a list — callers must account for the asymmetry.
    """
    # Imported lazily: pynq.pl_server is only needed (and available) at
    # deployment time.
    from pynq.pl_server import Device
    devices = Device.devices
    if not devices:
        raise RuntimeError("No device found in the system")
    if active_only:
        return Device.active_device.name
    return [d.name for d in devices]
class DownloadedFileChecksumError(Exception):
    """This exception is raised when a downloaded file has an incorrect
    checksum."""
    pass
def _download_file(download_link, path, md5sum=None):
"""Download a file from the web.
Parameters
----------
download_link: str
The download link to use
path: str
The path where to save the file. The path must include the target
file
md5sum: str or None
If specified, it is used after download to check for correctness.
Raises a `DownloadedFileChecksumError` exception when the checksum
is incorrect, and deletes the downloaded file.
"""
import urllib.request
import hashlib
with urllib.request.urlopen(download_link) as response, \
open(path, "wb") as out_file:
data = response.read()
out_file.write(data)
if md5sum:
file_md5sum = hashlib.md5()
with open(path, "rb") as out_file:
for chunk in iter(lambda: out_file.read(4096), b""):
file_md5sum.update(chunk)
if md5sum != file_md5sum.hexdigest():
os.remove(path)
raise DownloadedFileChecksumError("Incorrect checksum for file "
"'{}'. The file has been "
"deleted as a result".format(
path))
def _find_local_overlay_res(device_name, overlay_res_filename, src_path):
"""Inspects ``overlay_res.ext.d` directory for an available
``overlay_res.ext`` file for ``device_name``.
Returns ``None`` if ``device_name`` is not found.
If a ``overlay_res.ext`` file is also found, always return that one
without doing any resolution based on ``device_name``.
Parameters
----------
device_name: str
The target device name
overlay_res_filename: str
The target filename to resolve
src_path: str
The path where to perform this search
"""
overlay_res_path = os.path.join(src_path, overlay_res_filename)
if os.path.isfile(overlay_res_path):
return overlay_res_path
overlay_res_filename_split = os.path.splitext(overlay_res_filename)
overlay_res_filename_ext = "{}.{}{}".format(overlay_res_filename_split[0],
device_name,
overlay_res_filename_split[1])
overlay_res_path = os.path.join(src_path, overlay_res_filename + ".d",
overlay_res_filename_ext)
if os.path.isfile(overlay_res_path):
return overlay_res_path
return None
def _find_remote_overlay_res(device_name, links_json_path):
"""Get download link for ``overlay_res.ext`` file and related checksum from
``overlay_res.ext.link`` json file, based on ``device_name``.
The ``.link`` file is generally a dict of device names and associated url
and md5sum.
.. code-block:: python3
{
"device_1": {
"url": "https://link.to/overlay.xclbin",
"md5sum": "da1e100gh8e7becb810976e37875de38"
}.
"device_2": {
"url": "https://link.to/overlay.xclbin",
"md5sum": "da1e100gh8e7becb810976e37875de38"
}
}
Expected return content from the ``.link`` json file is a dict with two
entries:
.. code-block:: python3
{
"url": "https://link.to/overlay.xclbin",
"md5sum": "da1e100gh8e7becb810976e37875de38"
}
Returns `None` if ``device_name`` is not found.
If the ``.link`` file contains a *url* and *md5sum* entries at the top
level, these are returned and no device-based resolution is performed.
Parameters
----------
device_name: str
The target device name
links_json_path: str
The full path to the ``.link`` json file
"""
with open(links_json_path) as f:
links = json.load(f)
if "url" in links and "md5sum" in links:
return {"url": links["url"], "md5sum": links["md5sum"]}
if device_name in links:
return links[device_name]
return None
class OverlayNotFoundError(Exception):
    """This exception is raised when an overlay for the target device could not
    be located."""
    pass
def _resolve_overlay_res_from_folder(device_name, overlay_res_folder, src_path,
                                     dst_path, rel_path, files_to_copy):
    """Resolve ``overlay_res.ext`` from an ``overlay_res.ext.d`` folder.

    On success, ``files_to_copy`` is updated with the resolved source
    path mapped to its destination. When a sibling
    ``overlay_res.ext.link`` file exists, resolution is skipped here to
    avoid inspecting the ``.d`` folder twice — see
    ``_resolve_overlay_res_from_link()``.

    Raises ``OverlayNotFoundError`` when no file matches
    ``device_name``.
    """
    overlay_res_filename = os.path.splitext(overlay_res_folder)[0]
    link_path = os.path.join(src_path, overlay_res_filename + ".link")
    if os.path.isfile(link_path):
        # Handled by the .link resolution path instead.
        return

    resolved_src = _find_local_overlay_res(device_name,
                                           overlay_res_filename,
                                           src_path)
    if not resolved_src:
        raise OverlayNotFoundError(overlay_res_filename)

    files_to_copy[resolved_src] = os.path.join(dst_path, rel_path,
                                               overlay_res_filename)
def _resolve_overlay_res_from_link(device_name, overlay_res_link, src_path,
                                   dst_path, rel_path, files_to_copy,
                                   files_to_move, logger):
    """Resolve ``overlay_res.ext`` file from ``overlay_res.ext.link`` file,
    based on ``device_name``. Updates ``files_to_copy`` with the resolved file
    to use if found locally (by inspecting ``overlay_res.ext.d`` folder), or
    updates ``files_to_move`` in case the file is downloaded.

    Raises
    ------
    OverlayNotFoundError
        If the resource can neither be found locally nor downloaded (no
        ``.link`` entry for ``device_name``, or the download fails its
        checksum validation).
    """
    overlay_res_filename = os.path.splitext(overlay_res_link)[0]
    overlay_res_dst_path = os.path.join(dst_path, rel_path,
                                        overlay_res_filename)
    # Prefer a local copy from the related '.d' folder, if one exists
    overlay_res_src_path = _find_local_overlay_res(device_name,
                                                   overlay_res_filename,
                                                   src_path)
    if overlay_res_src_path:
        files_to_copy[overlay_res_src_path] = overlay_res_dst_path
    else:
        # No local copy: look for a download entry for this device
        overlay_res_download_dict = _find_remote_overlay_res(
            device_name, os.path.join(src_path, overlay_res_link))
        if overlay_res_download_dict:
            # attempt overlay_res.ext file download
            try:
                # download to a temp file first; it is moved into place only
                # after the whole delivery succeeds (see files_to_move)
                tmp_file = tempfile.mkstemp()[1]
                logger.info("Downloading file '{}'. This may take a while"
                            "...".format(overlay_res_filename))
                _download_file(
                    overlay_res_download_dict["url"],
                    tmp_file,
                    overlay_res_download_dict["md5sum"]
                )
                files_to_move[tmp_file] = overlay_res_dst_path
            except DownloadedFileChecksumError:
                # corrupted download is treated the same as "not found"
                raise OverlayNotFoundError(overlay_res_filename)
        else:
            raise OverlayNotFoundError(overlay_res_filename)
def _copy_and_move_files(files_to_copy, files_to_move):
"""Copy and move files and folders. ``files_to_copy`` and ``files_to_move``
are expected to be dict where the key is the source path, and the value is
destination path.
"""
# copy files and folders
for src, dst in files_to_copy.items():
if os.path.isfile(src):
mkpath(os.path.dirname(dst))
copy_file(src, dst)
else:
copy_tree(src, dst)
# and move files previously downloaded
for src, dst in files_to_move.items():
shutil.move(src, dst)
def _roll_back_copy(files_to_copy, files_to_move):
"""Roll-back previously performed copy of files and folders.
``files_to_copy`` and ``files_to_move`` are expected to be dict where the
key is the source path, and the value is destination path.
"""
for _, dst in files_to_copy.items():
if os.path.isfile(dst):
os.remove(dst)
while(len(os.listdir(os.path.dirname(dst))) == 0):
os.rmdir(os.path.dirname(dst))
dst = os.path.dirname(dst)
elif os.path.isdir(dst):
remove_tree(dst)
for _, dst in files_to_move.items():
if os.path.isfile(dst):
os.remove(dst)
while(len(os.listdir(os.path.dirname(dst))) == 0):
os.rmdir(os.path.dirname(dst))
dst = os.path.dirname(dst)
def deliver_notebooks(device_name, src_path, dst_path, name, folder=False,
                      overlays_res_lookup=True):
    """Deliver notebooks to target destination path.

    If a ``overlay_res.ext.link`` file or a ``overlay_res.ext.d`` folder is
    found, then ``overlay_res.ext`` (where ``.ext`` represents a generic file
    extension) is considered to be a file that needs to be resolved
    dynamically, based on ``device_name``.
    The following resolution strategy is applied when inspecting ``src_path``:

    1. If an ``overlay_res.ext`` file is found, prioritize that file and do
       not perform any resolution.
    2. In case step 1 fails, if a ``overlay_res.ext.d`` folder is found,
       try to retrieve the right ``overlay_res.ext`` file from there. The
       files in this folder are expected to contain the device name as a
       string, before the file extension ``.ext``.
       Format should be ``overlay_res.device_name.ext``.
    3. In case step 2 fails, if there is an ``overlay_res.ext.link`` file,
       attempt to download the correct file from the provided url, assumed
       that a valid entry for ``device_name`` is available in the ``.link``
       json file.
    4. If all steps fail, notebooks that are in the same folder as
       ``overlay_res.ext`` are not delivered, and the user is warned.

    For simplicity, it is assumed that ``.link`` files and ``.d`` folders are
    located next to the notebooks that use the associated resource. Folders
    that do not contain notebooks will not be inspected.

    In case no ``.link`` or ``overlay_res.d`` files are found, notebooks are
    simply copied as is, no resolution is performed.
    It is assumed that for this scenario, overlays are delivered somewhere
    else.

    Parameters
    ----------
    device_name: str
        The target device name to use when doing resolution of ``.link``
        files and ``.d`` folders. If an ``overlay_res.ext`` file is also
        found, no resolution will be done and ``device_name`` will be
        ignored, as it is assumed that the ``overlay_res.ext`` file is
        prioritized and no automatic resolution is expected
    src_path: str
        The source path to copy from
    dst_path: str
        The destination path to copy to
    name: str
        The name of the notebooks module
    folder: bool
        Indicates whether to use ``name`` as target folder to copy
        notebooks, inside ``dst_path``. Notebooks will be copied directly
        in ``dst_path`` if ``False``.
    overlays_res_lookup: bool
        Dynamic resolution of ``.link`` files and ``.d`` folders is
        disabled if ``False``.
    """
    logger = get_logger()
    dst_fullpath = os.path.join(dst_path, name) if folder else dst_path
    # destinations are only written at the end, so a failed folder can be
    # skipped without leaving partial deliveries behind
    files_to_copy = {}
    files_to_move = {}
    for root, dirs, files in os.walk(src_path):
        # If there is at least one notebook, inspect the folder
        if [f for f in files if f.endswith(".ipynb")]:
            # If folder is in the list of files to copy, remove it as it is
            # going to be inspected
            if root in files_to_copy:
                files_to_copy.pop(root)
            relpath = os.path.relpath(root, src_path)
            relpath = "" if relpath == "." else relpath
            try:
                # accumulate into temporary dicts so that a resolution
                # failure discards the whole folder atomically
                files_to_copy_tmp = {}
                files_to_move_tmp = {}
                for d in dirs:
                    if d.endswith(".d"):
                        if overlays_res_lookup:
                            _resolve_overlay_res_from_folder(
                                device_name, d, root, dst_fullpath, relpath,
                                files_to_copy_tmp)
                    elif d != "__pycache__":  # exclude __pycache__ folder
                        dir_dst_path = os.path.join(dst_fullpath, relpath, d)
                        files_to_copy_tmp[os.path.join(root, d)] = \
                            dir_dst_path
                for f in files:
                    if f.endswith(".link"):
                        if overlays_res_lookup:
                            _resolve_overlay_res_from_link(
                                device_name, f, root, dst_fullpath, relpath,
                                files_to_copy_tmp, files_to_move_tmp, logger)
                    else:
                        file_dst_path = os.path.join(dst_fullpath, relpath, f)
                        files_to_copy_tmp[os.path.join(root, f)] = \
                            file_dst_path
                # No OverlayNotFoundError exception raised, can add
                # files_to_copy_tmp to files_to_copy
                files_to_copy.update(files_to_copy_tmp)
                # and files_to_move_tmp to files_to_move
                files_to_move.update(files_to_move_tmp)
            except OverlayNotFoundError as e:
                # files_to_copy not updated, folder skipped
                # NOTE(review): no warning is emitted when the failure
                # happens at the module root (relpath == "") -- confirm
                # this is intended.
                if relpath:
                    nb_str = os.path.join(name, relpath)
                    logger.info("Could not resolve file '{}' in folder "
                                "'{}', notebooks will not be "
                                "delivered".format(str(e), nb_str))
    try:
        # exclude root __init__.py from copy, if it exists
        files_to_copy.pop(os.path.join(src_path, "__init__.py"))
    except KeyError:
        pass
    try:
        if not files_to_copy:
            logger.info("The notebooks module '{}' could not be delivered. "
                        "The module has no notebooks, or no valid overlays "
                        "were found".format(name))
        else:
            _copy_and_move_files(files_to_copy, files_to_move)
    except (Exception, KeyboardInterrupt) as e:
        # roll-back copy
        logger.info("Exception detected. Cleaning up as the delivery process "
                    "did not complete...")
        _roll_back_copy(files_to_copy, files_to_move)
        raise e
def _resolve_global_overlay_res(overlay_res_link, src_path, logger,
                                fail=False):
    """Resolve resource that is global to every device (using a ``device=None``
    when calling ``_find_remote_overlay_res``). File is downloaded in
    ``src_path``.

    Returns
    -------
    bool
        True when the ``.link`` file has a device-independent entry and the
        target file exists on disk after the download attempt, False
        otherwise.
    """
    overlay_res_filename = os.path.splitext(overlay_res_link)[0]
    overlay_res_download_dict = \
        _find_remote_overlay_res(None,
                                 os.path.join(src_path, overlay_res_link))
    if overlay_res_download_dict:
        overlay_res_fullpath = os.path.join(
            src_path, overlay_res_filename)
        try:
            logger.info("Downloading file '{}'. "
                        "This may take a while"
                        "...".format(
                            overlay_res_filename))
            _download_file(
                overlay_res_download_dict["url"],
                overlay_res_fullpath,
                overlay_res_download_dict["md5sum"])
        except Exception as e:
            if fail:
                raise e
        # NOTE(review): the ``return True`` below sits inside ``finally``,
        # so it suppresses any in-flight exception (including the one
        # re-raised above when ``fail`` is True) whenever the target file
        # exists on disk. Confirm this is the intended behavior.
        finally:
            if not os.path.isfile(
                    overlay_res_fullpath):
                err_msg = "Could not resolve file '{}'".format(
                    overlay_res_filename)
                logger.info(err_msg)
            else:
                return True  # overlay_res_download_dict was not empty
    return False
def _resolve_devices_overlay_res_helper(device, src_path, overlay_res_filename,
                                        overlay_res_link, overlay_res_fullpath,
                                        logger, fail=False,
                                        overlay_res_download_path=None):
    """Helper function for `_resolve_devices_overlay_res`.

    Looks for ``overlay_res_filename`` locally first; when absent, attempts
    to download it to ``overlay_res_fullpath`` using the ``device`` entry of
    the ``.link`` file. Failures are logged, or raised when ``fail`` is
    True. An empty ``overlay_res_download_path`` directory created for the
    download is removed again on failure.
    """
    overlay_res_src_path = _find_local_overlay_res(device,
                                                   overlay_res_filename,
                                                   src_path)
    err_msg = "Could not resolve file '{}' for " \
              "device '{}'".format(overlay_res_filename, device)
    if not overlay_res_src_path:
        overlay_res_download_dict = _find_remote_overlay_res(
            device, os.path.join(src_path, overlay_res_link))
        if overlay_res_download_dict:
            if overlay_res_download_path:
                # make sure the target '.d' folder exists
                mkpath(overlay_res_download_path)
            try:
                logger.info("Downloading file '{}'. This may take a while"
                            "...".format(overlay_res_filename))
                _download_file(
                    overlay_res_download_dict["url"],
                    overlay_res_fullpath,
                    overlay_res_download_dict["md5sum"])
            except Exception as e:
                if fail:
                    raise e
            finally:
                if not os.path.isfile(
                        overlay_res_fullpath):
                    logger.info(err_msg)
                # drop the '.d' folder again if nothing was downloaded
                if overlay_res_download_path and \
                        len(os.listdir(overlay_res_download_path)) == 0:
                    os.rmdir(overlay_res_download_path)
        else:
            if fail:
                raise OverlayNotFoundError(err_msg)
            logger.info(err_msg)
def _resolve_devices_overlay_res(overlay_res_link, src_path, devices, logger,
                                 fail=False):
    """Resolve ``overlay_res.ext`` file for every device in ``devices``.
    Files are downloaded in a ``overlay_res.ext.d`` folder in ``src_path``.
    If the device is only one and is an edge device, file is resolved directly
    to ``overlay_res.ext``.
    """
    from pynq.pl_server.device import Device, XlnkDevice
    overlay_res_filename = os.path.splitext(overlay_res_link)[0]
    # NOTE(review): this checks the type of the globally detected
    # ``Device.devices[0]`` while the names in the passed-in ``devices``
    # list are used everywhere else -- presumably ``devices`` mirrors
    # ``Device.devices`` here; verify against the caller.
    if len(devices) == 1 and type(Device.devices[0]) == XlnkDevice:
        # single edge device: resolve directly to 'overlay_res.ext'
        overlay_res_fullpath = os.path.join(src_path, overlay_res_filename)
        _resolve_devices_overlay_res_helper(devices[0], src_path,
                                            overlay_res_filename,
                                            overlay_res_link,
                                            overlay_res_fullpath, logger, fail)
        return
    for device in devices:
        # each candidate lives in 'overlay_res.ext.d' and embeds the device
        # name in its filename: 'overlay_res.device_name.ext'
        overlay_res_download_path = os.path.join(
            src_path, overlay_res_filename + ".d")
        overlay_res_filename_split = \
            os.path.splitext(overlay_res_filename)
        overlay_res_filename_ext = "{}.{}{}".format(
            overlay_res_filename_split[0], device,
            overlay_res_filename_split[1])
        overlay_res_fullpath = os.path.join(overlay_res_download_path,
                                            overlay_res_filename_ext)
        _resolve_devices_overlay_res_helper(device, src_path,
                                            overlay_res_filename,
                                            overlay_res_link,
                                            overlay_res_fullpath, logger, fail,
                                            overlay_res_download_path)
def _resolve_all_overlay_res_from_link(overlay_res_link, src_path, logger,
                                       fail=False):
    """Resolve every entry of ``.link`` files regardless of detected devices.

    A global (device-independent) entry takes precedence; otherwise every
    device listed in the ``.link`` json gets its file downloaded into the
    related ``overlay_res.ext.d`` folder.
    """
    overlay_res_filename = os.path.splitext(overlay_res_link)[0]
    with open(os.path.join(src_path, overlay_res_link)) as f:
        links = json.load(f)
    if not _resolve_global_overlay_res(overlay_res_link, src_path, logger,
                                       fail):
        for device, download_link_dict in links.items():
            # skip devices whose file is already available locally
            if not _find_local_overlay_res(
                    device, overlay_res_filename, src_path):
                err_msg = "Could not resolve file '{}' for " \
                          "device '{}'".format(overlay_res_filename, device)
                overlay_res_download_path = os.path.join(
                    src_path, overlay_res_filename + ".d")
                overlay_res_filename_split = \
                    os.path.splitext(overlay_res_filename)
                overlay_res_filename_ext = "{}.{}{}".format(
                    overlay_res_filename_split[0], device,
                    overlay_res_filename_split[1])
                mkpath(overlay_res_download_path)
                overlay_res_fullpath = os.path.join(
                    overlay_res_download_path,
                    overlay_res_filename_ext)
                try:
                    logger.info("Downloading file '{}'. "
                                "This may take a while"
                                "...".format(
                                    overlay_res_filename))
                    _download_file(
                        download_link_dict["url"],
                        overlay_res_fullpath,
                        download_link_dict["md5sum"])
                except Exception as e:
                    if fail:
                        raise e
                finally:
                    if not os.path.isfile(
                            overlay_res_fullpath):
                        logger.info(err_msg)
                    # remove the '.d' folder again if left empty
                    if len(os.listdir(
                            overlay_res_download_path)) == 0:
                        os.rmdir(overlay_res_download_path)
def download_overlays(path, download_all=False, fail_at_lookup=False,
                      fail_at_device_detection=False, cleanup=False):
    """Download overlays for detected devices in destination path.

    ``overlay_res.ext`` files are resolved from ``overlay_res.ext.link``
    json files. Downloaded files are placed in an ``overlay_res.ext.d``
    directory, with the device name added to their filename, as
    ``overlay_res.device_name.ext``.
    If the detected device is only one and is an edge device, the target
    file is resolved directly to ``overlay_res.ext``.
    If the target ``overlay_res.ext`` already exists, resolution is skipped.

    Parameters
    ----------
    path: str
        The path to inspect for overlays installation
    download_all: bool
        Causes all overlays to be downloaded from .link files, regardless
        of the detected devices.
    fail_at_lookup: bool
        Determines whether the function should raise an exception in case
        overlay lookup fails.
    fail_at_device_detection: bool
        Determines whether the function should raise an exception in case
        no device is detected.
    cleanup: bool
        Dictates whether .link files need to be deleted after resolution.
        If `True`, all .link files are removed as last step.
    """
    logger = get_logger()
    try:
        devices = _detect_devices()
    except RuntimeError as e:
        if fail_at_device_detection:
            raise e
        devices = []
    # gather every .link file first, then resolve each of them
    link_paths = []
    for root, _, files in os.walk(path):
        link_paths.extend(os.path.join(root, f) for f in files
                          if f.endswith(".link"))
    removal_queue = []
    for link_path in link_paths:
        link_dir, link_name = os.path.split(link_path)
        if download_all:
            # download all overlays regardless of detected devices
            _resolve_all_overlay_res_from_link(link_name, link_dir, logger,
                                               fail_at_lookup)
        elif not _resolve_global_overlay_res(link_name, link_dir, logger,
                                             fail_at_lookup):
            _resolve_devices_overlay_res(link_name, link_dir, devices,
                                         logger, fail_at_lookup)
        if cleanup:
            removal_queue.append(link_path)
    # .link files are deleted only after every resolution completed
    for link_path in removal_queue:
        os.remove(link_path)
class _download_overlays(dist_build):
    """Custom distutils command to download overlays using .link files."""
    # distutils command metadata: help text and CLI flags
    description = "Download overlays using .link files"
    user_options = [("download-all", "a",
                     "forcibly download every overlay from .link files, "
                     "overriding download based on detected devices"),
                    ("force-fail", "f",
                     "Do not complete setup if overlays lookup fails.")]
    boolean_options = ["download-all", "force-fail"]
    def initialize_options(self):
        # both flags default to off
        self.download_all = False
        self.force_fail = False
    def finalize_options(self):
        # nothing to validate: the two options are plain booleans
        pass
    def run(self):
        # resolve overlays inside the build tree of every top-level package
        cmd = self.get_finalized_command("build_py")
        for package, _, build_dir, _ in cmd.data_files:
            if "." not in package:  # sub-packages are skipped
                download_overlays(build_dir,
                                  download_all=self.download_all,
                                  fail_at_lookup=self.force_fail)
class build_py(_build_py):
    """Overload the standard setuptools 'build_py' command to also call the
    command 'download_overlays'.
    """
    def run(self):
        # perform the regular build first, then resolve .link overlays
        super().run()
        self.run_command("download_overlays")
class NotebookResult:
    """The result of executing a notebook.

    Attributes of the form ``_1``, ``_2``, ... hold the output object of
    each code cell, or ``None`` if the cell did not return an object.
    The raw outputs are available in the ``outputs`` attribute. See the
    Jupyter documentation for details on the format of the dictionary.
    """
    def __init__(self, nb):
        code_cells = [c for c in nb['cells'] if c['cell_type'] == 'code']
        self.outputs = [cell['outputs'] for cell in code_cells]
        # the synthetic final cell prints a JSON list with one entry per cell
        returned = json.loads(self.outputs[-1][0]['text'])
        for index, obj in enumerate(returned, start=1):
            setattr(self, "_" + str(index), obj)
def _create_code(num):
    """Build the source of the synthetic final notebook cell.

    The generated cell prints a JSON list with one ``_resolve_global``
    lookup per code cell (``num`` of them), appended after the
    ``_function_text`` preamble.
    """
    lookups = ", ".join("_resolve_global('_{}')".format(n + 1)
                        for n in range(num))
    call_line = "print(json.dumps([{}], default=_default_repr))".format(
        lookups)
    return _function_text + call_line
def run_notebook(notebook, root_path=".", timeout=30, prerun=None):
    """Run a notebook in Jupyter

    This function will copy all of the files in ``root_path`` to a
    temporary directory, run the notebook and then return a
    ``NotebookResult`` object containing the outputs for each cell.
    The notebook is run in a separate process and only objects that
    are serializable will be returned in their entirety, otherwise
    the string representation will be returned instead.

    Parameters
    ----------
    notebook : str
        The notebook to run relative to ``root_path``
    root_path : str
        The root notebook folder (default ".")
    timeout : int
        Length of time to run the notebook in seconds (default 30)
    prerun : function
        Function to run prior to starting the notebook, takes the
        temporary copy of root_path as a parameter
    """
    # imported lazily so the module can be used without jupyter installed
    import nbformat
    from nbconvert.preprocessors import ExecutePreprocessor
    with tempfile.TemporaryDirectory() as td:
        # execute against a scratch copy so the notebook cannot modify
        # the files in root_path
        workdir = os.path.join(td, 'work')
        notebook_dir = os.path.join(workdir, os.path.dirname(notebook))
        shutil.copytree(root_path, workdir)
        if prerun is not None:
            prerun(workdir)
        fullpath = os.path.join(workdir, notebook)
        with open(fullpath, "r") as f:
            nb = nbformat.read(f, as_version=4)
        ep = ExecutePreprocessor(kernel_name='python3', timeout=timeout)
        code_cells = [c for c in nb['cells'] if c['cell_type'] == 'code']
        # append a synthetic cell that prints every cell's result as JSON
        # (consumed by NotebookResult)
        nb['cells'].append(
            nbformat.from_dict({'cell_type': 'code',
                                'metadata': {},
                                'source': _create_code(len(code_cells))}
                               ))
        ep.preprocess(nb, {'metadata': {'path': notebook_dir}})
        return NotebookResult(nb)
def _default_repr(obj):
return repr(obj)
class ReprDict(dict):
"""Subclass of the built-in dict that will display using the Jupyterlab
JSON repr.
The class is recursive in that any entries that are also dictionaries
will be converted to ReprDict objects when returned.
"""
def __init__(self, *args, rootname="root", expanded=False, **kwargs):
"""Dictionary constructor
Parameters
----------
rootname : str
The value to display at the root of the tree
expanded : bool
Whether the view of the tree should start expanded
"""
self._rootname = rootname
self._expanded = expanded
super().__init__(*args, **kwargs)
def _repr_json_(self):
return json.loads(json.dumps(self, default=_default_repr)), \
{'expanded': self._expanded, 'root': self._rootname}
def __getitem__(self, key):
obj = super().__getitem__(key)
if type(obj) is dict:
return ReprDict(obj, expanded=self._expanded, rootname=key)
else:
return obj
|
|
# Copyright (c) 2010, Mats Kindahl, Charles Bell, and Lars Thalmann
# All rights reserved.
#
# Use of this source code is goverened by a BSD licence that can be
# found in the LICENCE file.
"""Test of the binary log reader.
"""
import sys
import os.path
_HERE = os.path.dirname(os.path.abspath(__file__))
_ROOTPATH = os.path.split(_HERE)[0]
sys.path.append(_ROOTPATH)
import glob
import time
import unittest
import struct
import mysql.replicant.binary_log as binlog
from itertools import izip, imap
def _data_file(fname):
    """Return the full path of the test data file named *fname*."""
    data_dir = os.path.join(_HERE, 'data')
    return os.path.join(data_dir, fname)
def _timestamp(string):
return int(time.mktime(time.strptime(string, "%Y-%m-%d %H:%M:%S")))
_STUBS = {
'context-bin.000001': [
{ 'type_code': binlog.FORMAT_DESCRIPTION_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-23 15:20:07'),
'size': 106-4,
'pos': 4,
'end_pos': 106,
'flags': 0,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:38:48'),
'size': 182-106,
'pos': 106,
'end_pos': 182,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:38:49'),
'size': 294-182,
'pos': 182,
'end_pos': 294,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:39:18'),
'size': 382-294,
'pos': 294,
'end_pos': 382,
'flags': 16,
},
{ 'type_code': binlog.INTVAR_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:39:31'),
'size': 410-382,
'pos': 382,
'end_pos': 410,
'flags': 0,
},
{ 'type_code': binlog.USER_VAR_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:39:31'),
'size': 454-410,
'pos': 410,
'end_pos': 454,
'flags': 0,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:39:31'),
'size': 547-454,
'pos': 454,
'end_pos': 547,
'flags': 16,
},
{ 'type_code': binlog.ROTATE_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-25 00:39:54'),
'size': 592-547,
'pos': 547,
'end_pos': 592,
'flags': 0,
},
],
'mysqld1-bin.000005': [
{ 'type_code': binlog.FORMAT_DESCRIPTION_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:02'),
'size': 106-4,
'pos': 4,
'end_pos': 106,
'flags': 0,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 186-106,
'pos': 106,
'end_pos': 186,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 290-186,
'pos': 186,
'end_pos': 290,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 394-290,
'pos': 290,
'end_pos': 394,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 474-394,
'pos': 394,
'end_pos': 474,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 578-474,
'pos': 474,
'end_pos': 578,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 682-578,
'pos': 578,
'end_pos': 682,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 768-682,
'pos': 682,
'end_pos': 768,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 11:27:03'),
'size': 854-768,
'pos': 768,
'end_pos': 854,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 934-854,
'pos': 854,
'end_pos': 934,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1038-934,
'pos': 934,
'end_pos': 1038,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1142-1038,
'pos': 1038,
'end_pos': 1142,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1222-1142,
'pos': 1142,
'end_pos': 1222,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1326-1222,
'pos': 1222,
'end_pos': 1326,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1430-1326,
'pos': 1326,
'end_pos': 1430,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1516-1430,
'pos': 1430,
'end_pos': 1516,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:55:09'),
'size': 1602-1516,
'pos': 1516,
'end_pos': 1602,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 1682-1602,
'pos': 1602,
'end_pos': 1682,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 1786-1682,
'pos': 1682,
'end_pos': 1786,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 1890-1786,
'pos': 1786,
'end_pos': 1890,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 1970-1890,
'pos': 1890,
'end_pos': 1970,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 2074-1970,
'pos': 1970,
'end_pos': 2074,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 2178-2074,
'pos': 2074,
'end_pos': 2178,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 2264-2178,
'pos': 2178,
'end_pos': 2264,
'flags': 16,
},
{ 'type_code': binlog.QUERY_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-16 14:56:31'),
'size': 2350-2264,
'pos': 2264,
'end_pos': 2350,
'flags': 16,
},
{ 'type_code': binlog.STOP_EVENT,
'server_id': 1,
'when': _timestamp('2011-08-19 21:44:29'),
'size': 2369-2350,
'pos': 2350,
'end_pos': 2369,
'flags': 0,
},
],
}
_DECODED = {
'context-bin.000001': [
{ 'type_name': 'FormatDescription',
'binlog_version': 4,
'server_version': '5.1.41-3ubuntu12.10-log',
'created': _timestamp('2011-08-23 15:20:07'),
},
{ 'type_name': 'Query',
'database': 'test',
'query': 'drop table t1',
'thread_id': 82,
'exec_time': 0,
'error_code': 0,
'sql_mode': 0,
},
{
'type_name': 'Query',
'database': 'test',
'query': 'create table t1(a int primary key auto_increment)',
'thread_id': 82,
'exec_time': 0,
'error_code': 0,
},
{
'type_name': 'Query',
'database': 'test',
'query': 'alter table t1 add b text',
'thread_id': 82,
'exec_time': 0,
'error_code': 0,
},
{
'type_name': 'Intvar',
'variable': binlog.IntvarEvent.INSERT_ID,
'value': 1,
},
{
'type_name': 'Uservar',
'variable': 'foo',
'value': 42,
},
{
'type_name': 'Query',
'database': 'test',
'query': 'insert into t1(b) values(@foo)',
'thread_id': 82,
'exec_time': 0,
'error_code': 0,
'sql_mode': 0,
},
{
'type_name': 'Rotate',
'next_pos': 4,
'next_file': 'mysqld1-bin.000002',
}
],
'mysqld1-bin.000005': [
{ 'type_name': 'FormatDescription',
'binlog_version': 4,
'server_version': '5.1.41-3ubuntu12.10-log',
'created': _timestamp('2011-08-16 11:27:02'),
},
{ 'type_name': 'Query',
'thread_id': 2,
'exec_time': 0,
'error_code': 0,
'query': "DROP USER 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 2,
'exec_time': 0,
'error_code': 0,
'query': "CREATE USER 'repl_user' IDENTIFIED BY 'xyzzy'",
},
{ 'type_name': 'Query',
'thread_id': 2,
'exec_time': 0,
'error_code': 0,
'query': "GRANT REPLICATION SLAVE ON *.* TO 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 6,
'exec_time': 0,
'error_code': 0,
'query': "DROP USER 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 6,
'exec_time': 0,
'error_code': 0,
'query': "CREATE USER 'repl_user' IDENTIFIED BY 'xyzzy'",
},
{ 'type_name': 'Query',
'thread_id': 6,
'exec_time': 0,
'error_code': 0,
'query': "GRANT REPLICATION SLAVE ON *.* TO 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 7,
'exec_time': 0,
'error_code': 0,
'database': 'test',
'query': "DROP TABLE IF EXISTS t1",
},
{ 'type_name': 'Query',
'thread_id': 7,
'exec_time': 0,
'error_code': 0,
'database': 'test',
'query': "CREATE TABLE t1 (a INT)",
},
{ 'type_name': 'Query',
'thread_id': 14,
'exec_time': 0,
'error_code': 0,
'query': "DROP USER 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 14,
'exec_time': 0,
'error_code': 0,
'query': "CREATE USER 'repl_user' IDENTIFIED BY 'xyzzy'",
},
{ 'type_name': 'Query',
'thread_id': 14,
'exec_time': 0,
'error_code': 0,
'query': "GRANT REPLICATION SLAVE ON *.* TO 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 18,
'exec_time': 0,
'error_code': 0,
'query': "DROP USER 'repl_user'",
},
{ 'type_name': 'Query',
'thread_id': 18,
'exec_time': 0,
'error_code': 0,
'query': "CREATE USER 'repl_user' IDENTIFIED BY 'xyzzy'",
},
{ 'type_name': 'Query',
'thread_id': 18,
'exec_time': 0,
'error_code': 0,
'query': "GRANT REPLICATION SLAVE ON *.* TO 'repl_user'",
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Query',
},
{ 'type_name': 'Stop',
},
],
}
# Message template used by the assertions below to report a mismatching
# event field, including the event's position in the binary log.
_FIELD_CHECK_FRM = "{fname}: '{field}' for event at position {pos} is '{value}', expected '{expect}'"
class TestBinlogParser(unittest.TestCase):
    """Unit test for testing the binary log parser support.

    .. note:: This module is Python 2 only: it relies on
       ``itertools.izip``/``imap``, which were removed in Python 3.
    """
    def __init__(self, methodName, options={}):
        # NOTE(review): ``options`` uses a mutable default and is then
        # ignored entirely -- presumably kept for interface symmetry with
        # other test modules; confirm before removing.
        super(TestBinlogParser, self).__init__(methodName)
    def test_size_and_end_pos(self):
        """Test that the length and end position of each event matches
        the expected position. This only works for binary logs, not
        for relay logs.
        """
        for fname in _STUBS.keys():
            binary_log = binlog.BinaryLog(_data_file(fname))
            for event in binary_log.events():
                # Stop events are excluded from the pos + size == end_pos
                # invariant check
                if event.type_code not in [binlog.STOP_EVENT]:
                    msg = _FIELD_CHECK_FRM.format(fname=fname,
                                                  field='pos + size',
                                                  pos=event.pos,
                                                  value=event.pos + event.size,
                                                  expect=event.end_pos)
                    self.assertEqual(event.pos + event.size, event.end_pos, msg)
    def test_common_header(self):
        """Test that the common header for event in each file is correct.
        """
        for fname in _STUBS.keys():
            binary_log = binlog.BinaryLog(_data_file(fname))
            # compare each parsed event pairwise against its expected stub
            for event, stub in izip(binary_log.events(), _STUBS[fname]):
                for field in stub.keys():
                    msg = _FIELD_CHECK_FRM.format(fname=fname, field=field,
                                                  pos=event.pos,
                                                  value=getattr(event, field),
                                                  expect=stub[field])
                    self.assertEqual(getattr(event, field), stub[field], msg)
    def test_decoded(self):
        """Test that the decoded events work as expected.
        """
        for fname in _DECODED.keys():
            binary_log = binlog.BinaryLog(_data_file(fname))
            events = imap((lambda stub: stub.decode()), binary_log.events())
            for event, data in izip(events, _DECODED[fname]):
                # NOTE(review): ``flds`` is only used by the commented-out
                # debug print below
                flds = filter(lambda x: not x.startswith('_'), dir(event))
                # print "Event fields: %s" % ' '.join(flds)
                for field in data.keys():
                    value = getattr(event, field)
                    expect = data[field]
                    msg = _FIELD_CHECK_FRM.format(fname=fname, field=field,
                                                  pos=event.pos,
                                                  value=value, expect=expect)
                    self.assertEqual(value, expect, msg)
def suite(options=None):
    """Build a test suite containing every ``TestBinlogParser`` case.

    Parameters
    ----------
    options : dict, optional
        Extra options forwarded to each test case; defaults to an empty
        dict. (A ``None`` sentinel is used instead of ``options={}`` to
        avoid the shared-mutable-default pitfall.)
    """
    if options is None:
        options = {}
    # local name chosen so it does not shadow this function
    result = unittest.TestSuite()
    for name in unittest.defaultTestLoader.getTestCaseNames(TestBinlogParser):
        result.addTest(TestBinlogParser(name, options))
    return result
if __name__ == '__main__':
    # run this module's tests through the suite() factory above
    unittest.main(defaultTest='suite')
|
|
#!/usr/bin/env python
"""Library for handling batch HTTP requests for apitools."""
import collections
import email.generator as generator
import email.mime.multipart as mime_multipart
import email.mime.nonmultipart as mime_nonmultipart
import email.parser as email_parser
import itertools
import time
import uuid
import six
from six.moves import http_client
from six.moves import urllib_parse
from apitools.base.py import exceptions
from apitools.base.py import http_wrapper
# Public API of this module: only the batch-request entry point is exported.
__all__ = [
    'BatchApiRequest',
]
class RequestResponseAndHandler(collections.namedtuple(
        'RequestResponseAndHandler', ['request', 'response', 'handler'])):
    """Container for data related to completing an HTTP request.

    This contains an HTTP request, its response, and a callback for handling
    the response from the server.

    Attributes:
      request: An http_wrapper.Request object representing the HTTP request.
      response: The http_wrapper.Response object returned from the server.
      handler: A callback function accepting two arguments, response
          and exception. Response is an http_wrapper.Response object, and
          exception is an apiclient.errors.HttpError object if an error
          occurred, or otherwise None.
    """
class BatchApiRequest(object):
    class ApiCall(object):
        """Holds request and response information for each request.

        ApiCalls are ultimately exposed to the client once the HTTP
        batch request has been completed.

        Attributes:
          http_request: A client-supplied http_wrapper.Request to be
              submitted to the server.
          response: A http_wrapper.Response object given by the server as a
              response to the user request, or None if an error occurred.
          exception: An apiclient.errors.HttpError object if an error
              occurred, or None.
        """
        def __init__(self, request, retryable_codes, service, method_config):
            """Initialize an individual API request.

            Args:
              request: An http_wrapper.Request object.
              retryable_codes: A list of integer HTTP codes that can
                  be retried.
              service: A service inheriting from base_api.BaseApiService.
              method_config: Method config for the desired API request.
            """
            # 401 is always treated as retryable, on top of the
            # caller-provided codes
            self.__retryable_codes = list(
                set(retryable_codes + [http_client.UNAUTHORIZED]))
            self.__http_response = None
            self.__service = service
            self.__method_config = method_config
            self.http_request = request
            # TODO(user): Add some validation to these fields.
            self.__response = None
            self.__exception = None
        @property
        def is_error(self):
            # a call is in error once HandleResponse recorded an exception
            return self.exception is not None
        @property
        def response(self):
            return self.__response
        @property
        def exception(self):
            return self.__exception
        @property
        def authorization_failed(self):
            # truthy when the server answered 401 for this call
            return (self.__http_response and (
                self.__http_response.status_code == http_client.UNAUTHORIZED))
        @property
        def terminal_state(self):
            # terminal means: a response was received and it is not retryable
            if self.__http_response is None:
                return False
            response_code = self.__http_response.status_code
            return response_code not in self.__retryable_codes
        def HandleResponse(self, http_response, exception):
            """Handles an incoming http response to the request in http_request.

            This is intended to be used as a callback function for
            BatchHttpRequest.Add.

            Args:
              http_response: Deserialized http_wrapper.Response object.
              exception: apiclient.errors.HttpError object if an error
                  occurred.
            """
            self.__http_response = http_response
            self.__exception = exception
            # only deserialize the payload once the call is final and clean
            if self.terminal_state and not self.__exception:
                self.__response = self.__service.ProcessHttpResponse(
                    self.__method_config, self.__http_response)
def __init__(self, batch_url=None, retryable_codes=None):
"""Initialize a batch API request object.
Args:
batch_url: Base URL for batch API calls.
retryable_codes: A list of integer HTTP codes that can be retried.
"""
self.api_requests = []
self.retryable_codes = retryable_codes or []
self.batch_url = batch_url or 'https://www.googleapis.com/batch'
def Add(self, service, method, request, global_params=None):
"""Add a request to the batch.
Args:
service: A class inheriting base_api.BaseApiService.
method: A string indicated desired method from the service. See
the example in the class docstring.
request: An input message appropriate for the specified
service.method.
global_params: Optional additional parameters to pass into
method.PrepareHttpRequest.
Returns:
None
"""
# Retrieve the configs for the desired method and service.
method_config = service.GetMethodConfig(method)
upload_config = service.GetUploadConfig(method)
# Prepare the HTTP Request.
http_request = service.PrepareHttpRequest(
method_config, request, global_params=global_params,
upload_config=upload_config)
# Create the request and add it to our master list.
api_request = self.ApiCall(
http_request, self.retryable_codes, service, method_config)
self.api_requests.append(api_request)
def Execute(self, http, sleep_between_polls=5, max_retries=5):
"""Execute all of the requests in the batch.
Args:
http: httplib2.Http object for use in the request.
sleep_between_polls: Integer number of seconds to sleep between
polls.
max_retries: Max retries. Any requests that have not succeeded by
this number of retries simply report the last response or
exception, whatever it happened to be.
Returns:
List of ApiCalls.
"""
requests = [request for request in self.api_requests
if not request.terminal_state]
for attempt in range(max_retries):
if attempt:
time.sleep(sleep_between_polls)
# Create a batch_http_request object and populate it with
# incomplete requests.
batch_http_request = BatchHttpRequest(batch_url=self.batch_url)
for request in requests:
batch_http_request.Add(
request.http_request, request.HandleResponse)
batch_http_request.Execute(http)
# Collect retryable requests.
requests = [request for request in self.api_requests if not
request.terminal_state]
if hasattr(http.request, 'credentials'):
if any(request.authorization_failed for request in requests):
http.request.credentials.refresh(http)
if not requests:
break
return self.api_requests
class BatchHttpRequest(object):
    """Batches multiple http_wrapper.Request objects into a single request."""

    def __init__(self, batch_url, callback=None):
        """Constructor for a BatchHttpRequest.

        Args:
          batch_url: URL to send batch requests to.
          callback: A callback to be called for each response, of the
              form callback(response, exception). The first parameter is
              the deserialized Response object. The second is an
              apiclient.errors.HttpError exception object if an HTTP error
              occurred while processing the request, or None if no error
              occurred.
        """
        # Endpoint to which these requests are sent.
        self.__batch_url = batch_url
        # Global callback to be called for each individual response in the
        # batch.
        self.__callback = callback
        # List of requests, responses and handlers.
        self.__request_response_handlers = {}
        # The last auto generated id.
        self.__last_auto_id = itertools.count()
        # Unique ID on which to base the Content-ID headers.
        self.__base_id = uuid.uuid4()

    def _ConvertIdToHeader(self, request_id):
        """Convert an id to a Content-ID header value.

        Args:
          request_id: String identifier for an individual request.

        Returns:
          A Content-ID header with the id_ encoded into it. A UUID is
          prepended to the value because Content-ID headers are
          supposed to be universally unique.
        """
        return '<%s+%s>' % (self.__base_id, urllib_parse.quote(request_id))

    @staticmethod
    def _ConvertHeaderToId(header):
        """Convert a Content-ID header value to an id.

        Presumes the Content-ID header conforms to the format that
        _ConvertIdToHeader() returns.

        Args:
          header: A string indicating the Content-ID header value.

        Returns:
          The extracted id value.

        Raises:
          BatchError if the header is not in the expected format.
        """
        # Bug fix: a well-formed header must both start with '<' AND end
        # with '>'. The previous `or` accepted malformed values such as
        # '<abc' or 'abc>'.
        if not (header.startswith('<') and header.endswith('>')):
            raise exceptions.BatchError(
                'Invalid value for Content-ID: %s' % header)
        if '+' not in header:
            raise exceptions.BatchError(
                'Invalid value for Content-ID: %s' % header)
        # rsplit so that '+' characters inside the UUID prefix (none are
        # expected, but be conservative) do not break extraction.
        _, request_id = header[1:-1].rsplit('+', 1)
        return urllib_parse.unquote(request_id)

    def _SerializeRequest(self, request):
        """Convert a http_wrapper.Request object into a string.

        Args:
          request: A http_wrapper.Request to serialize.

        Returns:
          The request as a string in application/http format.
        """
        # Construct status line: path + query only; the host is carried in
        # a separate Host header below.
        parsed = urllib_parse.urlsplit(request.url)
        request_line = urllib_parse.urlunsplit(
            (None, None, parsed.path, parsed.query, None))
        # urlunsplit returns bytes on Python 2 and text on Python 3;
        # normalize to text so the u''.join below works on both.
        if isinstance(request_line, bytes):
            request_line = request_line.decode('utf-8')
        status_line = u' '.join((
            request.http_method,
            request_line,
            u'HTTP/1.1\n'
        ))
        major, minor = request.headers.get(
            'content-type', 'application/json').split('/')
        msg = mime_nonmultipart.MIMENonMultipart(major, minor)
        # MIMENonMultipart adds its own Content-Type header.
        # Keep all of the other headers in `request.headers`.
        for key, value in request.headers.items():
            if key == 'content-type':
                continue
            msg[key] = value
        msg['Host'] = parsed.netloc
        msg.set_unixfrom(None)
        if request.body is not None:
            msg.set_payload(request.body)
        # Serialize the mime message.
        str_io = six.StringIO()
        # maxheaderlen=0 means don't line wrap headers.
        gen = generator.Generator(str_io, maxheaderlen=0)
        gen.flatten(msg, unixfrom=False)
        body = str_io.getvalue()
        return status_line + body

    def _DeserializeResponse(self, payload):
        """Convert string into Response and content.

        Args:
          payload: Header and body string to be deserialized.

        Returns:
          A Response object
        """
        # Strip off the status line.
        status_line, payload = payload.split('\n', 1)
        _, status, _ = status_line.split(' ', 2)
        # Parse the rest of the response.
        parser = email_parser.Parser()
        msg = parser.parsestr(payload)
        # Get the headers.
        info = dict(msg)
        info['status'] = status
        # Create Response from the parsed headers.
        content = msg.get_payload()
        return http_wrapper.Response(info, content, self.__batch_url)

    def _NewId(self):
        """Create a new id.

        Auto incrementing number that avoids conflicts with ids already used.

        Returns:
          A new unique id string.
        """
        return str(next(self.__last_auto_id))

    def Add(self, request, callback=None):
        """Add a new request.

        Args:
          request: A http_wrapper.Request to add to the batch.
          callback: A callback to be called for this response, of the
              form callback(response, exception). The first parameter is the
              deserialized response object. The second is an
              apiclient.errors.HttpError exception object if an HTTP error
              occurred while processing the request, or None if no errors
              occurred.

        Returns:
          None
        """
        # The response slot is filled in later by _Execute.
        handler = RequestResponseAndHandler(request, None, callback)
        self.__request_response_handlers[self._NewId()] = handler

    def _Execute(self, http):
        """Serialize batch request, send to server, process response.

        Args:
          http: A httplib2.Http object to be used to make the request with.

        Raises:
          httplib2.HttpLib2Error if a transport error has occured.
          apiclient.errors.BatchError if the response is the wrong format.
        """
        message = mime_multipart.MIMEMultipart('mixed')
        # Message should not write out its own headers.
        setattr(message, '_write_headers', lambda self: None)
        # Add all the individual requests.
        for key in self.__request_response_handlers:
            msg = mime_nonmultipart.MIMENonMultipart('application', 'http')
            msg['Content-Transfer-Encoding'] = 'binary'
            msg['Content-ID'] = self._ConvertIdToHeader(key)
            body = self._SerializeRequest(
                self.__request_response_handlers[key].request)
            msg.set_payload(body)
            message.attach(msg)
        request = http_wrapper.Request(self.__batch_url, 'POST')
        request.body = message.as_string()
        request.headers['content-type'] = (
            'multipart/mixed; boundary="%s"') % message.get_boundary()
        response = http_wrapper.MakeRequest(http, request)
        if response.status_code >= 300:
            raise exceptions.HttpError.FromResponse(response)
        # Prepend with a content-type header so Parser can handle it.
        header = 'content-type: %s\r\n\r\n' % response.info['content-type']
        parser = email_parser.Parser()
        mime_response = parser.parsestr(header + response.content)
        if not mime_response.is_multipart():
            raise exceptions.BatchError(
                'Response not in multipart/mixed format.')
        # Match each response part back to its request via the Content-ID.
        for part in mime_response.get_payload():
            request_id = self._ConvertHeaderToId(part['Content-ID'])
            response = self._DeserializeResponse(part.get_payload())
            # Disable protected access because namedtuple._replace(...)
            # is not actually meant to be protected.
            self.__request_response_handlers[request_id] = (
                self.__request_response_handlers[request_id]._replace(
                    response=response))

    def Execute(self, http):
        """Execute all the requests as a single batched HTTP request.

        Args:
          http: A httplib2.Http object to be used with the request.

        Returns:
          None

        Raises:
          BatchError if the response is the wrong format.
        """
        self._Execute(http)
        # Fan each response out to its per-request callback and to the
        # global callback, with an HttpError for non-2xx statuses.
        for key in self.__request_response_handlers:
            response = self.__request_response_handlers[key].response
            callback = self.__request_response_handlers[key].handler
            exception = None
            if response.status_code >= 300:
                exception = exceptions.HttpError.FromResponse(response)
            if callback is not None:
                callback(response, exception)
            if self.__callback is not None:
                self.__callback(response, exception)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Runtime type checking support.
For internal use only; no backwards-compatibility guarantees.
"""
# pytype: skip-file
from __future__ import absolute_import
import collections
import collections.abc
import inspect
import types
from future.utils import raise_with_traceback
from past.builtins import unicode
from apache_beam import pipeline
from apache_beam.pvalue import TaggedOutput
from apache_beam.transforms import core
from apache_beam.transforms.core import DoFn
from apache_beam.transforms.window import WindowedValue
from apache_beam.typehints.decorators import GeneratorWrapper
from apache_beam.typehints.decorators import TypeCheckError
from apache_beam.typehints.decorators import _check_instance_type
from apache_beam.typehints.decorators import getcallargs_forhints
from apache_beam.typehints.typehints import CompositeTypeHintError
from apache_beam.typehints.typehints import SimpleTypeHintError
from apache_beam.typehints.typehints import check_constraint
class AbstractDoFnWrapper(DoFn):
  """Abstract base for wrappers that delegate every call to an inner DoFn."""
  def __init__(self, dofn):
    super(AbstractDoFnWrapper, self).__init__()
    # The wrapped DoFn; every lifecycle method below delegates to it.
    self.dofn = dofn

  def wrapper(self, method, args, kwargs):
    """Hook through which delegated bundle/process calls are routed.

    Subclasses override this to add behavior around the delegated call.
    """
    return method(*args, **kwargs)

  def _inspect_start_bundle(self):
    return self.dofn.get_function_arguments('start_bundle')

  def _inspect_process(self):
    return self.dofn.get_function_arguments('process')

  def _inspect_finish_bundle(self):
    return self.dofn.get_function_arguments('finish_bundle')

  def setup(self):
    return self.dofn.setup()

  def start_bundle(self, *args, **kwargs):
    return self.wrapper(self.dofn.start_bundle, args, kwargs)

  def process(self, *args, **kwargs):
    return self.wrapper(self.dofn.process, args, kwargs)

  def finish_bundle(self, *args, **kwargs):
    return self.wrapper(self.dofn.finish_bundle, args, kwargs)

  def teardown(self):
    return self.dofn.teardown()
class OutputCheckWrapperDoFn(AbstractDoFnWrapper):
  """A DoFn that verifies against common errors in the output type."""
  def __init__(self, dofn, full_label):
    super(OutputCheckWrapperDoFn, self).__init__(dofn)
    # Full transform label, used to identify the failing step in errors.
    self.full_label = full_label

  def wrapper(self, method, args, kwargs):
    """Delegate the call, tagging any TypeCheckError with the label."""
    try:
      result = method(*args, **kwargs)
    except TypeCheckError as e:
      error_msg = (
          'Runtime type violation detected within ParDo(%s): '
          '%s' % (self.full_label, e))
      raise_with_traceback(TypeCheckError(error_msg))
    else:
      return self._check_type(result)

  def _check_type(self, output):
    """Reject output shapes that are common mistakes.

    None is allowed (nothing emitted); dict/str/bytes are almost always a
    mistake because they would be iterated element-wise; anything else
    must be iterable.
    """
    if output is None:
      return output
    elif isinstance(output, (dict, bytes, str, unicode)):
      object_type = type(output).__name__
      raise TypeCheckError(
          'Returning a %s from a ParDo or FlatMap is '
          'discouraged. Please use list("%s") if you really '
          'want this behavior.' % (object_type, output))
    # Fix: collections.Iterable was deprecated since Python 3.3 and
    # removed in Python 3.10; the ABC lives in collections.abc.
    elif not isinstance(output, collections.abc.Iterable):
      raise TypeCheckError(
          'FlatMap and ParDo must return an '
          'iterable. %s was returned instead.' % type(output))
    return output
class TypeCheckWrapperDoFn(AbstractDoFnWrapper):
  """A wrapper around a DoFn which performs type-checking of input and output.
  """
  def __init__(self, dofn, type_hints, label=None):
    """Wraps ``dofn``, resolving its declared input/output type hints.

    Args:
      dofn: The DoFn to wrap.
      type_hints: Object exposing ``input_types`` and
          ``simple_output_type(label)`` (presumably an IOTypeHints --
          TODO confirm against decorators module).
      label: Optional transform label used to resolve the output hint.
    """
    super(TypeCheckWrapperDoFn, self).__init__(dofn)
    self._process_fn = self.dofn._process_argspec_fn()
    if type_hints.input_types:
      input_args, input_kwargs = type_hints.input_types
      # Bind positional/keyword hints to the process function's actual
      # parameter names so each argument can be checked by name.
      self._input_hints = getcallargs_forhints(
          self._process_fn, *input_args, **input_kwargs)
    else:
      self._input_hints = None
    # TODO(robertwb): Multi-output.
    self._output_type_hint = type_hints.simple_output_type(label)

  def wrapper(self, method, args, kwargs):
    # Every delegated lifecycle call has its result output-type-checked.
    result = method(*args, **kwargs)
    return self._type_check_result(result)

  def process(self, *args, **kwargs):
    # Check each bound argument against its hint before delegating.
    if self._input_hints:
      actual_inputs = inspect.getcallargs(self._process_fn, *args, **kwargs) # pylint: disable=deprecated-method
      for var, hint in self._input_hints.items():
        if hint is actual_inputs[var]:
          # self parameter
          continue
        _check_instance_type(hint, actual_inputs[var], var, True)
    return self._type_check_result(self.dofn.process(*args, **kwargs))

  def _type_check_result(self, transform_results):
    # Nothing to check when there is no hint or no output.
    if self._output_type_hint is None or transform_results is None:
      return transform_results
    def type_check_output(o):
      # TODO(robertwb): Multi-output.
      # Unwrap tagged/windowed values before checking the payload.
      x = o.value if isinstance(o, (TaggedOutput, WindowedValue)) else o
      self._type_check(self._output_type_hint, x, is_input=False)
    # If the return type is a generator, then we will need to interleave our
    # type-checking with its normal iteration so we don't deplete the
    # generator initially just by type-checking its yielded contents.
    if isinstance(transform_results, types.GeneratorType):
      return GeneratorWrapper(transform_results, type_check_output)
    for o in transform_results:
      type_check_output(o)
    return transform_results

  def _type_check(self, type_constraint, datum, is_input):
    """Typecheck a PTransform related datum according to a type constraint.

    This function is used to optionally type-check either an input or an output
    to a PTransform.

    Args:
      type_constraint: An instance of a typehints.TypeContraint, one of the
          white-listed builtin Python types, or a custom user class.
      datum: An instance of a Python object.
      is_input: True if 'datum' is an input to a PTransform's DoFn. False
          otherwise.

    Raises:
      TypeError: If 'datum' fails to type-check according to 'type_constraint'.
    """
    datum_type = 'input' if is_input else 'output'
    try:
      check_constraint(type_constraint, datum)
    except CompositeTypeHintError as e:
      # Composite errors already carry a full message; just re-wrap.
      raise_with_traceback(TypeCheckError(e.args[0]))
    except SimpleTypeHintError:
      error_msg = (
          "According to type-hint expected %s should be of type %s. "
          "Instead, received '%s', an instance of type %s." %
          (datum_type, type_constraint, datum, type(datum)))
      raise_with_traceback(TypeCheckError(error_msg))
class TypeCheckCombineFn(core.CombineFn):
  """A wrapper around a CombineFn performing type-checking of input and output.
  """
  def __init__(self, combinefn, type_hints, label=None):
    self._combinefn = combinefn
    self._input_type_hint = type_hints.input_types
    self._output_type_hint = type_hints.simple_output_type(label)
    self._label = label

  def _reraise_with_label(self, exc):
    # Re-raise a TypeCheckError annotated with the transform label so the
    # offending step can be located in a larger pipeline.
    error_msg = (
        'Runtime type violation detected within %s: '
        '%s' % (self._label, exc))
    raise_with_traceback(TypeCheckError(error_msg))

  def create_accumulator(self, *args, **kwargs):
    """Forward directly to the wrapped CombineFn."""
    return self._combinefn.create_accumulator(*args, **kwargs)

  def add_input(self, accumulator, element, *args, **kwargs):
    """Type-check ``element`` before handing it to the wrapped CombineFn."""
    if self._input_type_hint:
      # The first positional input hint is a key-value tuple type;
      # position 1 of its tuple_types is the value type to check.
      value_hint = self._input_type_hint[0][0].tuple_types[1]
      try:
        _check_instance_type(value_hint, element, 'element', True)
      except TypeCheckError as e:
        self._reraise_with_label(e)
    return self._combinefn.add_input(accumulator, element, *args, **kwargs)

  def merge_accumulators(self, accumulators, *args, **kwargs):
    """Forward directly to the wrapped CombineFn."""
    return self._combinefn.merge_accumulators(accumulators, *args, **kwargs)

  def compact(self, accumulator, *args, **kwargs):
    """Forward directly to the wrapped CombineFn."""
    return self._combinefn.compact(accumulator, *args, **kwargs)

  def extract_output(self, accumulator, *args, **kwargs):
    """Extract the result, then type-check it against the output hint."""
    result = self._combinefn.extract_output(accumulator, *args, **kwargs)
    if self._output_type_hint:
      try:
        _check_instance_type(
            self._output_type_hint.tuple_types[1], result, None, True)
      except TypeCheckError as e:
        self._reraise_with_label(e)
    return result
class TypeCheckVisitor(pipeline.PipelineVisitor):
  """Pipeline visitor that swaps DoFns/CombineFns for type-checking wrappers."""

  # Tracks whether we are currently inside a CombinePerKey composite.
  _in_combine = False

  def enter_composite_transform(self, applied_transform):
    transform = applied_transform.transform
    if isinstance(transform, core.CombinePerKey):
      self._in_combine = True
      # Remember the wrapped fn so the inner CombineValuesDoFn can be
      # pointed at the same instance in visit_transform.
      wrapped = TypeCheckCombineFn(
          transform.fn,
          transform.get_type_hints(),
          applied_transform.full_label)
      self._wrapped_fn = transform.fn = wrapped

  def leave_composite_transform(self, applied_transform):
    if isinstance(applied_transform.transform, core.CombinePerKey):
      self._in_combine = False

  def visit_transform(self, applied_transform):
    transform = applied_transform.transform
    if not isinstance(transform, core.ParDo):
      return
    if self._in_combine:
      # Inside a combine, only redirect the combinefn of the values DoFn.
      if isinstance(transform.fn, core.CombineValuesDoFn):
        transform.fn.combinefn = self._wrapped_fn
    else:
      # Plain ParDo: wrap with type checking plus output sanity checking.
      transform.fn = transform.dofn = OutputCheckWrapperDoFn(
          TypeCheckWrapperDoFn(
              transform.fn,
              transform.get_type_hints(),
              applied_transform.full_label),
          applied_transform.full_label)
|
|
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Ashley Whetter <ashley@awhetter.co.uk>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Unit tests for the yield documentation checking in the
`DocstringChecker` in :mod:`pylint.extensions.check_docs`
"""
from __future__ import division, print_function, absolute_import
import astroid
from pylint.testutils import CheckerTestCase, Message, set_config
from pylint.extensions.docparams import DocstringParameterChecker
class TestDocstringCheckerYield(CheckerTestCase):
    """Tests for yield documentation checking in DocstringParameterChecker.

    Covers the Sphinx, Google and numpy docstring styles.
    """
    CHECKER_CLASS = DocstringParameterChecker

    # --- Missing or absent docstrings -----------------------------------
    def test_ignores_no_docstring(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            yield False #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    @set_config(accept_no_yields_doc=False)
    def test_warns_no_docstring(self):
        node = astroid.extract_node('''
        def my_func(self):
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node),
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_ignores_unknown_style(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring."""
            yield False #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    # --- Partially / fully missing yield documentation ------------------
    def test_warn_partial_sphinx_yields(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :returns: Always False
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_partial_sphinx_yields_type(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :rtype: bool
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_missing_sphinx_yields(self):
        node = astroid.extract_node('''
        def my_func(self, doc_type):
            """This is a docstring.

            :param doc_type: Sphinx
            :type doc_type: str
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node),
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_partial_google_yields(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                Always False
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_partial_google_yields_type(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                bool:
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_missing_google_yields(self):
        node = astroid.extract_node('''
        def my_func(self, doc_type):
            """This is a docstring.

            Parameters:
                doc_type (str): Google
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node),
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warn_missing_numpy_yields(self):
        node = astroid.extract_node('''
        def my_func(self, doc_type):
            """This is a docstring.

            Arguments
            ---------
            doc_type : str
                Numpy
            """
            yield False
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node),
            Message(msg_id='missing-yield-type-doc', node=node)):
            self.checker.visit_yield(yield_node)

    # --- Fully documented yields produce no messages --------------------
    def test_find_sphinx_yields(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :return: Always False
            :rtype: bool
            """
            yield False #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_find_google_yields(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                bool: Always False
            """
            yield False #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_find_numpy_yields(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            bool
                Always False
            """
            yield False #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_sphinx_yield_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :returns: An object
            :rtype: :class:`mymodule.Class`
            """
            yield mymodule.Class() #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_google_yield_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                mymodule.Class: An object
            """
            yield mymodule.Class() #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_numpy_yield_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            mymodule.Class
                An object
            """
            yield mymodule.Class() #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_sphinx_yield_list_of_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :returns: An object
            :rtype: list(:class:`mymodule.Class`)
            """
            yield [mymodule.Class()] #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_google_yield_list_of_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                list(:class:`mymodule.Class`): An object
            """
            yield [mymodule.Class()] #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    def test_finds_numpy_yield_list_of_custom_class(self):
        yield_node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            list(:class:`mymodule.Class`)
                An object
            """
            yield [mymodule.Class()] #@
        ''')
        with self.assertNoMessages():
            self.checker.visit_yield(yield_node)

    # --- Type documented but description missing ------------------------
    def test_warns_sphinx_yield_list_of_custom_class_without_description(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            :rtype: list(:class:`mymodule.Class`)
            """
            yield [mymodule.Class()]
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warns_google_yield_list_of_custom_class_without_description(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                list(:class:`mymodule.Class`):
            """
            yield [mymodule.Class()]
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node)):
            self.checker.visit_yield(yield_node)

    def test_warns_numpy_yield_list_of_custom_class_without_description(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            list(:class:`mymodule.Class`)
            """
            yield [mymodule.Class()]
        ''')
        yield_node = node.body[0]
        with self.assertAddsMessages(
            Message(msg_id='missing-yield-doc', node=node)):
            self.checker.visit_yield(yield_node)

    # --- Redundant-yields checks (whole-function visits) ----------------
    # No such thing as redundant yield documentation for sphinx because it
    # doesn't support yield documentation
    def test_ignores_google_redundant_yield_doc_multiple_yields(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                int or None: One, or sometimes None.
            """
            if a_func():
                yield None
            yield 1
        ''')
        with self.assertNoMessages():
            self.checker.visit_functiondef(node)

    def test_ignores_numpy_redundant_yield_doc_multiple_yields(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            int
                One
            None
                Sometimes
            """
            if a_func():
                yield None
            yield 1
        ''')
        with self.assertNoMessages():
            self.checker.visit_functiondef(node)

    # No such thing as redundant yield documentation for sphinx because it
    # doesn't support yield documentation
    def test_warns_google_redundant_yield_doc_return(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields:
                int: One
            """
            return 1
        ''')
        with self.assertAddsMessages(
            Message(msg_id='redundant-yields-doc', node=node)):
            self.checker.visit_functiondef(node)

    def test_warns_numpy_redundant_yield_doc_return(self):
        node = astroid.extract_node('''
        def my_func(self):
            """This is a docstring.

            Yields
            -------
            int
                One
            """
            return 1
        ''')
        with self.assertAddsMessages(
            Message(msg_id='redundant-yields-doc', node=node)):
            self.checker.visit_functiondef(node)
|
|
"""Standalone Authenticator."""
import collections
import errno
import logging
import socket
from typing import DefaultDict
from typing import Dict
from typing import List
from typing import Set
from typing import Tuple
from typing import TYPE_CHECKING
import OpenSSL
from acme import challenges
from acme import standalone as acme_standalone
from certbot import achallenges
from certbot import errors
from certbot import interfaces
from certbot.display import util as display_util
from certbot.plugins import common
# Module-level logger for the standalone plugin.
logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    # Type alias (static-analysis only): maps each running dual-stack
    # server pair to the set of annotated challenges it currently serves.
    ServedType = DefaultDict[
        acme_standalone.BaseDualNetworkedServers,
        Set[achallenges.KeyAuthorizationAnnotatedChallenge]
    ]
class ServerManager:
    """Standalone servers manager.

    Manager for `ACMEServer` and `ACMETLSServer` instances.

    `certs` and `http_01_resources` correspond to
    `acme.crypto_util.SSLSocket.certs` and
    `acme.crypto_util.SSLSocket.http_01_resources` respectively. All
    created servers share the same certificates and resources, so if
    you're running both TLS and non-TLS instances, HTTP01 handlers
    will serve the same URLs!
    """
    def __init__(self, certs, http_01_resources):
        # Keyed by the *actual* bound port (see `run` for port 0 handling).
        self._instances: Dict[int, acme_standalone.BaseDualNetworkedServers] = {}
        self.certs = certs
        self.http_01_resources = http_01_resources

    def run(self, port, challenge_type, listenaddr=""):
        """Run ACME server on specified ``port``.

        This method is idempotent, i.e. all calls with the same pair of
        ``(port, challenge_type)`` will reuse the same server.

        :param int port: Port to run the server on.
        :param challenge_type: Subclass of `acme.challenges.Challenge`,
            currently only `acme.challenge.HTTP01`.
        :param str listenaddr: (optional) The address to listen on. Defaults to all addrs.

        :returns: DualNetworkedServers instance.
        :rtype: ACMEServerMixin
        """
        assert challenge_type == challenges.HTTP01
        existing = self._instances.get(port)
        if existing is not None:
            return existing

        try:
            servers = acme_standalone.HTTP01DualNetworkedServers(
                (listenaddr, port), self.http_01_resources)
        except socket.error as error:
            raise errors.StandaloneBindError(error, port)
        servers.serve_forever()

        # When port == 0 a random free port is chosen by the OS; both
        # paired servers share it, so index by the real bound port.
        real_port = servers.getsocknames()[0][1]
        self._instances[real_port] = servers
        return servers

    def stop(self, port):
        """Stop ACME server running on the specified ``port``.

        :param int port:
        """
        servers = self._instances[port]
        for sockname in servers.getsocknames():
            logger.debug("Stopping server at %s:%d...",
                         *sockname[:2])
        servers.shutdown_and_server_close()
        del self._instances[port]

    def running(self):
        """Return all running instances.

        Once the server is stopped using `stop`, it will not be
        returned.

        :returns: Mapping from ``port`` to ``servers``.
        :rtype: tuple
        """
        # Return a shallow copy so callers cannot mutate our registry.
        return dict(self._instances)
class Authenticator(common.Plugin, interfaces.Authenticator):
    """Standalone Authenticator.

    This authenticator creates its own ephemeral TCP listener on the
    necessary port in order to respond to incoming http-01
    challenges from the certificate authority. Therefore, it does not
    rely on any existing server program.
    """

    description = "Spin up a temporary webserver"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Tracks which challenges each running server pair is serving.
        self.served: ServedType = collections.defaultdict(set)
        # Stuff below is shared across threads (i.e. servers read
        # values, main thread writes). Due to the nature of CPython's
        # GIL, the operations are safe, c.f.
        # https://docs.python.org/2/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
        self.certs: Dict[bytes, Tuple[OpenSSL.crypto.PKey, OpenSSL.crypto.X509]] = {}
        self.http_01_resources: Set[acme_standalone.HTTP01RequestHandler.HTTP01Resource] = set()
        # The manager aliases certs/http_01_resources, so mutating them
        # here is visible to the servers it spawns.
        self.servers = ServerManager(self.certs, self.http_01_resources)

    @classmethod
    def add_parser_arguments(cls, add):
        pass  # No additional argument for the standalone plugin parser

    def more_info(self):  # pylint: disable=missing-function-docstring
        return("This authenticator creates its own ephemeral TCP listener "
               "on the necessary port in order to respond to incoming "
               "http-01 challenges from the certificate authority. Therefore, "
               "it does not rely on any existing server program.")

    def prepare(self):  # pylint: disable=missing-function-docstring
        pass

    def get_chall_pref(self, domain):
        # pylint: disable=unused-argument,missing-function-docstring
        return [challenges.HTTP01]

    def perform(self, achalls):  # pylint: disable=missing-function-docstring
        return [self._try_perform_single(achall) for achall in achalls]

    def _try_perform_single(self, achall):
        # Retry indefinitely; _handle_perform_error re-raises fatal
        # bind errors as PluginError, breaking the loop.
        while True:
            try:
                return self._perform_single(achall)
            except errors.StandaloneBindError as error:
                _handle_perform_error(error)

    def _perform_single(self, achall):
        servers, response = self._perform_http_01(achall)
        # Remember which server pair serves this challenge for cleanup.
        self.served[servers].add(achall)
        return response

    def _perform_http_01(self, achall):
        port = self.config.http01_port
        addr = self.config.http01_address
        servers = self.servers.run(port, challenges.HTTP01, listenaddr=addr)
        response, validation = achall.response_and_validation()
        resource = acme_standalone.HTTP01RequestHandler.HTTP01Resource(
            chall=achall.chall, response=response, validation=validation)
        self.http_01_resources.add(resource)
        return servers, response

    def cleanup(self, achalls):  # pylint: disable=missing-function-docstring
        # reduce self.served and close servers if no challenges are served
        for unused_servers, server_achalls in self.served.items():
            for achall in achalls:
                if achall in server_achalls:
                    server_achalls.remove(achall)
        # Stop any server pair that no longer serves any challenge.
        for port, servers in self.servers.running().items():
            if not self.served[servers]:
                self.servers.stop(port)

    def auth_hint(self, failed_achalls: List[achallenges.AnnotatedChallenge]) -> str:
        # User-facing hint shown when the CA could not validate.
        port, addr = self.config.http01_port, self.config.http01_address
        neat_addr = f"{addr}:{port}" if addr else f"port {port}"
        return ("The Certificate Authority failed to download the challenge files from "
                f"the temporary standalone webserver started by Certbot on {neat_addr}. "
                "Ensure that the listed domains point to this machine and that it can "
                "accept inbound connections from the internet.")
def _handle_perform_error(error):
    """React to a StandaloneBindError: raise a PluginError for fatal cases,
    return silently when the user chose to retry, re-raise otherwise."""
    bind_errno = error.socket_error.errno
    if bind_errno == errno.EACCES:
        raise errors.PluginError(
            "Could not bind TCP port {0} because you don't have "
            "the appropriate permissions (for example, you "
            "aren't running this program as "
            "root).".format(error.port))
    if bind_errno == errno.EADDRINUSE:
        msg = (
            "Could not bind TCP port {0} because it is already in "
            "use by another process on this system (such as a web "
            "server). Please stop the program in question and "
            "then try again.".format(error.port))
        # Returning without raising means the caller will retry the bind.
        if not display_util.yesno(msg, "Retry", "Cancel", default=False):
            raise errors.PluginError(msg)
    else:
        raise error
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from helpdesk.settings import HAS_TAG_SUPPORT
class Migration(SchemaMigration):
    # South schema migration: conditionally adds/removes the ``tags`` field
    # on Ticket depending on whether django-tagging support is enabled.

    def forwards(self, orm):
        # Adding field 'Ticket.tags' if HAS_TAG_SUPPORT is True
        if HAS_TAG_SUPPORT:
            db.add_column('helpdesk_ticket', 'tags',
                          self.gf('tagging.fields.TagField')(default=''),
                          keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Ticket.tags'
        if HAS_TAG_SUPPORT:
            db.delete_column('helpdesk_ticket', 'tags')

    # Frozen ORM snapshot used by South; auto-generated, do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'helpdesk.attachment': {
            'Meta': {'ordering': "['filename']", 'object_name': 'Attachment'},
            'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
            'filename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mime_type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'size': ('django.db.models.fields.IntegerField', [], {})
        },
        'helpdesk.customfield': {
            'Meta': {'object_name': 'CustomField'},
            'data_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'empty_selection_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': "'30'"}),
            'list_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'max_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'ordering': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'staff_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'helpdesk.emailtemplate': {
            'Meta': {'ordering': "['template_name', 'locale']", 'object_name': 'EmailTemplate'},
            'heading': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'plain_text': ('django.db.models.fields.TextField', [], {}),
            'subject': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'helpdesk.escalationexclusion': {
            'Meta': {'object_name': 'EscalationExclusion'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.followup': {
            'Meta': {'ordering': "['date']", 'object_name': 'FollowUp'},
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 5, 11, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.ignoreemail': {
            'Meta': {'object_name': 'IgnoreEmail'},
            'date': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
            'email_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keep_in_mailbox': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.kbcategory': {
            'Meta': {'ordering': "['title']", 'object_name': 'KBCategory'},
            'description': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'helpdesk.kbitem': {
            'Meta': {'ordering': "['title']", 'object_name': 'KBItem'},
            'answer': ('django.db.models.fields.TextField', [], {}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.KBCategory']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            'question': ('django.db.models.fields.TextField', [], {}),
            'recommendations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'votes': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'helpdesk.presetreply': {
            'Meta': {'ordering': "['name']", 'object_name': 'PreSetReply'},
            'body': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'queues': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['helpdesk.Queue']", 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.queue': {
            'Meta': {'ordering': "('title',)", 'object_name': 'Queue'},
            'allow_email_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'allow_public_submission': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'email_box_host': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'email_box_imap_folder': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'email_box_interval': ('django.db.models.fields.IntegerField', [], {'default': "'5'", 'null': 'True', 'blank': 'True'}),
            'email_box_last_check': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'email_box_pass': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'email_box_port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'email_box_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email_box_type': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
            'email_box_user': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'escalate_days': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locale': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'new_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'updated_ticket_cc': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.savedsearch': {
            'Meta': {'object_name': 'SavedSearch'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'query': ('django.db.models.fields.TextField', [], {}),
            'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'helpdesk.ticket': {
            'Meta': {'object_name': 'Ticket'},
            'assigned_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assigned_to'", 'null': 'True', 'to': "orm['auth.User']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'due_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_escalation': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
            'on_hold': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'priority': ('django.db.models.fields.IntegerField', [], {'default': '3', 'blank': '3'}),
            'queue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Queue']"}),
            'resolution': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'submitter_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'helpdesk.ticketcc': {
            'Meta': {'object_name': 'TicketCC'},
            'can_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'helpdesk.ticketchange': {
            'Meta': {'object_name': 'TicketChange'},
            'field': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'followup': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.FollowUp']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'new_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'old_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'helpdesk.ticketcustomfieldvalue': {
            'Meta': {'unique_together': "(('ticket', 'field'),)", 'object_name': 'TicketCustomFieldValue'},
            'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.CustomField']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['helpdesk.Ticket']"}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'helpdesk.ticketdependency': {
            'Meta': {'unique_together': "(('ticket', 'depends_on'),)", 'object_name': 'TicketDependency'},
            'depends_on': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'depends_on'", 'to': "orm['helpdesk.Ticket']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticketdependency'", 'to': "orm['helpdesk.Ticket']"})
        },
        'helpdesk.usersettings': {
            'Meta': {'object_name': 'UserSettings'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'settings_pickled': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    # The frozen model snapshot must mirror forwards(): include 'tags'
    # only when tagging support is available at import time.
    if HAS_TAG_SUPPORT:
        models['helpdesk.ticket'].update({'tags': ('tagging.fields.TagField', [],
                                                   {}),})

    complete_apps = ['helpdesk']
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap, product
import operator
import os
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from pyspark import cloudpickle
from pyspark.serializers import batched, Batch, dump_pickle, load_pickle, \
read_from_pickle_file
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx):
    # Py4J handle to the underlying JavaRDD; all JVM-side operations
    # (cache, checkpoint, collect, ...) are delegated through it.
    self._jrdd = jrdd
    self.is_cached = False
    self.is_checkpointed = False
    # Owning SparkContext; exposed read-only via the `context` property.
    self.ctx = ctx
    # Set by partitioning operations; None means no known partitioner.
    self._partitionFunc = None
@property
def context(self):
    """
    The L{SparkContext} that this RDD was created on.
    """
    # Read-only accessor for the `ctx` attribute set in __init__.
    return self.ctx
def cache(self):
    """
    Persist this RDD with the default storage level (C{MEMORY_ONLY}).

    Returns self to allow call chaining.
    """
    self.is_cached = True
    # Delegate the actual persistence to the JVM-side RDD.
    self._jrdd.cache()
    return self
def checkpoint(self):
    """
    Mark this RDD for checkpointing. It will be saved to a file inside the
    checkpoint directory set with L{SparkContext.setCheckpointDir()} and
    all references to its parent RDDs will be removed. This function must
    be called before any job has been executed on this RDD. It is strongly
    recommended that this RDD is persisted in memory, otherwise saving it
    on a file will require recomputation.
    """
    self.is_checkpointed = True
    # Checkpointing happens on the underlying Scala RDD, not the JavaRDD
    # wrapper, hence the extra .rdd() hop.
    self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
    """
    Return whether this RDD has been checkpointed or not.
    """
    # Query the underlying Scala RDD (one level below the JavaRDD wrapper).
    scala_rdd = self._jrdd.rdd()
    return scala_rdd.isCheckpointed()
def getCheckpointFile(self):
    """
    Gets the name of the file to which this RDD was checkpointed.

    Returns None if the RDD has not been checkpointed yet.
    """
    # The JVM side returns a Scala Option; unwrap it to a Python value.
    checkpointFile = self._jrdd.rdd().getCheckpointFile()
    if checkpointFile.isDefined():
        return checkpointFile.get()
    else:
        return None
# TODO persist(self, storageLevel)
def map(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each element of this RDD.
    """
    # NOTE(review): the original docstring ("distinct elements") was
    # copy-pasted from distinct(); corrected above.
    # The split index is ignored — map doesn't care which partition it's in.
    def func(split, iterator): return imap(f, iterator)
    return PipelinedRDD(self, func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
    """
    Return a new RDD by first applying a function to all elements of this
    RDD, and then flattening the results.

    >>> rdd = sc.parallelize([2, 3, 4])
    >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
    [1, 1, 1, 2, 2, 3]
    >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
    [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
    """
    # chain.from_iterable flattens lazily, one produced iterable at a time.
    def func(s, iterator): return chain.from_iterable(imap(f, iterator))
    return self.mapPartitionsWithSplit(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> def f(iterator): yield sum(iterator)
    >>> rdd.mapPartitions(f).collect()
    [3, 7]
    """
    # The split index is irrelevant to `f`, so drop it before delegating.
    def func(s, iterator): return f(iterator)
    # BUG FIX: `preservesPartitioning` was previously not forwarded to
    # mapPartitionsWithSplit, so it silently defaulted to False and the
    # partitioner information was always discarded.
    return self.mapPartitionsWithSplit(func, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
    """
    Return a new RDD by applying a function to each partition of this RDD,
    while tracking the index of the original partition.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
    >>> def f(splitIndex, iterator): yield splitIndex
    >>> rdd.mapPartitionsWithSplit(f).sum()
    6
    """
    # PipelinedRDD fuses consecutive narrow transformations into one task.
    return PipelinedRDD(self, f, preservesPartitioning)
def filter(self, f):
    """
    Return a new RDD containing only the elements that satisfy a predicate.

    >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
    >>> rdd.filter(lambda x: x % 2 == 0).collect()
    [2, 4]
    """
    # Lazily filter each partition with the predicate.
    return self.mapPartitions(lambda iterator: ifilter(f, iterator))
def distinct(self):
    """
    Return a new RDD containing the distinct elements in this RDD.

    >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
    [1, 2, 3]
    """
    # Pair every element with a dummy value, collapse duplicate keys,
    # then strip the dummy value off again.
    keyed = self.map(lambda x: (x, None))
    deduped = keyed.reduceByKey(lambda x, _: x)
    return deduped.map(lambda pair: pair[0])
# TODO: sampling needs to be re-implemented due to Batch
#def sample(self, withReplacement, fraction, seed):
# jrdd = self._jrdd.sample(withReplacement, fraction, seed)
# return RDD(jrdd, self.ctx)
#def takeSample(self, withReplacement, num, seed):
# vals = self._jrdd.takeSample(withReplacement, num, seed)
# return [load_pickle(bytes(x)) for x in vals]
def union(self, other):
    """
    Return the union of this RDD and another one.

    Duplicates are kept (this is a concatenation, not a set union).

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> rdd.union(rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    return RDD(self._jrdd.union(other._jrdd), self.ctx)
def __add__(self, other):
    """
    Return the union of this RDD and another one.

    >>> rdd = sc.parallelize([1, 1, 2, 3])
    >>> (rdd + rdd).collect()
    [1, 1, 2, 3, 1, 1, 2, 3]
    """
    if not isinstance(other, RDD):
        # Previously a bare TypeError with no message; give the caller a
        # hint about what went wrong (exception type is unchanged).
        raise TypeError("can only add another RDD to an RDD")
    return self.union(other)
# TODO: sort
def glom(self):
    """
    Return an RDD created by coalescing all elements within each partition
    into a list.

    >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
    >>> sorted(rdd.glom().collect())
    [[1, 2], [3, 4]]
    """
    # Each partition becomes a single record: the list of its elements.
    return self.mapPartitions(lambda iterator: [list(iterator)])
def cartesian(self, other):
    """
    Return the Cartesian product of this RDD and another one, that is, the
    RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
    C{b} is in C{other}.

    >>> rdd = sc.parallelize([1, 2])
    >>> sorted(rdd.cartesian(rdd).collect())
    [(1, 1), (1, 2), (2, 1), (2, 2)]
    """
    # Due to batching, we can't use the Java cartesian method.
    java_cartesian = RDD(self._jrdd.cartesian(other._jrdd), self.ctx)
    def unpack_batches(pair):
        # Elements may arrive wrapped in Batch objects (serializer batching);
        # expand every Batch x non-Batch combination into individual pairs.
        (x, y) = pair
        if type(x) == Batch or type(y) == Batch:
            xs = x.items if type(x) == Batch else [x]
            ys = y.items if type(y) == Batch else [y]
            for pair in product(xs, ys):
                yield pair
        else:
            # Neither side was batched: the pair is already element-level.
            yield pair
    return java_cartesian.flatMap(unpack_batches)
def groupBy(self, f, numPartitions=None):
    """
    Return an RDD of grouped items.

    >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
    >>> result = rdd.groupBy(lambda x: x % 2).collect()
    >>> sorted([(x, sorted(y)) for (x, y) in result])
    [(0, [2, 8]), (1, [1, 1, 3, 5])]
    """
    # Key each element by f(element), then group by that key.
    keyed = self.map(lambda x: (f(x), x))
    return keyed.groupByKey(numPartitions)
def pipe(self, command, env=None):
    """
    Return an RDD created by piping elements to a forked external process.

    >>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
    ['1', '2', '3']
    """
    # BUG FIX: the default was a mutable dict (env={}), shared across all
    # calls. Use a None sentinel; an explicit empty dict is substituted so
    # the child still runs with an empty environment by default, as before.
    if env is None:
        env = {}
    def func(iterator):
        pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
        # Feed the partition's elements to the child's stdin from a
        # background thread so we can stream its stdout concurrently.
        def pipe_objs(out):
            for obj in iterator:
                out.write(str(obj).rstrip('\n') + '\n')
            out.close()
        Thread(target=pipe_objs, args=[pipe.stdin]).start()
        return (x.rstrip('\n') for x in pipe.stdout)
    return self.mapPartitions(func)
def foreach(self, f):
    """
    Applies a function to all elements of this RDD.

    Executed for side effects only; returns nothing.

    >>> def f(x): print x
    >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
    """
    def processPartition(iterator):
        for x in iterator:
            f(x)
        # A generator must yield something for mapPartitions to consume.
        yield None
    self.mapPartitions(processPartition).collect()  # Force evaluation
def collect(self):
    """
    Return a list that contains all of the elements in this RDD.
    """
    # Pull the pickled partitions from the JVM, then deserialize them via
    # a temp file (faster than streaming through the Py4J socket).
    picklesInJava = self._jrdd.collect().iterator()
    return list(self._collect_iterator_through_file(picklesInJava))
def _collect_iterator_through_file(self, iterator):
    # Transferring lots of data through Py4J can be slow because
    # socket.readline() is inefficient. Instead, we'll dump the data to a
    # file and read it back.
    tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
    tempFile.close()
    self.ctx._writeIteratorToPickleFile(iterator, tempFile.name)
    # Read the data into Python and deserialize it:
    # NOTE(review): the `with` target deliberately shadows `tempFile`;
    # `.name` is the same path either way, but the rebinding is fragile.
    with open(tempFile.name, 'rb') as tempFile:
        for item in read_from_pickle_file(tempFile):
            yield item
    # NOTE(review): if the consumer abandons this generator early, the
    # temp file is not unlinked — confirm whether that leak is acceptable.
    os.unlink(tempFile.name)
def reduce(self, f):
    """
    Reduces the elements of this RDD using the specified commutative and
    associative binary operator.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
    15
    >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
    10
    """
    def func(iterator):
        # Fold each partition locally; empty partitions yield nothing,
        # so they don't contribute a bogus value to the final reduce.
        acc = None
        for obj in iterator:
            if acc is None:
                acc = obj
            else:
                acc = f(obj, acc)
        if acc is not None:
            yield acc
    vals = self.mapPartitions(func).collect()
    # Final combine on the driver (Python 2 built-in `reduce`).
    return reduce(f, vals)
def fold(self, zeroValue, op):
    """
    Aggregate the elements of each partition, and then the results for all
    the partitions, using a given associative function and a neutral "zero
    value."

    The function C{op(t1, t2)} is allowed to modify C{t1} and return it
    as its result value to avoid object allocation; however, it should not
    modify C{t2}.

    >>> from operator import add
    >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
    15
    """
    def func(iterator):
        # Fold each partition starting from the zero value.
        acc = zeroValue
        for obj in iterator:
            acc = op(obj, acc)
        yield acc
    vals = self.mapPartitions(func).collect()
    # zeroValue is applied once per partition and once more here, so it
    # must be a true identity element for `op`.
    return reduce(op, vals, zeroValue)
# TODO: aggregate
def sum(self):
    """
    Add up the elements in this RDD.

    >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
    6.0
    """
    # Sum each partition locally, then add the per-partition totals.
    partial_sums = self.mapPartitions(lambda iterator: [sum(iterator)])
    return partial_sums.reduce(operator.add)
def count(self):
    """
    Return the number of elements in this RDD.

    >>> sc.parallelize([2, 3, 4]).count()
    3
    """
    # Count each partition locally, then total the per-partition counts.
    per_partition = self.mapPartitions(lambda it: [sum(1 for _ in it)])
    return per_partition.sum()
def countByValue(self):
    """
    Return the count of each unique value in this RDD as a dictionary of
    (value, count) pairs.

    >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
    [(1, 2), (2, 3)]
    """
    def countPartition(iterator):
        # Build a local histogram per partition.
        counts = defaultdict(int)
        for obj in iterator:
            counts[obj] += 1
        yield counts
    def mergeMaps(m1, m2):
        # Merge m2's counts into m1 (m1 is mutated and returned).
        for (k, v) in m2.iteritems():
            m1[k] += v
        return m1
    return self.mapPartitions(countPartition).reduce(mergeMaps)
def take(self, num):
    """
    Take the first num elements of the RDD.

    This currently scans the partitions *one by one*, so it will be slow if
    a lot of partitions are required. In that case, use L{collect} to get
    the whole RDD instead.

    >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
    [2, 3]
    >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
    [2, 3, 4, 5, 6]
    """
    def takeUpToNum(iterator):
        # Stops early via StopIteration from next() when the partition is
        # exhausted (Python 2 generator semantics).
        taken = 0
        while taken < num:
            yield next(iterator)
            taken += 1
    # Take only up to num elements from each partition we try
    mapped = self.mapPartitions(takeUpToNum)
    items = []
    # Fetch partitions one at a time until we have enough elements.
    for partition in range(mapped._jrdd.splits().size()):
        iterator = self.ctx._takePartition(mapped._jrdd.rdd(), partition)
        items.extend(self._collect_iterator_through_file(iterator))
        if len(items) >= num:
            break
    return items[:num]
def first(self):
    """
    Return the first element in this RDD.

    >>> sc.parallelize([2, 3, 4]).first()
    2
    """
    # take(1) scans only as many partitions as needed.
    items = self.take(1)
    return items[0]
def saveAsTextFile(self, path):
    """
    Save this RDD as a text file, using string representations of elements.

    >>> tempFile = NamedTemporaryFile(delete=True)
    >>> tempFile.close()
    >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
    >>> from fileinput import input
    >>> from glob import glob
    >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
    '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
    """
    def func(split, iterator):
        # Stringify and UTF-8 encode each element before handing to Java.
        return (str(x).encode("utf-8") for x in iterator)
    keyed = PipelinedRDD(self, func)
    # Skip pickling: the payload is already raw bytes for the JVM side.
    keyed._bypass_serializer = True
    keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
    """
    Return the key-value pairs in this RDD to the master as a dictionary.

    Later occurrences of a key overwrite earlier ones.

    >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
    >>> m[1]
    2
    >>> m[3]
    4
    """
    pairs = self.collect()
    return dict(pairs)
def reduceByKey(self, func, numPartitions=None):
    """
    Merge the values for each key using an associative reduce function.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    Output will be hash-partitioned with C{numPartitions} partitions, or
    the default parallelism level if C{numPartitions} is not specified.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKey(add).collect())
    [('a', 2), ('b', 1)]
    """
    # combineByKey with an identity "createCombiner" and `func` as both the
    # within-partition and cross-partition merge.
    identity = lambda x: x
    return self.combineByKey(identity, func, func, numPartitions)
def reduceByKeyLocally(self, func):
    """
    Merge the values for each key using an associative reduce function, but
    return the results immediately to the master as a dictionary.

    This will also perform the merging locally on each mapper before
    sending results to a reducer, similarly to a "combiner" in MapReduce.

    >>> from operator import add
    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.reduceByKeyLocally(add).items())
    [('a', 2), ('b', 1)]
    """
    def reducePartition(iterator):
        # Combine within the partition first (the "combiner" step).
        m = {}
        for (k, v) in iterator:
            m[k] = v if k not in m else func(m[k], v)
        yield m
    def mergeMaps(m1, m2):
        # Merge the per-partition maps on the driver; m1 is mutated.
        for (k, v) in m2.iteritems():
            m1[k] = v if k not in m1 else func(m1[k], v)
        return m1
    return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
    """
    Count the number of elements for each key, and return the result to the
    master as a dictionary.

    >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(rdd.countByKey().items())
    [('a', 2), ('b', 1)]
    """
    # Project out the keys, then count occurrences of each key.
    keys = self.map(lambda pair: pair[0])
    return keys.countByValue()
def join(self, other, numPartitions=None):
    """
    Return an RDD containing all pairs of elements with matching keys in
    C{self} and C{other}.

    Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
    (k, v1) is in C{self} and (k, v2) is in C{other}.

    Performs a hash join across the cluster.

    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2), ("a", 3)])
    >>> sorted(x.join(y).collect())
    [('a', (1, 2)), ('a', (1, 3))]
    """
    # Delegates to the pure-Python join implementation in pyspark.join.
    return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
    """
    Perform a left outer join of C{self} and C{other}.
    For each element (k, v) in C{self}, the resulting RDD will either
    contain all pairs (k, (v, w)) for w in C{other}, or the pair
    (k, (v, None)) if no elements in other have key k.
    Hash-partitions the resulting RDD into the given number of partitions.
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.leftOuterJoin(y).collect())
    [('a', (1, 2)), ('b', (4, None))]
    """
    # Delegate to the shared pure-Python outer-join implementation.
    return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
    """
    Perform a right outer join of C{self} and C{other}.
    For each element (k, w) in C{other}, the resulting RDD will either
    contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
    if no elements in C{self} have key k.
    Hash-partitions the resulting RDD into the given number of partitions.
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(y.rightOuterJoin(x).collect())
    [('a', (2, 1)), ('b', (None, 4))]
    """
    # Delegate to the shared pure-Python outer-join implementation.
    return python_right_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
def partitionBy(self, numPartitions, partitionFunc=hash):
    """
    Return a copy of the RDD partitioned using the specified partitioner.
    >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
    >>> sets = pairs.partitionBy(2).glom().collect()
    >>> set(sets[0]).intersection(set(sets[1]))
    set([])
    """
    if numPartitions is None:
        numPartitions = self.ctx.defaultParallelism
    # Transferring O(n) objects to Java is too expensive. Instead, we'll
    # form the hash buckets in Python, transferring O(numPartitions) objects
    # to Java. Each object is a (splitNumber, [objects]) pair.
    def add_shuffle_key(split, iterator):
        buckets = defaultdict(list)
        for (k, v) in iterator:
            buckets[partitionFunc(k) % numPartitions].append((k, v))
        # Emit bucket id and its pickled batch as an alternating stream;
        # the JVM side pairs them back up. NOTE: iteritems() is Python 2 only.
        for (split, items) in buckets.iteritems():
            yield str(split)
            yield dump_pickle(Batch(items))
    keyed = PipelinedRDD(self, add_shuffle_key)
    # The output is already pickled by add_shuffle_key, so skip the normal
    # serializer on the way to the JVM.
    keyed._bypass_serializer = True
    pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
    partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                  id(partitionFunc))
    jrdd = pairRDD.partitionBy(partitioner).values()
    rdd = RDD(jrdd, self.ctx)
    # This is required so that id(partitionFunc) remains unique, even if
    # partitionFunc is a lambda:
    rdd._partitionFunc = partitionFunc
    return rdd
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                 numPartitions=None):
    """
    Generic function to combine the elements for each key using a custom
    set of aggregation functions.
    Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
    type" C. Note that V and C can be different -- for example, one might
    group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
    Users provide three functions:
    - C{createCombiner}, which turns a V into a C (e.g., creates
    a one-element list)
    - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
    a list)
    - C{mergeCombiners}, to combine two C's into a single one.
    In addition, users can control the partitioning of the output RDD.
    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> def f(x): return x
    >>> def add(a, b): return a + str(b)
    >>> sorted(x.combineByKey(str, add, add).collect())
    [('a', '11'), ('b', '1')]
    """
    if numPartitions is None:
        numPartitions = self.ctx.defaultParallelism
    # Map-side combine: fold every value of one partition into a per-key
    # combiner before any data crosses the network.
    def combineLocally(iterator):
        combiners = {}
        for (k, v) in iterator:
            if k not in combiners:
                combiners[k] = createCombiner(v)
            else:
                combiners[k] = mergeValue(combiners[k], v)
        # items() instead of the Python-2-only iteritems(); mapPartitions
        # only iterates the result, so behavior is unchanged.
        return combiners.items()
    locally_combined = self.mapPartitions(combineLocally)
    shuffled = locally_combined.partitionBy(numPartitions)
    # Reduce-side merge: combine the per-partition combiners that hashed
    # to the same output partition.
    def _mergeCombiners(iterator):
        combiners = {}
        for (k, v) in iterator:
            if k not in combiners:
                combiners[k] = v
            else:
                combiners[k] = mergeCombiners(combiners[k], v)
        return combiners.items()
    return shuffled.mapPartitions(_mergeCombiners)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
    """
    Group the values for each key in the RDD into a single sequence.
    Hash-partitions the resulting RDD with into numPartitions partitions.
    >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
    >>> sorted(x.groupByKey().collect())
    [('a', [1, 1]), ('b', [1])]
    """
    # Grouping is combineByKey specialized to lists: start a list for the
    # first value of a key, append later values, and concatenate lists when
    # combiners from different partitions meet.
    def startList(value):
        return [value]

    def appendValue(values, value):
        values.append(value)
        return values

    def concatLists(left, right):
        return left + right
    return self.combineByKey(startList, appendValue, concatLists,
                             numPartitions)
# TODO: add tests
def flatMapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a flatMap function
    without changing the keys; this also retains the original RDD's
    partitioning.
    """
    # Explicit indexing instead of a tuple-unpacking lambda parameter:
    # ``lambda (k, v): ...`` is a SyntaxError on Python 3 (PEP 3113).
    # Behavior is identical on Python 2.
    flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
    return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
    """
    Pass each value in the key-value pair RDD through a map function
    without changing the keys; this also retains the original RDD's
    partitioning.
    """
    # Explicit indexing instead of a tuple-unpacking lambda parameter:
    # ``lambda (k, v): ...`` is a SyntaxError on Python 3 (PEP 3113).
    # Behavior is identical on Python 2.
    map_values_fn = lambda kv: (kv[0], f(kv[1]))
    return self.map(map_values_fn, preservesPartitioning=True)
# TODO: support varargs cogroup of several RDDs.
def groupWith(self, other):
    """
    Alias for cogroup.
    """
    # Same operation under the name used by Spark's Scala API.
    return self.cogroup(other)
# TODO: add variant with custom parittioner
def cogroup(self, other, numPartitions=None):
    """
    For each key k in C{self} or C{other}, return a resulting RDD that
    contains a tuple with the list of values for that key in C{self} as well
    as C{other}.
    >>> x = sc.parallelize([("a", 1), ("b", 4)])
    >>> y = sc.parallelize([("a", 2)])
    >>> sorted(x.cogroup(y).collect())
    [('a', ([1], [2])), ('b', ([4], []))]
    """
    # Delegate to the shared pure-Python cogroup implementation.
    return python_cogroup(self, other, numPartitions)
# TODO: `lookup` is disabled because we can't make direct comparisons based
# on the key; we need to compare the hash of the key to the hash of the
# keys in the pairs. This could be an expensive operation, since those
# hashes aren't retained.
class PipelinedRDD(RDD):
    """
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """
    def __init__(self, prev, func, preservesPartitioning=False):
        # If the parent is itself a pipelinable PipelinedRDD, fuse this
        # function with the parent's so both run in one Python task,
        # avoiding an extra round trip through the serializer.
        if isinstance(prev, PipelinedRDD) and prev._is_pipelinable():
            prev_func = prev.func
            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            # Skip over the fused parent and hang directly off its input.
            self._prev_jrdd = prev._prev_jrdd
        else:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        self._jrdd_val = None           # lazily built JVM-side RDD handle
        self._bypass_serializer = False
    @property
    def _jrdd(self):
        # Build (and memoize) the JVM-side PythonRDD on first access.
        if self._jrdd_val:
            return self._jrdd_val
        func = self.func
        if not self._bypass_serializer and self.ctx.batchSize != 1:
            # Wrap the function so its output is grouped into batches for
            # cheaper serialization across the Python/JVM boundary.
            oldfunc = self.func
            batchSize = self.ctx.batchSize
            def batched_func(split, iterator):
                return batched(oldfunc(split, iterator), batchSize)
            func = batched_func
        # Serialize the pipelined function (plus the bypass flag) into the
        # command string handed to the JVM worker.
        cmds = [func, self._bypass_serializer]
        pipe_command = ' '.join(b64enc(cloudpickle.dumps(f)) for f in cmds)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
            self.ctx._gateway._gateway_client)
        self.ctx._pickled_broadcast_vars.clear()
        class_manifest = self._prev_jrdd.classManifest()
        env = MapConverter().convert(self.ctx.environment,
                                     self.ctx._gateway._gateway_client)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
            pipe_command, env, self.preservesPartitioning, self.ctx.pythonExec,
            broadcast_vars, self.ctx._javaAccumulator, class_manifest)
        self._jrdd_val = python_rdd.asJavaRDD()
        return self._jrdd_val
    def _is_pipelinable(self):
        # Cached/checkpointed RDDs must be materialized, so later stages
        # cannot be fused into them.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    """Run this module's doctests against a local 4-thread SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs)
    globs['sc'].stop()
    # Non-zero exit code signals doctest failures to the build.
    if failure_count:
        exit(-1)
if __name__ == "__main__":
    _test()
|
|
# Copyright 2012 Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from oslo.config import cfg
import webob.exc
from neutron.common import constants
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit.bigswitch import fake_server
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_api_v2
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_allowedaddresspairs as test_addr_pair
patch = mock.patch
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
class BigSwitchProxyPluginV2TestCase(test_base.BigSwitchTestBase,
                                     test_plugin.NeutronDbPluginV2TestCase):
    """Base test case that wires the BigSwitch proxy plugin into the Neutron
    DB plugin test harness with a mocked backend HTTP server."""

    def setUp(self, plugin_name=None):
        if hasattr(self, 'HAS_PORT_FILTER'):
            # Subclasses opt in/out of security groups via HAS_PORT_FILTER.
            cfg.CONF.set_override(
                'enable_security_group', self.HAS_PORT_FILTER, 'SECURITYGROUP')
        self.setup_config_files()
        self.setup_patches()
        if plugin_name:
            self._plugin_name = plugin_name
        super(BigSwitchProxyPluginV2TestCase,
              self).setUp(self._plugin_name)
        # Ports are created asynchronously by this plugin, so new ports
        # start in the BUILD state.
        self.port_create_status = 'BUILD'
        self.startHttpPatch()
class TestBigSwitchProxyBasicGet(test_plugin.TestBasicGet,
                                 BigSwitchProxyPluginV2TestCase):
    # Reuse the upstream basic-GET tests unchanged against this plugin.
    pass
class TestBigSwitchProxyV2HTTPResponse(test_plugin.TestV2HTTPResponse,
                                       BigSwitchProxyPluginV2TestCase):

    def test_failover_memory(self):
        # first request causes failover so next shouldn't hit bad server
        with self.network() as net:
            kwargs = {'tenant_id': 'ExceptOnBadServer'}
            # NOTE(review): the inner context shadows the outer ``net``;
            # the outer network appears to exist only to trigger the first
            # failover -- confirm the shadowing is intentional.
            with self.network(**kwargs) as net:
                req = self.new_show_request('networks', net['network']['id'])
                res = req.get_response(self.api)
                self.assertEqual(res.status_int, 200)
class TestBigSwitchProxyPortsV2(test_plugin.TestPortsV2,
                                BigSwitchProxyPluginV2TestCase,
                                test_bindings.PortBindingsTestCase):
    """Port tests for the BigSwitch proxy plugin: rollback on backend
    failures, VIF binding resolution, and the async BUILD status flow."""

    VIF_TYPE = portbindings.VIF_TYPE_OVS
    HAS_PORT_FILTER = False

    def setUp(self, plugin_name=None):
        # NOTE(review): plugin_name is accepted for signature compatibility
        # but ignored; self._plugin_name is always used.
        super(TestBigSwitchProxyPortsV2,
              self).setUp(self._plugin_name)

    def test_router_port_status_active(self):
        # router ports screw up port auto-deletion so it has to be
        # disabled for this test
        with self.network(do_delete=False) as net:
            with self.subnet(network=net, do_delete=False) as sub:
                with self.port(
                    subnet=sub,
                    do_delete=False,
                    device_owner=constants.DEVICE_OWNER_ROUTER_INTF
                ) as port:
                    # router ports should be immediately active
                    self.assertEqual(port['port']['status'], 'ACTIVE')

    def test_update_port_status_build(self):
        # normal ports go into the pending build state for async creation
        with self.port() as port:
            self.assertEqual(port['port']['status'], 'BUILD')
            self.assertEqual(self.port_create_status, 'BUILD')

    def _get_ports(self, netid):
        # Helper: list the ports of one network as deserialized dicts.
        return self.deserialize('json',
                                self._list_ports('json', netid=netid))['ports']

    def test_rollback_for_port_create(self):
        plugin = manager.NeutronManager.get_plugin()
        with self.subnet() as s:
            # stop normal patch
            self.httpPatch.stop()
            # allow thread spawns for this test
            self.spawn_p.stop()
            kwargs = {'device_id': 'somedevid'}
            # put in a broken 'server'
            httpPatch = patch(HTTPCON, new=fake_server.HTTPConnectionMock500)
            httpPatch.start()
            with self.port(subnet=s, **kwargs):
                # wait for async port create request to finish
                plugin.evpool.waitall()
            # put good 'server' back in
            httpPatch.stop()
            self.httpPatch.start()
            ports = self._get_ports(s['subnet']['network_id'])
            #failure to create should result in port in error state
            self.assertEqual(ports[0]['status'], 'ERROR')

    def test_rollback_for_port_update(self):
        with self.network() as n:
            with self.port(network_id=n['network']['id'],
                           device_id='66') as port:
                port = self._get_ports(n['network']['id'])[0]
                data = {'port': {'name': 'aNewName', 'device_id': '99'}}
                # stop normal patch
                self.httpPatch.stop()
                with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                    self.new_update_request(
                        'ports', data, port['id']).get_response(self.api)
                self.httpPatch.start()
                uport = self._get_ports(n['network']['id'])[0]
                # name should have stayed the same
                self.assertEqual(port['name'], uport['name'])

    def test_rollback_for_port_delete(self):
        with self.network() as n:
            with self.port(network_id=n['network']['id'],
                           device_id='somedevid') as port:
                # stop normal patch
                self.httpPatch.stop()
                with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                    self._delete('ports', port['port']['id'],
                                 expected_code=
                                 webob.exc.HTTPInternalServerError.code)
                self.httpPatch.start()
                port = self._get_ports(n['network']['id'])[0]
                # delete failed on the backend, so the port must survive
                self.assertEqual('BUILD', port['status'])

    def test_correct_shared_net_tenant_id(self):
        # tenant_id in port requests should match network tenant_id instead
        # of port tenant_id
        def rest_port_op(self, ten_id, netid, port):
            if ten_id != 'SHARED':
                raise Exception('expecting tenant_id SHARED. got %s' % ten_id)
        with self.network(tenant_id='SHARED', shared=True) as net:
            with self.subnet(network=net) as sub:
                pref = 'neutron.plugins.bigswitch.servermanager.ServerPool.%s'
                tomock = [pref % 'rest_create_port',
                          pref % 'rest_update_port',
                          pref % 'rest_delete_port']
                patches = [patch(f, create=True, new=rest_port_op)
                           for f in tomock]
                # rest_port_op raises if any backend call carries a tenant
                # other than the shared network's owner
                for restp in patches:
                    restp.start()
                with self.port(subnet=sub, tenant_id='port-owner') as port:
                    data = {'port': {'binding:host_id': 'someotherhost',
                                     'device_id': 'override_dev'}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = req.get_response(self.api)
                    self.assertEqual(res.status_int, 200)

    def test_create404_triggers_sync(self):
        # allow async port thread for this patch
        self.spawn_p.stop()
        with contextlib.nested(
            self.subnet(),
            patch(HTTPCON, create=True,
                  new=fake_server.HTTPConnectionMock404),
            patch(test_base.RESTPROXY_PKG_PATH
                  + '.NeutronRestProxyV2._send_all_data')
        ) as (s, mock_http, mock_send_all):
            with self.port(subnet=s, device_id='somedevid') as p:
                # wait for the async port thread to finish
                plugin = manager.NeutronManager.get_plugin()
                plugin.evpool.waitall()
                # the backend 404 should have triggered a full resync
                call = mock.call(
                    send_routers=True, send_ports=True, send_floating_ips=True,
                    triggered_by_tenant=p['port']['tenant_id']
                )
                mock_send_all.assert_has_calls([call])
        self.spawn_p.start()

    def test_port_vif_details_default(self):
        kwargs = {'name': 'name', 'device_id': 'override_dev'}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'],
                             portbindings.VIF_TYPE_OVS)

    def test_port_vif_details_override(self):
        # ivshost is in the test config to override to IVS
        kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
                  'device_id': 'override_dev'}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'],
                             portbindings.VIF_TYPE_IVS)
        kwargs = {'name': 'name2', 'binding:host_id': 'someotherhost',
                  'device_id': 'other_dev'}
        with self.port(**kwargs) as port:
            self.assertEqual(port['port']['binding:vif_type'], self.VIF_TYPE)

    def test_port_move(self):
        # ivshost is in the test config to override to IVS
        kwargs = {'name': 'name', 'binding:host_id': 'ivshost',
                  'device_id': 'override_dev'}
        with self.port(**kwargs) as port:
            # moving the port off the IVS host should revert the vif_type
            data = {'port': {'binding:host_id': 'someotherhost',
                             'device_id': 'override_dev'}}
            req = self.new_update_request('ports', data, port['port']['id'])
            res = self.deserialize(self.fmt, req.get_response(self.api))
            self.assertEqual(res['port']['binding:vif_type'], self.VIF_TYPE)

    def _make_port(self, fmt, net_id, expected_res_status=None, arg_list=None,
                   **kwargs):
        # Always allow binding:host_id through to the API layer.
        arg_list = arg_list or ()
        arg_list += ('binding:host_id', )
        res = self._create_port(fmt, net_id, expected_res_status,
                                arg_list, **kwargs)
        # Things can go wrong - raise HTTP exc with res code only
        # so it can be caught by unit tests
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        return self.deserialize(fmt, res)
class TestVifDifferentDefault(BigSwitchProxyPluginV2TestCase):
    """Verify that overriding the NOVA vif_type option changes the default
    binding:vif_type reported for new ports."""

    def setup_config_files(self):
        super(TestVifDifferentDefault, self).setup_config_files()
        cfg.CONF.set_override('vif_type', 'ivs', 'NOVA')

    def test_default_viftype(self):
        with self.port() as port:
            self.assertEqual(port['port']['binding:vif_type'], 'ivs')
class TestBigSwitchProxyNetworksV2(test_plugin.TestNetworksV2,
                                   BigSwitchProxyPluginV2TestCase):
    """Network CRUD tests verifying that DB state is rolled back (create)
    or preserved (update/delete) when the backend returns a 500."""

    def _get_networks(self, tenant_id):
        # Query the plugin directly, bypassing the API layer.
        ctx = context.Context('', tenant_id)
        return manager.NeutronManager.get_plugin().get_networks(ctx)

    def test_rollback_on_network_create(self):
        tid = test_api_v2._uuid()
        kwargs = {'tenant_id': tid}
        self.httpPatch.stop()
        with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
            self._create_network('json', 'netname', True, **kwargs)
        self.httpPatch.start()
        # the failed backend call must leave no network row behind
        self.assertFalse(self._get_networks(tid))

    def test_rollback_on_network_update(self):
        with self.network() as n:
            data = {'network': {'name': 'aNewName'}}
            self.httpPatch.stop()
            with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self.new_update_request(
                    'networks', data, n['network']['id']
                ).get_response(self.api)
            self.httpPatch.start()
            updatedn = self._get_networks(n['network']['tenant_id'])[0]
            # name should have stayed the same due to failure
            self.assertEqual(n['network']['name'], updatedn['name'])

    def test_rollback_on_network_delete(self):
        with self.network() as n:
            self.httpPatch.stop()
            with patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
                self._delete(
                    'networks', n['network']['id'],
                    expected_code=webob.exc.HTTPInternalServerError.code)
            self.httpPatch.start()
            # network should still exist in db
            self.assertEqual(n['network']['id'],
                             self._get_networks(n['network']['tenant_id']
                                                )[0]['id'])
class TestBigSwitchProxySubnetsV2(test_plugin.TestSubnetsV2,
                                  BigSwitchProxyPluginV2TestCase):
    # Reuse the upstream subnet tests unchanged against this plugin.
    pass
class TestBigSwitchProxySync(BigSwitchProxyPluginV2TestCase):

    def test_send_data(self):
        # A full topology sync against the mocked backend should return 200.
        plugin_obj = manager.NeutronManager.get_plugin()
        result = plugin_obj._send_all_data()
        self.assertEqual(result[0], 200)
class TestBigSwitchAddressPairs(BigSwitchProxyPluginV2TestCase,
                                test_addr_pair.TestAllowedAddressPairs):
    # Reuse the upstream allowed-address-pairs tests against this plugin.
    pass
|
|
#SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
from urllib.parse import urlparse
import pandas as pd
import sqlalchemy as s
from sqlalchemy import MetaData
from sqlalchemy.ext.automap import automap_base
from workers.worker_base import Worker
#TODO - fully edit to match releases
class ReleaseWorker(Worker):
    """
    Worker that collects Repository Releases data from the Github API
    and stores it in our database.
    :param task: most recent task the broker added to the worker's queue
    :param config: holds info like api keys, descriptions, and database connection strings
    """
    def __init__(self, config=None):
        # A mutable default (``config={}``) is shared between every call of
        # __init__; ``None`` plus a per-call fallback is the safe,
        # backward-compatible equivalent.
        if config is None:
            config = {}
        worker_type = "release_worker"
        # Define what this worker can be given and know how to interpret
        given = [['github_url']]
        models = ['releases']
        # Define the tables needed to insert, update, or delete on
        data_tables = ['releases']
        operations_tables = ['worker_history', 'worker_job']
        # Run the general worker initialization
        super().__init__(worker_type, config, given, models, data_tables, operations_tables)
        # Define data collection info
        self.tool_source = 'Release Worker'
        self.tool_version = '1.0.0'
        self.data_source = 'GitHub API'

    def get_release_inf(self, repo_id, release, tag_only):
        """Build the dict of `releases` column values for one release or tag.

        :param repo_id: augur repo id the release belongs to
        :param release: GraphQL node for a release (or a tag ref when
            ``tag_only`` is True)
        :param tag_only: True when the node came from the tag query instead
            of the releases query
        :return: dict keyed by `releases` table column names
        """
        if not tag_only:
            # GraphQL may return null for author/name/company; degrade to
            # empty strings instead of raising KeyError/TypeError.
            author_info = release.get('author') or {}
            name = author_info.get('name') or ""
            company = author_info.get('company') or ""
            author = name + '_' + company
            release_inf = {
                'release_id': release['id'],
                'repo_id': repo_id,
                'release_name': release['name'],
                'release_description': release['description'],
                'release_author': author,
                'release_created_at': release['createdAt'],
                'release_published_at': release['publishedAt'],
                'release_updated_at': release['updatedAt'],
                'release_is_draft': release['isDraft'],
                'release_is_prerelease': release['isPrerelease'],
                'release_tag_name': release['tagName'],
                'release_url': release['url'],
                'tag_only': tag_only,
                'tool_source': self.tool_source,
                'tool_version': self.tool_version,
                'data_source': self.data_source
            }
        else:
            # Annotated tags carry author/date info in target.tagger;
            # lightweight tags have no tagger at all.
            tagger = (release.get('target') or {}).get('tagger') or {}
            name = tagger.get('name', "")
            email = '_' + tagger['email'] if 'email' in tagger else ""
            author = name + email
            date = tagger.get('date', "")
            release_inf = {
                'release_id': release['id'],
                'repo_id': repo_id,
                'release_name': release['name'],
                'release_author': author,
                'release_tag_name': release['name'],
                'tag_only': tag_only,
                'tool_source': self.tool_source,
                'tool_version': self.tool_version,
                'data_source': self.data_source
            }
            if date:
                release_inf['release_created_at'] = date
        return release_inf

    def insert_release(self, task, repo_id, owner, release, tag_only=False):
        """Insert or update one release row, then register task completion.

        NOTE(review): completion is registered once per release, so a repo
        with several releases reports completion multiple times -- verify
        this is what the broker expects.
        """
        # Get current table values
        release_id_data_sql = s.sql.text("""
            SELECT releases.release_id
            FROM releases
            WHERE repo_id = :repo_id
        """)
        self.logger.info(f'Getting release table values with the following PSQL query: \n{release_id_data_sql}\n')
        release_id_data = pd.read_sql(release_id_data_sql, self.db, params={'repo_id': repo_id})
        # NOTE(review): assumes release_id is stored as text -- .str.strip()
        # raises on numeric columns; confirm against the schema.
        release_id_data = release_id_data.apply(lambda x: x.str.strip())
        # Put all data together in format of the table
        self.logger.info(f'Inserting release for repo with id:{repo_id}, owner:{owner}, release name:{release["name"]}\n')
        release_inf = self.get_release_inf(repo_id, release, tag_only)
        if release_id_data.size > 0 and release['id'] in release_id_data.values:
            result = self.db.execute(self.releases_table.update().where(
                self.releases_table.c.release_id == release['id']).values(release_inf))
            self.logger.info(f"Release {release['id']} updated into releases table\n")
        else:
            result = self.db.execute(self.releases_table.insert().values(release_inf))
            self.logger.info(f"Release {release['id']} inserted into releases table\n")
            self.logger.info(f"Primary Key inserted into releases table: {result.inserted_primary_key}\n")
            self.results_counter += 1
        self.logger.info(f"Inserted info for {owner}/{repo_id}/{release['name']}\n")
        #Register this task as completed
        self.register_task_completion(task, repo_id, "releases")
        return

    def get_query(self, owner, repo, tag_only):
        """Return the GraphQL query string: the last 10 releases of the
        repo, or (when ``tag_only``) the last 10 tag refs."""
        if not tag_only:
            query = """
                {
                    repository(owner:"%s", name:"%s"){
                        id
                        releases(orderBy: {field: CREATED_AT, direction: ASC}, last: %d) {
                            edges {
                                node {
                                    name
                                    publishedAt
                                    createdAt
                                    description
                                    id
                                    isDraft
                                    isPrerelease
                                    tagName
                                    url
                                    updatedAt
                                    author {
                                        name
                                        company
                                    }
                                }
                            }
                        }
                    }
                }
            """ % (owner, repo, 10)
        else:
            query = """
                {
                    repository(owner:"%s", name:"%s"){
                        id
                        refs(refPrefix: "refs/tags/", last: %d){
                            edges {
                                node {
                                    name
                                    id
                                    target {
                                        ... on Tag {
                                            tagger {
                                                name
                                                email
                                                date
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            """ % (owner, repo, 10)
        return query

    def fetch_data(self, task, repo_id, tag_only=False):
        """POST the GraphQL query for the task's repo and return the
        ``repository`` dict (with ``owner`` added), or None on failure.

        Retries up to 3 times; on persistent failure the task is registered
        as failed before returning None.
        """
        github_url = task['given']['github_url']
        self.logger.info("Beginning filling the releases model for repo: " + github_url + "\n")
        owner, repo = self.get_owner_repo(github_url)
        url = 'https://api.github.com/graphql'
        query = self.get_query(owner, repo, tag_only)
        # Hit the graphql endpoint and retry 3 times in case of failure.
        # A bounded for-loop replaces the old while-loop, whose rate-limit
        # branches used ``continue`` without incrementing the attempt
        # counter and could therefore loop forever.
        success = False
        data = None
        for _attempt in range(3):
            self.logger.info("Hitting endpoint: {} ...\n".format(url))
            r = requests.post(url, json={'query': query}, headers=self.headers)
            self.update_gh_rate_limit(r)
            try:
                data = r.json()
            except ValueError:
                # Body was not valid JSON; keep the raw text for the checks
                # and logging below (narrowed from a bare ``except:``).
                data = json.loads(json.dumps(r.text))
            if 'errors' in data:
                self.logger.info("Error!: {}".format(data['errors']))
                if data['errors'][0]['message'] == 'API rate limit exceeded':
                    self.update_gh_rate_limit(r)
                    continue
            if 'data' in data:
                success = True
                data = data['data']['repository']
                break
            else:
                self.logger.info("Request returned a non-data dict: {}\n".format(data))
                if data['message'] == 'Not Found':
                    self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
                    break
                if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
                    self.update_gh_rate_limit(r, temporarily_disable=True)
                    continue
                if data['message'] == 'Bad credentials':
                    self.update_gh_rate_limit(r, bad_credentials=True)
                    continue
        if not success:
            self.register_task_failure(task, repo_id, "Failed to hit endpoint: {}".format(url))
            return None
        data['owner'] = owner
        return data

    def releases_model(self, task, repo_id):
        """Collect releases for one repo (falling back to plain git tags
        when the repo has none) and insert them into the database."""
        data = self.fetch_data(task, repo_id)
        self.logger.info("repository value is: {}\n".format(data))
        if not data:
            # fetch_data already registered the task failure; previously this
            # fell through and crashed with ``'releases' in None``.
            return
        if 'releases' in data:
            if 'edges' in data['releases'] and data['releases']['edges']:
                for n in data['releases']['edges']:
                    if 'node' in n:
                        release = n['node']
                        self.insert_release(task, repo_id, data['owner'], release)
                    else:
                        self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n))
            elif 'edges' in data['releases'] and not data['releases']['edges']:
                # No releases published; fall back to plain git tags.
                self.logger.info("Searching for tags instead of releases...")
                data = self.fetch_data(task, repo_id, True)
                self.logger.info("refs value is: {}\n".format(data))
                if not data:
                    return
                if 'refs' in data:
                    if 'edges' in data['refs']:
                        for n in data['refs']['edges']:
                            if 'node' in n:
                                release = n['node']
                                self.insert_release(task, repo_id, data['owner'], release, True)
                            else:
                                self.logger.info("There's no release to insert. Current node is not available in releases: {}\n".format(n))
                    else:
                        self.logger.info("There are no releases to insert for current repository: {}\n".format(data))
                else:
                    self.logger.info("There are no refs in data: {}\n".format(data))
            else:
                self.logger.info("There are no releases to insert for current repository: {}\n".format(data))
        else:
            self.logger.info("Graphql response does not contain repository: {}\n".format(data))
|
|
# Python Imports
import datetime
import csv
import itertools as it
import collections as co
import argparse
import os
# Django Imports
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.db import models , transaction
# Local Imports
import contacts.models as cont
class Command(BaseCommand):
    """Reconcile AfricasTalking (AT) delivery statuses from a message dump
    with the Message rows stored in the database."""

    help = "manage success/sent time from AT message dump"

    def add_arguments(self, parser):
        """Register the global --dir option plus one subparser per action;
        each subparser sets ``action`` to the method handle() dispatches to."""
        parser.add_argument('-d', '--dir', default='ignore/at_ids', help='default base dir')
        parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        subparsers = parser.add_subparsers(help='at status actions')
        # The cmd argument is required for django.core.management.base.CommandParser
        find_parser = subparsers.add_parser('find', cmd=parser.cmd, help='find AT IDs')
        find_parser.set_defaults(action='find_at_ids')
        find_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        zeros_parser = subparsers.add_parser('zeros', cmd=parser.cmd, help="find AT IDs for zeros")
        zeros_parser.set_defaults(action='find_zero_ids')
        zeros_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        check_parser = subparsers.add_parser('check', cmd=parser.cmd, help='check csv files')
        check_parser.add_argument('input_csv', nargs='?', default='at_merged_list.csv', help='input csv file to check')
        check_parser.set_defaults(action='check_csv')
        check_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        current_parser = subparsers.add_parser('current', cmd=parser.cmd, help='current csv files')
        current_parser.set_defaults(action='current_status')
        current_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        make_parser = subparsers.add_parser('make', cmd=parser.cmd, help='make final update list')
        make_parser.add_argument('input_csv', nargs='?', default='at_merged_list.csv', help='input csv file to check')
        make_parser.set_defaults(action='make_update_list')
        make_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter
        update_parser = subparsers.add_parser('update', cmd=parser.cmd, help='update database')
        update_parser.add_argument('input_csv', nargs='?', default='at_update_ids.csv', help='input csv file to check')
        update_parser.add_argument('--live-run', action='store_true', default=False, help='make updates')
        update_parser.set_defaults(action='update_ids')
        update_parser.formatter_class = argparse.ArgumentDefaultsHelpFormatter

    def handle(self, *args, **options):
        """Dispatch to the method named by the selected subparser action."""
        # Write through the command's output stream instead of the old
        # Python 2 ``print`` statement, matching the rest of this class.
        self.stdout.write(str(options))
        self.options = options
        getattr(self, options['action'])()

    def find_at_ids(self):
        """Match AT dump rows against Message objects and write the results
        into three CSVs: exactly one match, multiple matches, no match."""
        self.print_header("Finding AT Ids")
        # Safety guard: this action rewrites the output files, so it
        # currently bails out immediately; remove the return to re-enable.
        self.stdout.write(self.style.ERROR('Exiting'))
        return

        at_msg_ids = csv.writer(open("ignore/at_msg_ids.csv", 'w'))
        at_msg_ids.writerow(('date', 'to', 'identity', 'at_id', 'at_status', 'msg_status', 'msg'))
        at_msg_ids_2 = csv.writer(open('ignore/at_msg_ids_2.csv', 'w'))
        at_msg_ids_2.writerow(('date', 'to', 'identity', 'at_id', 'status', 'msg_status', 'msg'))
        at_msg_ids_0 = csv.writer(open('ignore/at_msg_ids_0.csv', 'w'))
        at_msg_ids_0.writerow(('date', 'to', 'status', 'msg'))
        at_csv = csv.reader(open("ignore/2017_01_31_at_dump.csv"))
        # Make header row tuple
        HeaderRow = co.namedtuple("HeaderRow", next(at_csv))
        row_maker = row_factory_maker(HeaderRow)
        for row in it.imap(row_maker, at_csv):
            msgs = find_msg_match(row)
            if msgs.count() == 1:
                msg = msgs.first()
                at_msg_ids.writerow((
                    row.date,
                    row.to,
                    msg.connection.identity,
                    msg.external_id,
                    row.status,
                    msg.external_status,
                    row.msg
                ))
            elif msgs.count() > 1:
                # Ambiguous: record every candidate for manual review.
                for msg in msgs:
                    at_msg_ids_2.writerow((
                        row.date,
                        row.to,
                        msg.connection.identity,
                        msg.external_id,
                        row.status,
                        msg.external_status,
                        row.msg
                    ))
            else:
                at_msg_ids_0.writerow((row.date, row.to, row.status, row.msg))

    def find_zero_ids(self):
        """Placeholder for the 'zeros' action.

        The subparser registered action='find_zero_ids' but no such method
        existed, so dispatch crashed with AttributeError; fail with a clear
        message instead until the action is implemented.
        """
        raise CommandError('find_zero_ids is not implemented yet')

    def check_csv(self):
        """Report how many rows fall into each (at_status, msg_status) pair."""
        at_csv = csv_row_maker(self.dir_fp(self.options['input_csv']))
        counts = co.Counter((row.at_status, row.msg_status) for row in at_csv)
        for count in counts.items():
            # self.stdout.write replaces the old Python 2 ``print`` statement.
            self.stdout.write(str(count))
        self.stdout.write(self.style.SQL_KEYWORD('Total: {}'.format(sum(counts.values()))))

    def current_status(self):
        """Print the count of Messages per external_status in the database."""
        self.print_header('Current Status')
        counts = cont.Message.objects.order_by().values('external_status').annotate(count=models.Count('external_status'))
        for count in counts:
            self.stdout.write(" Status: {0[external_status]:<20.16} Count: {0[count]}".format(count))

    def make_update_list(self):
        """Write at_update_ids.csv with the rows whose AT status disagrees
        with the message status recorded in the database.

        Fixes: ``self.dif_fp`` -> ``self.dir_fp`` (the helper is dir_fp);
        the stray ``it.imap(row_maker, ...)`` over an undefined row_maker is
        gone (csv_row_maker already yields converted namedtuples); and the
        column read is ``at_status``, matching the header written below and
        the readers in check_csv/update_ids.
        """
        at_csv = csv_row_maker(self.dir_fp(self.options['input_csv']))
        at_final = csv.writer(open(self.dir_fp('at_update_ids.csv'), 'w'))
        at_final.writerow(('date', 'at_id', 'at_status', 'msg_status'))
        for row in at_csv:
            if row.at_status != row.msg_status:
                at_final.writerow((
                    row.date,
                    row.at_id,
                    row.at_status,
                    row.msg_status
                ))

    def update_ids(self):
        """Apply the AT statuses from the input csv to Message rows; only
        writes when --live-run was given, otherwise just counts."""
        self.print_header("Update: Live={}".format(self.options['live_run']))
        at_csv = csv_row_maker(self.dir_fp(self.options['input_csv']))
        scheduled, updated = 0, 0
        with transaction.atomic():
            for row in at_csv:
                if row.at_status != row.msg_status:
                    scheduled += 1
                    if self.options['live_run']:
                        updated += cont.Message.objects.filter(external_id=row.at_id).update(external_status=row.at_status)
        self.stdout.write(self.style.WARNING("Scheduled: {} Updated: {}".format(scheduled, updated)))

    def print_header(self, header):
        """Write ``header`` centered inside a 50-column asterisk banner."""
        self.stdout.write(self.style.WARNING('*' * 50 + '\n*'), ending='')
        self.stdout.write(self.style.SQL_KEYWORD('{:^48}'.format(header)), ending='')
        self.stdout.write(self.style.WARNING('*\n' + '*' * 50))

    def dir_fp(self, fp):
        """Join ``fp`` onto the base directory given by the --dir option."""
        return os.path.join(self.options['dir'], fp)
########################################
# Utilities
########################################
def csv_row_maker(fp):
    """Yield namedtuple rows from CSV file *fp*, named after its header row."""
    reader = csv.reader(open(fp))
    # The first row supplies the field names for the row tuples.
    RowTuple = co.namedtuple( "HeaderRow", next(reader) )
    return it.imap( row_factory_maker(RowTuple), reader )
def row_factory_maker(row_cls):
    """ Return a function that takes a raw CSV row and returns a row_cls tuple.

        The first column is converted from a string date to an aware UTC
        datetime; two input formats are tried in order.
    """
    def _row_factory(row):
        try:
            # Primary format, e.g. "09:30 AM January 02, 2015" (local EAT time)
            new_date = datetime.datetime.strptime(row[0],"%I:%M %p %B %d, %Y")
            new_date -= datetime.timedelta(hours=3) # Convert from EAT to UTC
        except ValueError as e:
            # Fallback: ISO-style timestamp already expressed in UTC
            new_date = datetime.datetime.strptime(row[0],"%Y-%m-%d %H:%M:%S+00:00")
        row[0] = timezone.make_aware(new_date,timezone.utc)
        return row_cls._make(row)
    return _row_factory
def find_msg_match(row,with_text=True,td=1):
    """Return Messages sent to row's identity within *td* minutes of row.date.

    When *with_text* is True the message text must also equal row.msg
    (stripped of surrounding whitespace).
    """
    window_end = row.date + datetime.timedelta(minutes=td)
    identity = "+%s" % row.to
    criteria = models.Q(
        created__range=(row.date, window_end),
        connection__identity=identity,
    )
    if with_text is True:
        criteria &= models.Q(text=row.msg.strip())
    return cont.Message.objects.filter(criteria)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_spectastic
----------------------------------
Tests for `spectastic` module.
"""
from builtins import str
from math import isnan
from mock import patch
import json
import six
from hypothesis import (
strategies as st,
assume,
given,
)
from werkzeug.datastructures import MultiDict, Headers
import unittest
from spectastic.operation import (
Operation,
OperationNotFound,
coerce_param,
)
from spectastic.schema import (
Schema,
)
from spectastic.errors import (
ValidationErrors,
)
from spectastic.request import (
BasicRequest,
)
from .spec import (
generate_schema,
generate_method,
generate_param,
)
from .spec import SPEC
class TestSchema(unittest.TestCase):
    """Schema copy semantics and $ref resolution."""

    def test_spec_copy(self):
        # Mutating a Schema must not mutate the dict it was built from.
        original_dict = {'definitions': 'Woot'}
        spec = Schema(original_dict)
        spec['definitions'] = 'Bar'
        self.assertEqual(
            'Bar',
            spec['definitions']
        )
        self.assertEqual(
            'Woot',
            original_dict['definitions']
        )

    def test_spec_resolution(self):
        # $ref pointers resolve to the referenced definition object.
        spec = Schema(SPEC)
        self.assertEqual(
            spec['definitions']['Error'],
            spec['definitions']['NastyError']['allOf'][0]
        )

    def test_spec_resolution_depth(self):
        """No '$ref' key may survive anywhere in the resolved schema tree."""
        spec = Schema(SPEC)

        def _validate_current(current, path=None):
            if path is None:
                path = ['spec']
            if isinstance(current, dict):
                for key, value in six.iteritems(current):
                    if key == '$ref':
                        # BUG FIX: original formatted ``path + key`` which is
                        # list + str and raises TypeError instead of failing
                        # with a useful message; append key as a list element.
                        self.fail('Found a ref at {}:{}'.format(
                            path + [key], value
                        ))
                    _validate_current(value, path + [key])
            if isinstance(current, list):
                for key, value in enumerate(current):
                    _validate_current(value, path)

        _validate_current(spec)
class TestCoercion(unittest.TestCase):
    """coerce_param converts string values according to a parameter schema."""

    # BUG FIX: each schema literal below originally declared 'name' twice
    # ('param' then 'Thing'); the first literal is dead code silently
    # overwritten by the second, so only the effective value is kept.

    @given(st.integers())
    def test_coercion_int(self, hypo):
        value = str(hypo)
        schema = {
            'in': 'path',
            'type': 'integer',
            'format': 'int32',
            'name': 'Thing',
        }
        self.assertEqual(hypo, coerce_param(value, schema))

    @given(st.floats())
    def test_coercion_float(self, hypo):
        # NaN never compares equal, so exclude it from generated floats.
        assume(not isnan(hypo))
        value = "{:.9f}".format(hypo)
        schema = {
            'in': 'path',
            'type': 'number',
            'format': 'double',
            'name': 'Thing',
        }
        self.assertAlmostEqual(hypo, coerce_param(value, schema), 5)

    @given(st.text())
    def test_coercion_text(self, hypo):
        value = str(hypo)
        schema = {
            'in': 'path',
            'type': 'string',
            'name': 'Thing',
        }
        self.assertEqual(hypo, coerce_param(value, schema))
class TestBasicRequest(unittest.TestCase):
    """Body decoding and header normalization behavior of BasicRequest."""

    def test_request_body_json(self):
        # A JSON string body is parsed into Python data.
        request = BasicRequest(
            '{"hello": "world"}', {'content-type': 'application/json'}, '', '')
        self.assertEqual({"hello": "world"}, request.body)

    def test_request_body_dict(self):
        # An already-decoded dict body passes through untouched.
        request = BasicRequest(
            {"hello": "world"}, {'content-type': 'application/json'}, '', '')
        self.assertEqual({"hello": "world"}, request.body)

    def test_request_body_null(self):
        # JSON 'null' decodes to None.
        request = BasicRequest(
            'null', {'content-type': 'application/json'}, '', '')
        self.assertIsNone(request.body)

    def test_request_body_empty(self):
        # An empty JSON body is treated as None rather than an error.
        request = BasicRequest(
            '', {'content-type': 'application/json'}, '', '')
        self.assertIsNone(request.body)

    def test_request_body_raw(self):
        # Without a JSON content type the raw body string is preserved.
        request = BasicRequest(
            'hello', {}, '', '')
        self.assertEqual('hello', request.body)

    def test_request_body_headers(self):
        # Header lookup is case-insensitive.
        request = BasicRequest(
            None, {'AuthOrization': 'woot'}, '', '')
        self.assertIn('Authorization', request.headers)
        self.assertNotIn('derpy', request.headers)
class TestOperation(unittest.TestCase):
    # Operation construction and schema accessors against the shared SPEC.
    def test_from_schema_found(self):
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(
            'GetItem',
            operation.local_schema['operationId'],
        )
    def test_from_schema_missing(self):
        # Unknown operation ids raise rather than returning None.
        with self.assertRaises(OperationNotFound):
            Operation.from_schema(Schema(SPEC), 'Derp')
    def test_headers_schemas(self):
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        header_schemas = operation.header_schemas()
        self.assertEqual(1, len(header_schemas))
        # Lookup keys are lower-cased; the schema keeps the declared name.
        self.assertEqual(
            'Authorization',
            header_schemas['authorization']['name']
        )
    def test_path_schemas(self):
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        path_schemas = operation.path_schemas()
        self.assertEqual(1, len(path_schemas))
        self.assertEqual('ItemID', path_schemas['itemid']['name'])
    def test_query_schemas(self):
        # NOTE(review): this asserts header_schemas(), not query_schemas() —
        # looks like a copy/paste slip; confirm intent before changing.
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(1, len(operation.header_schemas()))
    def test_body_schema(self):
        # NOTE(review): also asserts header_schemas(); presumably meant to
        # exercise the body schema accessor — verify before changing.
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(1, len(operation.header_schemas()))
    def test_path_matcher(self):
        # The base path and the method path join into the expected route.
        cases = [
            {'base': '/', 'method': '/', 'expected': '/'},
            {'base': '/', 'method': '/woot', 'expected': '/woot'},
            {'base': '/', 'method': '/woot/', 'expected': '/woot/'},
            {'base': '/hello', 'method': 'woot', 'expected': '/hello/woot'},
            {'base': '/hello', 'method': '/woot', 'expected': '/hello/woot'},
            {'base': '/hello', 'method': '/woot/', 'expected': '/hello/woot/'},
        ]
        for case in cases:
            method = generate_method(path=case['method'])
            schema = Schema(generate_schema(
                base_path=case['base'],
                methods=method,
            ))
            operation = Operation.from_schema(schema, 'Get')
            self.assertIsNotNone(
                operation._path_matcher.match(
                    case['expected']
                )
            )
class RequestValidationTests(unittest.TestCase):
    """End-to-end validation of query, header, path, and body against SPEC."""
    def test_query_parameters_success(self):
        """
        Validates that an operation's query parameters pass validation when
        conforming to an operation's query param specification.
        """
        query = MultiDict([
            ('search', 'woot')
        ])
        operation = Operation.from_schema(Schema(SPEC), 'Search')
        self.assertEqual(True, operation.validate_request_query(query))
    def test_query_parameter_missing(self):
        """
        Validates that an operation's query parameters fail validation when
        not conforming to an operation's query param specification.
        """
        query = {}
        operation = Operation.from_schema(Schema(SPEC), 'Search')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_query(query)
        self.assertEqual('search', exc_info.exception.errors[0].field)
    def test_query_parameter_validation_error(self):
        # A non-string value for a string parameter is rejected.
        query = MultiDict([
            ('search', 5)
        ])
        operation = Operation.from_schema(Schema(SPEC), 'Search')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_query(query)
        self.assertEqual('search', exc_info.exception.errors[0].field)
    def test_query_parameter_multidict_validation(self):
        # Every value of a repeated query parameter is validated.
        query = MultiDict([
            ('search', 'boop'),
            ('search', 5),
        ])
        operation = Operation.from_schema(Schema(SPEC), 'Search')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_query(query)
        self.assertEqual('search', exc_info.exception.errors[0].field)
    def test_allow_unknown_query_parameter(self):
        """
        Allow unrecognized query parameters.
        """
        query = {'search': 'woot', 'unKnown_param': 'foo'}
        operation = Operation.from_schema(Schema(SPEC), 'Search')
        self.assertEqual(True, operation.validate_request_query(query))
    def test_query_param_types(self):
        """
        Ensures that we validate the types of query parameters.
        """
        cases = [
            {'type': 'string', 'value': 'dfs', 'success': True},
            {'type': 'integer', 'value': '5', 'success': True},
            {'type': 'integer', 'value': '-5', 'success': True},
            {'type': 'number', 'value': '5', 'success': True},
            {'type': 'boolean', 'value': 'true', 'success': True},
            {'type': 'integer', 'value': 'x', 'success': False},
            {'type': 'number', 'value': 'x', 'success': False},
            {'type': 'boolean', 'value': 'xxx', 'success': False},
            {'type': 'string', 'format': 'date-time', 'value': 'xxx',
             'success': False},
            {'type': 'string', 'format': 'date-time',
             'value': '2015-05-05T00:00:00.123456Z', 'success': True},
            {'type': 'string', 'format': 'date-time',
             'value': '2015-05-05T00:00:00.123456+00:00', 'success': True},
        ]
        for case in cases:
            # Build a one-parameter operation for each (type, format) pair.
            param = generate_param(
                'param', 'query', case['type'],
                _format=case.get('format')
            )
            method = generate_method(parameters=[param])
            schema = generate_schema(methods=method)
            operation = Operation.from_schema(Schema(schema), 'Get')
            if case['success']:
                self.assertEqual(
                    True,
                    operation.validate_request_query({'param': case['value']})
                )
            else:
                with self.assertRaises(ValidationErrors):
                    operation.validate_request_query({'param': case['value']})
    def test_header_success(self):
        """
        Validates that an operation's response headers pass validation when
        conforming to an operation's header param specification.
        """
        headers = Headers([
            ('Authorization', 'bearer woot'),
        ])
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(True, operation.validate_request_headers(headers))
    def test_header_success_with_dict(self):
        """
        Validates that we can also validate headers derived from a dict.
        """
        headers = {
            'Authorization': 'bearer woot'
        }
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(True, operation.validate_request_headers(headers))
    def test_header_insensitive_success(self):
        """
        Validates that header matching is case-insensitive: a lower-cased
        header name still satisfies the header param specification.
        """
        headers = Headers([
            ('authorization', 'bearer woot'),
        ])
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        self.assertEqual(True, operation.validate_request_headers(headers))
    def test_header_error(self):
        """
        Validates that an operation's response headers fail validation when
        not conforming to an operation's header param specification.
        """
        headers = Headers()
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_headers(headers)
        self.assertEqual('Authorization', exc_info.exception.errors[0].field)
    def test_body_success(self):
        """
        Validates that an operation's response body passes validation when
        conforming to an operation's body specification.
        """
        body = {
            'type': 'CandyItem',
            'color': 'red',
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        self.assertEqual(True, operation.validate_request_body(body))
    def test_body_error_unknown_discriminator(self):
        """
        Validates that an operation's response body fails validation when
        the type identified by the discriminator is undefined. In this case,
        'DerpItem' does not appear in the test spec's definitions.
        """
        body = {
            'type': 'DerpItem'
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('type', exc_info.exception.errors[0].field)
    def test_body_error_missing_discriminator(self):
        """
        Validates that an operation's response body fails validation when
        the discriminator field is absent from the body entirely.
        """
        body = {}
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('type', exc_info.exception.errors[0].field)
    def test_body_error_list_discriminator(self):
        """
        Validates that an operation's response body fails validation when
        the discriminator is a list.
        """
        body = {
            'type': ['CandyItem'],
            'color': 'red',
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('type', exc_info.exception.errors[0].field)
    def test_body_failures_non_object(self):
        """
        Validates that non-object bodies (None, scalars, lists) are rejected.
        """
        cases = [None, 5, []]
        for body in cases:
            operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
            with self.assertRaises(ValidationErrors) as exc_info:
                operation.validate_request_body(body)
            self.assertEqual('item', exc_info.exception.errors[0].field)
    def test_body_failures_object(self):
        """
        Validates an empty request body.
        """
        body = {}
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('type', exc_info.exception.errors[0].field)
    def test_body_error_bad_polymorphic_type(self):
        """
        Validates that we also apply the validations for the type referred
        to by the discriminator to the current instance. The fixture is missing
        a required field, `color`.
        """
        body = {
            'type': 'CandyItem'
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('color', exc_info.exception.errors[0].field)
    def test_schemaless_param_success(self):
        """
        Validates that we allow arbitrary bodies when a body parameter
        hasn't been specified.
        """
        body = {'type': 'CandyItem'}
        query = {'woot': 'woot'}
        path = '/null/'
        headers = {'Whatever': 'woot'}
        operation = Operation.from_schema(Schema(SPEC), 'Null')
        self.assertEqual(True, operation.validate_request_body(body))
        self.assertEqual(True, operation.validate_request_query(query))
        self.assertEqual(True, operation.validate_request_path(path))
        self.assertEqual(True, operation.validate_request_headers(headers))
    def test_heterogenous_nested_collection_success(self):
        """
        Tests a heterogenous collection of items within a generic collection
        succeed when each individual item is well-formed with respect to its
        discriminator.
        """
        body = {
            'items': [
                {
                    'type': 'CandyItem',
                    'color': 'red',
                },
                {
                    'type': 'BoogieItem',
                    'groove': 'funky',
                },
            ],
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateCollection')
        self.assertEqual(True, operation.validate_request_body(body))
    def test_heterogenous_nested_collection_error(self):
        """
        Tests that a heterogenous collection of items within a generic
        collection will return validation errors when one or more items is
        ill-formed with respect to its discriminator.
        """
        body = {
            'items': [
                {
                    'type': 'CandyItem',
                    'color': 'red',
                },
                {
                    'type': 'BoogieItem',
                    # Expected error on invalid type.
                    'groove': 10,
                },
                {
                    'type': 'BoogieItem',
                    # Expected error on missing required param.
                },
            ],
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateCollection')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        # Error fields are addressed by their index inside the collection.
        self.assertEqual('items.1.groove', exc_info.exception.errors[0].field)
        self.assertEqual('items.2.groove', exc_info.exception.errors[1].field)
    def test_path_arg_extraction(self):
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        successes = {
            # Correct format
            '/items/5': {
                'ItemID': '5',
            },
            # Almost the correct format but should still pass our intermediate
            # regexp.
            '/items/hello': {
                'ItemID': 'hello',
            },
            # Periods and nonesense
            '/items/.sfs2342': {
                'ItemID': '.sfs2342',
            },
            # Spaces (url-encoded input is decoded before extraction)
            '/items/%20things': {
                'ItemID': ' things',
            },
        }
        for case, expected in six.iteritems(successes):
            self.assertEqual(
                expected,
                operation.extract_path_args(case)
            )
    def test_path_validation_success(self):
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        successes = {
            # Correct format
            '/items/5': True,
        }
        for case, expected in six.iteritems(successes):
            self.assertEqual(
                expected,
                operation.validate_request_path(case)
            )
    def test_path_validation_errors(self):
        # Non-integer and missing path args both fail on the ItemID field.
        operation = Operation.from_schema(Schema(SPEC), 'GetItem')
        errors = {
            '/items/der': 'ItemID',
            '/items/': 'ItemID',
        }
        for case, expected in six.iteritems(errors):
            with self.assertRaises(ValidationErrors) as exc_info:
                operation.validate_request_path(case)
            self.assertEqual(expected, exc_info.exception.errors[0].field)
    # Decorators apply bottom-up, so the mock arguments arrive in the order
    # query, path, body, headers.
    @patch('spectastic.operation.Operation.iter_request_header_errors')
    @patch('spectastic.operation.Operation.iter_request_body_errors')
    @patch('spectastic.operation.Operation.iter_request_path_errors')
    @patch('spectastic.operation.Operation.iter_request_query_errors')
    def test_validate_request(
        self, query_errors, path_errors, body_errors, header_errors
    ):
        """
        Ensures that we validate an entire request.
        """
        body = {'type': 'CandyItem', 'color': 'red'}
        encoded_body = json.dumps(body)
        headers = {
            'Authorization': 'bearer',
            'Content-Type': 'application/json',
        }
        query = {'unknown': 'param'}
        path = '/items/'
        request = BasicRequest(encoded_body, headers, query, path)
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        operation.validate_request(request)
        # Each component validator receives the normalized representation.
        query_errors.assert_called_once_with(MultiDict(query))
        path_errors.assert_called_once_with(path)
        body_errors.assert_called_once_with(body)
        header_errors.assert_called_once_with(Headers(headers))
    def test_complex_allof_discriminator(self):
        """
        In cases like SweetCandyItem which extends CandyItem, which extends
        Item, we want to ensure things behave sensibly.
        """
        body = {
            'type': 'SweetCandyItem',
            'color': 'red',
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        with self.assertRaises(ValidationErrors) as exc_info:
            operation.validate_request_body(body)
        self.assertEqual('sweetness', exc_info.exception.errors[0].field)
    def test_discriminator_non_allof(self):
        """
        Regression test for discriminators that are not within an allOf block.
        """
        body = {
            'type': 'Item',
        }
        operation = Operation.from_schema(Schema(SPEC), 'CreateItem')
        operation.validate_request_body(body)
if __name__ == '__main__':
    # Allow running this test module directly; the process exit status
    # reflects the unittest result.
    import sys
    sys.exit(unittest.main())
|
|
import gzip
import pickle
from collections import Counter
from threading import Lock
from typing import List, Dict, Iterable, Tuple, Optional
import numpy as np
from docqa.dataset import TrainingData, Dataset
from tqdm import tqdm
from docqa.utils import split, flatten_iterable, group, ResourceLoader
from docqa.configurable import Configurable
class Preprocessor(Configurable):
    # Abstract base: transforms raw questions + evidence into an
    # intermediate format consumed by a DatasetBuilder.
    def preprocess(self, question: Iterable, evidence) -> object:
        """ Map elements to an unspecified intermediate format """
        raise NotImplementedError()
    def finalize_chunk(self, x):
        """ Finalize the output from `preprocess`; in multi-processing scenarios this will still be run on
         the main thread so it can be used for things like interning """
        pass
class DatasetBuilder(Configurable):
    # Abstract base: turns preprocessed data into Dataset / statistics objects.
    def build_dataset(self, data, evidence) -> Dataset:
        """ Map the intermediate format to a Dataset object """
        raise NotImplementedError()
    def build_stats(self, data) -> object:
        """ Map the intermediate format to a corpus statistics object, as will be used in `TrainingData` """
        raise NotImplementedError()
class LazyCorpusStatistics(object):
    """Computes corpus word counts on demand from a list of data points."""

    def __init__(self, data: List, special_tokens=None):
        # data: points exposing ``get_text() -> iterable of tokens``
        self.data = data
        self.special_tokens = special_tokens

    def get_word_counts(self):
        """Return a Counter of token frequencies across all data points."""
        word_counts = Counter()
        for sample in self.data:
            word_counts.update(sample.get_text())
        return word_counts
class FilteredData(object):
    """A filtered list of data points that remembers the pre-filter size."""

    def __init__(self, data: List, true_len: int):
        self.data = data
        # Number of examples in the corpus before filtering was applied.
        self.true_len = true_len

    def __add__(self, other):
        combined = self.data + other.data
        return FilteredData(combined, self.true_len + other.true_len)
def _preprocess_and_count(questions: List, evidence, preprocessor: Preprocessor):
    """Run *preprocessor* on *questions*; return (output, number of questions)."""
    # The count is reported separately so the parent process can advance
    # its progress bar even though the output format is opaque.
    return preprocessor.preprocess(questions, evidence), len(questions)
def preprocess_par(questions: List, evidence, preprocessor,
                   n_processes=2, chunk_size=200, name=None):
    """Preprocess *questions* with *preprocessor*, optionally in parallel.

    Splits the questions into chunks of at most *chunk_size*, farms them out
    to *n_processes* workers (``None`` = one process per question, capped by
    the question count), and merges the per-chunk outputs with ``+``.
    """
    if chunk_size <= 0:
        # BUG FIX: the message said ">= 0" but the guard rejects anything <= 0.
        raise ValueError("Chunk size must be > 0, but got %s" % chunk_size)
    if n_processes is not None and n_processes <= 0:
        raise ValueError("n_processes must be >= 1 or None, but got %s" % n_processes)
    # BUG FIX: n_processes=None passed validation above but then crashed in
    # min(); treat None explicitly as "no cap".
    if n_processes is None:
        n_processes = len(questions)
    else:
        n_processes = min(len(questions), n_processes)
    if n_processes == 1:
        # Single-process fast path: no Pool overhead, progress via tqdm.
        out = preprocessor.preprocess(tqdm(questions, desc=name, ncols=80), evidence)
        preprocessor.finalize_chunk(out)
        return out
    else:
        from multiprocessing import Pool
        chunks = split(questions, n_processes)
        chunks = flatten_iterable([group(c, chunk_size) for c in chunks])
        print("Processing %d chunks with %d processes" % (len(chunks), n_processes))
        pbar = tqdm(total=len(questions), desc=name, ncols=80)
        lock = Lock()

        def call_back(results):
            # Runs on the main process: intern/finalize, then advance progress.
            preprocessor.finalize_chunk(results[0])
            with lock:  # FIXME Even with the lock, the progress bar still is jumping around
                pbar.update(results[1])

        with Pool(n_processes) as pool:
            results = [pool.apply_async(_preprocess_and_count, [c, evidence, preprocessor], callback=call_back)
                       for c in chunks]
            results = [r.get()[0] for r in results]
        pbar.close()
        # Merge chunk outputs; the intermediate type must support "+".
        output = results[0]
        for r in results[1:]:
            output += r
        return output
class PreprocessedData(TrainingData):
    """
    Data that goes through a preprocessing pipeline; for TriviaQA this usually
    means loading/choosing what paragraphs we want to train on, then organizing
    them into a dataset with the desired sampling strategy
    """

    def __init__(self,
                 corpus,
                 preprocesser: Optional[Preprocessor],
                 builder: DatasetBuilder,
                 eval_builder: DatasetBuilder,
                 eval_on_verified: bool=True,
                 eval_on_train: bool = True,
                 hold_out_train: Optional[Tuple[int, int]]= None,
                 sample=None, sample_dev=None,
                 sample_preprocessed_train=None, sample_seed=None):
        # hold_out_train: (shuffle seed, count) — carve a dev set out of train.
        self.hold_out_train = hold_out_train
        self.eval_on_train = eval_on_train
        self.sample = sample
        self.eval_on_verified = eval_on_verified
        self.sample_dev = sample_dev
        self.corpus = corpus
        self.preprocesser = preprocesser
        self.builder = builder
        self.eval_builder = eval_builder
        self.sample_preprocessed_train = sample_preprocessed_train
        self.sample_seed = sample_seed
        # Populated lazily by preprocess() / load_preprocess().
        self._train = None
        self._dev = None
        self._verified_dev = None

    @property
    def name(self):
        return self.corpus.name

    def cache_preprocess(self, filename):
        # Refuse to cache sampled/held-out splits: the cache would not be
        # reproducible for other configurations.
        if self.sample is not None or self.sample_dev is not None or self.hold_out_train is not None:
            raise ValueError()
        if filename.endswith("gz"):
            handle = lambda a,b: gzip.open(a, b, compresslevel=3)
        else:
            handle = open
        with handle(filename, "wb") as f:
            pickle.dump([self.preprocesser, self._train, self._dev, self._verified_dev], f)

    def load_preprocess(self, filename):
        print("Loading preprocessed data...")
        if filename.endswith("gz"):
            handle = gzip.open
        else:
            handle = open
        with handle(filename, "rb") as f:
            stored = pickle.load(f)
        stored_preprocesser, self._train, self._dev, self._verified_dev = stored
        # Guard against loading a cache built with a different preprocessor
        # configuration.
        if stored_preprocesser.get_config() != self.preprocesser.get_config():
            # print("WARNING")
            # NOTE(review): leftover interactive debugging hook — drops into a
            # REPL before raising; consider removing for non-interactive runs.
            import code
            code.interact(local=locals())
            raise ValueError()
        print("done")

    def preprocess(self, n_processes=1, chunk_size=500):
        # Idempotent: a second call is a no-op once splits are populated.
        if self._train is not None:
            return
        print("Loading data...")
        train_questions = self.corpus.get_train()

        if self.hold_out_train is not None:
            print("Using held out train")
            # Sort before shuffling so the held-out split is deterministic
            # for a given seed regardless of corpus ordering.
            train_questions.sort(key=lambda q:q.question_id)
            np.random.RandomState(self.hold_out_train[0]).shuffle(train_questions)
            dev_questions = train_questions[:self.hold_out_train[1]]
            train_questions = train_questions[self.hold_out_train[1]:]
        else:
            dev_questions = self.corpus.get_dev()

        if self.eval_on_verified and hasattr(self.corpus, "get_verified"):  # TODO this is a bit hacky
            verified_questions = self.corpus.get_verified()
            if verified_questions is not None:
                # we don't eval on verified docs w/o any valid human answer
                verified_questions = [x for x in verified_questions if
                                      any(len(ans) > 0 for ans in x.answer.human_answers)]
        else:
            verified_questions = None

        rng = np.random.RandomState(self.sample_seed)
        if self.sample is not None:
            l = len(train_questions)
            train_questions = rng.choice(train_questions, self.sample, replace=False)
            print("Sampled %d of %d (%.4f) train questions" % (len(train_questions), l, len(train_questions)/l))

        if self.sample_dev is not None:
            l = len(dev_questions)
            dev_questions = np.random.RandomState(self.sample_seed).choice(dev_questions, self.sample_dev, replace=False)
            print("Sampled %d of %d (%.4f) dev questions" % (len(dev_questions), l, len(dev_questions) / l))

        if self.preprocesser:
            print("Preprocessing with %d processes..." % n_processes)
            out = []
            for name, questions in [("verified", verified_questions),
                                    ("dev", dev_questions),
                                    ("train", train_questions)]:
                if questions is None:
                    out.append(None)
                    continue
                data = preprocess_par(questions, self.corpus.evidence, self.preprocesser, n_processes, chunk_size, name)
                out.append(data)
            self._verified_dev, self._dev, self._train = out
        else:
            self._verified_dev, self._dev, self._train = verified_questions, dev_questions, train_questions

        if self.sample_preprocessed_train:
            if isinstance(self._train, FilteredData):
                l = len(self._train.data)
                self._train.data = rng.choice(self._train.data, self.sample_preprocessed_train, False)
                # Scale the recorded pre-filter length proportionally to the
                # fraction of pairs kept.
                self._train.true_len *= len(self._train.data)/l
                print("Sampled %d of %d (%.4f) q-c pairs" % (len(self._train.data), l, len(self._train.data)/l))
            else:
                l = len(self._train)
                self._train = rng.choice(self._train, self.sample_preprocessed_train, False)
                print("Sampled %d of %d q-c pairs" % (len(self._train), l))
        print("Done")

    def get_train(self) -> Dataset:
        return self.builder.build_dataset(self._train, self.corpus)

    def get_train_corpus(self):
        return self.builder.build_stats(self._train)

    def get_eval(self) -> Dict[str, Dataset]:
        corpus = self.corpus
        eval_set = dict(dev=self.eval_builder.build_dataset(self._dev, corpus))
        if self.eval_on_train:
            eval_set["train"] = self.eval_builder.build_dataset(self._train, corpus)
        if self.eval_on_verified:
            eval_set["verified-dev"] = self.eval_builder.build_dataset(self._verified_dev, corpus)
        return eval_set

    def get_resource_loader(self) -> ResourceLoader:
        return self.corpus.get_resource_loader()

    def __setstate__(self, state):
        # Backwards compatibility with pickles created before these
        # attributes existed.
        if "sample_seed" not in state:
            state["sample_seed"] = None
        if "sample_preprocessed_train" not in state:
            state["sample_preprocessed_train"] = None
        self.__dict__ = state

    def __getstate__(self):
        # Never pickle the (potentially huge) preprocessed splits.
        state = dict(self.__dict__)
        state["_train"] = None
        state["_dev"] = None
        state["_verified_dev"] = None
        return state
|
|
import asyncio
import pytest
from gator.aiojob import (HandlerNotFound, Job, JobSimpleDispatchMixin,
NotAJobClassError)
@pytest.fixture
def dispatch_cls():
    # Fresh dispatcher subclass per test so handler registration does not
    # leak between tests through shared class-level state.
    class Dispatcher(JobSimpleDispatchMixin):
        pass
    return Dispatcher
def test_simple_coro_registration(dispatch_cls):
    # register_handler validates both arguments: the first must be a Job
    # class, the second a handler whose signature takes a ``job`` argument
    # (wrong_handler's mismatched parameter is rejected with TypeError).
    async def handler(job):
        pass
    async def wrong_handler(no_job):
        pass
    with pytest.raises(NotAJobClassError):
        dispatch_cls.register_handler(None, handler)
    with pytest.raises(TypeError):
        dispatch_cls.register_handler(Job, None)
    with pytest.raises(TypeError):
        dispatch_cls.register_handler(Job, wrong_handler)
    dispatch_cls.register_handler(Job, handler)
    disp = dispatch_cls()
    # Registration is class-level: instances share the class handler map.
    assert dispatch_cls.handlers is disp.handlers
    assert Job in disp.handlers
    assert disp.handlers[Job] == handler
def test_decorator_coro_registration(dispatch_cls):
    # The class-level decorator performs the same job-class validation as
    # register_handler, then records the handler on the class.
    with pytest.raises(NotAJobClassError):
        @dispatch_cls.handler(None)
        async def none_handler(job):
            pass

    @dispatch_cls.handler(Job)
    async def handler(job):
        pass

    dispatcher = dispatch_cls()
    assert dispatcher.handlers
    assert Job in dispatcher.handlers
    assert dispatcher.handlers[Job] == handler
@pytest.mark.asyncio
async def test_no_handler(dispatch_cls):
    # Dispatching with no registered handler marks the job as ERROR with a
    # HandlerNotFound exception rather than raising out of _handle.
    dispatcher = dispatch_cls()
    job = Job()
    await dispatcher._handle(job)
    assert job.done()
    assert job.state() == 'ERROR'
    assert isinstance(job.exception(), HandlerNotFound)
@pytest.mark.asyncio
async def test_handle_await_call(dispatch_cls):
    # Awaiting _handle directly returns the completed job itself.
    @dispatch_cls.handler(Job)
    async def handler(job):
        return 'it works'

    dispatcher = dispatch_cls()
    job = Job()
    returned = await dispatcher._handle(job)
    assert returned is job
    assert job.done()
    assert job.state() == 'DONE'
    assert job.result() == 'it works'
@pytest.mark.asyncio
async def test_handle_await_job(dispatch_cls):
    # A job can itself be awaited while _handle runs in the background.
    @dispatch_cls.handler(Job)
    async def handler(job):
        return 'it works'

    dispatcher = dispatch_cls()
    job = Job()
    asyncio.ensure_future(dispatcher._handle(job))
    await job
    assert job.done()
    assert job.state() == 'DONE'
    assert job.result() == 'it works'
@pytest.mark.asyncio
async def test_flat_function_handler(dispatch_cls):
    # Plain (non-coroutine) functions are accepted as handlers too.
    @dispatch_cls.handler(Job)
    def handler(job):
        return 'it works'

    dispatcher = dispatch_cls()
    job = Job()
    asyncio.ensure_future(dispatcher._handle(job))
    await job
    assert job.done()
    assert job.state() == 'DONE'
    assert job.result() == 'it works'
@pytest.mark.asyncio
async def test_handle_exception_raised(dispatch_cls):
    # A handler exception is captured on the job, not propagated.
    class DummyError(Exception):
        pass

    @dispatch_cls.handler(Job)
    async def handler(job):
        raise DummyError()

    dispatcher = dispatch_cls()
    handled = await dispatcher._handle(Job())
    assert handled.done()
    assert handled.state() == 'ERROR'
    assert isinstance(handled.exception(), DummyError)
@pytest.mark.asyncio
async def test_handle_timeout(dispatch_cls):
    # A handler exceeding the dispatcher timeout errors the job with
    # asyncio.TimeoutError.
    @dispatch_cls.handler(Job)
    async def handler(job):
        await asyncio.sleep(60)

    dispatcher = dispatch_cls()
    dispatcher.timeout = 0.1
    handled = await dispatcher._handle(Job())
    assert handled.done()
    assert handled.state() == 'ERROR'
    assert isinstance(handled.exception(), asyncio.TimeoutError)
@pytest.mark.asyncio
async def test_handler_inheritance(dispatch_cls):
    # Handler maps are layered per class (ChainMap): a child sees its own
    # registrations first, then its parents' — so it can inherit, overload,
    # or add handlers without affecting the parent.
    class Parent(dispatch_cls):
        pass
    class Child(Parent):
        pass
    class JobA(Job):
        pass
    class JobB(Job):
        pass
    class JobC(Job):
        pass
    @Parent.handler(JobA)
    async def handler_a(job):
        return 'handler_a'
    @Parent.handler(JobB)
    async def handler_b(job):
        return 'handler_b'
    @Child.handler(JobB)
    async def handler_b_overload(job):
        return 'handler_b_overload'
    @Child.handler(JobC)
    async def handler_c(job):
        return 'handler_c'
    child = Child()
    parent = Parent()
    # The maps list mirrors the MRO, innermost class first.
    assert child.handlers.maps == [
        {
            JobB: handler_b_overload,
            JobC: handler_c,
        },
        {
            JobA: handler_a,
            JobB: handler_b
        },
        {},  # dispatch_cls
        {}   # SimpleDispatchMixin
    ]
    assert parent.handlers.maps == [
        {
            JobA: handler_a,
            JobB: handler_b
        },
        {},  # dispatch_cls
        {}   # SimpleDispatchMixin
    ]
    assert parent.handlers[JobA] == handler_a
    assert parent.handlers[JobB] == handler_b
    assert JobC not in parent.handlers
    assert child.handlers[JobA] == handler_a  # inherited handler
    assert child.handlers[JobB] == handler_b_overload  # overloaded handler
    assert child.handlers[JobC] == handler_c  # specific handler
    job = await parent._handle(JobA())
    assert job.result() == 'handler_a'
    job = await parent._handle(JobB())
    assert job.result() == 'handler_b'
    # The parent has no JobC handler, so dispatch errors the job.
    job = await parent._handle(JobC())
    assert job.state() == 'ERROR'
    assert isinstance(job.exception(), HandlerNotFound)
    job = await child._handle(JobA())
    assert job.result() == 'handler_a'
    job = await child._handle(JobB())
    assert job.result() == 'handler_b_overload'
    job = await child._handle(JobC())
    assert job.result() == 'handler_c'
@pytest.mark.asyncio
async def test_overwrite_handler(dispatch_cls):
    # Re-registering for the same job class replaces the handler, and
    # deleting the entry removes dispatching for that class.
    dispatcher = dispatch_cls()

    @dispatcher.handler(Job)
    async def handler_old(job):
        return 'handler_old'

    assert dispatcher.handlers[Job] == handler_old
    job = await dispatcher._handle(Job())
    assert job.result() == 'handler_old'

    @dispatcher.handler(Job)
    async def handler_new(job):
        return 'handler_new'

    assert dispatcher.handlers[Job] == handler_new
    job = await dispatcher._handle(Job())
    assert job.result() == 'handler_new'

    del dispatcher.handlers[Job]
    assert Job not in dispatcher.handlers
@pytest.mark.asyncio
async def test_key_inheritance(dispatch_cls):
    # A handler registered for a base Job class also dispatches subclasses,
    # even though the subclass is not literally a key in the map.
    dispatcher = dispatch_cls()

    class Child(Job):
        pass

    @dispatcher.handler(Job)
    async def handler(job):
        return 'handler'

    assert Child not in dispatcher.handlers
    job = await dispatcher._handle(Child())
    assert job.state() == 'DONE'
    assert job.result() == 'handler'
|
|
import sys
sys.path.insert(0,'../')
if sys.version_info[:2] == (2,6):
import unittest2 as unittest
else:
import unittest
import os
import re
import datetime,time
from rivets_test import RivetsTest
import rivets
import execjs
import lean
class EnvironmentTests(object):
def testWorkingDirectoryIsDefaultRoot(self):
''' Test working directory is the default root '''
self.assertEqual(os.path.realpath(os.curdir),self.env.root)
	def testActiveCSSCompressor(self):
		''' Test active css compressor '''
		# No css compressor is configured by default.
		self.assertIsNone(self.env.processors.css_compressor)
def testActiveJSCompressor(self):
''' Test active js compressor '''
self.assertIsNone(self.env.processors.js_compressor)
def testPaths(self):
''' Test paths '''
self.assertEqual(
[self.fixture_path("default")],
list(self.env.paths)
)
def testRegisterGlobalPath(self):
''' Test register global path '''
self.assertEqual(
[self.fixture_path('default')],
list(self.new_environment().paths)
)
rivets.path_registry.append_path(self.fixture_path('asset'))
self.assertEqual(
[self.fixture_path("asset"),self.fixture_path('default')],
list(self.new_environment().paths)
)
rivets.path_registry.clear_paths()
def testExtensions(self):
''' Test extensions '''
for ext in ['coffee','scss','str','mako']:
assert ".%s"%ext in self.env.engines.engine_extensions
for ext in ['js','css']:
assert not ".%s"%ext in self.env.engines.engine_extensions
def testFormatExtensions(self):
''' Test format extensions '''
for ext in ['js','css']:
assert ".%s"%ext in self.env.format_extensions
for ext in ['coffee','scss','str','mako']:
assert not ".%s"%ext in self.env.format_extensions
def testAssetDataURIHelper(self):
''' Test asset_data_uri helper '''
asset = self.env["with_data_uri.css"]
self.assertEqual(
"body {\n background-image: url(data:image/gif;base64,R0lGODlhAQABAIAAAP%2F%2F%2FwAAACH5BAAAAAAALAAAAAABAAEAAAICRAEAOw%3D%3D) no-repeat;\n}\n",
str(asset)
)
def testLookupMimeType(self):
''' Test lookup mime type '''
self.assertEqual("application/javascript",self.env.mimetypes[".js"])
self.assertEqual("application/javascript",self.env.mimetypes["js"])
self.assertEqual("text/css",self.env.mimetypes[".css"])
self.assertEqual(None,self.env.mimetypes["foo"])
self.assertEqual(None,self.env.mimetypes["foo"])
def testLookupBundleProcessors(self):
''' Test lookup bundle processors '''
self.assertEqual([],self.env.processors.get_bundleprocessors('application/javascript'))
self.assertEqual(
[rivets.processing.CharsetNormalizer],
self.env.processors.get_bundleprocessors('text/css')
)
def testLookupCompressors(self):
''' Test lookup comrpessors '''
self.assertEqual(
rivets.processing.CSSMinCompressor,
self.env.processors.get_compressors('text/css')['cssmin']
)
self.assertEqual(
rivets.processing.UglipyJSCompressor,
self.env.processors.get_compressors('application/javascript')['uglify']
)
def testResolveInEnvironment(self):
''' Test resolve in environment '''
self.assertEqual(
self.fixture_path('default/gallery.js'),
self.env.resolve('gallery.js')
)
self.assertEqual(
self.fixture_path('default/coffee/foo.coffee'),
self.env.resolve("coffee/foo.js")
)
def testMissingFileRaisesAnException(self):
''' Test missing file raises an exception '''
self.assertRaises(
rivets.errors.FileNotFound,
self.env.resolve,
'null'
)
def testFindBundleAssetInEnvironment(self):
''' Test find bundled asset in environment '''
self.assertEqual(
"var Gallery = {};\n",
str(self.env['gallery.js'])
)
def testFindBundledAssetWithAbsolutePathEnvironment(self):
''' Test find bundled asset with absolute path environment '''
self.assertEqual(
"var Gallery = {};\n",
str(self.env[self.fixture_path('default/gallery.js')])
)
def testFindBundledAssetWithImplicitFormat(self):
''' Test find bundled asset with implicit format '''
self.assertEqual(
"(function() {\n var foo;\n\n foo = 'hello';\n\n}).call(this);\n",
str(self.env['coffee/foo.js'])
)
def testFindStaticAssetInEnvironment(self):
''' Test find static asset in environment '''
self.assertEqual(
'Hello world\n',
str(self.env['hello.txt'])
)
def testFindStaticAssetWithLeadingSlashInEnvironment(self):
''' Test find static asset with leading slash in environment '''
self.assertEqual(
"Hello world\n",
str(self.env[self.fixture_path('default/hello.txt')])
)
def testFindIndexJSInDirectory(self):
''' Test find index.js in directory '''
self.assertEqual(
"var A;\nvar B;\n",
str(self.env['mobile.js'])
)
def testFindIndexCssInDirectory(self):
''' Test index.css in directory '''
self.assertEqual(
".c {}\n.d {}\n/*\n\n */\n\n",
str(self.env['mobile.css'])
)
def testFindComponentJsonInDirectory(self):
''' Test find component.json in directory '''
self.assertEqual(
'var bower;\n',
str(self.env['bower.js'])
)
def testFindMultipleComponentJsonInDirectory(self):
''' Test find multiple component.json in directory '''
self.assertEqual(
'var qunit;\n',
str(self.env['qunit.js'])
)
self.assertEqual(
'.qunit {}\n',
str(self.env['qunit.css'])
)
def testMissingStaticPathReturnsNone(self):
''' Test missing static path returns None '''
self.assertIsNone(self.env[self.fixture_path('default/missing.png')])
def testFindStaticDirectoryReturnsNone(self):
''' Test find static directory returns none '''
self.assertIsNone(self.env['images'])
def testMissingAssetReturnsNone(self):
''' Test missing asset returns None '''
self.assertIsNone(self.env['missing.js'])
def testMissingAssetPathReturnsNone(self):
''' Test missing asset path returns None '''
self.assertIsNone(self.env[self.fixture_path('default/missing.js')])
def testAssetWithMissingRequiresRaisesAnException(self):
''' Test asset with missing requires an exception '''
self.assertRaises(
rivets.errors.FileNotFound,
self.env.find_asset,
'missing_require.js'
)
def testAssetWithMissingDependOnRaisesAnException(self):
''' Test asset with missing depend_on raises an exception '''
self.assertRaises(
rivets.errors.FileNotFound,
self.env.find_asset,
'missing_depend_on.js'
)
def testAssetWithMissingAbsoluteDependOnRaisesAnException(self):
''' Test asset with missing absolute depend_on raises an exception '''
self.assertRaises(
rivets.errors.FileNotFound,
self.env.find_asset,
'missing_absolute_depend_on.js'
)
def testAssetLogicalPathForAbsolutePath(self):
''' Test asset logical path for absolute path '''
self.assertEqual(
'gallery.js',
self.env[self.fixture_path("default/gallery.js")].logical_path
)
self.assertEqual(
'application.js',
self.env[self.fixture_path("default/application.js.coffee")].logical_path
)
self.assertEqual(
'mobile/a.js',
self.env[self.fixture_path("default/mobile/a.js")].logical_path
)
ENTRIES_IN_PATH = 44
def testIterateOverEachEntry(self):
''' Test iterate over each entry '''
entries = []
def do_test(path):
entries.append(path)
self.env.each_entry(self.fixture_path("default"),do_test)
self.assertEqual(self.ENTRIES_IN_PATH,len(entries))
def testEachEntryEnumerator(self):
''' Test each entry enumerator '''
enum = self.env.each_entry(self.fixture_path('default'))
self.assertEqual(self.ENTRIES_IN_PATH,len(enum))
FILES_IN_PATH = 37
def testIterateOverEachFile(self):
''' Test iterate over each file '''
files = []
def do_test(filename):
files.append(filename)
self.env.each_file(callback=do_test)
for file in files:
print file
self.assertEqual(self.FILES_IN_PATH,len(files))
def testEachFileEnumerator(self):
''' Test each file enumerator '''
enum = self.env.each_file()
self.assertEqual(self.FILES_IN_PATH,len(enum))
def testIterateOverEachLogicalPath(self):
''' Test iterate over each logical path '''
paths = []
def do_test(logical_path,filename):
paths.append(logical_path)
self.env.each_logical_path(callback=do_test)
self.assertEqual(self.FILES_IN_PATH,len(paths))
self.assertEqual(len(paths),len(set(paths)),'Has Duplicates')
assert 'application.js' in paths
assert 'coffee/foo.js' in paths
assert 'coffee/index.js' in paths
assert not 'coffee' in paths
def testIterateOverEachLogicalPathAndFilename(self):
''' Test iterate over each logical path and filename '''
paths = []
filenames = []
def do_test(logical_path,filename):
paths.append(logical_path)
filenames.append(filename)
self.env.each_logical_path(callback=do_test)
self.assertEqual(self.FILES_IN_PATH,len(paths))
self.assertEqual(len(paths),len(set(paths)),'Has Duplicates')
assert 'application.js' in paths
assert 'coffee/foo.js' in paths
assert 'coffee/index.js' in paths
assert not 'coffee' in paths
match = None
for p in filenames:
if re.search(r"""application\.js\.coffee""",p):
match = p
assert match
def testEachLogicalPathEnumerator(self):
''' Test each logical path enumerator '''
enum = self.env.each_logical_path()
self.assertIsInstance(enum[0],str)
self.assertEqual(self.FILES_IN_PATH,len(list(enum)))
def testIterateOverEachLogicalPathMatchingFNMatchFilter(self):
''' Test iterate over each logical path matching fnmatch filters '''
paths = []
def do_test(path,filename):
paths.append(path)
self.env.each_logical_path('*.js',callback=do_test)
assert 'application.js' in paths
assert 'coffee/foo.js' in paths
assert 'gallery.css' not in paths
def testIterateOverEachLogicalPathMatchesIndexFiles(self):
''' Test iterate over each logical path matches index files '''
paths = []
def do_test(path,filename):
paths.append(path)
self.env.each_logical_path("coffee.js",callback=do_test)
assert 'coffee.js' in paths
assert 'coffee/index.js' not in paths
def testEachLogicalPathEnumeratorMatchingFNMatchFilters(self):
''' Test each logical path enumerator matching fnmatch filters '''
paths = []
enum = self.env.each_logical_path('*.js')
for logical_path in list(enum):
paths.append(logical_path)
assert "application.js" in paths
assert "coffee/foo.js" in paths
assert "gallery.css" not in paths
def testIterateOverEachLogicalPathMatchingRegexpFilters(self):
''' Test iterate over each logical path matching regexp filters '''
paths = []
def do_test(path,filename):
paths.append(path)
self.env.each_logical_path(re.compile(r""".*\.js"""),callback=do_test)
assert "application.js" in paths
assert "coffee/foo.js" in paths
assert "gallery.css" not in paths
def testIterateOverEachLogicalPathMatchingProcFilters(self):
''' Test iterate over each logical path matching proc filters '''
paths = []
def proc(path,fn):
name,ext = os.path.splitext(path)
return ext=='.js'
def do_test(path,filename):
paths.append(path)
self.env.each_logical_path(proc,callback=do_test)
assert "application.js" in paths
assert "coffee/foo.js" in paths
assert "gallery.css" not in paths
def testIterateOverEachLogicalPathMatchingProcFiltersWithFullPathArg(self):
''' Test iterate over each logical path matching proc filters with full path arg '''
paths = []
def proc(path,fn):
return re.match(re.escape(self.fixture_path('default/mobile')),fn)
def do_test(path,filename):
paths.append(path)
self.env.each_logical_path(proc,callback=do_test)
assert "mobile/a.js" in paths
assert "mobile/b.js" in paths
assert "application.js" not in paths
def testCoffeeScriptFilesCompiledInClosure(self):
''' CoffeeScript files are compiled in a closure '''
script = str(self.env['coffee'])
import sys
sys.stdout.write(script)
import execjs
self.assertEqual("undefined",execjs.exec_(script))
class WhitespaceCompressor(object):
    """Minimal compressor used as a registration fixture in the tests.

    Strips every whitespace character from the given source text.
    """

    def compress(self, source):
        """Return *source* with all whitespace (spaces, tabs, newlines) removed.

        Bug fix: the original referenced ``self.source``, an attribute that
        is never set, instead of the ``source`` parameter — calling it would
        raise AttributeError.
        """
        return re.sub(r"\s+", "", source)
class TestEnvironment(RivetsTest,EnvironmentTests):
    """Runs the shared EnvironmentTests against a mutable rivets Environment,
    plus mutation tests that only make sense on the non-frozen environment.
    """

    def new_environment(self,callback=None):
        # Fresh environment rooted at cwd with the default fixture path and an
        # in-memory cache; `callback` may transform it before use.
        env = rivets.Environment('.')
        env.append_path(self.fixture_path('default'))
        env.cache = {}
        return callback(env) if callback else env

    def setUp(self):
        self.env = self.new_environment()

    def testChangingPaths(self):
        ''' Test changing paths '''
        self.env.clear_paths()
        self.env.append_path(self.fixture_path('asset'))

    def testRegisterMimeType(self):
        ''' Test register mime type '''
        assert not self.env.mimetypes['jst']
        self.env.register_mimetype('jst','application/javascript')
        self.assertEqual('application/javascript',self.env.mimetypes['jst'])

    def testRegisterBundleProcessor(self):
        ''' Test register bundle processor '''
        assert WhitespaceCompressor not in self.env.processors.get_bundleprocessors('text/css')
        self.env.register_bundleprocessor('text/css',WhitespaceCompressor)
        assert WhitespaceCompressor in self.env.processors.get_bundleprocessors('text/css')

    def testRegisterCompressor(self):
        ''' Test register compressor '''
        # Python 2 dict.has_key; note the first lookup deliberately probes a
        # misspelled key ('whitepace') that must be absent.
        assert not self.env.processors.get_compressors('text/css').has_key('whitepace')
        self.env.register_compressor('text/css','whitespace',WhitespaceCompressor)
        assert self.env.processors.get_compressors('text/css').has_key('whitespace')

    def testRegisterGlobalBlockPreprocessor(self):
        ''' Test register global block preprocessor '''
        old_size = len(self.new_environment().processors.get_preprocessors('text/css'))
        def process(context,data):
            return data
        # Global registration affects newly created environments only.
        rivets.processing.processor_registry.register_preprocessor('text/css','foo',callback=process)
        self.assertEqual(old_size+1,len(self.new_environment().processors.get_preprocessors('text/css')))
        rivets.processing.processor_registry.unregister_preprocessor('text/css','foo')
        self.assertEqual(old_size,len(self.new_environment().processors.get_preprocessors('text/css')))

    def testUnregisterCustomBlockPreprocessor(self):
        ''' Test unregister global block preprocessor '''
        old_size = len(self.env.processors.get_preprocessors('text/css'))
        def process(context,data):
            return data
        self.env.register_preprocessor('text/css','foo',callback=process)
        self.assertEqual(old_size+1,len(self.env.processors.get_preprocessors('text/css')))
        self.env.unregister_preprocessor('text/css','foo')
        self.assertEqual(old_size,len(self.env.processors.get_preprocessors('text/css')))

    def testRegisterGlobalBlockPostprocessor(self):
        ''' Test register global block postprocessor '''
        old_size = len(self.new_environment().processors.get_postprocessors('text/css'))
        def process(context,data):
            return data
        rivets.processing.processor_registry.register_postprocessor('text/css','foo',callback=process)
        self.assertEqual(old_size+1,len(self.new_environment().processors.get_postprocessors('text/css')))
        rivets.processing.processor_registry.unregister_postprocessor('text/css','foo')
        self.assertEqual(old_size,len(self.new_environment().processors.get_postprocessors('text/css')))

    def testUnregisterCustomBlockPostprocessor(self):
        ''' Test unregister global block postprocessor '''
        old_size = len(self.env.processors.get_postprocessors('text/css'))
        def process(context,data):
            return data
        self.env.register_postprocessor('text/css','foo',callback=process)
        self.assertEqual(old_size+1,len(self.env.processors.get_postprocessors('text/css')))
        self.env.unregister_postprocessor('text/css','foo')
        self.assertEqual(old_size,len(self.env.processors.get_postprocessors('text/css')))

    def testUnregisterCustomBlockBundleProcessor(self):
        ''' Test unregister global block bundle processor '''
        old_size = len(self.env.processors.get_bundleprocessors('text/css'))
        def process(context,data):
            return data
        self.env.register_bundleprocessor('text/css','foo',callback=process)
        self.assertEqual(old_size+1,len(self.env.processors.get_bundleprocessors('text/css')))
        self.env.unregister_bundleprocessor('text/css','foo')
        self.assertEqual(old_size,len(self.env.processors.get_bundleprocessors('text/css')))

    def testRegisterGlobalBundleProcessor(self):
        ''' Test register global bundle processor '''
        assert WhitespaceCompressor not in self.env.processors.get_bundleprocessors('text/css')
        rivets.processing.processor_registry.register_bundleprocessor('text/css',WhitespaceCompressor)
        env = self.new_environment()
        assert WhitespaceCompressor in env.processors.get_bundleprocessors('text/css')
        rivets.processing.processor_registry.unregister_bundleprocessor('text/css',WhitespaceCompressor)

    def testSettingCSSCompressorToNoneClearsCurrentCompressor(self):
        ''' Test setting css compressor to None clears current compressor '''
        self.env.css_compressor = WhitespaceCompressor
        assert self.env.css_compressor
        self.env.css_compressor = None
        self.assertIsNone(self.env.css_compressor)

    def testSettingJSCompressorToNoneClearsCurrentCompressor(self):
        ''' Test setting js compressor to None clears current compressor '''
        self.env.js_compressor = WhitespaceCompressor
        assert self.env.js_compressor
        self.env.js_compressor = None
        self.assertIsNone(self.env.js_compressor)

    def testSettingJSCompressorToLeanHandler(self):
        ''' Test setting js compressor to Lean handler '''
        self.assertIsNone(self.env.js_compressor)
        self.env.js_compressor = rivets.processing.UglipyJSCompressor
        self.assertEqual(rivets.processing.UglipyJSCompressor,self.env.js_compressor)
        self.env.js_compressor = None
        self.assertIsNone(self.env.js_compressor)

    def testSettingCSSCompressorToLeanHandler(self):
        ''' Test setting css compressor to Lean handler '''
        self.assertIsNone(self.env.css_compressor)
        self.env.css_compressor = rivets.processing.CSSMinCompressor
        self.assertEqual(rivets.processing.CSSMinCompressor,self.env.css_compressor)
        self.env.css_compressor = None
        self.assertIsNone(self.env.css_compressor)

    def testSettingJSCompressorToString(self):
        ''' Test setting js compressor to string '''
        # Assigning a registered name resolves it to the compressor class.
        self.assertIsNone(self.env.js_compressor)
        self.env.js_compressor = 'uglifier'
        self.assertEqual(rivets.processing.UglipyJSCompressor,self.env.js_compressor)
        self.env.js_compressor = None
        self.assertIsNone(self.env.js_compressor)

    def testSettingCSSCompressorToString(self):
        ''' Test setting css compressor to string '''
        self.assertIsNone(self.env.css_compressor)
        self.env.css_compressor = 'cssmin'
        self.assertEqual(rivets.processing.CSSMinCompressor,self.env.css_compressor)
        self.env.css_compressor = None
        self.assertIsNone(self.env.css_compressor)

    def testChangingDigestImplementationClass(self):
        ''' Test changing digest implementation class '''
        old_digest = self.env.digest.hexdigest()
        old_asset_digest = self.env['gallery.js'].digest
        import hashlib
        # Swapping the hash class must invalidate environment and asset digests.
        self.env.digest_class = hashlib.sha1
        self.assertNotEqual(old_digest,self.env.digest.hexdigest())
        self.assertNotEqual(old_asset_digest,self.env['gallery.js'].digest)

    def testChangingDigestVersion(self):
        ''' Test changing digest version '''
        old_digest = self.env.digest.hexdigest()
        old_asset_digest = self.env['gallery.js'].digest
        self.env.version = 'v2'
        self.assertNotEqual(old_digest,self.env.digest.hexdigest())
        self.assertNotEqual(old_asset_digest,self.env['gallery.js'].digest)

    def testBundledAssetIsStaleIfItsMTimeIsUpdatedorDeleted(self):
        ''' Test bundled asset is stale if its mtime is updated or deleted '''
        filename = os.path.join(self.fixture_path('default'),"tmp.js")
        def do_test():
            self.assertIsNone(self.env['tmp.js'])
            f = open(filename,'w')
            f.write('foo;')
            f.close()
            self.assertEqual('foo;\n',str(self.env['tmp.js']))
            f = open(filename,'w')
            f.write('bar;')
            f.close()
            # Bump mtime one second into the future so the cache sees a change
            # even on coarse-grained filesystems.
            new_time = time.mktime((datetime.datetime.now()+datetime.timedelta(seconds=1)).timetuple())
            os.utime(filename,(new_time,new_time))
            self.assertEqual("bar;\n",str(self.env['tmp.js']))
            os.unlink(filename)
            self.assertIsNone(self.env['tmp.js'])
        self.sandbox(filename,callback=do_test)

    def testStaticAssetIsStaleIfItsMTimeIsUpdatedorDeleted(self):
        ''' Test static asset is stale if its mtime is updated or deleted '''
        filename = os.path.join(self.fixture_path('default'),"tmp.png")
        def do_test():
            self.assertIsNone(self.env['tmp.png'])
            f = open(filename,'w')
            f.write('foo;')
            f.close()
            self.assertEqual('foo;',str(self.env['tmp.png']))
            f = open(filename,'w')
            f.write('bar;')
            f.close()
            new_time = time.mktime((datetime.datetime.now()+datetime.timedelta(seconds=1)).timetuple())
            os.utime(filename,(new_time,new_time))
            self.assertEqual("bar;",str(self.env['tmp.png']))
            os.unlink(filename)
            self.assertIsNone(self.env['tmp.png'])
        self.sandbox(filename,callback=do_test)

    def testBundledAssetCachedIfTheresAnErrorBuildingIt(self):
        ''' Test bundled asset cached if there's an error building it '''
        self.env.cache = None
        filename = os.path.join(self.fixture_path('default'),'tmp.coffee')
        def do_test():
            # '-->' is invalid CoffeeScript, so compilation must fail...
            f = open(filename,'w')
            f.write('-->')
            f.close()
            self.assertRaises(
                execjs.ProgramError,
                self.env.find_asset,
                'tmp.js'
            )
            # ...and after fixing the source ('->') the asset builds again.
            f = open(filename,'w')
            f.write('->')
            f.close()
            new_time = time.mktime((datetime.datetime.now()+datetime.timedelta(seconds=1)).timetuple())
            os.utime(filename,(new_time,new_time))
            self.assertEqual("(function() {\n\n  (function() {});\n\n}).call(this);\n",str(self.env['tmp.js']))
        self.sandbox(filename,callback=do_test)

    def testSeperateContextsClassesForEachInstance(self):
        ''' Test seperate contexts classes for each instance '''
        e1 = self.new_environment()
        e2 = self.new_environment()
        self.assertRaises(
            AttributeError,
            getattr,
            e1.context_class,
            'foo'
        )
        self.assertRaises(
            AttributeError,
            getattr,
            e2.context_class,
            'foo'
        )
        def foo(self):
            pass
        def bind_method(func,klass):
            # Python 2 only: `new.instancemethod` builds an unbound method.
            import new
            method = new.instancemethod(func,None,klass)
            setattr(klass, func.__name__,method)
        # Patching e1's context class must not leak into e2's.
        bind_method(foo,e1.context_class)
        assert getattr(e1.context_class,'foo')
        self.assertRaises(
            AttributeError,
            getattr,
            e2.context_class,
            'foo'
        )

    def testRegisteringEngineAddsToTheEnvironmentsExtensions(self):
        ''' Test registering engine adds to the environments extensions '''
        assert not self.env.engines['.foo']
        assert ".foo" not in self.env.extensions
        self.env.register_engine('.foo',lean.StringTemplate)
        assert self.env.engines['.foo']
        assert ".foo" in self.env.extensions

    def testSeperateEnginesForEachInstance(self):
        ''' Test seperate engines for each instance '''
        e1 = self.new_environment()
        e2 = self.new_environment()
        self.assertIsNone(e1.engines['.foo'])
        self.assertIsNone(e2.engines['.foo'])
        e1.register_engine('.foo',lean.StringTemplate)
        assert e1.engines['.foo']
        self.assertIsNone(e2.engines['foo'])

    def testDisablingDefaultDirectiveProcessor(self):
        ''' Test disabling default directive processor '''
        # With the DirectiveProcessor gone, `=require` comments pass through
        # verbatim instead of being expanded.
        self.env.unregister_preprocessor('application/javascript',rivets.processing.DirectiveProcessor)
        self.assertEqual(
            "// =require \"notfound\"\n;\n",
            str(self.env["missing_require.js"])
        )
class TestIndex(RivetsTest,EnvironmentTests):
    """Runs the shared EnvironmentTests against the frozen Index view of an
    environment, plus immutability checks specific to the index.
    """

    def new_environment(self,callback=None):
        # Same construction as TestEnvironment, but the tests operate on the
        # environment's immutable `.index` snapshot.
        env = rivets.Environment('.')
        env.append_path(self.fixture_path('default'))
        env.cache = {}
        return callback(env).index if callback else env.index

    def setUp(self):
        self.env = self.new_environment()

    def testDoesNotAllowNewMimeTypesToBeAdded(self):
        ''' Test does not allow new mime types to be added '''
        self.assertRaises(
            TypeError,
            self.env.register_mimetype,
            ".jst",
            "application/javascript"
        )

    def testChangeInEnvironmentMimeTypesDoesNotAffectIndex(self):
        ''' Test change in environment mime types does not affect index '''
        env = rivets.Environment('.')
        env.register_mimetype('.jst','application/javascript')
        index = env.index
        self.assertEqual('application/javascript',index.mimetypes['.jst'])
        # Unregistering on the live environment leaves the snapshot intact.
        env.register_mimetype(".jst",None)
        self.assertEqual('application/javascript',index.mimetypes['.jst'])

    def testDoesNotAllowBundleProcessorsToBeAdded(self):
        ''' Test does not allow bundle processors to be added '''
        self.assertRaises(
            TypeError,
            self.env.register_bundleprocessor,
            'text/css',
            WhitespaceCompressor
        )

    def testDoesNotAllowBundleProcessorsToBeRemoved(self):
        ''' Test does not allow bundle processors to be removed '''
        self.assertRaises(
            TypeError,
            self.env.unregister_bundleprocessor,
            'text/css',
            WhitespaceCompressor
        )

    def testChangeInEnvironmentBundleProcessorsDoesNotAffectIndex(self):
        ''' Test change in environment bundle processors does not affect index '''
        env = rivets.Environment('.')
        index = env.index
        assert WhitespaceCompressor not in index.processors.get_bundleprocessors('text/css')
        env.register_bundleprocessor('text/css',WhitespaceCompressor)
        assert WhitespaceCompressor not in index.processors.get_bundleprocessors('text/css')

    def testDoesNotAllowJSCompressorToBeChanged(self):
        ''' Test does not allow JS compressor to be changed '''
        self.assertRaises(
            TypeError,
            self.env.js_compressor,
            WhitespaceCompressor
        )

    def testDoesNotAllowCSSCompressorToBeChanged(self):
        ''' Test does not allow CSS compressor to be changed '''
        self.assertRaises(
            TypeError,
            self.env.css_compressor,
            WhitespaceCompressor
        )

    def testChangeInEnvironmentEnginesDoesNotAffectIndex(self):
        ''' Test change in environment engines does not affect index '''
        env = rivets.Environment('.')
        index = env.index
        self.assertIsNone(env.engines['.foo'])
        self.assertIsNone(index.engines['.foo'])
        env.register_engine('.foo',lean.StringTemplate)
        assert env.engines['.foo']
        self.assertIsNone(index.engines['.foo'])
if __name__ == '__main__':
    # Run the TestEnvironment / TestIndex suites when executed directly.
    unittest.main()
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""End to end tests for ChromeDriver."""
import ctypes
import optparse
import os
import sys
import unittest
import chromedriver
import webserver
from webelement import WebElement
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_THIS_DIR, os.pardir, 'pylib'))
from common import chrome_paths
from common import unittest_util
class ChromeDriverTest(unittest.TestCase):
    """End to end tests for ChromeDriver.

    Each test drives a real Chrome instance via the chromedriver library;
    the library and binary paths come from the module-level globals
    assigned in the __main__ block.
    """

    @staticmethod
    def GlobalSetUp():
        # Shared HTTP server serving Chrome's test data for all tests.
        ChromeDriverTest._http_server = webserver.WebServer(
            chrome_paths.GetTestData())

    @staticmethod
    def GlobalTearDown():
        ChromeDriverTest._http_server.Shutdown()

    @staticmethod
    def GetHttpUrlForFile(file_path):
        return ChromeDriverTest._http_server.GetUrl() + file_path

    def setUp(self):
        self._driver = chromedriver.ChromeDriver(_CHROMEDRIVER_LIB, _CHROME_BINARY)

    def tearDown(self):
        self._driver.Quit()

    def testStartStop(self):
        # Session creation/teardown alone (setUp/tearDown) is the test.
        pass

    def testLoadUrl(self):
        self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))

    def testEvaluateScript(self):
        self.assertEquals(1, self._driver.ExecuteScript('return 1'))
        self.assertEquals(None, self._driver.ExecuteScript(''))

    def testEvaluateScriptWithArgs(self):
        script = ('document.body.innerHTML = "<div>b</div><div>c</div>";' +
                  'return {stuff: document.querySelectorAll("div")};')
        stuff = self._driver.ExecuteScript(script)['stuff']
        script = 'return arguments[0].innerHTML + arguments[1].innerHTML';
        self.assertEquals(
            'bc', self._driver.ExecuteScript(script, stuff[0], stuff[1]))

    def testEvaluateInvalidScript(self):
        self.assertRaises(chromedriver.ChromeDriverException,
                          self._driver.ExecuteScript, '{{{')

    def testSwitchToFrame(self):
        self._driver.ExecuteScript(
            'var frame = document.createElement("iframe");'
            'frame.id="id";'
            'frame.name="name";'
            'document.body.appendChild(frame);')
        # Frames are addressable by id, by name, and by index; switching back
        # to the main frame restores window.top == window.
        self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
        self._driver.SwitchToFrame('id')
        self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
        self._driver.SwitchToMainFrame()
        self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
        self._driver.SwitchToFrame('name')
        self.assertTrue(self._driver.ExecuteScript('return window.top != window'))
        self._driver.SwitchToMainFrame()
        self.assertTrue(self._driver.ExecuteScript('return window.top == window'))
        self._driver.SwitchToFrameByIndex(0)
        self.assertTrue(self._driver.ExecuteScript('return window.top != window'))

    def testGetTitle(self):
        script = 'document.title = "title"; return 1;'
        self.assertEquals(1, self._driver.ExecuteScript(script))
        self.assertEquals('title', self._driver.GetTitle())

    def testFindElement(self):
        self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>a</div><div>b</div>";')
        self.assertTrue(
            isinstance(self._driver.FindElement('tag name', 'div'), WebElement))

    def testFindElements(self):
        self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>a</div><div>b</div>";')
        result = self._driver.FindElements('tag name', 'div')
        self.assertTrue(isinstance(result, list))
        self.assertEquals(2, len(result))
        for item in result:
            self.assertTrue(isinstance(item, WebElement))

    def testFindChildElement(self):
        self._driver.ExecuteScript(
            'document.body.innerHTML = "<div><br><br></div><div><a></a></div>";')
        element = self._driver.FindElement('tag name', 'div')
        self.assertTrue(
            isinstance(element.FindElement('tag name', 'br'), WebElement))

    def testFindChildElements(self):
        self._driver.ExecuteScript(
            'document.body.innerHTML = "<div><br><br></div><div><br></div>";')
        element = self._driver.FindElement('tag name', 'div')
        result = element.FindElements('tag name', 'br')
        self.assertTrue(isinstance(result, list))
        self.assertEquals(2, len(result))
        for item in result:
            self.assertTrue(isinstance(item, WebElement))

    def testHoverOverElement(self):
        # The mouseover listener appends a <br>; its presence proves the hover
        # event actually fired.
        div = self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.addEventListener("mouseover", function() {'
            '  document.body.appendChild(document.createElement("br"));'
            '});'
            'return div;')
        div.HoverOver();
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

    def testClickElement(self):
        div = self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.addEventListener("click", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new<br>";'
            '});'
            'return div;')
        div.Click()
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

    def testClearElement(self):
        text = self._driver.ExecuteScript(
            'document.body.innerHTML = \'<input type="text" value="abc">\';'
            'var input = document.getElementsByTagName("input")[0];'
            'input.addEventListener("change", function() {'
            '  document.body.appendChild(document.createElement("br"));'
            '});'
            'return input;')
        text.Clear();
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

    def testSendKeysToElement(self):
        text = self._driver.ExecuteScript(
            'document.body.innerHTML = \'<input type="text">\';'
            'var input = document.getElementsByTagName("input")[0];'
            'input.addEventListener("change", function() {'
            '  document.body.appendChild(document.createElement("br"));'
            '});'
            'return input;')
        text.SendKeys('0123456789+-*/ Hi');
        text.SendKeys(', there!');
        value = self._driver.ExecuteScript('return arguments[0].value;', text)
        self.assertEquals('0123456789+-*/ Hi, there!', value)

    def testGetCurrentUrl(self):
        self.assertTrue('data:' in self._driver.GetCurrentUrl())

    def testGoBackAndGoForward(self):
        self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
        self._driver.GoBack()
        self._driver.GoForward()

    def testRefresh(self):
        self._driver.Load(self.GetHttpUrlForFile('/chromedriver/empty.html'))
        self._driver.Refresh()

    def testMouseMoveTo(self):
        div = self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.style["width"] = "100px";'
            'div.style["height"] = "100px";'
            'div.addEventListener("mouseover", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new<br>";'
            '});'
            'return div;')
        self._driver.MouseMoveTo(div, 10, 10)
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

    def testMouseClick(self):
        div = self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.style["width"] = "100px";'
            'div.style["height"] = "100px";'
            'div.addEventListener("click", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new<br>";'
            '});'
            'return div;')
        self._driver.MouseMoveTo(div)
        self._driver.MouseClick()
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))

    def testMouseButtonDownAndUp(self):
        # mousedown inserts <br>, mouseup replaces it with <a> — asserting on
        # both distinguishes the two half-events.
        self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.style["width"] = "100px";'
            'div.style["height"] = "100px";'
            'div.addEventListener("mousedown", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new1<br>";'
            '});'
            'div.addEventListener("mouseup", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new2<a></a>";'
            '});')
        self._driver.MouseMoveTo(None, 50, 50);
        self._driver.MouseButtonDown();
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
        self._driver.MouseButtonUp();
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'a')))

    def testMouseDoubleClick(self):
        div = self._driver.ExecuteScript(
            'document.body.innerHTML = "<div>old</div>";'
            'var div = document.getElementsByTagName("div")[0];'
            'div.style["width"] = "100px";'
            'div.style["height"] = "100px";'
            'div.addEventListener("dblclick", function() {'
            '  var div = document.getElementsByTagName("div")[0];'
            '  div.innerHTML="new<br>";'
            '});'
            'return div;')
        self._driver.MouseMoveTo(div, 1, 1);
        self._driver.MouseDoubleClick();
        self.assertEquals(1, len(self._driver.FindElements('tag name', 'br')))
if __name__ == '__main__':
  # Command-line entry point: locate the chromedriver library (and optionally
  # a chrome binary), filter the test suite, run it, and exit nonzero on any
  # failure or error.
  parser = optparse.OptionParser()
  parser.add_option(
      '', '--chromedriver', type='string', default=None,
      help='Path to a build of the chromedriver library(REQUIRED!)')
  parser.add_option(
      '', '--chrome', type='string', default=None,
      help='Path to a build of the chrome binary')
  parser.add_option(
      '', '--filter', type='string', default='*',
      help='Filter for specifying what tests to run, "*" will run all. E.g., ' +
           '*testStartStop')
  options, args = parser.parse_args()
  if options.chromedriver is None or not os.path.exists(options.chromedriver):
    # Fixed: the two concatenated message parts were missing a separating
    # space ("invalid.Please run ...").
    parser.error('chromedriver is required or the given path is invalid. ' +
                 'Please run "%s --help" for help' % __file__)
  # Note: 'global' statements are no-ops at module scope; plain assignment
  # already defines these module globals.
  _CHROMEDRIVER_LIB = os.path.abspath(options.chromedriver)
  if options.chrome is not None:
    _CHROME_BINARY = os.path.abspath(options.chrome)
  else:
    _CHROME_BINARY = None
  all_tests_suite = unittest.defaultTestLoader.loadTestsFromModule(
      sys.modules[__name__])
  tests = unittest_util.FilterTestSuite(all_tests_suite, options.filter)
  ChromeDriverTest.GlobalSetUp()
  result = unittest.TextTestRunner().run(tests)
  ChromeDriverTest.GlobalTearDown()
  sys.exit(len(result.failures) + len(result.errors))
|
|
# -*- coding:utf-8 -*-
"""
Sample script of Sequence to Sequence model for ChatBot.
This is a train script for seq2seq.py
You can also use Batch and GPU.
args: --gpu (flg of GPU, if you want to use GPU, please write "--gpu 1")
"""
import os
os.environ["CHAINER_TYPE_CHECK"] = "0"
import pickle
import argparse
import nltk
import numpy as np
import chainer
from chainer import cuda, optimizers, serializers
from util import ConvCorpus, JaConvCorpus
from seq2seq import Seq2Seq
from wer import wer
# parse command line args
parser = argparse.ArgumentParser()
parser.add_argument('--data', '-d', default='data/pair_corpus.txt', type=str, help='Data file directory')
parser.add_argument('--gpu', '-g', default='-1', type=int, help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=1000, type=int, help='number of epochs to learn')
parser.add_argument('--feature_num', '-f', default=1024, type=int, help='dimension of feature layer')
# NOTE: '-hi' is a two-character short option to avoid clashing with -h/--help.
parser.add_argument('--hidden_num', '-hi', default=1024, type=int, help='dimension of hidden layer')
parser.add_argument('--batchsize', '-b', default=100, type=int, help='learning minibatch size')
parser.add_argument('--testsize', '-t', default=1000, type=int, help='number of text for testing a model')
parser.add_argument('--lang', '-l', default='en', type=str, help='the choice of a language (Japanese "ja" or English "en" )')
args = parser.parse_args()
# GPU settings
gpu_device = args.gpu
if args.gpu >= 0:
    cuda.check_cuda_available()
    cuda.get_device(gpu_device).use()
# xp is the array module used below: cupy when a GPU is selected, numpy otherwise.
xp = cuda.cupy if args.gpu >= 0 else np
# Shorthand aliases for the parsed hyper-parameters used throughout main().
data_file = args.data
n_epoch = args.epoch
feature_num = args.feature_num
hidden_num = args.hidden_num
batchsize = args.batchsize
testsize = args.testsize
def main():
    """Train the seq2seq chat-bot model, tracking loss, BLEU and WER.

    Uses the module-level CLI arguments/hyper-parameters; saves the corpus
    cache, periodic model/optimizer snapshots and the metric histories under
    ./data/.
    """
    ###########################
    #### create dictionary ####
    ###########################
    if os.path.exists('./data/corpus/dictionary.dict'):
        # A cached corpus exists: reload it instead of re-parsing the data file.
        if args.lang == 'ja':
            corpus = JaConvCorpus(file_path=None, batch_size=batchsize)
        else:
            corpus = ConvCorpus(file_path=None, batch_size=batchsize)
        corpus.load(load_dir='./data/corpus/')
    else:
        if args.lang == 'ja':
            corpus = JaConvCorpus(file_path=data_file, batch_size=batchsize)
        else:
            corpus = ConvCorpus(file_path=data_file, batch_size=batchsize)
        corpus.save(save_dir='./data/corpus/')
    print('Vocabulary Size (number of words) :', len(corpus.dic.token2id))
    ######################
    #### create model ####
    ######################
    model = Seq2Seq(len(corpus.dic.token2id), feature_num=feature_num,
                    hidden_num=hidden_num, batch_size=batchsize, gpu_flg=args.gpu)
    if args.gpu >= 0:
        model.to_gpu()
    optimizer = optimizers.Adam(alpha=0.001)
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.GradientClipping(5))
    # optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
    ##########################
    #### create ID corpus ####
    ##########################
    input_mat = []
    output_mat = []
    max_input_ren = max_output_ren = 0
    for input_text, output_text in zip(corpus.posts, corpus.cmnts):
        # convert to list
        input_text.reverse()  # encode words in a reverse order
        input_text.insert(0, corpus.dic.token2id["<eos>"])
        output_text.append(corpus.dic.token2id["<eos>"])
        # update max sentence length
        max_input_ren = max(max_input_ren, len(input_text))
        max_output_ren = max(max_output_ren, len(output_text))
        input_mat.append(input_text)
        output_mat.append(output_text)
    # padding: inputs are left-padded (they are reversed), outputs right-padded
    for li in input_mat:
        insert_num = max_input_ren - len(li)
        for _ in range(insert_num):
            li.insert(0, corpus.dic.token2id['<pad>'])
    for li in output_mat:
        insert_num = max_output_ren - len(li)
        for _ in range(insert_num):
            li.append(corpus.dic.token2id['<pad>'])
    # create batch matrix (time-major: rows are timesteps, columns sentences)
    input_mat = np.array(input_mat, dtype=np.int32).T
    output_mat = np.array(output_mat, dtype=np.int32).T
    # separate corpus into Train and Test
    perm = np.random.permutation(len(corpus.posts))
    test_input_mat = input_mat[:, perm[0:0 + testsize]]
    test_output_mat = output_mat[:, perm[0:0 + testsize]]
    train_input_mat = input_mat[:, perm[testsize:]]
    train_output_mat = output_mat[:, perm[testsize:]]
    list_of_references = []
    for text_ndarray in test_output_mat.T:
        reference = text_ndarray.tolist()
        # Fixed: 'is not -1' compared identity with an int literal, which is
        # implementation dependent; value comparison is what is meant.
        references = [[w_id for w_id in reference if w_id != -1]]
        list_of_references.append(references)
    #############################
    #### train seq2seq model ####
    #############################
    accum_loss = 0
    train_loss_data = []
    test_loss_data = []
    bleu_score_data = []
    wer_score_data = []
    for num, epoch in enumerate(range(n_epoch)):
        total_loss = test_loss = 0
        batch_num = 0
        perm = np.random.permutation(len(corpus.posts) - testsize)
        # for training
        for i in range(0, len(corpus.posts) - testsize, batchsize):
            # select batch data
            input_batch = train_input_mat[:, perm[i:i + batchsize]]
            output_batch = train_output_mat[:, perm[i:i + batchsize]]
            # Encode a sentence
            model.initialize()  # initialize cell
            model.encode(input_batch, train=True)  # encode (output: hidden Variable)
            # Decode from encoded context
            end_batch = xp.array([corpus.dic.token2id["<start>"] for _ in range(batchsize)])
            first_words = output_batch[0]
            loss, predict_mat = model.decode(end_batch, first_words, train=True)
            next_ids = first_words
            accum_loss += loss
            for w_ids in output_batch[1:]:
                loss, predict_mat = model.decode(next_ids, w_ids, train=True)
                next_ids = w_ids
                accum_loss += loss
            # learn model
            model.cleargrads()  # initialize all grad to zero
            accum_loss.backward()  # back propagation
            optimizer.update()
            total_loss += float(accum_loss.data)
            batch_num += 1
            print('Epoch: ', num, 'Batch_num', batch_num, 'batch loss: {:.2f}'.format(float(accum_loss.data)))
            accum_loss = 0
        # for testing
        list_of_hypotheses = []
        for i in range(0, testsize, batchsize):
            # select test batch data
            input_batch = test_input_mat[:, i:i + batchsize]
            output_batch = test_output_mat[:, i:i + batchsize]
            # Encode a sentence
            model.initialize()  # initialize cell
            # NOTE(review): train=True during evaluation looks suspicious
            # (dropout etc. would stay on) -- confirm against seq2seq.py.
            model.encode(input_batch, train=True)  # encode (output: hidden Variable)
            # Decode from encoded context
            end_batch = xp.array([corpus.dic.token2id["<start>"] for _ in range(batchsize)])
            first_words = output_batch[0]
            loss, predict_mat = model.decode(end_batch, first_words, train=True)
            next_ids = xp.argmax(predict_mat.data, axis=1)
            test_loss += loss
            if args.gpu >= 0:
                hypotheses = [cuda.to_cpu(next_ids)]
            else:
                hypotheses = [next_ids]
            for w_ids in output_batch[1:]:
                loss, predict_mat = model.decode(next_ids, w_ids, train=True)
                next_ids = xp.argmax(predict_mat.data, axis=1)
                test_loss += loss
                if args.gpu >= 0:
                    hypotheses.append(cuda.to_cpu(next_ids))
                else:
                    hypotheses.append(next_ids)
            # collect hypotheses for calculating BLEU score
            hypotheses = np.array(hypotheses).T
            for hypothesis in hypotheses:
                text_list = hypothesis.tolist()
                # Fixed: '!=' instead of identity comparison 'is not -1'.
                list_of_hypotheses.append([w_id for w_id in text_list if w_id != -1])
        # calculate BLEU score from test (develop) data
        bleu_score = nltk.translate.bleu_score.corpus_bleu(list_of_references, list_of_hypotheses,
                                                           weights=(0.25, 0.25, 0.25, 0.25))
        bleu_score_data.append(bleu_score)
        print('Epoch: ', num, 'BLEU SCORE: ', bleu_score)
        # calculate WER score from test (develop) data
        wer_score = 0
        for index, references in enumerate(list_of_references):
            wer_score += wer(references[0], list_of_hypotheses[index])
        wer_score /= len(list_of_references)
        wer_score_data.append(wer_score)
        print('Epoch: ', num, 'WER SCORE: ', wer_score)
        # save model and optimizer every 10 epochs
        if (epoch + 1) % 10 == 0:
            print('-----', epoch + 1, ' times -----')
            print('save the model and optimizer')
            serializers.save_hdf5('data/' + str(epoch) + '.model', model)
            serializers.save_hdf5('data/' + str(epoch) + '.state', optimizer)
        # display the on-going status
        print('Epoch: ', num,
              'Train loss: {:.2f}'.format(total_loss),
              'Test loss: {:.2f}'.format(float(test_loss.data)))
        train_loss_data.append(float(total_loss / batch_num))
        test_loss_data.append(float(test_loss.data))
        # evaluate a test loss: stop early if the test loss rose in more than
        # 8 of the last 10 epochs (likely over-fitting)
        check_loss = test_loss_data[-10:]  # check out the last 10 loss data
        end_flg = [j for j in range(len(check_loss) - 1) if check_loss[j] < check_loss[j + 1]]
        if len(end_flg) > 8:
            print('Probably it is over-fitting. So stop to learn...')
            break
    # save loss data
    with open('./data/loss_train_data.pkl', 'wb') as f:
        pickle.dump(train_loss_data, f)
    with open('./data/loss_test_data.pkl', 'wb') as f:
        pickle.dump(test_loss_data, f)
    with open('./data/bleu_score_data.pkl', 'wb') as f:
        pickle.dump(bleu_score_data, f)
    with open('./data/wer_score_data.pkl', 'wb') as f:
        pickle.dump(wer_score_data, f)
# Script entry point.
if __name__ == "__main__":
    main()
|
|
#!/usr/bin/python
import csv
import xml.etree.ElementTree as ET
import os
import sys
import shutil
# #########################################################
# Represents all the metadata classes for import
# #########################################################
class RecordInfo:
    """Builds a MODS <recordInfo> element (text set only when a value exists)."""

    def __init__(self):
        # Optional text content.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <recordInfo> child to *parent_element* and return it."""
        node = ET.SubElement(parent_element, 'recordInfo')
        if self.value:
            node.text = self.value.strip()
        return node
class RecordContentSource:
    """Builds a MODS <recordContentSource> element."""

    def __init__(self):
        # Optional text content.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <recordContentSource> child and return it."""
        node = ET.SubElement(parent_element, 'recordContentSource')
        if self.value:
            node.text = self.value.strip()
        return node
class LanguageOfCataloging:
    """Builds a MODS <languageOfCataloging> element (also used as a container)."""

    def __init__(self):
        # Optional text content.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <languageOfCataloging> child and return it."""
        node = ET.SubElement(parent_element, 'languageOfCataloging')
        if self.value:
            node.text = self.value.strip()
        return node
class PhysicalDescription:
    """Builds a MODS <physicalDescription> container with an optional type."""

    def __init__(self):
        # Optional 'type' attribute.
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <physicalDescription> child and return it."""
        node = ET.SubElement(parent_element, 'physicalDescription')
        if self.type:
            node.set('type', self.type.strip())
        return node
class TypeOfResource:
    """Builds a MODS <typeOfResource> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <typeOfResource> child and return it."""
        node = ET.SubElement(parent_element, 'typeOfResource')
        node.text = self.value.strip()
        return node
class Form:
    """Builds a MODS <form> element with an optional authority attribute."""

    def __init__(self):
        # Optional controlled-vocabulary source.
        self.authority = ''
        # The form term itself.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <form> child and return it."""
        node = ET.SubElement(parent_element, 'form')
        if self.authority:
            node.set('authority', self.authority.strip())
        node.text = self.value.strip()
        return node
class InternetMediaType:
    """Builds a MODS <internetMediaType> element (e.g. image/tiff)."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append an <internetMediaType> child and return it."""
        node = ET.SubElement(parent_element, 'internetMediaType')
        node.text = self.value.strip()
        return node
class DigitalOrigin:
    """Builds a MODS <digitalOrigin> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <digitalOrigin> child and return it."""
        node = ET.SubElement(parent_element, 'digitalOrigin')
        node.text = self.value.strip()
        return node
class Extent:
    """Builds a MODS <extent> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append an <extent> child and return it."""
        node = ET.SubElement(parent_element, 'extent')
        node.text = self.value.strip()
        return node
class Abstract:
    """Builds a MODS <abstract> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append an <abstract> child and return it."""
        node = ET.SubElement(parent_element, 'abstract')
        node.text = self.value.strip()
        return node
class Subject:
    """Builds a MODS <subject> container with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute.
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <subject> child and return it."""
        node = ET.SubElement(parent_element, 'subject')
        if self.type:
            node.set('type', self.type.strip())
        return node
class Topic:
    """Builds a MODS <topic> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <topic> child and return it."""
        node = ET.SubElement(parent_element, 'topic')
        node.text = self.value.strip()
        return node
class Geographic:
    """Builds a MODS <geographic> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <geographic> child and return it."""
        node = ET.SubElement(parent_element, 'geographic')
        node.text = self.value.strip()
        return node
class Genre:
    """Builds a MODS <genre> element with an optional authority attribute."""

    def __init__(self):
        # Optional controlled-vocabulary source.
        self.authority = ''
        # The genre term itself.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <genre> child and return it."""
        node = ET.SubElement(parent_element, 'genre')
        if self.authority:
            node.set('authority', self.authority.strip())
        node.text = self.value.strip()
        return node
class Note:
    """Builds a MODS <note> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <note> child and return it."""
        node = ET.SubElement(parent_element, 'note')
        node.text = self.value.strip()
        return node
class Role:
    """Builds a MODS <role> container with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute.
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <role> child and return it."""
        node = ET.SubElement(parent_element, 'role')
        if self.type:
            node.set('type', self.type.strip())
        return node
class Language:
    """Builds a MODS <language> container with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute.
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <language> child and return it."""
        node = ET.SubElement(parent_element, 'language')
        if self.type:
            node.set('type', self.type.strip())
        return node
class LanguageTerm:
    """Builds a MODS <languageTerm> element with optional type/authority."""

    def __init__(self):
        # Optional 'type' attribute (e.g. 'text', 'code').
        self.type = ''
        # The language term itself.
        self.value = ''
        # Optional controlled-vocabulary source (e.g. 'iso639-2b').
        self.authority = ''

    def to_mods_element(self, parent_element):
        """Append a <languageTerm> child and return it."""
        node = ET.SubElement(parent_element, 'languageTerm')
        if self.type:
            node.set('type', self.type.strip())
        if self.authority:
            node.set('authority', self.authority.strip())
        node.text = self.value.strip()
        return node
class RoleTerm:
    """Builds a MODS <roleTerm> element with optional type/authority."""

    def __init__(self):
        # The role term itself.
        self.value = ''
        # Optional controlled-vocabulary source (e.g. 'marcrelator').
        self.authority = ''
        # Optional 'type' attribute (e.g. 'text').
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <roleTerm> child and return it."""
        node = ET.SubElement(parent_element, 'roleTerm')
        if self.type:
            node.set('type', self.type.strip())
        if self.authority:
            node.set('authority', self.authority.strip())
        node.text = self.value.strip()
        return node
class Name:
    """Builds a MODS <name> container with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute (e.g. 'personal', 'corporate').
        self.type = ''

    def to_mods_element(self, parent_element):
        """Append a <name> child and return it."""
        node = ET.SubElement(parent_element, 'name')
        if self.type:
            node.set('type', self.type.strip())
        return node
class NamePart:
    """Builds a MODS <namePart> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <namePart> child and return it."""
        node = ET.SubElement(parent_element, 'namePart')
        node.text = self.value.strip()
        return node
class DateCreated:
    """Builds a MODS <dateCreated> element with optional date attributes."""

    def __init__(self):
        # The date string itself.
        self.value = ''
        # Optional 'encoding' attribute (e.g. 'w3cdtf', 'marc').
        self.encoding = ''
        # Optional 'qualifier' attribute (e.g. 'approximate').
        self.qualifier = ''
        # Optional 'keyDate' attribute ('yes' marks the primary date).
        self.keyDate = ''
        # Optional 'point' attribute ('start' / 'end' for ranges).
        self.point = ''

    def to_mods_element(self, parent_element):
        """Append a <dateCreated> child and return it."""
        node = ET.SubElement(parent_element, 'dateCreated')
        if self.encoding:
            node.set('encoding', self.encoding.strip())
        if self.qualifier:
            node.set('qualifier', self.qualifier.strip())
        if self.keyDate:
            node.set('keyDate', self.keyDate.strip())
        if self.point:
            node.set('point', self.point.strip())
        node.text = self.value.strip()
        return node
class DateIssued:
    """Builds a MODS <dateIssued> element with optional date attributes."""

    def __init__(self):
        # The date string itself.
        self.value = ''
        # Optional 'encoding' attribute (e.g. 'w3cdtf').
        self.encoding = ''
        # Optional 'qualifier' attribute.
        self.qualifier = ''
        # Optional 'keyDate' attribute.
        self.keyDate = ''

    def to_mods_element(self, parent_element):
        """Append a <dateIssued> child and return it."""
        node = ET.SubElement(parent_element, 'dateIssued')
        if self.encoding:
            node.set('encoding', self.encoding.strip())
        if self.qualifier:
            node.set('qualifier', self.qualifier.strip())
        if self.keyDate:
            node.set('keyDate', self.keyDate.strip())
        node.text = self.value.strip()
        return node
class OriginInfo:
    """Builds an empty MODS <originInfo> container element."""

    def __init__(self):
        # Unused; kept for interface parity with the other metadata classes.
        self.value = ''

    @staticmethod
    def to_mods_element(parent_element):
        """Append an <originInfo> child to *parent_element* and return it."""
        return ET.SubElement(parent_element, 'originInfo')
class Place:
    """Builds an empty MODS <place> container element."""

    def __init__(self):
        # Unused; kept for interface parity with the other metadata classes.
        self.value = ''

    @staticmethod
    def to_mods_element(parent_element):
        """Append a <place> child to *parent_element* and return it."""
        return ET.SubElement(parent_element, 'place')
class PlaceTerm:
    """Builds a MODS <placeTerm> element with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute (e.g. 'text').
        self.type = ''
        # The place term itself.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <placeTerm> child and return it."""
        node = ET.SubElement(parent_element, 'placeTerm')
        if self.type:
            node.set('type', self.type.strip())
        node.text = self.value.strip()
        return node
class RelatedItem:
    """Builds a MODS <relatedItem> container with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute (e.g. 'host').
        self.type = ''
        # Unused; kept for interface parity with the other metadata classes.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <relatedItem> child and return it."""
        node = ET.SubElement(parent_element, 'relatedItem')
        if self.type:
            node.set('type', self.type.strip())
        return node
class TitleInfo:
    """Builds an empty MODS <titleInfo> container element."""

    def __init__(self):
        # Unused; kept for interface parity with the other metadata classes.
        self.value = ''

    @staticmethod
    def to_mods_element(parent_element):
        """Append a <titleInfo> child to *parent_element* and return it."""
        return ET.SubElement(parent_element, 'titleInfo')
class Title:
    """Builds a MODS <title> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <title> child and return it."""
        node = ET.SubElement(parent_element, 'title')
        node.text = self.value.strip()
        return node
class Location:
    """Builds an empty MODS <location> container element."""

    def __init__(self):
        # Unused; kept for interface parity with the other metadata classes.
        self.value = ''

    @staticmethod
    def to_mods_element(parent_element):
        """Append a <location> child to *parent_element* and return it."""
        return ET.SubElement(parent_element, 'location')
class PhysicalLocation:
    """Builds a MODS <physicalLocation> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <physicalLocation> child and return it."""
        node = ET.SubElement(parent_element, 'physicalLocation')
        node.text = self.value.strip()
        return node
class ShelfLocator:
    """Builds a MODS <shelfLocator> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append a <shelfLocator> child and return it."""
        node = ET.SubElement(parent_element, 'shelfLocator')
        node.text = self.value.strip()
        return node
class AccessCondition:
    """Builds a MODS <accessCondition> element."""

    def __init__(self):
        # Text content (stripped on output).
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append an <accessCondition> child and return it."""
        node = ET.SubElement(parent_element, 'accessCondition')
        node.text = self.value.strip()
        return node
class Identifier:
    """Builds a MODS <identifier> element with an optional type attribute."""

    def __init__(self):
        # Optional 'type' attribute (e.g. 'job').
        self.type = ''
        # The identifier value itself.
        self.value = ''

    def to_mods_element(self, parent_element):
        """Append an <identifier> child and return it."""
        node = ET.SubElement(parent_element, 'identifier')
        if self.type:
            node.set('type', self.type.strip())
        node.text = self.value.strip()
        return node
#
# Build the xml file using the above classes
#
def build_xml(row):
    """Build a MODS XML tree for one CSV *row* and return the root element.

    NOTE(review): the column indices below assume the fixed 33-column export
    schema this script's CSV input uses -- confirm against the spreadsheet
    before reordering columns.
    """
    print('build xml')
    root = ET.Element('mods', {"xmlns:xlink": "http://www.w3.org/1999/xlink",
                               "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
                               "xmlns": "http://www.loc.gov/mods/v3",
                               "version": "3.5",
                               "xsi:schemaLocation": "http://www.loc.gov/mods/v3 http://www.loc.gov/standards/mods/v3/mods-3-5.xsd"})
    location_element = None
    # job number identifier
    if row[0]:
        job_number = Identifier()
        job_number.type = 'job'
        job_number.value = row[0]
        job_number.to_mods_element(root)
    # physical location information
    if row[1]:
        location = Location()
        location_element = location.to_mods_element(root)
        physical_location = PhysicalLocation()
        physical_location.value = row[1]
        physical_location.to_mods_element(location_element)
    # shelf location information
    if row[2]:
        if location_element is None:
            # Fixed: row[1] was empty so no <location> container existed yet;
            # create one instead of crashing on SubElement(None, ...).
            location_element = Location().to_mods_element(root)
        shelf_locator = ShelfLocator()
        shelf_locator.value = row[2]
        shelf_locator.to_mods_element(location_element)
    # title information
    if row[3]:
        title_info = TitleInfo()
        title_info_element = title_info.to_mods_element(root)
        title = Title()
        title.value = row[3]
        title.to_mods_element(title_info_element)
    # related item information
    if row[4]:
        related_item = RelatedItem()
        related_item.type = 'host'
        related_item_element = related_item.to_mods_element(root)
        title_info2 = TitleInfo()
        title_info_element2 = title_info2.to_mods_element(related_item_element)
        title2 = Title()
        title2.value = row[4]
        title2.to_mods_element(title_info_element2)
    # date created information
    if row[5]:
        origin_info_element = OriginInfo().to_mods_element(root)
        date_created = DateCreated()
        date_created.value = row[5]
        date_created.encoding = "w3cdtf"
        date_created.keyDate = "yes"
        date_created.to_mods_element(origin_info_element)
        # add date issued so it shows up in dublin core
        date_issued = DateIssued()
        date_issued.value = row[5]
        date_issued.encoding = "w3cdtf"
        date_issued.keyDate = "yes"
        date_issued.to_mods_element(origin_info_element)
    # date approximate
    if row[6]:
        date_approx_info_element = OriginInfo().to_mods_element(root)
        date_approximate = DateCreated()
        date_approximate.value = row[6]
        date_approximate.encoding = "w3cdtf"
        date_approximate.qualifier = "approximate"
        date_approximate.to_mods_element(date_approx_info_element)
    # date inferred
    if row[7]:
        date_inferred_info_element = OriginInfo().to_mods_element(root)
        date_inferred = DateCreated()
        date_inferred.value = row[7]
        date_inferred.encoding = "w3cdtf"
        date_inferred.qualifier = "inferred"
        date_inferred.to_mods_element(date_inferred_info_element)
    # date questionable
    if row[8]:
        date_questionable_info_element = OriginInfo().to_mods_element(root)
        date_questionable = DateCreated()
        date_questionable.value = row[8]
        date_questionable.encoding = "w3cdtf"
        date_questionable.qualifier = "questionable"
        date_questionable.to_mods_element(date_questionable_info_element)
    # begin date
    if row[9]:
        begin_date_info_element = OriginInfo().to_mods_element(root)
        date_begin = DateCreated()
        date_begin.value = row[9]
        date_begin.encoding = "marc"
        date_begin.point = "start"
        date_begin.to_mods_element(begin_date_info_element)
    # end date
    if row[10]:
        end_date_info_element = OriginInfo().to_mods_element(root)
        end_begin = DateCreated()
        end_begin.value = row[10]
        end_begin.encoding = "marc"
        end_begin.point = "end"
        end_begin.to_mods_element(end_date_info_element)
    # personal name element (Letter creator)
    if row[11]:
        name = Name()
        name.type = "personal"
        name1_element = name.to_mods_element(root)
        name_part = NamePart()
        name_part.value = row[11]
        name_part.to_mods_element(name1_element)
        role_element = Role().to_mods_element(name1_element)
        # role of the person
        if row[12]:
            role_term = RoleTerm()
            role_term.authority = "marcrelator"
            role_term.type = "text"
            role_term.value = row[12]
            role_term.to_mods_element(role_element)
    # abstract information
    if row[13]:
        abstract = Abstract()
        abstract.value = row[13]
        abstract.to_mods_element(root)
    # geo location information
    if row[14]:
        geo_location_element = OriginInfo().to_mods_element(root)
        place_element = Place().to_mods_element(geo_location_element)
        place_term = PlaceTerm()
        place_term.type = "text"
        place_term.value = row[14]
        place_term.to_mods_element(place_element)
    # language information
    if row[15]:
        language_element = Language().to_mods_element(root)
        language_term = LanguageTerm()
        language_term.type = "text"
        language_term.value = row[15]
        language_term.to_mods_element(language_element)
    # related name (Letter recipient)
    if row[16]:
        related_name = Name().to_mods_element(root)
        related_name_part = NamePart()
        related_name_part.value = row[16]
        related_name_part.to_mods_element(related_name)
        role_element2 = Role().to_mods_element(related_name)
        if row[17]:
            role_term2 = RoleTerm()
            role_term2.authority = "marcrelator"
            role_term2.type = "text"
            role_term2.value = row[17]
            role_term2.to_mods_element(role_element2)
    # genre information
    if row[18]:
        genre = Genre()
        genre.authority = "gmgpc"
        genre.value = row[18]
        genre.to_mods_element(root)
    # subject - topic information
    if row[19]:
        topic_subject_element = Subject().to_mods_element(root)
        topic = Topic()
        topic.value = row[19]
        topic.to_mods_element(topic_subject_element)
    # subject - name information
    if row[20]:
        topic_subject_root_element = Subject().to_mods_element(root)
        name_subject = Name()
        name_subject.type = "personal"
        name_subject_element = name_subject.to_mods_element(topic_subject_root_element)
        name_subject_part = NamePart()
        name_subject_part.value = row[20]
        name_subject_part.to_mods_element(name_subject_element)
    # subject - corporation name information
    if row[21]:
        corp_subject_root_element = Subject().to_mods_element(root)
        corp_subject = Name()
        corp_subject.type = "corporate"
        corp_subject_element = corp_subject.to_mods_element(corp_subject_root_element)
        corp_subject_part = NamePart()
        corp_subject_part.value = row[21]
        corp_subject_part.to_mods_element(corp_subject_element)
    # subject geographic information
    if row[22]:
        geo_subject_root_element = Subject().to_mods_element(root)
        geo = Geographic()
        geo.value = row[22]
        geo.to_mods_element(geo_subject_root_element)
    # physical description/form
    if row[23] or row[24] or row[25]:
        physical_description_element = PhysicalDescription().to_mods_element(root)
        if row[23]:
            form = Form()
            form.authority = "marcform"
            form.value = row[23]
            form.to_mods_element(physical_description_element)
        # media type e.g. image/tiff
        if row[24]:
            internet_media_type = InternetMediaType()
            internet_media_type.value = row[24]
            internet_media_type.to_mods_element(physical_description_element)
        if row[25]:
            # NOTE(review): int() raises ValueError on a non-numeric page
            # count -- the CSV is assumed to be clean here.
            page_str = " pages"
            if int(row[25]) <= 1:
                page_str = " page"
            # extent
            extent = Extent()
            extent.value = row[25] + page_str
            extent.to_mods_element(physical_description_element)
    # note
    if row[28]:
        note = Note()
        note.value = row[28]
        note.to_mods_element(root)
    # type of resource
    if row[29]:
        type_of_resource = TypeOfResource()
        type_of_resource.value = row[29]
        type_of_resource.to_mods_element(root)
    # source and language of cataloging
    if row[30] or row[31]:
        record_info_element = RecordInfo().to_mods_element(root)
        # source information
        if row[30]:
            record_source = RecordContentSource()
            record_source.value = row[30]
            record_source.to_mods_element(record_info_element)
        if row[31]:
            lang_of_cat_element = LanguageOfCataloging().to_mods_element(record_info_element)
            record_language = LanguageTerm()
            record_language.value = row[31]
            record_language.type = "code"
            record_language.authority = "iso639-2b"
            record_language.to_mods_element(lang_of_cat_element)
    # rights access
    if row[32]:
        access_condition = AccessCondition()
        access_condition.value = row[32]
        access_condition.to_mods_element(root)
    # ET.dump(root)
    return root
#
# Create an xml with the data from the xml file
#
def create_xml_file(row, counter, file_name=None):
    """Serialize the MODS tree for *row* to *file_name*.

    Fixed: the function was declared with two parameters but every caller in
    this script passes three (row, counter, file_name), which raised
    TypeError. The middle *counter* argument is accepted (and ignored in the
    output); the legacy two-argument form create_xml_file(row, file_name)
    still works.
    """
    if file_name is None:
        # Two-argument call: the second positional argument is the file name.
        file_name = counter
    print("XML file name will be = " + file_name)
    root_node = build_xml(row)
    tree = ET.ElementTree(root_node)
    if file_name:
        tree.write(file_name)
#
# Creates a folder for every page based on the file name - this is a
# requirement of islandora
#
def create_page_structure(pages, base_dir, base_filename, book_dir):
    """Create one "page-N" directory per page and copy its TIFF into it.

    Directory names are zero-padded to the number of digits in the page
    count; source files are expected as "<base_filename>_<page>.tif" (no
    padding). Exits the process if a source file is missing.

    NOTE(review): iterates range(1, pages), i.e. pages-1 pages -- callers
    pass an incremented page count to compensate; confirm before changing.
    """
    # Zero-pad to the width of the page count. (Fixed: the previous
    # open-interval checks like "9 < pages < 99" dropped exactly 99, 999 and
    # 9999 into the wrong bucket.)
    if pages < 10:
        # no leading zeros
        page_format = "{0:01d}"
    elif pages < 100:
        # one leading zero
        page_format = "{0:02d}"
    elif pages < 1000:
        # two leading zeros
        page_format = "{0:03d}"
    else:
        # three leading zeros
        page_format = "{0:04d}"
    for page in range(1, pages):
        page_name = page_format.format(page)
        filename = base_filename + "_" + str(page) + ".tif"
        source_file = os.path.join(base_dir, filename)
        if not os.path.isfile(source_file):
            print("Could not find file " + source_file)
            sys.exit()
        else:
            page_dir = os.path.join(book_dir, "page-" + page_name)
            dest_file = os.path.join(page_dir, "OBJ.tif")
            print("source = " + source_file + " dest = " + dest_file)
            print("creating directory " + page_dir)
            os.mkdir(page_dir)
            shutil.copy(source_file, dest_file)
#
# Create the file structure for a book in islandora
#
def create_file_structure(a_counter, row, base_dir, output_dir):
    """Create the islandora book directory for one CSV row.

    Makes <output_dir>/<a_counter>/, writes MODS.xml there, then copies each
    page TIFF into its own page-N subdirectory.
    """
    # base file name of the page TIFFs (column 27)
    base_filename = row[27]
    # create_page_structure iterates range(1, pages), so add one so the last
    # page (column 25 holds the real page count) is included
    pages = int(row[25]) + 1
    print("filename = " + base_filename)
    book_dir = os.path.join(output_dir, str(a_counter))
    print("creating directory " + book_dir)
    os.mkdir(book_dir)
    my_xml_file = os.path.join(book_dir, "MODS.xml")
    # Fixed: create_xml_file takes (row, file_name); the old call passed a
    # spurious counter argument. (Also dropped the always-true
    # "assert isinstance(output_dir, object)".)
    create_xml_file(row, my_xml_file)
    create_page_structure(pages, base_dir, base_filename, book_dir)
#
# Use this to print out the fields of a csv file and allows programmer
# to see the output
#
def print_csv_info(a_file):
    """Print every field of every row of CSV file *a_file* for inspection.

    Iterates the actual columns of each row (instead of assuming a fixed
    33-column layout), so short rows no longer raise IndexError.
    """
    with open(a_file, 'r') as csvfile:
        file_reader = csv.reader(csvfile)
        my_counter = 1
        for row in file_reader:
            print("************* " + str(my_counter) + " *********************")
            my_counter += 1
            for x, field in enumerate(row):
                print("row " + str(x) + " = " + field)
            print("************* DONE - " + str(my_counter) + " *********************")
            print("")
# ########################################
# Main Program
# ########################################
# get the csv file input
csv_file = input("Please enter csv file name: ")
if not os.path.isfile(csv_file):
print("Could not find file " + csv_file)
sys.exit()
else:
print("found file ")
test = input("Test csv file (yes) to test: ")
if test.lower() == "yes":
print("testing csv file")
print_csv_info(csv_file)
test_xml = input("Test xml output (yes) to test: ")
if test_xml.lower() == "yes":
output_directory = input("Please enter xml output directory: ")
if not os.path.isdir(output_directory):
print("Directory " + output_directory + " does not exist or is not a directory")
sys.exit()
else:
print("Directory found " + output_directory)
# open the csv and start iterating through the rows
with open(csv_file, 'r') as the_csv_file:
fileReader = csv.reader(the_csv_file)
counter = 1
for a_row in fileReader:
if a_row[25]:
num_pages = int(a_row[25])
if num_pages > 0:
print("processing " + str(num_pages) + " pages")
xml_file = os.path.join(output_directory, "MODS_" + str(counter) + ".xml")
create_xml_file(a_row, counter, xml_file)
else:
print("Skipping row " + str(counter) + " pages found were " + a_row[25])
counter += 1
else:
# base directory of files to import
base_directory = input("Please enter directory of files to import: ")
if not os.path.isdir(base_directory):
print("Directory " + base_directory + " does not exist or is not a directory")
sys.exit()
else:
print("Directory found " + base_directory)
# output directory for processing
output_directory = input("Please enter output directory: ")
if not os.path.isdir(output_directory):
print("Directory " + output_directory + " does not exist or is not a directory")
sys.exit()
else:
print("Directory found " + output_directory)
# open the csv and start iterating through the rows
with open(csv_file, 'r') as the_csv_file:
fileReader = csv.reader(the_csv_file)
counter = 1
for a_row in fileReader:
if a_row[25]:
num_pages = int(a_row[25])
if num_pages > 0:
print("processing " + str(num_pages) + " pages")
create_file_structure(counter, a_row, base_directory, output_directory)
else:
print("Skipping row " + str(counter) + " pages found were " + a_row[25])
counter += 1
|
|
# cli commands
from twisted.python import log, usage
from twisted.internet import defer
from opennsa import constants as cnt, nsa, error
# Maps the CLI label-type names accepted in an STP spec ("#vlan=...") to the
# corresponding NSI label-type constants.
LABEL_MAP = {
    'vlan': cnt.ETHERNET_VLAN,
    'mpls': cnt.MPLS
}
def _createSTP(stp_arg):
    """
    Parse an STP argument string into an nsa.STP.

    Format: "network:port" optionally followed by "#labeltype=value"
    (see docs/cli.md). Raises usage.UsageError on any malformed input,
    including an unknown label type (previously this leaked a KeyError).
    """
    if not ':' in stp_arg:
        raise usage.UsageError('No ":" in stp, invalid format (see docs/cli.md)')
    if '#' in stp_arg:
        stp_desc, label_desc = stp_arg.split('#')
        network, port = stp_desc.rsplit(':', 1)
        if not '=' in label_desc:
            raise usage.UsageError('No "=" in stp label, invalid format (see docs/cli.md)')
        label_type, label_value = label_desc.split("=")
        try:
            label = nsa.Label(LABEL_MAP[label_type], label_value)
        except KeyError:
            # Fixes the FIXME: give a good error message for unknown label types.
            raise usage.UsageError('Invalid label type "%s", must be one of: %s'
                                   % (label_type, ', '.join(sorted(LABEL_MAP))))
    else:
        network, port = stp_arg.rsplit(':', 1)
        label = None
    return nsa.STP(network, port, label)
def _createSTPList(ero):
    """
    Take a string of ERO STP and convert to a list of OrderedStpType.

    A None input passes straight through as None.
    """
    if ero is None:
        return None
    return [_createSTP(stp_spec.strip()) for stp_spec in ero.split(',')]
def _createP2PS(src, dst, capacity, ero):
    """Build a bidirectional Point2PointService from CLI source/dest/ERO specs."""
    source_stp = _createSTP(src)
    destination_stp = _createSTP(dst)
    ordered_stps = _createSTPList(ero)
    return nsa.Point2PointService(source_stp, destination_stp, capacity,
                                  cnt.BIDIRECTIONAL, False, ordered_stps, None)
def _handleEvent(event):
    """Log a notification event; return True when the caller should stop waiting."""
    notification_type, header, entry = event
    if notification_type == 'errorEvent':
        log.msg('Error event: %s' % str(entry))
        return True
    if notification_type == 'dataPlaneStateChange':
        cid, nid, timestamp, dps = entry
        active, version, consistent = dps
        if active:
            log.msg('Connection %s Data plane active, version %i, consistent: %s' % (cid, version, consistent))
            return False
        log.msg('Connection %s Data plane down, version %i, consistent: %s' % (cid, version, consistent))
        return consistent  # this means we don't exit on initial partially down, where we are not consistent
    log.msg('Unrecognized event %s ' % notification_type)
    return False
def _logError(e):
    """Emit an NSI error to the user: type, originating NSA, message, variables."""
    log.msg('%s from %s' % (e.__class__.__name__, e.nsaId))
    log.msg('  %s' % e)
    if e.variables:
        log.msg('Variables: %s' % ' '.join([': '.join(tvp) for tvp in e.variables]))
@defer.inlineCallbacks
def discover(client, service_url):
    """Query the NSA at service_url and print the raw result."""
    result = yield client.queryNSA(service_url)
    print("-- COMMAND RESULT --")
    print(result)
    print("--")
@defer.inlineCallbacks
def reserveonly(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id):
    """Issue a reserve request and leave the reservation held (no commit)."""
    schedule = nsa.Schedule(start_time, end_time)
    service_def = _createP2PS(src, dst, capacity, ero)
    request_criteria = nsa.Criteria(0, schedule, service_def)
    try:
        # Connection trace: this requester at trace level 1.
        nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1']
        connection_id, _, _, criteria = yield client.reserve(
            nsi_header, connection_id, global_id, 'Test Connection', request_criteria)
        nsi_header.connection_trace = None
        sd = criteria.service_def
        log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa))
        log.msg("Source - Destination: %s - %s" % (sd.source_stp, sd.dest_stp))
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def reserve(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id):
    """Reserve a point-to-point connection and immediately commit it."""
    schedule = nsa.Schedule(start_time, end_time)
    service_def = _createP2PS(src, dst, capacity, ero)
    request_criteria = nsa.Criteria(0, schedule, service_def)
    try:
        nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1']
        reserve_result = yield client.reserve(
            nsi_header, connection_id, global_id, 'Test Connection', request_criteria)
        connection_id, global_reservation_id, description, criteria = reserve_result
        nsi_header.connection_trace = None
        sd = criteria.service_def
        log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa))
        log.msg("Source - Destination: %s - %s" % (sd.source_stp, sd.dest_stp))
        # A fresh correlation id is needed for the follow-up commit.
        nsi_header.newCorrelationId()
        yield client.reserveCommit(nsi_header, connection_id)
        log.msg("Reservation committed at %s" % nsi_header.provider_nsa)
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def reserveprovision(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id,
                     notification_wait):
    """
    Reserve, commit, query and provision a connection; when notification_wait
    is set, block on data-plane notifications until an event signals exit.

    NSI errors are logged via _logError rather than propagated.
    """
    schedule = nsa.Schedule(start_time, end_time)
    service_def = _createP2PS(src, dst, capacity, ero)
    crt = nsa.Criteria(0, schedule, service_def)
    try:
        nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1']
        connection_id, _, _, criteria = yield client.reserve(nsi_header, connection_id, global_id, 'Test Connection',
                                                             crt)
        # NOTE(review): `reserve` above resets the trace to None, this one to [] — confirm if intentional.
        nsi_header.connection_trace = []
        sd = criteria.service_def
        log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa))
        log.msg("Source - Destination: %s - %s" % (sd.source_stp, sd.dest_stp))
        nsi_header.newCorrelationId()
        yield client.reserveCommit(nsi_header, connection_id)
        log.msg("Connection committed at %s" % nsi_header.provider_nsa)
        # query
        nsi_header.newCorrelationId()
        qr = yield client.querySummary(nsi_header, connection_ids=[connection_id])
        print('Query result: {}'.format(qr))
        # provision
        nsi_header.newCorrelationId()
        yield client.provision(nsi_header, connection_id)
        log.msg('Connection %s provisioned' % connection_id)
        # Wait for data-plane events; _handleEvent decides when to stop.
        while notification_wait:
            event = yield client.notifications.get()
            should_exit = _handleEvent(event)  # renamed: `exit` shadowed the builtin
            if should_exit:
                break
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def rprt(client, nsi_header, src, dst, start_time, end_time, capacity, ero, connection_id, global_id):
    """Full lifecycle exercise: reserve, commit, provision, release, terminate."""
    schedule = nsa.Schedule(start_time, end_time)
    service_def = _createP2PS(src, dst, capacity, ero)
    request_criteria = nsa.Criteria(0, schedule, service_def)
    try:
        nsi_header.connection_trace = [nsi_header.requester_nsa + ':' + '1']
        connection_id, _, _, criteria = yield client.reserve(
            nsi_header, connection_id, global_id, 'Test Connection', request_criteria)
        nsi_header.connection_trace = []
        sd = criteria.service_def
        log.msg("Connection created and held. Id %s at %s" % (connection_id, nsi_header.provider_nsa))
        log.msg("Source - Destination: %s - %s" % (sd.source_stp, sd.dest_stp))
        # Each subsequent operation needs a fresh correlation id.
        nsi_header.newCorrelationId()
        yield client.reserveCommit(nsi_header, connection_id)
        log.msg("Connection committed at %s" % nsi_header.provider_nsa)
        nsi_header.newCorrelationId()
        yield client.provision(nsi_header, connection_id)
        log.msg('Connection %s provisioned' % connection_id)
        nsi_header.newCorrelationId()
        yield client.release(nsi_header, connection_id)
        log.msg('Connection %s released' % connection_id)
        nsi_header.newCorrelationId()
        yield client.terminate(nsi_header, connection_id)
        log.msg('Connection %s terminated' % connection_id)
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def reservecommit(client, nsi_header, connection_id):
    # Commit an already-held reservation; NSI errors are logged, not raised.
    try:
        yield client.reserveCommit(nsi_header, connection_id)
        log.msg("Reservation committed at %s" % nsi_header.provider_nsa)
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def provision(client, nsi_header, connection_id, notification_wait):
    """Provision an existing reservation; notification waiting is not implemented."""
    try:
        yield client.provision(nsi_header, connection_id)
        log.msg('Connection %s provisioned' % connection_id)
    except error.NSIError as e:
        _logError(e)
    if notification_wait:
        log.msg("Notification wait not added to provision yet")
@defer.inlineCallbacks
def release(client, nsi_header, connection_id, notification_wait):
    """Release a provisioned connection; notification waiting is not implemented."""
    try:
        yield client.release(nsi_header, connection_id)
        log.msg('Connection %s released' % connection_id)
    except error.NSIError as e:
        _logError(e)
    if notification_wait:
        log.msg("Notification wait not added to release yet")
@defer.inlineCallbacks
def terminate(client, nsi_header, connection_id):
    # Terminate a connection; NSI errors are logged, not raised.
    try:
        yield client.terminate(nsi_header, connection_id)
        log.msg('Connection %s terminated' % connection_id)
    except error.NSIError as e:
        _logError(e)
def _emitQueryResult(query_result, i='', child=False):
    # Pretty-print one query result (and its children, recursively).
    # `i` is the indentation prefix; `child` suppresses fields that are
    # identical across the whole reservation tree.
    qr = query_result
    log.msg('')
    log.msg(i + 'Connection %s (%s)' % (qr.connection_id, qr.provider_nsa))
    if qr.global_reservation_id:
        log.msg(i + 'Global ID %s' % qr.global_reservation_id)
    if qr.description:
        log.msg(i + 'Description %s' % qr.description)
    # states is (reservation, provision, lifecycle, dataplane-status) — the
    # first three are strings, the last a (active, version, consistent) tuple.
    states = qr.states
    dps = states[3]
    log.msg(i + 'States %s' % ', '.join(states[0:3]))
    log.msg(i + 'Dataplane Active : %s, Version: %s, Consistent %s' % dps)
    if qr.criterias:
        crit = qr.criterias[0]
        if not child:
            log.msg(i + 'Start-End %s - %s' % (crit.schedule.start_time, crit.schedule.end_time))
        if type(crit.service_def) is nsa.Point2PointService:
            sd = crit.service_def
            # log.msg(i + 'Source : %s' % sd.source_stp.shortName())
            # log.msg(i + 'Destination : %s' % sd.dest_stp.shortName())
            log.msg(i + 'Path %s -- %s' % (sd.source_stp.shortName(), sd.dest_stp.shortName()))
            if not child:  # these should be the same everywhere
                log.msg(i + 'Bandwidth %s' % sd.capacity)
                log.msg(i + 'Direction %s' % sd.directionality)
                if sd.symmetric:  # only show symmetric if set
                    log.msg(i + 'Symmetric %s' % sd.symmetric)
                if sd.parameters:
                    log.msg(i + 'Params %s' % sd.parameters)
        else:
            log.msg(i + 'Unrecognized service definition: %s' % str(crit.service_def))
        for c in crit.children:
            _emitQueryResult(c, i + '  ', True)
@defer.inlineCallbacks
def querySummary(client, nsi_header, connection_ids, global_reservation_ids):
    """Run a summary query and pretty-print every returned reservation."""
    try:
        results = yield client.querySummary(nsi_header, connection_ids, global_reservation_ids)
        if not results:
            log.msg('No results from query')
            defer.returnValue(None)
        log.msg('Query results:')
        for result in results:
            _emitQueryResult(result)
        log.msg('')
    except error.NSIError as e:
        _logError(e)
@defer.inlineCallbacks
def queryRecursive(client, nsi_header, connection_ids, global_reservation_ids):
    """Run a recursive query and pretty-print every returned reservation tree."""
    try:
        results = yield client.queryRecursive(nsi_header, connection_ids, global_reservation_ids)
        if not results:
            log.msg('No results from query')
            defer.returnValue(None)
        log.msg('Query results:')
        for result in results:
            _emitQueryResult(result)
        log.msg('')
    except error.NSIError as e:
        _logError(e)
|
|
import json, re, struct
import os.path
from tblgen import interpret, Dag, TableGenBits
def dag2expr(dag):
    # Convert a TableGen Dag tree into a plain nested-list expression form.
    # (This file is Python 2: note the tuple-parameter syntax in sep below.)
    def clean(value):
        # Unwrap ('defref', name) tuples to the bare referenced name.
        if isinstance(value, tuple) and len(value) == 2 and value[0] == 'defref':
            return value[1]
        return value
    def sep((name, value)):
        # Prefer the element's name; fall back to its (cleaned) value.
        if name is None:
            return clean(value)
        return name
    if isinstance(dag, Dag):
        return [dag2expr(sep(elem)) for elem in dag.elements]
    else:
        return dag
# Parse insts.td into a JSON cache (rebuilt whenever insts.td is newer).
if not os.path.exists('insts.td.cache') or os.path.getmtime('insts.td') > os.path.getmtime('insts.td.cache'):
    insts = interpret('insts.td').deriving('BaseInst')
    ops = []
    for name, (bases, data) in insts:
        # Each entry: (name, format type, opcode, function code or None,
        # disassembly template, eval dag as nested lists).
        ops.append((name, bases[1], data['Opcode'][1], data['Function'][1] if 'Function' in data else None, data['Disasm'][1], dag2expr(data['Eval'][1])))
    with file('insts.td.cache', 'w') as fp:
        json.dump(ops, fp)
else:
    ops = json.load(file('insts.td.cache'))
# Index the instructions by opcode; instructions distinguished by a function
# code nest inside a per-opcode [format-type, {funct: inst}] entry.
toplevel = {}
for name, type, op, funct, dasm, dag in ops:
    if funct is None:
        assert op not in toplevel
        toplevel[op] = name, type, dasm, dag
    else:
        if op not in toplevel:
            toplevel[op] = [type, {}]
        toplevel[op][1][funct] = name, type, dasm, dag
def generate(gfunc):
    # Build a ('switch', ...) sexp dispatching on the opcode field
    # (inst >>> 26), with a nested switch on the function field for opcodes
    # that hold multiple instructions. `gfunc` generates each case body.
    switch = []
    for op, body in toplevel.items():
        if isinstance(body, list):
            type, body = body
            subswitch = []
            for funct, sub in body.items():
                subswitch.append(('case', funct, gfunc(sub)))
            # Which bit field disambiguates the sub-instruction depends on
            # the instruction format.
            if type == 'CFType':
                when = ('&', ('>>>', 'inst', 21), 0x1F)
            elif type == 'RIType':
                when = ('&', ('>>>', 'inst', 16), 0x1F)
            else:
                when = ('&', 'inst', 0x3F)
            switch.append(('case', op, ('switch', when, subswitch)))
        else:
            switch.append(('case', op, gfunc(body)))
    return ('switch', ('>>>', 'inst', 26), switch)
def indent(str, single=True):
    """Format a code fragment for embedding between braces.

    A single-line fragment (when `single` is allowed) is padded with one
    space on each side; otherwise every line is re-emitted tab-indented on
    its own line, framed by newlines.
    """
    if single and '\n' not in str:
        return ' %s ' % str
    indented_lines = ['\t' + line for line in str.split('\n')]
    return '\n%s\n' % '\n'.join(indented_lines)
def output(expr, top=True, emitting=False):
    # Render a sexp as JavaScript source text.
    # `top` controls whether a call expression gets a trailing ';';
    # `emitting` means we are inside an emit("...") string template, where
    # $-variables are spliced in via string concatenation.
    if isinstance(expr, list):
        return '\n'.join(output(x, top=top, emitting=emitting) for x in expr)
    elif isinstance(expr, int) or isinstance(expr, long):
        return '0x%x' % expr
    elif isinstance(expr, str) or isinstance(expr, unicode):
        if emitting and expr.startswith('$') and not expr.startswith('$_'):
            return '" + %s + "' % expr
        else:
            return expr
    op = expr[0]
    if op == 'switch':
        return 'switch(%s) {%s}' % (output(expr[1], emitting=emitting), indent(output(expr[2], emitting=emitting)))
    elif op == 'case':
        return 'case %s: {%s\tbreak;\n}' % (output(expr[1], emitting=emitting), indent(output(expr[2], emitting=emitting), single=False))
    elif op in ('+', '-', '*', '/', '%', '<<', '>>', '>>>', '&', '|', '^', '==', '!=', '<', '<=', '>', '>='):
        return '(%s) %s (%s)' % (output(expr[1], top=False, emitting=emitting), op, output(expr[2], top=False, emitting=emitting))
    elif op == '!':
        return '!(%s)' % output(expr[1], top=False, emitting=emitting)
    elif op == '=':
        return '%s %s %s;' % (output(expr[1], top=False, emitting=emitting), op, output(expr[2], top=False, emitting=emitting))
    elif op == 'if':
        return 'if(%s) {%s} else {%s}' % (output(expr[1], top=False, emitting=emitting), indent(output(expr[2], emitting=emitting), single=False), indent(output(expr[3], emitting=emitting), single=False))
    elif op == 'when':
        return 'if(%s) {%s}' % (output(expr[1], top=False, emitting=emitting), indent(output(expr[2], emitting=emitting)))
    elif op == 'comment':
        return '/*%s*/' % indent(output(expr[1], emitting=emitting))
    elif op == 'str':
        # Backquotes are Python 2 repr() syntax: produces a quoted JS string.
        return `str(expr[1])`
    elif op == 'index':
        return '(%s)[%s]' % (output(expr[1], top=False, emitting=emitting), output(expr[2], top=False, emitting=emitting))
    elif op == 'emit':
        if emitting:
            return output(expr[1], top=False, emitting=True)
        else:
            # Render the subtree as an emit("...") template, collapsing
            # empty-string concatenations introduced by variable splicing.
            return 'emit("%s");' % (output(expr[1], top=True, emitting=True).replace('\n', '\\n').replace('"" + ', '').replace(' + ""', ''))
    else:
        # Fallback: a plain function call.
        return '%s(%s)%s' % (op, ', '.join(output(x, top=False, emitting=emitting) for x in expr[1:]), ';' if top else '')
# Arithmetic/comparison dag ops -> sexp builders. The ('>>>', ..., 0)
# wrapping coerces JavaScript results to unsigned 32-bit.
gops = {
    'add' : lambda a, b: ('+', a, b),
    'sub' : lambda a, b: ('-', a, b),
    'and' : lambda a, b: ('>>>', ('&', a, b), 0),
    'or' : lambda a, b: ('>>>', ('|', a, b), 0),
    'nor' : lambda a, b: ('>>>', ('~', ('|', a, b)), 0),
    'xor' : lambda a, b: ('>>>', ('^', a, b), 0),
    'mul' : lambda a, b: ('*', a, b), # XXX: This needs to be a 64-bit mul!
    'div' : lambda a, b: ('>>>', ('/', a, b), 0),
    'mod' : lambda a, b: ('>>>', ('%', a, b), 0),
    'shl' : lambda a, b: ('>>>', ('<<', a, b), 0),
    'shra' : lambda a, b: ('>>', a, b),
    'shrl' : lambda a, b: ('>>>', a, b),
    'eq' : lambda a, b: ('==', a, b),
    'ge' : lambda a, b: ('>=', a, b),
    'gt' : lambda a, b: ('>', a, b),
    'le' : lambda a, b: ('<=', a, b),
    'lt' : lambda a, b: ('<', a, b),
    'neq' : lambda a, b: ('!=', a, b),
}
def cleansexp(sexp):
    """Recursively drop empty-list elements from nested lists and tuples."""
    if isinstance(sexp, list):
        return [cleansexp(item) for item in sexp if item != []]
    if isinstance(sexp, tuple):
        return tuple(cleansexp(item) for item in sexp if item != [])
    return sexp
def find_deps(dag):
    # Collect the set of variable names ('$rs', '$imm', ...) referenced
    # anywhere in a dag; non-string leaves contribute nothing.
    if isinstance(dag, str) or isinstance(dag, unicode):
        return set([dag])
    elif not isinstance(dag, list):
        return set()
    # Union the deps of all operands (dag[0] is the operator, not a dep).
    return reduce(lambda a, b: a|b, map(find_deps, dag[1:])) if len(dag) != 1 else set()
def decoder(code, vars, type, dag):
    # Append field-extraction assignments for the given instruction format
    # to `code` (and record the names in `vars`), but only for fields the
    # instruction's dag actually references.
    def decl(name, val):
        if name in deps:
            vars.append(name)
            code.append(('=', name, val))
    deps = find_deps(dag)
    if type == 'IType' or type == 'RIType':
        decl('$rs', ('&', ('>>>', 'inst', 21), 0x1F))
        decl('$rt', ('&', ('>>>', 'inst', 16), 0x1F))
        decl('$imm', ('&', 'inst', 0xFFFF))
    elif type == 'JType':
        decl('$imm', ('&', 'inst', 0x3FFFFFF))
    elif type == 'RType':
        decl('$rs', ('&', ('>>>', 'inst', 21), 0x1F))
        decl('$rt', ('&', ('>>>', 'inst', 16), 0x1F))
        decl('$rd', ('&', ('>>>', 'inst', 11), 0x1F))
        decl('$shamt', ('&', ('>>>', 'inst', 6), 0x1F))
    elif type == 'SType':
        decl('$code', ('&', ('>>>', 'inst', 6), 0x0FFFFF))
    elif type == 'CFType':
        decl('$cop', ('&', ('>>>', 'inst', 26), 3))
        decl('$rt', ('&', ('>>>', 'inst', 16), 0x1F))
        decl('$rd', ('&', ('>>>', 'inst', 11), 0x1F))
        decl('$cofun', ('&', 'inst', 0x01FFFFFF))
    else:
        print 'Unknown instruction type:', type
        assert False
def genDisasm((name, type, dasm, dag)):
    # Generate the disassembler case body for one instruction: decode the
    # fields the disasm template needs, then build and return the string.
    # (Python 2 tuple-parameter syntax.)
    code = [('comment', name)]
    vars = []
    decoder(code, vars, type, dag)
    def subgen(dag):
        # Keep only the parts of the eval dag that bind variables the
        # disassembly template can reference; discard side-effecting ops.
        if isinstance(dag, str) or isinstance(dag, unicode):
            return dag
        elif isinstance(dag, int) or isinstance(dag, long):
            return dag
        elif not isinstance(dag, list):
            print 'Fail', `dag`
            assert False
        op = dag[0]
        if op in ('let', 'rlet'):
            # Ignore any leading underscore vars
            if dag[1].startswith('$_'):
                return []
            if dag[1] not in vars:
                vars.append(dag[1])
            return [('=', dag[1], subgen(dag[2]))] + subgen(dag[3])
        elif op in ('branch', 'break', 'copfun', 'raise', 'set', 'store', 'syscall'): # Catch toplevel exprs
            return []
        elif op == 'if':
            return list(map(subgen, dag[2:]))
        elif op == 'when':
            return list(map(subgen, dag[2:]))
        elif op in gops:
            return gops[op](subgen(dag[1]), subgen(dag[2]))
        elif op in ('signext', 'zeroext'):
            return (op, dag[1], subgen(dag[2]))
        elif op in ('pc', 'hi', 'lo'):
            return [op]
        elif op == 'pcd':
            return ('+', '$pc', 4) # Return the delay slot position
        elif op == 'gpr':
            return ('gpr', subgen(dag[1]))
        elif op == 'copreg':
            return ('copreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'copcreg':
            return ('copcreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'block':
            return list(map(subgen, dag[1:]))
        elif op == 'unsigned':
            return ('>>>', subgen(dag[1]), 0)
        else:
            print 'Unknown op:', op
            return []
    code += cleansexp(subgen(dag))
    def format(dasm):
        # Turn the disasm template into a string-concat sexp, substituting
        # the leftmost known variable first and recursing on the tail.
        shortest = (len(dasm), None)
        for var in vars:
            match = re.match(r'^(.*?)' + var.replace('$', '\\$') + '([^a-zA-Z0-9_].*$|$)', dasm)
            if match:
                match = match.groups()
                if len(match[0]) < shortest[0]:
                    shortest = len(match[0]), var
        if shortest[1] is None:
            return ('str', dasm)
        var = shortest[1]
        # A '%' prefix marks a register operand (rendered via regname);
        # otherwise the value is rendered as hex.
        match = re.match(r'^(.*?)(%?)' + var.replace('$', '\\$') + '([^a-zA-Z0-9_].*$|$)', dasm).groups()
        if match[1] == '%':
            out = ('regname', var)
        else:
            out = ('+', ('str', '0x'), ('hexify', var))
        if match[0]:
            out = ('+', ('str', match[0]), out)
        if match[2]:
            out = ('+', out, format(match[2]))
        return out
    code.append(('return', format(dasm)))
    return code
# When True, the interpreter generator wraps every state mutation in
# before/after debug prints.
debug = False
def dlog(dag, code, pos):
    # Build a debug-print sexp describing the storage location `dag` and the
    # JS expression `code` that reads it; `pos` is 'before' or 'after'.
    if dag[0] == 'gpr':
        name = ('regname', dag[1])
    elif dag[0] == 'copreg':
        name = '+', ('+', ('+', ('str', 'cop'), dag[1]), ('str', ' reg ')), dag[2]
    elif dag[0] == 'copcreg':
        name = '+', ('+', ('+', ('str', 'cop'), dag[1]), ('str', ' control reg ')), dag[2]
    elif dag[0] in ('hi', 'lo', 'pc'):
        name = dag[0]
    elif dag[0] == 'store':
        name = '>>>', dag[1], 0
    else:
        # NOTE(review): this path leaves `name` unbound, so the return below
        # would raise NameError — confirm unknown dags cannot reach here.
        print 'Unknown dag to dlog:', dag
    return ('phex32', name, ('str', pos + ':'), code, ('str', 'uint:'), ('>>>', code, 0))
def genInterp((name, type, dasm, dag)):
    # Generate the interpreter case body for one instruction: decode fields,
    # then translate the eval dag into JS statements against `state`.
    # (Python 2 tuple-parameter syntax.)
    code = [('comment', name)]
    vars = []
    decoder(code, vars, type, dag)
    def subgen(dag):
        if isinstance(dag, str) or isinstance(dag, unicode):
            return dag
        elif isinstance(dag, int) or isinstance(dag, long):
            return dag
        elif not isinstance(dag, list):
            print 'Fail', dag
            assert False
        op = dag[0]
        if op in ('let', 'rlet'):
            if dag[1] not in vars:
                vars.append(dag[1])
            return [('=', dag[1], subgen(dag[2]))] + subgen(['block'] + dag[3:])
        elif op == 'set':
            # Assignment: coprocessor registers go through state setters,
            # everything else is a plain JS assignment. When `debug` is on,
            # the write is bracketed with before/after dlog prints.
            left = dag[1]
            if left[0] == 'copreg':
                ret = [('state.copreg', subgen(left[1]), subgen(left[2]), subgen(dag[2]))]
                if debug:
                    val = 'state.copreg', subgen(left[1]), subgen(left[2])
                    tdag = 'copreg', subgen(left[1]), subgen(left[2])
                    ret = [dlog(tdag, val, 'before')] + ret + [dlog(tdag, val, 'after')]
            elif left[0] == 'copcreg':
                ret = [('state.copcreg', subgen(left[1]), subgen(left[2]), subgen(dag[2]))]
                if debug:
                    val = 'state.copcreg', subgen(left[1]), subgen(left[2])
                    tdag = 'copcreg', subgen(left[1]), subgen(left[2])
                    ret = [dlog(tdag, val, 'before')] + ret + [dlog(tdag, val, 'after')]
            else:
                leftjs = subgen(left)
                ret = [('=', leftjs, subgen(dag[2]))]
                if debug:
                    ret = [dlog(left, leftjs, 'before')] + ret + [dlog(left, leftjs, 'after')]
                if left[0] == 'gpr':
                    # $zero ($0) is not writable: guard gpr writes.
                    ret = [('when', ('!=', left[1], 0), ret)]
            return ret
        elif op == 'if':
            return [('if', subgen(dag[1]), subgen(dag[2]), subgen(dag[3]))]
        elif op == 'when':
            return [('when', subgen(dag[1]), subgen(dag[2]))]
        elif op in gops:
            return gops[op](subgen(dag[1]), subgen(dag[2]))
        elif op in ('signext', 'zeroext'):
            return (op, dag[1], subgen(dag[2]))
        elif op == 'pc':
            return ['$pc']
        elif op in ('hi', 'lo'):
            return ['state.' + op]
        elif op == 'pcd':
            return [('+', '$pc', 4)] # Return the delay slot position
        elif op == 'gpr':
            return ('index', 'state.regs', subgen(dag[1]))
        elif op == 'copreg':
            return ('state.copreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'copcreg':
            return ('state.copcreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'block':
            return list(map(subgen, dag[1:]))
        elif op == 'unsigned':
            return ('>>>', subgen(dag[1]), 0)
        elif op == 'signed':
            return ('|', subgen(dag[1]), 0)
        elif op == 'overflow':
            return [('overflow', subgen(dag[1]))]
        elif op == 'raise':
            return [('state.raise', dag[1])]
        elif op == 'break':
            return [('state.break_', dag[1])]
        elif op == 'syscall':
            return [('state.syscall', dag[1])]
        elif op == 'branch':
            return [('state.branch', subgen(dag[1]))]
        elif op == 'load':
            return [('state.mem.uint%i' % dag[1], subgen(dag[2]))]
        elif op == 'store':
            ret = [('state.mem.uint%i' % dag[1], subgen(dag[2]), subgen(dag[3]))]
            if debug:
                addr = subgen(dag[2])
                val = 'state.mem.uint%i' % dag[1], addr
                tdag = 'store', addr
                ret = [dlog(tdag, val, 'before')] + ret + [dlog(tdag, val, 'after')]
            return ret
        elif op == 'copfun':
            return [('state.copfun', subgen(dag[1]), subgen(dag[2]))]
        else:
            print 'Unknown op:', op
            return []
    code += cleansexp(subgen(dag))
    code.append(('return', 'true'))
    return code
def genDecomp((name, type, dasm, dag)):
    # Generate the decompiler case body for one instruction: like genInterp,
    # but state mutations are wrapped in ('emit', ...) so they are written
    # into the generated JS rather than executed. (Python 2 tuple parameter.)
    code = [('comment', name), ('emit', ('=', 'state.pc', '$pc'))]
    vars = []
    decoder(code, vars, type, dag)
    # Single-element list so the nested function can mutate it (py2 has no
    # `nonlocal`).
    has_branch = [False]
    def subgen(dag):
        if isinstance(dag, str) or isinstance(dag, unicode):
            return dag
        elif isinstance(dag, int) or isinstance(dag, long):
            return dag
        elif not isinstance(dag, list):
            print 'Fail', dag
            assert False
        op = dag[0]
        if op in ('let', 'rlet'):
            if dag[1] not in vars:
                vars.append(dag[1])
            ret = [('=', dag[1], subgen(dag[2]))] + subgen(['block'] + dag[3:])
            if op == 'rlet':
                # rlet bindings must live in the emitted (runtime) code.
                return [('emit', ret)]
            else:
                return ret
        elif op == 'set':
            left = dag[1]
            if left[0] == 'copreg':
                return [('emit', ('state.copreg', subgen(left[1]), subgen(left[2]), subgen(dag[2])))]
            elif left[0] == 'copcreg':
                return [('emit', ('state.copcreg', subgen(left[1]), subgen(left[2]), subgen(dag[2])))]
            else:
                leftjs = subgen(left)
                ret = [('emit', ('=', leftjs, subgen(dag[2])))]
                if left[0] == 'gpr':
                    # $zero ($0) is not writable: guard gpr writes.
                    ret = [('when', ('!=', left[1], 0), ret)]
                return ret
        # XXX: Conditionals should detect if they can happen at decompile-time
        elif op == 'if':
            return [('emit', ('if', subgen(dag[1]), subgen(dag[2]), subgen(dag[3])))]
        elif op == 'when':
            return [('emit', ('when', subgen(dag[1]), subgen(dag[2])))]
        elif op in gops:
            return gops[op](subgen(dag[1]), subgen(dag[2]))
        elif op in ('signext', 'zeroext'):
            return (op, dag[1], subgen(dag[2]))
        elif op == 'pc':
            return ['$pc']
        elif op in ('hi', 'lo'):
            return ['state.' + op]
        elif op == 'pcd':
            return [('+', '$pc', 4)] # Return the delay slot position
        elif op == 'gpr':
            return ('index', 'state.regs', subgen(dag[1]))
        elif op == 'copreg':
            return ('state.copreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'copcreg':
            return ('state.copcreg', subgen(dag[1]), subgen(dag[2]))
        elif op == 'block':
            return list(map(subgen, dag[1:]))
        elif op == 'unsigned':
            return ('>>>', subgen(dag[1]), 0)
        elif op == 'signed':
            return ('|', subgen(dag[1]), 0)
        elif op == 'overflow':
            return [('overflow', subgen(dag[1]))]
        elif op == 'raise':
            return [('emit', ('state.raise', dag[1]))]
        elif op == 'break':
            return [('emit', ('state.break_', dag[1]))]
        elif op == 'syscall':
            return [('emit', ('state.syscall', dag[1]))]
        elif op == 'branch':
            has_branch[0] = True
            return [('emit', ('state.branch', subgen(dag[1]), 'true'))]
        elif op == 'load':
            return [('state.mem.uint%i' % dag[1], subgen(dag[2]))]
        elif op == 'store':
            return [('emit', ('state.mem.uint%i' % dag[1], subgen(dag[2]), subgen(dag[3])))]
        elif op == 'copfun':
            return [('emit', ('state.copfun', subgen(dag[1]), subgen(dag[2])))]
        else:
            print 'Unknown op:', op
            return []
    code += cleansexp(subgen(dag))
    if has_branch[0]:
        code.append(('branch', ))
    code.append(('return', 'true'))
    return code
def build():
    # Regenerate the three JS backends (disassembler, interpreter,
    # decompiler) from the instruction tables. (Python 2 print syntax.)
    print 'Rebuilding from tables'
    with file('scripts/disasm.js', 'w') as fp:
        print >>fp, '/* Autogenerated from insts.td. DO NOT EDIT */'
        print >>fp, 'function disassemble($pc, inst) {%s\treturn "Unknown instruction. Op=0b" + ((inst >>> 26).toString(2).zeropad(6)) + " (Funct=0b" + ((inst & 0x3f).toString(2).zeropad(6)) + ", Cofunct=0b" + ((inst >>> 0x15) & 0x1f).toString(2).zeropad(5) + ")";\n}' % indent(output(generate(genDisasm)))
    with file('scripts/interp.js', 'w') as fp:
        print >>fp, '/* Autogenerated from insts.td. DO NOT EDIT */'
        print >>fp, 'function interpret($pc, inst, state) {%s\treturn false;\n}' % indent(output(generate(genInterp)))
    with file('scripts/decomp.js', 'w') as fp:
        print >>fp, '/* Autogenerated from insts.td. DO NOT EDIT */'
        print >>fp, 'function decompile($pc, inst, emit, branch) {%s\treturn false;\n}' % indent(output(generate(genDecomp)))
# Allow running this generator directly as a script.
if __name__=='__main__':
    build()
|
|
from typing import List, Iterator, Dict, Tuple, Any, Type, Union, Optional
import logging
from os import PathLike
import json
import re
from contextlib import contextmanager
import numpy
import torch
from torch.utils.hooks import RemovableHandle
from torch import Tensor
from torch import backends
from allennlp.common import Registrable, plugins
from allennlp.common.util import JsonDict, sanitize
from allennlp.data import DatasetReader, Instance
from allennlp.data.batch import Batch
from allennlp.models import Model
from allennlp.models.archival import Archive, load_archive
from allennlp.nn import util
logger = logging.getLogger(__name__)
class Predictor(Registrable):
"""
a `Predictor` is a thin wrapper around an AllenNLP model that handles JSON -> JSON predictions
that can be used for serving models through the web API or making predictions in bulk.
"""
def __init__(self, model: Model, dataset_reader: DatasetReader, frozen: bool = True) -> None:
if frozen:
model.eval()
self._model = model
self._dataset_reader = dataset_reader
self.cuda_device = next(self._model.named_parameters())[1].get_device()
self._token_offsets: List[Tensor] = []
def load_line(self, line: str) -> JsonDict:
"""
If your inputs are not in JSON-lines format (e.g. you have a CSV)
you can override this function to parse them correctly.
"""
return json.loads(line)
def dump_line(self, outputs: JsonDict) -> str:
"""
If you don't want your outputs in JSON-lines format
you can override this function to output them differently.
"""
return json.dumps(outputs) + "\n"
def predict_json(self, inputs: JsonDict) -> JsonDict:
instance = self._json_to_instance(inputs)
return self.predict_instance(instance)
def json_to_labeled_instances(self, inputs: JsonDict) -> List[Instance]:
"""
Converts incoming json to a [`Instance`](../data/instance.md),
runs the model on the newly created instance, and adds labels to the
`Instance`s given by the model's output.
# Returns
`List[instance]`
A list of `Instance`'s.
"""
instance = self._json_to_instance(inputs)
self._dataset_reader.apply_token_indexers(instance)
outputs = self._model.forward_on_instance(instance)
new_instances = self.predictions_to_labeled_instances(instance, outputs)
return new_instances
    def get_gradients(self, instances: List[Instance]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        Gets the gradients of the loss with respect to the model inputs.
        # Parameters
        instances : `List[Instance]`
        # Returns
        `Tuple[Dict[str, Any], Dict[str, Any]]`
        The first item is a Dict of gradient entries for each input.
        The keys have the form  `{grad_input_1: ..., grad_input_2: ... }`
        up to the number of inputs given. The second item is the model's output.
        # Notes
        Takes a `JsonDict` representing the inputs of the model and converts
        them to [`Instances`](../data/instance.md)), sends these through
        the model [`forward`](../models/model.md#forward) function after registering hooks on the embedding
        layer of the model. Calls `backward` on the loss and then removes the
        hooks.
        """
        # set requires_grad to true for all parameters, but save original values to
        # restore them later
        original_param_name_to_requires_grad_dict = {}
        for param_name, param in self._model.named_parameters():
            original_param_name_to_requires_grad_dict[param_name] = param.requires_grad
            param.requires_grad = True
        # Backward hooks on the embedding layer fill this list during backward().
        embedding_gradients: List[Tensor] = []
        hooks: List[RemovableHandle] = self._register_embedding_gradient_hooks(embedding_gradients)
        for instance in instances:
            self._dataset_reader.apply_token_indexers(instance)
        dataset = Batch(instances)
        dataset.index_instances(self._model.vocab)
        dataset_tensor_dict = util.move_to_device(dataset.as_tensor_dict(), self.cuda_device)
        # To bypass "RuntimeError: cudnn RNN backward can only be called in training mode"
        with backends.cudnn.flags(enabled=False):
            outputs = self._model.make_output_human_readable(
                self._model.forward(**dataset_tensor_dict)  # type: ignore
            )
            loss = outputs["loss"]
            # Zero gradients.
            # NOTE: this is actually more efficient than calling `self._model.zero_grad()`
            # because it avoids a read op when the gradients are first updated below.
            for p in self._model.parameters():
                p.grad = None
            loss.backward()
        # The hooks are one-shot for this call; remove them before returning.
        for hook in hooks:
            hook.remove()
        grad_dict = dict()
        for idx, grad in enumerate(embedding_gradients):
            key = "grad_input_" + str(idx + 1)
            grad_dict[key] = grad.detach().cpu().numpy()
        # restore the original requires_grad values of the parameters
        for param_name, param in self._model.named_parameters():
            param.requires_grad = original_param_name_to_requires_grad_dict[param_name]
        return grad_dict, outputs
def get_interpretable_layer(self) -> torch.nn.Module:
"""
Returns the input/embedding layer of the model.
If the predictor wraps around a non-AllenNLP model,
this function should be overridden to specify the correct input/embedding layer.
For the cases where the input layer _is_ an embedding layer, this should be the
layer 0 of the embedder.
"""
try:
return util.find_embedding_layer(self._model)
except RuntimeError:
raise RuntimeError(
"If the model does not use `TextFieldEmbedder`, please override "
"`get_interpretable_layer` in your predictor to specify the embedding layer."
)
def get_interpretable_text_field_embedder(self) -> torch.nn.Module:
"""
Returns the first `TextFieldEmbedder` of the model.
If the predictor wraps around a non-AllenNLP model,
this function should be overridden to specify the correct embedder.
"""
try:
return util.find_text_field_embedder(self._model)
except RuntimeError:
raise RuntimeError(
"If the model does not use `TextFieldEmbedder`, please override "
"`get_interpretable_text_field_embedder` in your predictor to specify "
"the embedding layer."
)
    def _register_embedding_gradient_hooks(self, embedding_gradients):
        """
        Registers a backward hook on the embedding layer of the model.  Used to save the gradients
        of the embeddings for use in get_gradients()
        When there are multiple inputs (e.g., a passage and question), the hook
        will be called multiple times. We append all the embeddings gradients
        to a list.
        We additionally add a hook on the _forward_ pass of the model's `TextFieldEmbedder` to save
        token offsets, if there are any.  Having token offsets means that you're using a mismatched
        token indexer, so we need to aggregate the gradients across wordpieces in a token.  We do
        that with a simple sum.
        """
        def hook_layers(module, grad_in, grad_out):
            grads = grad_out[0]
            if self._token_offsets:
                # If you have a mismatched indexer with multiple TextFields, it's quite possible
                # that the order we deal with the gradients is wrong.  We'll just take items from
                # the list one at a time, and try to aggregate the gradients.  If we got the order
                # wrong, we should crash, so you'll know about it.  If you get an error because of
                # that, open an issue on github, and we'll see what we can do.  The intersection of
                # multiple TextFields and mismatched indexers is pretty small (currently empty, that
                # I know of), so we'll ignore this corner case until it's needed.
                offsets = self._token_offsets.pop(0)
                span_grads, span_mask = util.batched_span_select(grads.contiguous(), offsets)
                span_mask = span_mask.unsqueeze(-1)
                span_grads *= span_mask  # zero out paddings
                span_grads_sum = span_grads.sum(2)
                span_grads_len = span_mask.sum(2)
                # Shape: (batch_size, num_orig_tokens, embedding_size)
                grads = span_grads_sum / torch.clamp_min(span_grads_len, 1)
                # All the places where the span length is zero, write in zeros.
                grads[(span_grads_len == 0).expand(grads.shape)] = 0
            embedding_gradients.append(grads)
        def get_token_offsets(module, inputs, outputs):
            offsets = util.get_token_offsets_from_text_field_inputs(inputs)
            if offsets is not None:
                self._token_offsets.append(offsets)
        hooks = []
        text_field_embedder = self.get_interpretable_text_field_embedder()
        hooks.append(text_field_embedder.register_forward_hook(get_token_offsets))
        embedding_layer = self.get_interpretable_layer()
        # NOTE(review): `register_backward_hook` is deprecated in newer torch
        # in favor of `register_full_backward_hook` — confirm the torch
        # versions this must support before changing it.
        hooks.append(embedding_layer.register_backward_hook(hook_layers))
        return hooks
@contextmanager
def capture_model_internals(self, module_regex: str = ".*") -> Iterator[dict]:
    """
    Context manager that records the outputs of this predictor's internal
    modules while it is active. Typical usage:
    ```
    with predictor.capture_model_internals() as internals:
        outputs = predictor.predict_json(inputs)
        return {**outputs, "model_internals": internals}
    ```
    """
    results = {}
    hooks = []

    # Build a forward hook that stores a module's output under a fixed index.
    def make_hook(idx: int):
        def _capture(mod, _, outputs):
            results[idx] = {"name": str(mod), "output": sanitize(outputs)}
        return _capture

    # Attach a hook to every submodule whose (dotted) name matches the regex,
    # skipping the top-level model itself.
    pattern = re.compile(module_regex)
    for idx, (name, module) in enumerate(self._model.named_modules()):
        if pattern.fullmatch(name) and module != self._model:
            hooks.append(module.register_forward_hook(make_hook(idx)))

    # Hand the (still-filling) results dict to the caller.
    yield results

    # On exit, detach every hook we installed.
    for hook in hooks:
        hook.remove()
def predict_instance(self, instance: Instance) -> JsonDict:
    """Run the model on a single `Instance` and return sanitized outputs."""
    # Ensure the instance's token indexers are attached before the forward pass.
    self._dataset_reader.apply_token_indexers(instance)
    return sanitize(self._model.forward_on_instance(instance))
def predictions_to_labeled_instances(
    self, instance: Instance, outputs: Dict[str, numpy.ndarray]
) -> List[Instance]:
    """
    Label `instance` according to the model's `outputs`.

    Subclasses use this to (1) compute gradients of what the model predicted
    and (2) label the instance for an attack. For example, (a) the untargeted
    classification attack labels the instance with the highest-probability
    class, while (b) a targeted attack constructs fields directly from the
    given target.

    A list is returned because some tasks yield several predictions per
    instance (e.g., NER predicts multiple spans); each returned `Instance`
    then carries one individual prediction as its label.
    """
    raise RuntimeError("implement this method for model interpretations or attacks")
def _json_to_instance(self, json_dict: JsonDict) -> Instance:
    """
    Convert a JSON object into an [`Instance`](../data/instance.md)
    and a `JsonDict` of pass-through information the `Predictor` should
    keep (such as tokenized inputs). Subclasses must implement this.
    """
    raise NotImplementedError
def predict_batch_json(self, inputs: List[JsonDict]) -> List[JsonDict]:
    """Convert a batch of JSON blobs to instances and predict on them."""
    return self.predict_batch_instance(self._batch_json_to_instances(inputs))
def predict_batch_instance(self, instances: List[Instance]) -> List[JsonDict]:
    """Run the model on a batch of `Instance`s and return sanitized outputs."""
    # Attach token indexers to every instance before the batched forward pass.
    for inst in instances:
        self._dataset_reader.apply_token_indexers(inst)
    return sanitize(self._model.forward_on_instances(instances))
def _batch_json_to_instances(self, json_dicts: List[JsonDict]) -> List[Instance]:
    """
    Converts a list of JSON objects into a list of `Instance`s.
    By default, this expects that a "batch" consists of a list of JSON blobs which would
    individually be predicted by `predict_json`. In order to use this method for
    batch prediction, `_json_to_instance` should be implemented by the subclass, or
    if the instances have some dependency on each other, this method should be overridden
    directly.
    """
    # Idiomatic comprehension instead of a manual append loop.
    return [self._json_to_instance(json_dict) for json_dict in json_dicts]
@classmethod
def from_path(
    cls,
    archive_path: Union[str, PathLike],
    predictor_name: Optional[str] = None,
    cuda_device: int = -1,
    dataset_reader_to_load: str = "validation",
    frozen: bool = True,
    import_plugins: bool = True,
    overrides: Union[str, Dict[str, Any]] = "",
    **kwargs,
) -> "Predictor":
    """
    Instantiate a `Predictor` from an archive path.
    If you need more detailed configuration options, such as overrides,
    please use `from_archive`.
    # Parameters
    archive_path : `Union[str, PathLike]`
        The path to the archive.
    predictor_name : `str`, optional (default=`None`)
        Name that the predictor is registered as, or None to use the
        predictor associated with the model.
    cuda_device : `int`, optional (default=`-1`)
        If `cuda_device` is >= 0, the model will be loaded onto the
        corresponding GPU. Otherwise it will be loaded onto the CPU.
    dataset_reader_to_load : `str`, optional (default=`"validation"`)
        Which dataset reader to load from the archive, either "train" or
        "validation".
    frozen : `bool`, optional (default=`True`)
        If we should call `model.eval()` when building the predictor.
    import_plugins : `bool`, optional (default=`True`)
        If `True`, we attempt to import plugins before loading the predictor.
        This comes with additional overhead, but means you don't need to explicitly
        import the modules that your predictor depends on as long as those modules
        can be found by `allennlp.common.plugins.import_plugins()`.
    overrides : `Union[str, Dict[str, Any]]`, optional (default = `""`)
        JSON overrides to apply to the unarchived `Params` object.
    **kwargs : `Any`
        Additional key-word arguments that will be passed to the `Predictor`'s
        `__init__()` method.
    # Returns
    `Predictor`
        A Predictor instance.
    """
    # Import registered plugins first so `predictor_name` lookups can see them.
    if import_plugins:
        plugins.import_plugins()
    # Delegate the real work to `from_archive` after unarchiving the model.
    return Predictor.from_archive(
        load_archive(archive_path, cuda_device=cuda_device, overrides=overrides),
        predictor_name,
        dataset_reader_to_load=dataset_reader_to_load,
        frozen=frozen,
        extra_args=kwargs,
    )
@classmethod
def from_archive(
    cls,
    archive: Archive,
    predictor_name: Optional[str] = None,
    dataset_reader_to_load: str = "validation",
    frozen: bool = True,
    extra_args: Optional[Dict[str, Any]] = None,
) -> "Predictor":
    """
    Instantiate a `Predictor` from an [`Archive`](../models/archival.md);
    that is, from the result of training a model. Optionally specify which `Predictor`
    subclass; otherwise, we try to find a corresponding predictor in `DEFAULT_PREDICTORS`, or if
    one is not found, the base class (i.e. `Predictor`) will be used. Optionally specify
    which [`DatasetReader`](../data/dataset_readers/dataset_reader.md) should be loaded;
    otherwise, the validation one will be used if it exists followed by the training dataset reader.
    Optionally specify if the loaded model should be frozen, meaning `model.eval()` will be called.
    """
    # Duplicate the config so that the config inside the archive doesn't get consumed
    config = archive.config.duplicate()
    # Fall back to the model class's default predictor when no name was given.
    if not predictor_name:
        model_type = config.get("model").get("type")
        model_class, _ = Model.resolve_class_name(model_type)
        predictor_name = model_class.default_predictor
    # If we still have no registered name, instantiate via this class itself.
    predictor_class: Type[Predictor] = (
        Predictor.by_name(predictor_name) if predictor_name is not None else cls  # type: ignore
    )
    # Pick which of the archived dataset readers to pair with the model.
    if dataset_reader_to_load == "validation":
        dataset_reader = archive.validation_dataset_reader
    else:
        dataset_reader = archive.dataset_reader
    model = archive.model
    if frozen:
        model.eval()
    if extra_args is None:
        extra_args = {}
    return predictor_class(model, dataset_reader, **extra_args)
|
|
from twisted.python import failure, reflect, log as twlog
from twisted.internet import defer
from foolscap import copyable, slicer, tokens
from foolscap.copyable import AttributeDictConstraint
from foolscap.constraint import ByteStringConstraint
from foolscap.slicers.list import ListConstraint
from tokens import BananaError, Violation
from foolscap.util import AsyncAND
from foolscap.logging import log
def wrap_remote_failure(f):
    """Wrap a remote Failure in RemoteException so callers can tell it apart
    from locally-raised failures."""
    remote_exc = tokens.RemoteException(f)
    return failure.Failure(remote_exc)
class FailureConstraint(AttributeDictConstraint):
    """Schema constraint applied to inbound serialized Failure copies."""
    opentypes = [("copyable", "twisted.python.failure.Failure")]
    name = "FailureConstraint"
    klass = failure.Failure

    def __init__(self):
        # Bound the size of each serialized attribute of the Failure.
        string_limits = (('type', 200), ('value', 1000), ('traceback', 2000))
        attrs = [(attr, ByteStringConstraint(size))
                 for attr, size in string_limits]
        attrs.append(('parents', ListConstraint(ByteStringConstraint(200))))
        AttributeDictConstraint.__init__(self, *attrs)

    def checkObject(self, obj, inbound):
        if not isinstance(obj, self.klass):
            raise Violation("is not an instance of %s" % self.klass)
class PendingRequest(object):
    # this object is a local representation of a message we have sent to
    # someone else, that will be executed on their end.
    active = True  # flips to False once complete() or fail() has fired

    def __init__(self, reqID, rref, interface_name, method_name):
        self.reqID = reqID
        self.rref = rref # keep it alive
        self.broker = None # if set, the broker knows about us
        self.deferred = defer.Deferred()
        self.constraint = None # this constrains the results
        self.failure = None
        self.interface_name = interface_name # for error messages
        self.method_name = method_name # same

    def setConstraint(self, constraint):
        self.constraint = constraint

    def getMethodNameInfo(self):
        return (self.interface_name, self.method_name)

    def complete(self, res):
        # Fire the caller's Deferred with the remote result, exactly once.
        if self.broker:
            self.broker.removeRequest(self)
        if self.active:
            self.active = False
            self.deferred.callback(res)
        else:
            log.msg("PendingRequest.complete called on an inactive request")

    def fail(self, why):
        # Errback the caller's Deferred, optionally logging the remote
        # failure if the tub asked for it.
        if self.active:
            if self.broker:
                self.broker.removeRequest(self)
            self.active = False
            self.failure = why
            if (self.broker and
                self.broker.tub and
                self.broker.tub.logRemoteFailures):

                my_short_tubid = "??"
                if self.broker.tub: # for tests
                    my_short_tubid = self.broker.tub.getShortTubID()
                their_short_tubid = self.broker.remote_tubref.getShortTubID()

                lp = log.msg("an outbound callRemote (that we [%s] sent to "
                             "someone else [%s]) failed on the far end"
                             % (my_short_tubid, their_short_tubid),
                             level=log.UNUSUAL)
                # BUG FIX: these attributes are named interface_name and
                # method_name (set in __init__); the old camelCase names
                # raised AttributeError in this logging path.
                methname = ".".join([self.interface_name or "?",
                                     self.method_name or "?"])
                log.msg(" reqID=%d, rref=%s, methname=%s"
                        % (self.reqID, self.rref, methname),
                        level=log.NOISY, parent=lp)
                #stack = why.getTraceback()
                # TODO: include the first few letters of the remote tubID in
                # this REMOTE tag
                #stack = "REMOTE: " + stack.replace("\n", "\nREMOTE: ")
                log.msg(" the REMOTE failure was:", failure=why,
                        level=log.NOISY, parent=lp)
                #log.msg(stack, level=log.NOISY, parent=lp)
            self.deferred.errback(why)
        else:
            log.msg("WEIRD: fail() on an inactive request", traceback=True)
            if self.failure:
                log.msg("multiple failures")
                log.msg("first one was:", self.failure)
                log.msg("this one was:", why)
                log.err("multiple failures indicate a problem")
class ArgumentSlicer(slicer.ScopedSlicer):
    """Serializes the (args, kwargs) of a single method call: the positional
    count, each positional arg, then alternating kwarg names/values."""
    opentype = ('arguments',)

    def __init__(self, args, kwargs, methodname="?"):
        slicer.ScopedSlicer.__init__(self, None)
        self.args = args
        self.kwargs = kwargs
        self.which = ""  # updated as we go, for describe()/error reporting
        self.methodname = methodname

    def sliceBody(self, streamable, banana):
        yield len(self.args)
        for i, arg in enumerate(self.args):
            self.which = "arg[%d]-of-%s" % (i, self.methodname)
            yield arg
        # Emit keyword args in sorted-name order for a deterministic wire
        # format. FIX: the old `keys = self.kwargs.keys(); keys.sort()`
        # breaks on Python 3 (dict views have no .sort()); sorted() behaves
        # identically on both Python 2 and 3.
        for argname in sorted(self.kwargs):
            self.which = "arg[%s]-of-%s" % (argname, self.methodname)
            yield argname
            yield self.kwargs[argname]

    def describe(self):
        return "<%s>" % self.which
class CallSlicer(slicer.ScopedSlicer):
    """Serializes an outbound method call."""
    opentype = ('call',)

    def __init__(self, reqID, clid, methodname, args, kwargs):
        slicer.ScopedSlicer.__init__(self, None)
        self.reqID = reqID
        self.clid = clid
        self.methodname = methodname
        self.args = args
        self.kwargs = kwargs

    def sliceBody(self, streamable, banana):
        # Wire order: request ID, target object's CLID, method name, then
        # the arguments as their own nested sequence.
        for token in (self.reqID, self.clid, self.methodname):
            yield token
        yield ArgumentSlicer(self.args, self.kwargs, self.methodname)

    def describe(self):
        return "<call-%s-%s-%s>" % (self.reqID, self.clid, self.methodname)
class InboundDelivery(object):
    """An inbound message that has not yet been delivered.
    This is created when a 'call' sequence has finished being received. The
    Broker will add it to a queue. The delivery at the head of the queue is
    serviced when all of its arguments have been resolved.
    The only way that the arguments might not all be available is if one of
    the Unslicers which created them has provided a 'ready_deferred' along
    with the prospective object. The only standard Unslicer which does this
    is the TheirReferenceUnslicer, which handles introductions. (custom
    Unslicers might also provide a ready_deferred, for example a URL
    slicer/unslicer pair for which the receiving end fetches the target of
    the URL as its value, or a UnixFD slicer/unslicer that had to wait for a
    side-channel unix-domain socket to finish transferring control over the
    FD to the recipient before being ready).
    Most Unslicers refuse to accept unready objects as their children (most
    implementations of receiveChild() do 'assert ready_deferred is None').
    The CallUnslicer is fairly unique in not rejecting such objects.
    We do require, however, that all of the arguments be at least
    referenceable. This is not generally a problem: the only time an
    unslicer's receiveChild() can get a non-referenceable object (represented
    by a Deferred) is if that unslicer is participating in a reference cycle
    that has not yet completed, and CallUnslicers only live at the top level,
    above any cycles.
    """
    def __init__(self, broker, reqID, obj,
                 interface, methodname, methodSchema,
                 allargs):
        # broker: the Broker that received the call and will service it
        # reqID: the caller's request ID, used to route the answer back
        # obj: the local Referenceable the call targets
        # allargs: the ArgumentUnslicer holding .args / .kwargs
        self.broker = broker
        self.reqID = reqID
        self.obj = obj
        self.interface = interface
        self.methodname = methodname
        self.methodSchema = methodSchema
        self.allargs = allargs

    def logFailure(self, f):
        # called if tub.logLocalFailures is True
        my_short_tubid = "??"
        if self.broker.tub: # for tests
            my_short_tubid = self.broker.tub.getShortTubID()
        their_short_tubid = "<???>"
        if self.broker.remote_tubref:
            their_short_tubid = self.broker.remote_tubref.getShortTubID()
        # Parent log entry; the detail messages below attach to it via lp.
        lp = log.msg("an inbound callRemote that we [%s] executed (on behalf "
                     "of someone else, TubID %s) failed"
                     % (my_short_tubid, their_short_tubid),
                     level=log.UNUSUAL)
        if self.interface:
            methname = self.interface.getName() + "." + self.methodname
        else:
            methname = self.methodname
        log.msg(" reqID=%d, rref=%s, methname=%s" %
                (self.reqID, self.obj, methname),
                level=log.NOISY, parent=lp)
        log.msg(" args=%s" % (self.allargs.args,), level=log.NOISY, parent=lp)
        log.msg(" kwargs=%s" % (self.allargs.kwargs,),
                level=log.NOISY, parent=lp)
        #if isinstance(f.type, str):
        #    stack = "getTraceback() not available for string exceptions\n"
        #else:
        #    stack = f.getTraceback()
        # TODO: trim stack to everything below Broker._doCall
        #stack = "LOCAL: " + stack.replace("\n", "\nLOCAL: ")
        log.msg(" the LOCAL failure was:", failure=f,
                level=log.NOISY, parent=lp)
        #log.msg(stack, level=log.NOISY, parent=lp)
class ArgumentUnslicer(slicer.ScopedUnslicer):
    """
    Deserializes an 'arguments' sequence: first the positional-argument
    count, then that many positional args, then alternating keyword-arg
    names and values. Each argument is optionally validated against the
    method schema. Children may arrive as unresolved Deferreds (gifts);
    receiveClose() returns a ready_deferred that fires once everything is
    referenceable and ready.
    """
    methodSchema = None
    debug = False

    def setConstraint(self, methodSchema):
        self.methodSchema = methodSchema

    def start(self, count):
        if self.debug:
            log.msg("%s.start: %s" % (self, count))
        self.numargs = None
        self.args = []
        self.kwargs = {}
        self.argname = None
        self.argConstraint = None
        # children that are still Deferred placeholders (e.g. gifts)
        self.num_unreferenceable_children = 0
        self._all_children_are_referenceable_d = None
        self._ready_deferreds = []
        self.closed = False

    def checkToken(self, typebyte, size):
        # Dispatch on the current parse state to validate the raw token type.
        if self.numargs is None:
            # waiting for positional-arg count
            if typebyte != tokens.INT:
                raise BananaError("posarg count must be an INT")
            return
        if len(self.args) < self.numargs:
            # waiting for a positional arg
            if self.argConstraint:
                self.argConstraint.checkToken(typebyte, size)
            return
        if self.argname is None:
            # waiting for the name of a keyword arg
            if typebyte not in (tokens.STRING, tokens.VOCAB):
                raise BananaError("kwarg name must be a STRING")
            # TODO: limit to longest argument name of the method?
            return
        # waiting for the value of a kwarg
        if self.argConstraint:
            self.argConstraint.checkToken(typebyte, size)

    def doOpen(self, opentype):
        # Propagate the current argument's constraint into the child unslicer.
        if self.argConstraint:
            self.argConstraint.checkOpentype(opentype)
        unslicer = self.open(opentype)
        if unslicer:
            if self.argConstraint:
                unslicer.setConstraint(self.argConstraint)
        return unslicer

    def receiveChild(self, token, ready_deferred=None):
        if self.debug:
            log.msg("%s.receiveChild: %s %s %s %s %s args=%s kwargs=%s" %
                    (self, self.closed, self.num_unreferenceable_children,
                     len(self._ready_deferreds), token, ready_deferred,
                     self.args, self.kwargs))
        if self.numargs is None:
            # this token is the number of positional arguments
            assert isinstance(token, int)
            assert ready_deferred is None
            self.numargs = token
            if self.numargs:
                ms = self.methodSchema
                if ms:
                    accept, self.argConstraint = \
                            ms.getPositionalArgConstraint(0)
                    assert accept
            return
        if len(self.args) < self.numargs:
            # this token is a positional argument
            argvalue = token
            argpos = len(self.args)
            self.args.append(argvalue)
            if isinstance(argvalue, defer.Deferred):
                # this may occur if the child is a gift which has not
                # resolved yet.
                self.num_unreferenceable_children += 1
                argvalue.addCallback(self.updateChild, argpos)
            if ready_deferred:
                if self.debug:
                    log.msg("%s.receiveChild got an unready posarg" % self)
                self._ready_deferreds.append(ready_deferred)
            if len(self.args) < self.numargs:
                # more to come
                ms = self.methodSchema
                if ms:
                    nextargnum = len(self.args)
                    accept, self.argConstraint = \
                            ms.getPositionalArgConstraint(nextargnum)
                    assert accept
            return
        if self.argname is None:
            # this token is the name of a keyword argument
            assert ready_deferred is None
            self.argname = token
            # if the argname is invalid, this may raise Violation
            ms = self.methodSchema
            if ms:
                accept, self.argConstraint = \
                        ms.getKeywordArgConstraint(self.argname,
                                                   self.numargs,
                                                   self.kwargs.keys())
                assert accept
            return
        # this token is the value of a keyword argument
        argvalue = token
        self.kwargs[self.argname] = argvalue
        if isinstance(argvalue, defer.Deferred):
            self.num_unreferenceable_children += 1
            argvalue.addCallback(self.updateChild, self.argname)
        if ready_deferred:
            if self.debug:
                log.msg("%s.receiveChild got an unready kwarg" % self)
            self._ready_deferreds.append(ready_deferred)
        self.argname = None
        return

    def updateChild(self, obj, which):
        # one of our arguments has just now become referenceable. Normal
        # types can't trigger this (since the arguments to a method form a
        # top-level serialization domain), but special Unslicers might. For
        # example, the Gift unslicer will eventually provide us with a
        # RemoteReference, but for now all we get is a Deferred as a
        # placeholder.
        if self.debug:
            log.msg("%s.updateChild, [%s] became referenceable: %s" %
                    (self, which, obj))
        # `which` is an int for positional args, a string for kwargs.
        if isinstance(which, int):
            self.args[which] = obj
        else:
            self.kwargs[which] = obj
        self.num_unreferenceable_children -= 1
        if self.num_unreferenceable_children == 0:
            if self._all_children_are_referenceable_d:
                self._all_children_are_referenceable_d.callback(None)
        return obj

    def receiveClose(self):
        if self.debug:
            log.msg("%s.receiveClose: %s %s %s" %
                    (self, self.closed, self.num_unreferenceable_children,
                     len(self._ready_deferreds)))
        if (self.numargs is None or
            len(self.args) < self.numargs or
            self.argname is not None):
            raise BananaError("'arguments' sequence ended too early")
        self.closed = True
        # Collect everything that must resolve before the call can run.
        dl = []
        if self.num_unreferenceable_children:
            d = self._all_children_are_referenceable_d = defer.Deferred()
            dl.append(d)
        dl.extend(self._ready_deferreds)
        ready_deferred = None
        if dl:
            ready_deferred = AsyncAND(dl)
        return self, ready_deferred

    def describe(self):
        s = "<arguments"
        if self.numargs is not None:
            if len(self.args) < self.numargs:
                s += " arg[%d]" % len(self.args)
            else:
                if self.argname is not None:
                    s += " arg[%s]" % self.argname
                else:
                    s += " arg[?]"
        if self.closed:
            s += " closed"
        # TODO: it would be nice to indicate if we still have unready
        # children
        s += ">"
        return s
class CallUnslicer(slicer.ScopedUnslicer):
    """
    Receives a 'call' sequence: reqID, objID, methodname, then an
    'arguments' sub-sequence. Produces an InboundDelivery which the broker
    queues until all arguments are ready.
    """
    debug = False

    def start(self, count):
        # start=0:reqID, 1:objID, 2:methodname, 3: arguments
        self.stage = 0
        self.reqID = None
        self.obj = None
        self.interface = None
        self.methodname = None
        self.methodSchema = None # will be a MethodArgumentsConstraint
        self._ready_deferreds = []

    def checkToken(self, typebyte, size):
        # TODO: limit strings by returning a number instead of None
        if self.stage == 0:
            if typebyte != tokens.INT:
                raise BananaError("request ID must be an INT")
        elif self.stage == 1:
            if typebyte not in (tokens.INT, tokens.NEG):
                raise BananaError("object ID must be an INT/NEG")
        elif self.stage == 2:
            if typebyte not in (tokens.STRING, tokens.VOCAB):
                raise BananaError("method name must be a STRING")
            # TODO: limit to longest method name of self.obj in the interface
        elif self.stage == 3:
            if typebyte != tokens.OPEN:
                raise BananaError("arguments must be an 'arguments' sequence")
        else:
            raise BananaError("too many objects given to CallUnslicer")

    def doOpen(self, opentype):
        # checkToken insures that this can only happen when we're receiving
        # an arguments object, so we don't have to bother checking self.stage
        assert self.stage == 3
        unslicer = self.open(opentype)
        if self.methodSchema:
            unslicer.setConstraint(self.methodSchema)
        return unslicer

    def reportViolation(self, f):
        # if the Violation is because we received an ABORT, then we know
        # that the sender knows there was a problem, so don't respond.
        if f.value.args[0] == "ABORT received":
            return f
        # if the Violation was raised after we know the reqID, we can send
        # back an Error.
        if self.stage > 0:
            self.broker.callFailed(f, self.reqID)
        return f # give up our sequence

    def receiveChild(self, token, ready_deferred=None):
        assert not isinstance(token, defer.Deferred)
        if self.debug:
            log.msg("%s.receiveChild [s%d]: %s" %
                    (self, self.stage, repr(token)))

        if self.stage == 0: # reqID
            # we don't yet know which reqID to send any failure to
            assert ready_deferred is None
            self.reqID = token
            self.stage = 1
            if self.reqID != 0:
                assert self.reqID not in self.broker.activeLocalCalls
                self.broker.activeLocalCalls[self.reqID] = self
            return

        if self.stage == 1: # objID
            # this might raise an exception if objID is invalid
            assert ready_deferred is None
            self.objID = token
            try:
                self.obj = self.broker.getMyReferenceByCLID(token)
            except KeyError:
                raise Violation("unknown CLID %d" % (token,))
            #iface = self.broker.getRemoteInterfaceByName(token)
            # negative CLIDs denote bound methods, which carry no interface
            if self.objID < 0:
                self.interface = None
            else:
                self.interface = self.obj.getInterface()
            self.stage = 2
            return

        if self.stage == 2: # methodname
            # validate the methodname, get the schema. This may raise an
            # exception for unknown methods
            # must find the schema, using the interfaces
            # TODO: getSchema should probably be in an adapter instead of in
            # a pb.Referenceable base class. Old-style (unconstrained)
            # flavors.Referenceable should be adapted to something which
            # always returns None
            # TODO: make this faster. A likely optimization is to take a
            # tuple of components.getInterfaces(obj) and use it as a cache
            # key. It would be even faster to use obj.__class__, but that
            # would probably violate the expectation that instances can
            # define their own __implements__ (independently from their
            # class). If this expectation were to go away, a quick
            # obj.__class__ -> RemoteReferenceSchema cache could be built.
            assert ready_deferred is None
            self.stage = 3
            if self.objID < 0:
                # the target is a bound method, ignore the methodname
                self.methodSchema = getattr(self.obj, "methodSchema", None)
                self.methodname = None # TODO: give it something useful
                if self.broker.requireSchema and not self.methodSchema:
                    why = "This broker does not accept unconstrained " + \
                          "method calls"
                    raise Violation(why)
                return
            self.methodname = token
            if self.interface:
                # they are calling an interface+method pair
                ms = self.interface.get(self.methodname)
                if not ms:
                    why = "method '%s' not defined in %s" % \
                          (self.methodname, self.interface.__remote_name__)
                    raise Violation(why)
                self.methodSchema = ms
            return

        if self.stage == 3: # arguments
            assert isinstance(token, ArgumentUnslicer)
            self.allargs = token
            # queue the message. It will not be executed until all the
            # arguments are ready. The .args list and .kwargs dict may change
            # before then.
            if ready_deferred:
                self._ready_deferreds.append(ready_deferred)
            self.stage = 4
            return

    def receiveClose(self):
        if self.stage != 4:
            raise BananaError("'call' sequence ended too early")
        # time to create the InboundDelivery object so we can queue it
        delivery = InboundDelivery(self.broker, self.reqID, self.obj,
                                   self.interface, self.methodname,
                                   self.methodSchema,
                                   self.allargs)
        ready_deferred = None
        if self._ready_deferreds:
            ready_deferred = AsyncAND(self._ready_deferreds)
        return delivery, ready_deferred

    def describe(self):
        # Include as much as we have parsed so far, keyed off self.stage.
        s = "<methodcall"
        if self.stage == 0:
            pass
        if self.stage >= 1:
            s += " reqID=%d" % self.reqID
        if self.stage >= 2:
            s += " obj=%s" % (self.obj,)
            ifacename = "[none]"
            if self.interface:
                ifacename = self.interface.__remote_name__
            s += " iface=%s" % ifacename
        if self.stage >= 3:
            s += " methodname=%s" % self.methodname
        s += ">"
        return s
class AnswerSlicer(slicer.ScopedSlicer):
    """Serializes a successful method response: the request ID followed by
    the results object."""
    opentype = ('answer',)

    def __init__(self, reqID, results, methodname="?"):
        # NOTE(review): reqID 0 is never answered — presumably reserved for
        # fire-and-forget calls; confirm against the broker's call path.
        assert reqID != 0
        slicer.ScopedSlicer.__init__(self, None)
        self.reqID = reqID
        self.results = results
        self.methodname = methodname  # only used for describe()

    def sliceBody(self, streamable, banana):
        yield self.reqID
        yield self.results

    def describe(self):
        return "<answer-%s-to-%s>" % (self.reqID, self.methodname)
class AnswerUnslicer(slicer.ScopedUnslicer):
    """
    Receives an ('answer', reqID, results) sequence and completes the
    matching PendingRequest once the results are fully resolved.
    """
    request = None
    resultConstraint = None
    haveResults = False

    def start(self, count):
        slicer.ScopedUnslicer.start(self, count)
        self._ready_deferreds = []
        self._child_deferred = None

    def checkToken(self, typebyte, size):
        if self.request is None:
            if typebyte != tokens.INT:
                raise BananaError("request ID must be an INT")
        elif not self.haveResults:
            if self.resultConstraint:
                try:
                    self.resultConstraint.checkToken(typebyte, size)
                # FIX: `except Violation, v:` is Python-2-only syntax; the
                # `as` form is accepted by Python 2.6+ and Python 3.
                except Violation as v:
                    # improve the error message
                    if v.args:
                        # this += gives me a TypeError "object doesn't
                        # support item assignment", which confuses me
                        #v.args[0] += " in inbound method results"
                        why = v.args[0] + " in inbound method results"
                        v.args = (why,)
                    else:
                        v.args = ("in inbound method results",)
                    raise # this will errback the request
        else:
            raise BananaError("stop sending me stuff!")

    def doOpen(self, opentype):
        if self.resultConstraint:
            self.resultConstraint.checkOpentype(opentype)
            # TODO: improve the error message
        unslicer = self.open(opentype)
        if unslicer:
            if self.resultConstraint:
                unslicer.setConstraint(self.resultConstraint)
        return unslicer

    def receiveChild(self, token, ready_deferred=None):
        if self.request is None:
            # first child: the request ID
            assert not isinstance(token, defer.Deferred)
            assert ready_deferred is None
            reqID = token
            # may raise Violation for bad reqIDs
            self.request = self.broker.getRequest(reqID)
            self.resultConstraint = self.request.constraint
        else:
            # second child: the results (possibly still a Deferred)
            if isinstance(token, defer.Deferred):
                self._child_deferred = token
            else:
                self._child_deferred = defer.succeed(token)
            if ready_deferred:
                self._ready_deferreds.append(ready_deferred)
            self.haveResults = True

    def reportViolation(self, f):
        # if the Violation was received after we got the reqID, we can tell
        # the broker it was an error
        if self.request is not None:
            self.request.fail(f) # local violation
        return f # give up our sequence

    def receiveClose(self):
        # three things must happen before our request is complete:
        #  receiveClose has occurred
        #  the receiveChild object deferred (if any) has fired
        #  ready_deferred has finished
        # If ready_deferred errbacks, provide its failure object to the
        # request. If not, provide the request with whatever receiveChild
        # got.
        if not self._child_deferred:
            raise BananaError("Answer didn't include an answer")
        if self._ready_deferreds:
            d = AsyncAND(self._ready_deferreds)
        else:
            d = defer.succeed(None)

        def _ready(res):
            return self._child_deferred
        d.addCallback(_ready)

        def _done(res):
            self.request.complete(res)
        def _fail(f):
            # we hit here if any of the _ready_deferreds fail (i.e a Gift
            # failed to resolve), or if the _child_deferred fails (not sure
            # how this could happen). I think it's ok to return a local
            # exception (instead of a RemoteException) for both.
            self.request.fail(f)
        d.addCallbacks(_done, _fail)
        return None, None

    def describe(self):
        if self.request:
            return "Answer(req=%s)" % self.request.reqID
        return "Answer(req=?)"
class ErrorSlicer(slicer.ScopedSlicer):
    """Serializes an error response: the request ID followed by a Failure."""
    opentype = ('error',)

    def __init__(self, reqID, f):
        slicer.ScopedSlicer.__init__(self, None)
        assert isinstance(f, failure.Failure)
        self.reqID = reqID
        self.f = f

    def sliceBody(self, streamable, banana):
        for token in (self.reqID, self.f):
            yield token

    def describe(self):
        return "<error-%s>" % self.reqID
class ErrorUnslicer(slicer.ScopedUnslicer):
    """Receives an ('error', reqID, Failure) sequence and errbacks the
    matching outstanding request."""
    request = None
    fConstraint = FailureConstraint()
    gotFailure = False

    def checkToken(self, typebyte, size):
        if self.request is None:
            if typebyte != tokens.INT:
                raise BananaError("request ID must be an INT")
            return
        if self.gotFailure:
            raise BananaError("stop sending me stuff!")
        self.fConstraint.checkToken(typebyte, size)

    def doOpen(self, opentype):
        self.fConstraint.checkOpentype(opentype)
        child = self.open(opentype)
        if child:
            child.setConstraint(self.fConstraint)
        return child

    def reportViolation(self, f):
        # a failure while receiving the failure. A bit daft, really.
        if self.request is not None:
            self.request.fail(f)
        return f # give up our sequence

    def receiveChild(self, token, ready_deferred=None):
        assert not isinstance(token, defer.Deferred)
        assert ready_deferred is None
        if self.request is None:
            # may raise BananaError for bad reqIDs
            self.request = self.broker.getRequest(token)
        else:
            self.failure = token
            self.gotFailure = True

    def receiveClose(self):
        f = self.failure
        # Unless the broker opted into raw remote exception types, wrap the
        # failure so callers can distinguish remote from local errors.
        if not self.broker._expose_remote_exception_types:
            f = wrap_remote_failure(f)
        self.request.fail(f)
        return None, None

    def describe(self):
        if self.request is None:
            return "<error-?>"
        return "<error-%s>" % self.request.reqID
def truncate(s, limit):
    """Clip *s* so the result is under *limit* characters, marking elision
    with a trailing '..'. Falsy values (None, "") pass through unchanged."""
    assert limit > 3
    if not s or len(s) <= limit:
        return s
    return s[:limit - 3] + ".."
# failures are sent as Copyables
class FailureSlicer(slicer.BaseSlicer):
    """
    Serializes a twisted Failure as a 'copyable' carrying a bounded amount
    of state: type, value, traceback, and parents.
    """
    slices = failure.Failure
    classname = "twisted.python.failure.Failure"

    def slice(self, streamable, banana):
        self.streamable = streamable
        yield 'copyable'
        yield self.classname
        state = self.getStateToCopy(self.obj, banana)
        # FIX: dict.iteritems() is Python-2-only; items() iterates the same
        # pairs here (state is a small plain dict) and works on Python 3.
        for k, v in state.items():
            yield k
            yield v

    def describe(self):
        return "<%s>" % self.classname

    def getStateToCopy(self, obj, broker):
        """
        Build the limited state dict sent over the wire for Failure `obj`.
        Values are stringified and truncated; the traceback is included only
        when the broker allows unsafeTracebacks.
        """
        #state = obj.__dict__.copy()
        #state['tb'] = None
        #state['frames'] = []
        #state['stack'] = []
        state = {}
        # string exceptions show up as obj.value == None and
        # isinstance(obj.type, str). Normal exceptions show up as obj.value
        # == text and obj.type == exception class. We need to make sure we
        # can handle both.
        if isinstance(obj.value, failure.Failure):
            # TODO: how can this happen? I got rid of failure2Copyable, so
            # if this case is possible, something needs to replace it
            raise RuntimeError("not implemented yet")
            #state['value'] = failure2Copyable(obj.value, banana.unsafeTracebacks)
        elif isinstance(obj.type, str):
            state['value'] = str(obj.value)
            state['type'] = obj.type # a string
        else:
            state['value'] = str(obj.value) # Exception instance
            state['type'] = reflect.qual(obj.type) # Exception class
        # TODO: I suspect that f.value may be getting a copy of the
        # traceback, because I've seen it be 1819 bytes at one point. I had
        # assumed that it was just the exception name plus args: whatever
        # Exception.__repr__ returns.
        state['value'] = truncate(state['value'], 1000)
        state['type'] = truncate(state['type'], 200)

        if broker.unsafeTracebacks:
            if isinstance(obj.type, str):
                stack = "getTraceback() not available for string exceptions\n"
            else:
                stack = obj.getTraceback()
            state['traceback'] = stack
            # TODO: provide something with globals and locals and HTML and
            # all that cool stuff
        else:
            state['traceback'] = 'Traceback unavailable\n'

        # The last few lines are often the most interesting. If we need to
        # truncate this, grab the first few lines and then as much of the
        # tail as we can get.
        if len(state['traceback']) > 1900:
            state['traceback'] = (state['traceback'][:700] +
                                  "\n\n-- TRACEBACK ELIDED --\n\n"
                                  + state['traceback'][-1200:])

        parents = obj.parents[:]
        if parents:
            for i, value in enumerate(parents):
                parents[i] = truncate(value, 200)
        state['parents'] = parents
        return state
class CopiedFailure(failure.Failure, copyable.RemoteCopyOldStyle):
    # this is a RemoteCopyOldStyle because you can't raise new-style
    # instances as exceptions.
    """I am a shadow of some remote Failure instance. I contain less
    information than the original did.
    You can still extract a (brief) printable traceback from me. My .parents
    attribute is a list of strings describing the class of the exception
    that I contain, just like the real Failure had, so my trap() and check()
    methods work fine. My .type and .value attributes are string
    representations of the original exception class and exception instance,
    respectively. The most significant effect is that you cannot access
    f.value.args, and should instead just use f.value .
    My .frames and .stack attributes are empty, although this may change in
    the future (and with the cooperation of the sender).
    """
    nonCyclic = True
    stateSchema = FailureConstraint()

    def __init__(self):
        # Deliberately skip failure.Failure.__init__: all state arrives
        # later via setCopyableState().
        copyable.RemoteCopyOldStyle.__init__(self)

    def __getstate__(self):
        s = failure.Failure.__getstate__(self)
        # the ExceptionLikeString we use in self.type is not pickleable, so
        # replace it with the same sort of string that we use in the wire
        # protocol.
        if not isinstance(self.type, str):
            s['type'] = reflect.qual(self.type)
        return s

    def __setstate__(self, state):
        self.setCopyableState(state)

    def setCopyableState(self, state):
        """Install received wire state (type, value, traceback, parents)."""
        #self.__dict__.update(state)
        self.__dict__ = state
        # state includes: type, value, traceback, parents
        #self.type = state['type']
        #self.value = state['value']
        #self.traceback = state['traceback']
        #self.parents = state['parents']
        self.tb = None
        self.frames = []
        self.stack = []
        # MAYBE: for native exception types, be willing to wire up a
        # reference to the real exception class. For other exception types,
        # our .type attribute will be a string, which (from a Failure's point
        # of view) looks as if someone raised an old-style string exception.
        # This is here so that trial will properly render a CopiedFailure
        # that comes out of a test case (since it unconditionally does
        # reflect.qual(f.type)
        # ACTUALLY: replace self.type with a class that looks a lot like the
        # original exception class (meaning that reflect.qual() will return
        # the same string for this as for the original). If someone calls our
        # .trap method, resulting in a new Failure with contents copied from
        # this one, then the new Failure.printTraceback will attempt to use
        # reflect.qual() on our self.type, so it needs to be a class instead
        # of a string.
        assert isinstance(self.type, str)
        typepieces = self.type.split(".")
        class ExceptionLikeString:
            pass
        self.type = ExceptionLikeString
        self.type.__module__ = ".".join(typepieces[:-1])
        self.type.__name__ = typepieces[-1]

    def __str__(self):
        return "[CopiedFailure instance: %s]" % self.getBriefTraceback()

    # NOTE(review): consumed by failure.Failure's traceback-printing
    # machinery — presumably marks this instance as reconstituted state;
    # confirm against the Twisted version in use.
    pickled = 1

    def printTraceback(self, file=None, elideFrameworkCode=0,
                       detail='default'):
        # We only carry the pre-rendered traceback string, so just emit it.
        if file is None: file = twlog.logerr
        file.write("Traceback from remote host -- ")
        file.write(self.traceback)
# Arrange for incoming FailureSlicer payloads to be deserialized as
# CopiedFailure instances on this side of the wire.
copyable.registerRemoteCopy(FailureSlicer.classname, CopiedFailure)
class CopiedFailureSlicer(FailureSlicer):
    # A calls B. B calls C. C fails and sends a Failure to B. B gets a
    # CopiedFailure and sends it to A. A should get a CopiedFailure too. This
    # class lives on B and slices the CopiedFailure as it is sent to A.
    slices = CopiedFailure

    def getStateToCopy(self, obj, broker):
        """Re-serialize an already-copied Failure for the next hop."""
        state = {key: getattr(obj, key) for key in ('value', 'type', 'parents')}
        if broker.unsafeTracebacks:
            state['traceback'] = obj.traceback
        else:
            state['traceback'] = "Traceback unavailable\n"
        if not isinstance(state['type'], str):
            # Collapse an exception class into its dotted name for the wire.
            state['type'] = reflect.qual(state['type']) # Exception class
        return state
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import time
import os
import IECore
import Gaffer
import GafferImage
import GafferScene
import GafferRenderMan
import GafferRenderManTest
@unittest.skipIf( "TRAVIS" in os.environ, "No license available on Travis" )
class InteractiveRenderManRenderTest( GafferRenderManTest.RenderManTestCase ) :
def __colorAtUV( self, image, uv ) :
    """Sample the RGB colour of *image* at normalised UV coordinates."""
    evaluator = IECore.ImagePrimitiveEvaluator( image )
    sample = evaluator.createResult()
    evaluator.pointAtUV( uv, sample )
    channels = [ sample.floatPrimVar( image[c] ) for c in ( "R", "G", "B" ) ]
    return IECore.Color3f( *channels )
def testLights( self ) :
    """An interactive render must pick up light parameter edits, honour
    pause/resume, and respect the updateLights toggle."""
    s = Gaffer.ScriptNode()
    s["l"] = GafferRenderMan.RenderManLight()
    s["l"].loadShader( "pointlight" )
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
    s["l"]["transform"]["translate"]["z"].setValue( 1 )
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["l"]["out"] )
    s["g"]["in"][1].setInput( s["p"]["out"] )
    s["g"]["in"][2].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    s["s"].loadShader( "matte" )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    # normalise by the red channel so only the colour ratio is compared
    self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
    # adjust a parameter, give it time to update, and check the output
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
    # pause it, adjust a parameter, wait, and check that nothing changed
    s["r"]["state"].setValue( s["r"].State.Paused )
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
    # unpause it, wait, and check that the update happened
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
    # turn off light updates, adjust a parameter, wait, and check nothing happened
    s["r"]["updateLights"].setValue( False )
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0.25, 0.5, 1 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[0], IECore.Color3f( 1, 0.5, 0.25 ) )
    # turn light updates back on and check that it updates
    s["r"]["updateLights"].setValue( True )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
    # stop the render, tweak a parameter and check that nothing happened
    s["r"]["state"].setValue( s["r"].State.Stopped )
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c / c[2], IECore.Color3f( 0.25, 0.5, 1 ) )
def testAttributes( self ) :
    """Shader parameter edits must reach the running render, and the
    updateAttributes toggle must gate them."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.1, -0.1, 0 ) )
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    # constant-colour shader compiled from the test fixtures
    s["s"].loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/flat.sl" ) )
    s["s"]["parameters"]["c"].setValue( IECore.Color3f( 1, 0.5, 0.25 ) )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 1, 0.5, 0.25 ) )
    # adjust a shader parameter, wait, and check that it changed
    s["s"]["parameters"]["c"].setValue( IECore.Color3f( 1, 1, 1 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 1 ) )
    # turn off shader updates, do the same, and check that it hasn't changed
    s["r"]["updateAttributes"].setValue( False )
    s["s"]["parameters"]["c"].setValue( IECore.Color3f( 0.5 ) )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 1 ) )
    # turn shader updates back on, and check that it updates
    s["r"]["updateAttributes"].setValue( True )
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 0.5 ) )
def testScopesDontLeak( self ) :
    """Editing a shader assigned (via a filter) to one object must not
    affect the appearance of an unfiltered sibling object."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["p"]["transform"]["translate"].setValue( IECore.V3f( -0.6, -0.1, 0 ) )
    s["p1"] = GafferScene.Plane()
    s["p1"]["transform"]["translate"].setValue( IECore.V3f( 0.6, 0.1, 0 ) )
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 2 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["p1"]["out"] )
    s["g"]["in"][2].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    s["s"].loadShader( self.compileShader( os.path.dirname( __file__ ) + "/shaders/flat.sl" ) )
    s["s"]["parameters"]["c"].setValue( IECore.Color3f( 1, 0, 0 ) )
    # restrict the shader assignment to the first plane only
    s["f"] = GafferScene.PathFilter()
    s["f"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["a"]["filter"].setInput( s["f"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlanes",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["options"]["renderResolution"]["value"].setValue( IECore.V2i( 512 ) )
    s["o"]["options"]["renderResolution"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output.
    # we should have a red plane on the left, and a facing ratio
    # shaded plane on the right, because we attached no shader to the
    # second plane.
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
        IECore.V2f( 0.25, 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 1, 0, 0 ) )
    c1 = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
        IECore.V2f( 0.75, 0.5 ),
    )
    # facing ratio: bright and grey (all channels equal)
    self.assertTrue( c1[0] > 0.9 )
    self.assertEqual( c1[0], c1[1] )
    self.assertEqual( c1[0], c1[2] )
    # adjust a shader parameter, wait, and check that the plane
    # on the left changed. check that the plane on the right didn't
    # change at all.
    s["s"]["parameters"]["c"].setValue( IECore.Color3f( 0, 1, 0 ) )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
        IECore.V2f( 0.25, 0.5 ),
    )
    self.assertEqual( c, IECore.Color3f( 0, 1, 0 ) )
    c1 = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlanes" ),
        IECore.V2f( 0.75, 0.5 ),
    )
    self.assertTrue( c1[0] > 0.9 )
    self.assertEqual( c1[0], c1[1] )
    self.assertEqual( c1[0], c1[2] )
def testContext( self ):
    """The render node must adopt its script's context when parented and
    revert to a private context when removed.

    Uses assertFalse/assertTrue rather than the deprecated failIf/failUnless
    aliases, which were removed from unittest in Python 3.11.
    """
    s = Gaffer.ScriptNode()
    r = GafferRenderMan.InteractiveRenderManRender()
    # an unparented node still carries its own (non-None) context
    self.assertNotEqual( r.getContext(), None )
    self.assertFalse( r.getContext().isSame( s.context() ) )
    s["r"] = r
    # once parented, the node shares the script's context
    self.assertTrue( r.getContext().isSame( s.context() ) )
    s.removeChild( r )
    # and reverts to a private context after removal
    self.assertFalse( r.getContext().isSame( s.context() ) )
def testAddLight( self ) :
    """A light added to the scene while rendering must contribute to the
    image without a restart."""
    s = Gaffer.ScriptNode()
    s["l"] = GafferRenderMan.RenderManLight()
    s["l"].loadShader( "pointlight" )
    s["l"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 1, 0, 0 ) )
    s["l"]["transform"]["translate"]["z"].setValue( 1 )
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["l"]["out"] )
    s["g"]["in"][1].setInput( s["p"]["out"] )
    s["g"]["in"][2].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    s["s"].loadShader( "matte" )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    # only the red light so far
    self.assertEqual( c / c[0], IECore.Color3f( 1, 0, 0 ) )
    # add a light
    s["l2"] = GafferRenderMan.RenderManLight()
    s["l2"].loadShader( "pointlight" )
    s["l2"]["parameters"]["lightcolor"].setValue( IECore.Color3f( 0, 1, 0 ) )
    s["l2"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"]["in"][3].setInput( s["l2"]["out"] )
    # give it time to update, and check the output
    time.sleep( 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    # red + green lights now both illuminate the plane
    self.assertEqual( c / c[0], IECore.Color3f( 1, 1, 0 ) )
def testRemoveLight( self ) :
    """Disabling and re-enabling a light node must remove and restore its
    illumination in the running render."""
    s = Gaffer.ScriptNode()
    s["l"] = GafferRenderMan.RenderManLight()
    s["l"].loadShader( "pointlight" )
    s["l"]["transform"]["translate"]["z"].setValue( 1 )
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["l"]["out"] )
    s["g"]["in"][1].setInput( s["p"]["out"] )
    s["g"]["in"][2].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    s["s"].loadShader( "matte" )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    # lit: the red channel must be non-zero
    self.assertNotEqual( c[0], 0.0 )
    # remove the light by disabling it
    s["l"]["enabled"].setValue( False )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c[0], 0.0 )
    # enable it again
    s["l"]["enabled"].setValue( True )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertNotEqual( c[0], 0.0 )
def testHideLight( self ) :
    """Toggling a light's visibility attribute must remove and restore its
    illumination in the running render."""
    s = Gaffer.ScriptNode()
    s["l"] = GafferRenderMan.RenderManLight()
    s["l"].loadShader( "pointlight" )
    s["l"]["transform"]["translate"]["z"].setValue( 1 )
    # visibility is controlled via StandardAttributes rather than "enabled"
    s["v"] = GafferScene.StandardAttributes()
    s["v"]["attributes"]["visibility"]["enabled"].setValue( True )
    s["v"]["in"].setInput( s["l"]["out"] )
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["v"]["out"] )
    s["g"]["in"][1].setInput( s["p"]["out"] )
    s["g"]["in"][2].setInput( s["c"]["out"] )
    s["s"] = GafferRenderMan.RenderManShader()
    s["s"].loadShader( "matte" )
    s["a"] = GafferScene.ShaderAssignment()
    s["a"]["in"].setInput( s["g"]["out"] )
    s["a"]["shader"].setInput( s["s"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["a"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertNotEqual( c[0], 0.0 )
    # remove the light by hiding it
    s["v"]["attributes"]["visibility"]["value"].setValue( False )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertEqual( c[0], 0.0 )
    # put the light back by showing it
    s["v"]["attributes"]["visibility"]["value"].setValue( True )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertNotEqual( c[0], 0.0 )
def testRenderingDuringScriptDeletion( self ) :
    """Deleting the script while a render streams to a Display node must not
    deadlock (regression test for a GIL deadlock on teardown)."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["c"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                # stream over a local socket to a live GafferImage.Display
                "driverType" : "ClientDisplayDriver",
                "displayHost" : "localhost",
                "displayPort" : "1559",
                "remoteDisplayType" : "GafferImage::GafferDisplayDriver",
                "quantize" : IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
            }
        )
    )
    s["d"]["in"].setInput( s["g"]["out"] )
    s["m"] = GafferImage.Display()
    # connect a python function to the Display node image and data
    # received signals. this emulates what the UI does.
    def __displayCallback( plug ) :
        pass
    c = (
        s["m"].imageReceivedSignal().connect( __displayCallback ),
        s["m"].dataReceivedSignal().connect( __displayCallback ),
    )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["in"].setInput( s["d"]["out"] )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 1 )
    # delete the script while the render is still progressing. when
    # this occurs, deletion of the render node will be triggered, which
    # will in turn stop the render. this may flush data to the display,
    # in which case it will emit its data and image received signals
    # on a separate thread. if we're still holding the gil on the main
    # thread when this happens, we'll get a deadlock.
    del s
def testMoveCamera( self ) :
    """Moving the render camera during an interactive render must update
    the image."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["c"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["g"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    # plane fills the frame centre (default facing-ratio shading)
    self.assertAlmostEqual( c[1], 1, delta = 0.001 )
    # move the camera so it can't see the plane, and check the output
    s["c"]["transform"]["translate"]["x"].setValue( 2 )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertAlmostEqual( c[0], 0 )
    # move the camera back and recheck
    s["c"]["transform"]["translate"]["x"].setValue( 0 )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertAlmostEqual( c[1], 1, delta = 0.001 )
def testMoveCoordinateSystem( self ) :
    """Transforming a coordinate system referenced by a shader must update
    the running render."""
    # shader that draws a dot at the position of a named coordinate system
    shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coordSysDot.sl" )
    s = Gaffer.ScriptNode()
    s["plane"] = GafferScene.Plane()
    s["shader"] = GafferRenderMan.RenderManShader()
    s["shader"].loadShader( shader )
    s["shader"]["parameters"]["coordSys"].setValue( "/group/coordinateSystem" )
    s["shaderAssignment"] = GafferScene.ShaderAssignment()
    s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
    s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
    s["camera"] = GafferScene.Camera()
    s["camera"]["transform"]["translate"]["z"].setValue( 1 )
    s["coordSys"] = GafferScene.CoordinateSystem()
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["shaderAssignment"]["out"] )
    s["g"]["in"][1].setInput( s["camera"]["out"] )
    s["g"]["in"][2].setInput( s["coordSys"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["g"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to finish, and check the output
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.5 ),
    )
    self.assertAlmostEqual( c[1], 1, delta = 0.001 )
    # move the coordinate system, and check the output
    s["coordSys"]["transform"]["translate"]["x"].setValue( 0.1 )
    time.sleep( 2 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.6, 0.5 ),
    )
    self.assertAlmostEqual( c[0], 1 )
    c = self.__colorAtUV(
        IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
        IECore.V2f( 0.6, 0.7 ),
    )
    self.assertAlmostEqual( c[0], 0 )
    # scale the coordinate system to cover everything, and check again
    s["coordSys"]["transform"]["scale"].setValue( IECore.V3f( 100 ) )
    time.sleep( 2 )
    for p in [
        IECore.V2f( 0.5 ),
        IECore.V2f( 0.15 ),
        IECore.V2f( 0.85 ),
    ] :
        c = self.__colorAtUV(
            IECore.ImageDisplayDriver.storedImage( "myLovelyPlane" ),
            p,
        )
        self.assertAlmostEqual( c[0], 1, delta = 0.001 )
def testDeleteWhilePaused( self ) :
    """Deleting a script whose render is paused must not hang on teardown."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["c"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["g"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to get going, then pause it
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    s["r"]["state"].setValue( s["r"].State.Paused )
    # delete everything, and check that we don't hang
    del s
def testChangeInputWhilePaused( self ) :
    """Rewiring the render node's input while paused must not hang, and the
    render must be restartable afterwards."""
    s = Gaffer.ScriptNode()
    s["p"] = GafferScene.Plane()
    s["c"] = GafferScene.Camera()
    s["c"]["transform"]["translate"]["z"].setValue( 1 )
    s["g"] = GafferScene.Group()
    s["g"]["in"][0].setInput( s["p"]["out"] )
    s["g"]["in"][1].setInput( s["c"]["out"] )
    s["d"] = GafferScene.Outputs()
    s["d"].addOutput(
        "beauty",
        IECore.Display(
            "test",
            "ieDisplay",
            "rgba",
            {
                "quantize" : IECore.FloatVectorData( [ 0, 0, 0, 0 ] ),
                "driverType" : "ImageDisplayDriver",
                "handle" : "myLovelyPlane",
            }
        )
    )
    s["d"]["in"].setInput( s["g"]["out"] )
    s["o"] = GafferScene.StandardOptions()
    s["o"]["options"]["renderCamera"]["value"].setValue( "/group/camera" )
    s["o"]["options"]["renderCamera"]["enabled"].setValue( True )
    s["o"]["in"].setInput( s["d"]["out"] )
    s["r"] = GafferRenderMan.InteractiveRenderManRender()
    s["r"]["in"].setInput( s["o"]["out"] )
    # start a render, give it time to get going, then pause it
    s["r"]["state"].setValue( s["r"].State.Running )
    time.sleep( 2 )
    s["r"]["state"].setValue( s["r"].State.Paused )
    # change the input to the render node, and check that we don't hang
    s["o2"] = GafferScene.StandardOptions()
    s["o2"]["in"].setInput( s["o"]["out"] )
    s["r"]["in"].setInput( s["o2"]["out"] )
    # start the render again, so we know we're not just testing
    # the same thing as testDeleteWhilePaused().
    s["r"]["state"].setValue( s["r"].State.Running )
def testChildNamesUpdateCrash( self ) :
    """Regression test: changing child names under a large instanced
    hierarchy while rendering must not crash."""
    # build a scene with a reasonably large hierarchy:
    plane = GafferScene.Plane()
    plane["dimensions"].setValue( IECore.V2f( 1000,1000 ) )
    seeds = GafferScene.Seeds()
    seeds["in"].setInput( plane["out"] )
    seeds["parent"].setValue("/plane")
    seeds["density"].setValue(0.01)
    sphere = GafferScene.Sphere()
    instancer = GafferScene.Instancer()
    instancer["parent"].setValue("/plane/seeds")
    instancer["in"].setInput( seeds["out"] )
    instancer["instance"].setInput( sphere["out"] )
    r = GafferRenderMan.InteractiveRenderManRender()
    r["in"].setInput( instancer["out"] )
    r["state"].setValue( r.State.Running )
    # change the child names a couple of times. There was a problem
    # where a childnames check was happening asynchronously, leading
    # to a crash, so we're gonna check this has been fixed:
    seeds["density"].setValue(0)
    seeds["density"].setValue(0.01)
    r["state"].setValue( r.State.Stopped )
# Allow this test module to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
# Dtypes exercised by the parameterised split tests: both float widths and
# both complex widths.
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
                dtypes.complex128)
class SplitOpTest(test.TestCase):
def _makeData(self, shape, dtype):
    """Return random test data of *shape* cast to *dtype*.

    Complex dtypes get a non-trivial imaginary part so real/imag paths
    are both exercised.
    """
    values = np.random.rand(*shape)
    data = values.astype(dtype.as_numpy_dtype)
    if dtype.is_complex:
        data = data - 1j * data
    return data
@test_util.run_deprecated_v1
def testShapeInference(self):
    """Static and runtime shape checking for array_ops.split."""
    model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))
    # check that we fail during static shape inference if sizes are known
    with self.assertRaises(ValueError):
        # pylint: disable=expression-not-assigned
        array_ops.split(model_input, [4], axis=1)[0]
        # pylint: enable=expression-not-assigned
    model_input = array_ops.placeholder(dtypes.float32)
    inp = np.zeros((1, 10))
    # check that we still fail at runtime if the shapes were unknown
    with self.cached_session() as sess:
        with self.assertRaises(errors_impl.InvalidArgumentError):
            sess.run(array_ops.split(model_input, [4]), {model_input: inp})
    # scalar Tensors are not permitted as num_splits
    for axis in [0, -2]:
        with self.cached_session() as sess:
            with self.assertRaises(ValueError):
                # pylint: disable=expression-not-assigned
                sess.run(
                    array_ops.split(
                        array_ops.ones([4, 4]),
                        num_or_size_splits=constant_op.constant(2),
                        axis=axis))
                # pylint: enable=expression-not-assigned
    # test that none split dimensions remain, even if we don't know how
    # the split_dim will be split, but we do know the axis
    result = array_ops.split(
        array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
    self.assertEqual(result[0].shape[1], 2)
    self.assertEqual(result[1].shape[1], 2)
    self.assertEqual(result[2].shape[1], 2)
    model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    result = array_ops.split(model_input2, [2, 2], axis=0)[0]
    with self.cached_session() as sess:
        sess.run(result, feed_dict={model_input2: np.ones([4, 2])})
@test_util.run_deprecated_v1
def testFailWithoutExplicitNum(self):
size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
with self.session() as sess:
with self.assertRaises(ValueError) as context:
sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
self.assertIn("Cannot infer argument `num` from shape",
str(context.exception))
@test_util.run_in_graph_and_eager_modes
def testExplicitNum(self):
size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Eager and Graph modes raise different exceptions
with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
array_ops.split(value, size_splits, num=4)
r = self.evaluate(array_ops.split(value, size_splits, num=3))
self.assertAllEqual(r[0], value[0:2])
self.assertAllEqual(r[1], value[2:4])
self.assertAllEqual(r[2], value[4:])
@test_util.run_in_graph_and_eager_modes
def testListOfScalarTensors(self):
a = math_ops.cast(5, dtypes.int32)
b = math_ops.cast(6, dtypes.int32)
value = np.random.rand(11, 11)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(value, [a, b]))
self.assertAllEqual(result[0], value[0:5, :])
self.assertAllEqual(result[1], value[5:, :])
def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(16, 25)
else:
num_split = np.random.randint(2, 8)
size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
shape[split_dim] = np.sum(size_splits)
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
def _testSpecialCasesVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [4], 0))
self.assertAllEqual(result[0], inp)
result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
self.assertAllEqual(result[0], inp[0:1, :])
self.assertAllEqual(result[1], inp[1:4, :])
def _testHugeNumberOfTensorsVariable(self, dtype):
num_split = 1000
size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
shape = [3, np.sum(size_splits)]
split_dim = 1
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
for i in range(num_split):
slices[split_dim] = slice(offset, offset + size_splits[i])
offset += size_splits[i]
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testSpecialCasesVariable(self):
self._testSpecialCasesVariable()
for dtype in _TEST_DTYPES:
self._testHugeNumberOfTensorsVariable(dtype)
@test_util.run_in_graph_and_eager_modes
def testDegenerateVariable(self):
inp = np.random.rand(4, 4).astype("f")
with test_util.device(use_gpu=True):
result = self.evaluate(array_ops.split(inp, [-1, 4], 0))
self.assertAllEqual(result[0], inp[0:0, :])
self.assertAllEqual(result[1], inp[0:4, :])
result = self.evaluate(array_ops.split(inp, [4, -1], 0))
self.assertAllEqual(result[0], inp[0:4, :])
self.assertAllEqual(result[1], inp[4:4, :])
result = self.evaluate(array_ops.split(inp, [-1, 4], 1))
self.assertAllEqual(result[0], inp[:, 0:0])
self.assertAllEqual(result[1], inp[:, 0:4])
result = self.evaluate(array_ops.split(inp, [4, -1], 1))
self.assertAllEqual(result[0], inp[:, 0:4])
self.assertAllEqual(result[1], inp[:, 4:4])
def _testGradientsSimpleVariable(self, dtype):
inp = self._makeData((4, 4), dtype)
with test_util.device(use_gpu=True):
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(inp_tensor, [1, 3], 1)
inp_grads = [
self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
result = self.evaluate(grad)
self.assertAllEqual(result[:, 0:1], inp_grads[0])
self.assertAllEqual(result[:, 1:4], inp_grads[1])
@test_util.run_deprecated_v1
def testOutputShape(self):
for axis in [1, -1]:
with self.cached_session():
tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
size_splits = [3, 7, 2]
outputs = array_ops.split(tensor, size_splits, axis)
for i, output in enumerate(outputs):
self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])
def _compare(self, x, dim, num):
np_ans = np.split(x, num, dim)
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(np_ans))
self.assertEqual(num, len(out))
for i in range(num):
self.assertAllEqual(np_ans[i], out[i])
self.assertShapeEqual(np_ans[i], tf_ans[i])
@test_util.run_in_graph_and_eager_modes
def testSplitRows(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 0, 4)
@test_util.run_in_graph_and_eager_modes
def testSplitCols(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((4, 4), dtype)
self._compare(inp, 1, 4)
def _testEmpty(self, x, dim, num, expected_shape):
with test_util.device(use_gpu=True):
tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
out = self.evaluate(tf_ans)
self.assertEqual(x.size, 0)
self.assertEqual(len(out), num)
for i in range(num):
self.assertEqual(out[i].shape, expected_shape)
self.assertEqual(expected_shape, tf_ans[i].get_shape())
@test_util.run_in_graph_and_eager_modes
def testEmpty(self):
# Note: np.split returns a rank-0 empty ndarray
# if the input ndarray is empty.
for dtype in _TEST_DTYPES:
inp = self._makeData((8, 0, 21), dtype)
self._testEmpty(inp, 0, 2, (4, 0, 21))
self._testEmpty(inp, 0, 4, (2, 0, 21))
self._testEmpty(inp, 1, 4, (8, 0, 21))
self._testEmpty(inp, 2, 3, (8, 0, 7))
self._testEmpty(inp, 2, 7, (8, 0, 3))
@test_util.run_in_graph_and_eager_modes
def testIdentity(self):
for dtype in _TEST_DTYPES:
inp = self._makeData((2, 2, 2), dtype)
self._compare(inp, 0, 1)
self._compare(inp, 1, 1)
self._compare(inp, 2, 1)
@test_util.run_in_graph_and_eager_modes
def testSplitDim0(self):
for dtype in _TEST_DTYPES:
self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
self._compare(self._makeData((6, 7, 9), dtype), 0, 3)
def _RunAndVerify(self, dtype, large_num_splits=False):
# Random dims of rank 5
shape = np.random.randint(0, 5, size=5)
split_dim = np.random.randint(-5, 5)
if large_num_splits:
num_split = np.random.randint(9, 15)
else:
num_split = np.random.randint(2, 8)
shape[split_dim] = np.random.randint(2, 5) * num_split
inp = self._makeData(shape, dtype)
with test_util.device(use_gpu=True):
result = self.evaluate(
array_ops.split(
value=inp, num_or_size_splits=num_split, axis=split_dim))
slices = [slice(0, x) for x in shape]
offset = 0
length = shape[split_dim] // num_split
for i in range(num_split):
slices[split_dim] = slice(offset, offset + length)
offset += length
self.assertAllEqual(result[i], inp[slices])
@test_util.run_in_graph_and_eager_modes
def testRandom(self):
for dtype in _TEST_DTYPES:
for _ in range(5):
self._RunAndVerify(dtype)
self._RunAndVerify(dtype, large_num_splits=True)
self._RunAndVerifyVariable(dtype)
self._RunAndVerifyVariable(dtype, large_num_splits=True)
def _testGradientsSimple(self, dtype):
inp = self._makeData((4, 4), dtype)
with self.cached_session():
inp_tensor = ops.convert_to_tensor(inp)
s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
inp_grads = [self._makeData((4, 1), dtype)for _ in range(4)]
grad_tensors = [constant_op.constant(x) for x in inp_grads]
grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
result = self.evaluate(grad)
for i in range(4):
self.assertAllEqual(result[:, i:i + 1], inp_grads[i])
@test_util.run_deprecated_v1
def testGradientsAll(self):
for dtype in _TEST_DTYPES:
self._testGradientsSimple(dtype)
self._testGradientsSimpleVariable(dtype)
@test_util.run_deprecated_v1
def testShapeFunctionEdgeCases(self):
# split_dim greater than rank of input.
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)
# split dim less than -(rank of input)
with self.assertRaises(ValueError):
array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)
# num_split does not evenly divide the size in split_dim.
with self.assertRaisesRegex(ValueError, "should evenly divide"):
array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)
# Unknown split_dim.
splits = array_ops.split(
value=[[0, 1, 2, 3]],
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual([None, None], s.get_shape().as_list())
# Unknown split_dim and input shape.
splits = array_ops.split(
value=array_ops.placeholder(dtypes.float32),
num_or_size_splits=4,
axis=array_ops.placeholder(dtypes.int32))
for s in splits:
self.assertEqual(None, s.get_shape().ndims)
@test_util.run_deprecated_v1
def testVariableShapeFunction(self):
# size_splits too big
with self.assertRaises(ValueError):
array_ops.split([0, 1], [3, -1], axis=0)
# Correct inference of variable dimension
s0, s1 = array_ops.split([0, 1, 2], [2, -1], axis=0)
assert s0.shape.as_list() == [2]
assert s1.shape.as_list() == [1]
@test_util.run_deprecated_v1
def testNonexistentDimTensor(self):
x = array_ops.placeholder(dtypes.int32)
values = np.zeros([5, 30])
splits = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegex(ValueError, "Cannot infer"):
y = array_ops.split(values, splits, axis=x)
splits = array_ops.placeholder(dtypes.int32, [3])
y = array_ops.split(values, splits, axis=x)
with self.session() as sess:
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
"must have exactly one element"):
sess.run(y, {x: np.array([], dtype=np.int32), splits: [4, 11, 15]})
@test_util.run_in_graph_and_eager_modes
def testNegativeSizes(self):
x = constant_op.constant([1, 2, 3], dtypes.float32)
# A size of -1 signifies to determine size based on sum of other splits.
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Split size at index 1 must be >= .*. Got: -2"):
splits = [-1, -2]
self.evaluate(array_ops.split(x, splits, axis=0))
@test_util.run_in_graph_and_eager_modes
def testBadSplitSizes(self):
x = constant_op.constant([1, 2], dtypes.float32)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"Determined shape must either match input"
"|can't split axis"):
splits = [1, 2]
self.evaluate(array_ops.split(x, splits, axis=0))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import itertools
import operator
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import log
from six import moves
from neutron.common import exceptions as exc
from neutron.common import topics
from neutron.db import api as db_api
from neutron.i18n import _LI, _LW
from neutron.plugins.common import utils as plugin_utils
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import helpers
# Module-level logger for this type driver.
LOG = log.getLogger(__name__)
# RPC topic fragment used for tunnel endpoint notifications.
TUNNEL = 'tunnel'
def chunks(iterable, chunk_size):
    """Chunks data into chunk with size<=chunk_size."""
    source = iter(iterable)
    while True:
        piece = list(itertools.islice(source, chunk_size))
        if not piece:
            # Source exhausted: stop the generator.
            return
        yield piece
class TunnelTypeDriver(helpers.SegmentTypeDriver):
    """Define stable abstract interface for ML2 type drivers.

    tunnel type networks rely on tunnel endpoints. This class defines abstract
    methods to manage these endpoints.
    """

    # Number of rows inserted/deleted per SQL statement in sync_allocations().
    BULK_SIZE = 100

    def __init__(self, model):
        super(TunnelTypeDriver, self).__init__(model)
        # Name of the model column that stores the tunnel/segmentation id.
        self.segmentation_key = next(iter(self.primary_keys))

    @abc.abstractmethod
    def add_endpoint(self, ip, host):
        """Register the endpoint in the type_driver database.

        param ip: the IP address of the endpoint
        param host: the Host name of the endpoint
        """

    @abc.abstractmethod
    def get_endpoints(self):
        """Get every endpoint managed by the type_driver

        :returns a list of dict [{ip_address:endpoint_ip, host:endpoint_host},
         ..]
        """

    @abc.abstractmethod
    def get_endpoint_by_host(self, host):
        """Get endpoint for a given host managed by the type_driver

        param host: the Host name of the endpoint

        if host found in type_driver database
        :returns db object for that particular host
        else
        :returns None
        """

    @abc.abstractmethod
    def get_endpoint_by_ip(self, ip):
        """Get endpoint for a given tunnel ip managed by the type_driver

        param ip: the IP address of the endpoint

        if ip found in type_driver database
        :returns db object for that particular ip
        else
        :returns None
        """

    @abc.abstractmethod
    def delete_endpoint(self, ip):
        """Delete the endpoint in the type_driver database.

        param ip: the IP address of the endpoint
        """

    def _initialize(self, raw_tunnel_ranges):
        # Parse configured ranges once at startup, then reconcile the
        # allocation table with them.
        self.tunnel_ranges = []
        self._parse_tunnel_ranges(raw_tunnel_ranges, self.tunnel_ranges)
        self.sync_allocations()

    def _parse_tunnel_ranges(self, tunnel_ranges, current_range):
        """Parse 'min:max' strings into (min, max) int tuples.

        :raises NetworkTunnelRangeError: on malformed or out-of-range entries.
        """
        for entry in tunnel_ranges:
            entry = entry.strip()
            try:
                tun_min, tun_max = entry.split(':')
                tun_min = tun_min.strip()
                tun_max = tun_max.strip()
                tunnel_range = int(tun_min), int(tun_max)
            except ValueError as ex:
                raise exc.NetworkTunnelRangeError(tunnel_range=entry, error=ex)
            plugin_utils.verify_tunnel_range(tunnel_range, self.get_type())
            current_range.append(tunnel_range)
        LOG.info(_LI("%(type)s ID ranges: %(range)s"),
                 {'type': self.get_type(), 'range': current_range})

    @oslo_db_api.wrap_db_retry(
        max_retries=db_api.MAX_RETRIES, retry_on_deadlock=True)
    def sync_allocations(self):
        """Reconcile the allocation table with the configured tunnel ranges."""
        # determine current configured allocatable tunnel ids
        tunnel_ids = set()
        for tun_min, tun_max in self.tunnel_ranges:
            tunnel_ids |= set(moves.range(tun_min, tun_max + 1))

        tunnel_id_getter = operator.attrgetter(self.segmentation_key)
        tunnel_col = getattr(self.model, self.segmentation_key)
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            # remove from table unallocated tunnels not currently allocatable
            # fetch results as list via all() because we'll be iterating
            # through them twice
            # NOTE(review): with_lockmode is deprecated in newer SQLAlchemy
            # in favor of with_for_update -- confirm before upgrading.
            allocs = (session.query(self.model).
                      with_lockmode("update").all())

            # collect those vnis that needs to be deleted from db
            unallocateds = (
                tunnel_id_getter(a) for a in allocs if not a.allocated)
            to_remove = (x for x in unallocateds if x not in tunnel_ids)
            # Immediately delete tunnels in chunks. This leaves no work for
            # flush at the end of transaction
            for chunk in chunks(to_remove, self.BULK_SIZE):
                session.query(self.model).filter(
                    tunnel_col.in_(chunk)).delete(synchronize_session=False)

            # collect vnis that need to be added
            existings = {tunnel_id_getter(a) for a in allocs}
            missings = list(tunnel_ids - existings)
            for chunk in chunks(missings, self.BULK_SIZE):
                bulk = [{self.segmentation_key: x, 'allocated': False}
                        for x in chunk]
                session.execute(self.model.__table__.insert(), bulk)

    def is_partial_segment(self, segment):
        # Tunnel segments are "partial" when no segmentation id is requested.
        return segment.get(api.SEGMENTATION_ID) is None

    def validate_provider_segment(self, segment):
        """Reject provider attributes that are meaningless for tunnels."""
        physical_network = segment.get(api.PHYSICAL_NETWORK)
        if physical_network:
            msg = _("provider:physical_network specified for %s "
                    "network") % segment.get(api.NETWORK_TYPE)
            raise exc.InvalidInput(error_message=msg)

        for key, value in segment.items():
            if value and key not in [api.NETWORK_TYPE,
                                     api.SEGMENTATION_ID]:
                # BUG FIX: the message was previously built as a
                # (format, dict) tuple and raised un-interpolated;
                # interpolate it with % so the real values are shown.
                msg = (_("%(key)s prohibited for %(tunnel)s provider network")
                       % {'key': key,
                          'tunnel': segment.get(api.NETWORK_TYPE)})
                raise exc.InvalidInput(error_message=msg)

    def reserve_provider_segment(self, session, segment):
        """Allocate the requested (or any free) tunnel id for a segment."""
        if self.is_partial_segment(segment):
            alloc = self.allocate_partially_specified_segment(session)
            if not alloc:
                raise exc.NoNetworkAvailable()
        else:
            segmentation_id = segment.get(api.SEGMENTATION_ID)
            alloc = self.allocate_fully_specified_segment(
                session, **{self.segmentation_key: segmentation_id})
            if not alloc:
                raise exc.TunnelIdInUse(tunnel_id=segmentation_id)
        return {api.NETWORK_TYPE: self.get_type(),
                api.PHYSICAL_NETWORK: None,
                api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
                api.MTU: self.get_mtu()}

    def allocate_tenant_segment(self, session):
        """Allocate any free tunnel id; return None when the pool is empty."""
        alloc = self.allocate_partially_specified_segment(session)
        if not alloc:
            return
        return {api.NETWORK_TYPE: self.get_type(),
                api.PHYSICAL_NETWORK: None,
                api.SEGMENTATION_ID: getattr(alloc, self.segmentation_key),
                api.MTU: self.get_mtu()}

    def release_segment(self, session, segment):
        """Return a tunnel id to the pool, or drop it if outside the ranges."""
        tunnel_id = segment[api.SEGMENTATION_ID]

        inside = any(lo <= tunnel_id <= hi for lo, hi in self.tunnel_ranges)

        info = {'type': self.get_type(), 'id': tunnel_id}
        with session.begin(subtransactions=True):
            query = (session.query(self.model).
                     filter_by(**{self.segmentation_key: tunnel_id}))
            if inside:
                # Ids inside the configured ranges are kept and marked free.
                count = query.update({"allocated": False})
                if count:
                    LOG.debug("Releasing %(type)s tunnel %(id)s to pool",
                              info)
            else:
                # Ids outside the ranges have no row to reuse; delete them.
                count = query.delete()
                if count:
                    LOG.debug("Releasing %(type)s tunnel %(id)s outside pool",
                              info)

        if not count:
            LOG.warning(_LW("%(type)s tunnel %(id)s not found"), info)

    def get_allocation(self, session, tunnel_id):
        """Return the allocation row for tunnel_id, or None."""
        return (session.query(self.model).
                filter_by(**{self.segmentation_key: tunnel_id}).
                first())

    def get_mtu(self, physical_network=None):
        """Return the smallest applicable MTU, or 0 when unconstrained."""
        seg_mtu = super(TunnelTypeDriver, self).get_mtu()
        mtu = []
        if seg_mtu > 0:
            mtu.append(seg_mtu)
        if cfg.CONF.ml2.path_mtu > 0:
            mtu.append(cfg.CONF.ml2.path_mtu)
        return min(mtu) if mtu else 0
class EndpointTunnelTypeDriver(TunnelTypeDriver):
    """Tunnel type driver whose endpoints live in a dedicated DB table."""

    def __init__(self, segment_model, endpoint_model):
        super(EndpointTunnelTypeDriver, self).__init__(segment_model)
        self.endpoint_model = endpoint_model
        self.segmentation_key = next(iter(self.primary_keys))

    def get_endpoint_by_host(self, host):
        """Return the endpoint row registered for host, or None."""
        LOG.debug("get_endpoint_by_host() called for host %s", host)
        query = db_api.get_session().query(self.endpoint_model)
        return query.filter_by(host=host).first()

    def get_endpoint_by_ip(self, ip):
        """Return the endpoint row registered for tunnel ip, or None."""
        LOG.debug("get_endpoint_by_ip() called for ip %s", ip)
        query = db_api.get_session().query(self.endpoint_model)
        return query.filter_by(ip_address=ip).first()

    def delete_endpoint(self, ip):
        """Remove the endpoint registered for tunnel ip, if any."""
        LOG.debug("delete_endpoint() called for ip %s", ip)
        session = db_api.get_session()
        with session.begin(subtransactions=True):
            query = session.query(self.endpoint_model)
            query.filter_by(ip_address=ip).delete()

    def _get_endpoints(self):
        """Return a query over every registered endpoint."""
        LOG.debug("_get_endpoints() called")
        return db_api.get_session().query(self.endpoint_model)

    def _add_endpoint(self, ip, host, **kwargs):
        """Insert an endpoint row; on a duplicate ip return the existing row."""
        LOG.debug("_add_endpoint() called for ip %s", ip)
        session = db_api.get_session()
        try:
            endpoint = self.endpoint_model(ip_address=ip, host=host, **kwargs)
            endpoint.save(session)
        except db_exc.DBDuplicateEntry:
            # Another worker won the race; reuse its row.
            endpoint = (session.query(self.endpoint_model).
                        filter_by(ip_address=ip).one())
            LOG.warning(_LW("Endpoint with ip %s already exists"), ip)
        return endpoint
class TunnelRpcCallbackMixin(object):
    """Server-side RPC handler for agent tunnel endpoint registration."""

    def setup_tunnel_callback_mixin(self, notifier, type_manager):
        # Injected collaborators: agent notifier and the ML2 type manager.
        self._notifier = notifier
        self._type_manager = type_manager

    def tunnel_sync(self, rpc_context, **kwargs):
        """Update new tunnel.

        Updates the database with the tunnel IP. All listening agents will also
        be notified about the new tunnel IP.

        :raises InvalidInput: when tunnel_ip/tunnel_type are missing, the
            type is unsupported, or the IP is already bound to another host.
        """
        tunnel_ip = kwargs.get('tunnel_ip')
        if not tunnel_ip:
            msg = _("Tunnel IP value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            msg = _("Network type value needed by the ML2 plugin")
            raise exc.InvalidInput(error_message=msg)

        host = kwargs.get('host')
        driver = self._type_manager.drivers.get(tunnel_type)
        if driver:
            # The given conditional statements will verify the following
            # things:
            # 1. If host is not passed from an agent, it is a legacy mode.
            # 2. If passed host and tunnel_ip are not found in the DB,
            #    it is a new endpoint.
            # 3. If host is passed from an agent and it is not found in DB
            #    but the passed tunnel_ip is found, delete the endpoint
            #    from DB and add the endpoint with (tunnel_ip, host),
            #    it is an upgrade case.
            # 4. If passed host is found in DB and passed tunnel ip is not
            #    found, delete the endpoint belonging to that host and
            #    add endpoint with latest (tunnel_ip, host), it is a case
            #    where local_ip of an agent got changed.
            if host:
                host_endpoint = driver.obj.get_endpoint_by_host(host)
                ip_endpoint = driver.obj.get_endpoint_by_ip(tunnel_ip)

                if (ip_endpoint and ip_endpoint.host is None
                    and host_endpoint is None):
                    driver.obj.delete_endpoint(ip_endpoint.ip_address)
                elif (ip_endpoint and ip_endpoint.host != host):
                    # BUG FIX: the message was previously built as a
                    # (format, dict) tuple and raised un-interpolated;
                    # interpolate it with % so the real values are shown.
                    msg = (_("Tunnel IP %(ip)s in use with host %(host)s")
                           % {'ip': ip_endpoint.ip_address,
                              'host': ip_endpoint.host})
                    raise exc.InvalidInput(error_message=msg)
                elif (host_endpoint and host_endpoint.ip_address != tunnel_ip):
                    # Notify all other listening agents to delete stale tunnels
                    self._notifier.tunnel_delete(rpc_context,
                        host_endpoint.ip_address, tunnel_type)
                    driver.obj.delete_endpoint(host_endpoint.ip_address)

            tunnel = driver.obj.add_endpoint(tunnel_ip, host)
            tunnels = driver.obj.get_endpoints()
            entry = {'tunnels': tunnels}
            # Notify all other listening agents
            self._notifier.tunnel_update(rpc_context, tunnel.ip_address,
                                         tunnel_type)
            # Return the list of tunnels IP's to the agent
            return entry
        else:
            msg = _("Network type value '%s' not supported") % tunnel_type
            raise exc.InvalidInput(error_message=msg)
class TunnelAgentRpcApiMixin(object):
    """Agent-side RPC API for fanout tunnel create/delete notifications."""

    def _get_tunnel_update_topic(self):
        # Fanout topic every agent subscribes to for tunnel updates.
        return topics.get_topic_name(
            self.topic, TUNNEL, topics.UPDATE)

    def tunnel_update(self, context, tunnel_ip, tunnel_type):
        """Broadcast a tunnel_update to all listening agents."""
        topic = self._get_tunnel_update_topic()
        cctxt = self.client.prepare(topic=topic, fanout=True)
        cctxt.cast(context, 'tunnel_update', tunnel_ip=tunnel_ip,
                   tunnel_type=tunnel_type)

    def _get_tunnel_delete_topic(self):
        # Fanout topic every agent subscribes to for tunnel deletions.
        return topics.get_topic_name(
            self.topic, TUNNEL, topics.DELETE)

    def tunnel_delete(self, context, tunnel_ip, tunnel_type):
        """Broadcast a tunnel_delete to all listening agents."""
        topic = self._get_tunnel_delete_topic()
        cctxt = self.client.prepare(topic=topic, fanout=True)
        cctxt.cast(context, 'tunnel_delete', tunnel_ip=tunnel_ip,
                   tunnel_type=tunnel_type)
|
|
from configparser import RawConfigParser
from attic.remote import cache_if_remote
import msgpack
import os
import sys
from binascii import hexlify
import shutil
from .key import PlaintextKey
from .helpers import Error, get_cache_dir, decode_dict, st_mtime_ns, unhexlify, UpgradableLock, int_to_bigint, \
bigint_to_int
from .hashindex import ChunkIndex
class Cache(object):
    """Client Side cache

    Tracks chunk reference counts and known files for one repository,
    persisted under a per-repository directory with simple copy-based
    transactions (txn.tmp / txn.active).
    """

    class RepositoryReplay(Error):
        """Cache is newer than repository, refusing to continue"""

    class CacheInitAbortedError(Error):
        """Cache initialization aborted"""

    class RepositoryAccessAborted(Error):
        """Repository access aborted"""

    class EncryptionMethodMismatch(Error):
        """Repository encryption method changed since last access, refusing to continue
        """

    def __init__(self, repository, key, manifest, path=None, sync=True, warn_if_unencrypted=True):
        self.lock = None
        self.timestamp = None
        self.txn_active = False
        self.repository = repository
        self.key = key
        self.manifest = manifest
        self.path = path or os.path.join(get_cache_dir(), hexlify(repository.id).decode('ascii'))
        # Warn user before sending data to a never seen before unencrypted repository
        if not os.path.exists(self.path):
            if warn_if_unencrypted and isinstance(key, PlaintextKey):
                if not self._confirm('Warning: Attempting to access a previously unknown unencrypted repository',
                                     'ATTIC_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK'):
                    raise self.CacheInitAbortedError()
            self.create()
        self.open()
        # Warn user before sending data to a relocated repository
        if self.previous_location and self.previous_location != repository._location.canonical_path():
            msg = 'Warning: The repository at location {} was previously located at {}'.format(repository._location.canonical_path(), self.previous_location)
            if not self._confirm(msg, 'ATTIC_RELOCATED_REPO_ACCESS_IS_OK'):
                raise self.RepositoryAccessAborted()

        if sync and self.manifest.id != self.manifest_id:
            # If repository is older than the cache something fishy is going on
            if self.timestamp and self.timestamp > manifest.timestamp:
                raise self.RepositoryReplay()
            # Make sure an encrypted repository has not been swapped for an unencrypted repository
            if self.key_type is not None and self.key_type != str(key.TYPE):
                raise self.EncryptionMethodMismatch()
            self.sync()
            self.commit()

    def __del__(self):
        # Best-effort lock release if the caller forgot to close().
        self.close()

    def _confirm(self, message, env_var_override=None):
        """Interactively confirm a risky action; env var override wins.

        Returns False on non-interactive stdin or EOF.
        """
        print(message, file=sys.stderr)
        if env_var_override and os.environ.get(env_var_override):
            print("Yes (From {})".format(env_var_override))
            return True
        if not sys.stdin.isatty():
            return False
        try:
            answer = input('Do you want to continue? [yN] ')
        except EOFError:
            return False
        return answer and answer in 'Yy'

    def create(self):
        """Create a new empty cache at `path`
        """
        os.makedirs(self.path)
        with open(os.path.join(self.path, 'README'), 'w') as fd:
            fd.write('This is an Attic cache')
        config = RawConfigParser()
        config.add_section('cache')
        config.set('cache', 'version', '1')
        config.set('cache', 'repository', hexlify(self.repository.id).decode('ascii'))
        config.set('cache', 'manifest', '')
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            config.write(fd)
        ChunkIndex().write(os.path.join(self.path, 'chunks').encode('utf-8'))
        with open(os.path.join(self.path, 'files'), 'w') as fd:
            pass  # empty file

    def _do_open(self):
        """Load config, chunk index and lazy file cache state from disk."""
        self.config = RawConfigParser()
        self.config.read(os.path.join(self.path, 'config'))
        if self.config.getint('cache', 'version') != 1:
            # BUG FIX: the %s placeholder was never interpolated; include
            # the actual path in the error message.
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.id = self.config.get('cache', 'repository')
        self.manifest_id = unhexlify(self.config.get('cache', 'manifest'))
        self.timestamp = self.config.get('cache', 'timestamp', fallback=None)
        self.key_type = self.config.get('cache', 'key_type', fallback=None)
        self.previous_location = self.config.get('cache', 'previous_location', fallback=None)
        self.chunks = ChunkIndex.read(os.path.join(self.path, 'chunks').encode('utf-8'))
        # The files cache is loaded lazily by _read_files().
        self.files = None

    def open(self):
        """Take the cache lock and roll back any interrupted transaction."""
        if not os.path.isdir(self.path):
            raise Exception('%s Does not look like an Attic cache' % self.path)
        self.lock = UpgradableLock(os.path.join(self.path, 'config'), exclusive=True)
        self.rollback()

    def close(self):
        if self.lock:
            self.lock.release()
            self.lock = None

    def _read_files(self):
        """Load the known-files cache, aging every entry by one run."""
        self.files = {}
        self._newest_mtime = 0
        with open(os.path.join(self.path, 'files'), 'rb') as fd:
            u = msgpack.Unpacker(use_list=True)
            while True:
                data = fd.read(64 * 1024)
                if not data:
                    break
                u.feed(data)
                for path_hash, item in u:
                    # item[0] is the entry age; bump it each load so stale
                    # entries eventually expire in commit().
                    item[0] += 1
                    self.files[path_hash] = msgpack.packb(item)

    def begin_txn(self):
        # Initialize transaction snapshot
        txn_dir = os.path.join(self.path, 'txn.tmp')
        os.mkdir(txn_dir)
        shutil.copy(os.path.join(self.path, 'config'), txn_dir)
        shutil.copy(os.path.join(self.path, 'chunks'), txn_dir)
        shutil.copy(os.path.join(self.path, 'files'), txn_dir)
        # Atomic rename marks the snapshot as complete and the txn active.
        os.rename(os.path.join(self.path, 'txn.tmp'),
                  os.path.join(self.path, 'txn.active'))
        self.txn_active = True

    def commit(self):
        """Commit transaction
        """
        if not self.txn_active:
            return
        if self.files is not None:
            with open(os.path.join(self.path, 'files'), 'wb') as fd:
                for path_hash, item in self.files.items():
                    # Discard cached files with the newest mtime to avoid
                    # issues with filesystem snapshots and mtime precision
                    item = msgpack.unpackb(item)
                    if item[0] < 10 and bigint_to_int(item[3]) < self._newest_mtime:
                        msgpack.pack((path_hash, item), fd)
        self.config.set('cache', 'manifest', hexlify(self.manifest.id).decode('ascii'))
        self.config.set('cache', 'timestamp', self.manifest.timestamp)
        self.config.set('cache', 'key_type', str(self.key.TYPE))
        self.config.set('cache', 'previous_location', self.repository._location.canonical_path())
        with open(os.path.join(self.path, 'config'), 'w') as fd:
            self.config.write(fd)
        self.chunks.write(os.path.join(self.path, 'chunks').encode('utf-8'))
        os.rename(os.path.join(self.path, 'txn.active'),
                  os.path.join(self.path, 'txn.tmp'))
        shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False

    def rollback(self):
        """Roll back partial and aborted transactions
        """
        # Remove partial transaction
        if os.path.exists(os.path.join(self.path, 'txn.tmp')):
            shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        # Roll back active transaction
        txn_dir = os.path.join(self.path, 'txn.active')
        if os.path.exists(txn_dir):
            shutil.copy(os.path.join(txn_dir, 'config'), self.path)
            shutil.copy(os.path.join(txn_dir, 'chunks'), self.path)
            shutil.copy(os.path.join(txn_dir, 'files'), self.path)
            os.rename(txn_dir, os.path.join(self.path, 'txn.tmp'))
            if os.path.exists(os.path.join(self.path, 'txn.tmp')):
                shutil.rmtree(os.path.join(self.path, 'txn.tmp'))
        self.txn_active = False
        self._do_open()

    def sync(self):
        """Initializes cache by fetching and reading all archive indicies
        """
        def add(id, size, csize):
            try:
                count, size, csize = self.chunks[id]
                self.chunks[id] = count + 1, size, csize
            except KeyError:
                self.chunks[id] = 1, size, csize
        self.begin_txn()
        print('Initializing cache...')
        self.chunks.clear()
        unpacker = msgpack.Unpacker()
        repository = cache_if_remote(self.repository)
        for name, info in self.manifest.archives.items():
            archive_id = info[b'id']
            cdata = repository.get(archive_id)
            data = self.key.decrypt(archive_id, cdata)
            add(archive_id, len(data), len(cdata))
            archive = msgpack.unpackb(data)
            if archive[b'version'] != 1:
                raise Exception('Unknown archive metadata version')
            decode_dict(archive, (b'name',))
            print('Analyzing archive:', archive[b'name'])
            for key, chunk in zip(archive[b'items'], repository.get_many(archive[b'items'])):
                data = self.key.decrypt(key, chunk)
                add(key, len(data), len(chunk))
                unpacker.feed(data)
                for item in unpacker:
                    if b'chunks' in item:
                        for chunk_id, size, csize in item[b'chunks']:
                            add(chunk_id, size, csize)

    def add_chunk(self, id, data, stats):
        """Store `data` under `id` unless already present; returns (id, size, csize)."""
        if not self.txn_active:
            self.begin_txn()
        if self.seen_chunk(id):
            # Deduplicate: just bump the refcount.
            return self.chunk_incref(id, stats)
        size = len(data)
        data = self.key.encrypt(data)
        csize = len(data)
        self.repository.put(id, data, wait=False)
        self.chunks[id] = (1, size, csize)
        stats.update(size, csize, True)
        return id, size, csize

    def seen_chunk(self, id):
        # Refcount, or 0 when unknown.
        return self.chunks.get(id, (0, 0, 0))[0]

    def chunk_incref(self, id, stats):
        """Increment a chunk's refcount; returns (id, size, csize)."""
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        self.chunks[id] = (count + 1, size, csize)
        stats.update(size, csize, False)
        return id, size, csize

    def chunk_decref(self, id, stats):
        """Decrement a chunk's refcount, deleting it from the repo at zero."""
        if not self.txn_active:
            self.begin_txn()
        count, size, csize = self.chunks[id]
        if count == 1:
            del self.chunks[id]
            self.repository.delete(id, wait=False)
            stats.update(-size, -csize, True)
        else:
            self.chunks[id] = (count - 1, size, csize)
            stats.update(-size, -csize, False)

    def file_known_and_unchanged(self, path_hash, st):
        """Return cached chunk ids when size/mtime/inode all match, else None."""
        if self.files is None:
            self._read_files()
        entry = self.files.get(path_hash)
        if not entry:
            return None
        entry = msgpack.unpackb(entry)
        if entry[2] == st.st_size and bigint_to_int(entry[3]) == st_mtime_ns(st) and entry[1] == st.st_ino:
            # reset entry age
            entry[0] = 0
            self.files[path_hash] = msgpack.packb(entry)
            return entry[4]
        else:
            return None

    def memorize_file(self, path_hash, st, ids):
        # Entry: Age, inode, size, mtime, chunk ids
        mtime_ns = st_mtime_ns(st)
        self.files[path_hash] = msgpack.packb((0, st.st_ino, st.st_size, int_to_bigint(mtime_ns), ids))
        self._newest_mtime = max(self._newest_mtime, mtime_ns)
|
|
import numpy as np
from numpy import array
class Holder(object):
    """Empty attribute container used to bundle the generated test
    fixture arrays below (comment, name, data arrays, etc.)."""
    pass
data = Holder()
data.comment = 'generated data, divide by 1000'
data.name = 'data'
data.xo = array([[ -419, -731, -1306, -1294],
[ 6, 529, -200, -437],
[ -27, -833, -6, -564],
[ -304, -273, -502, -739],
[ 1377, -912, 927, 280],
[ -375, -517, -514, 49],
[ 247, -504, 123, -259],
[ 712, 534, -773, 286],
[ 195, -1080, 3256, -178],
[ -854, 75, -706, -1084],
[-1219, -612, -15, -203],
[ 550, -628, -483, -2686],
[ -365, 1376, -1266, 317],
[ -489, 544, -195, 431],
[ -656, 854, 840, -723],
[ 16, -1385, -880, -460],
[ 258, -2252, 96, 54],
[ 2049, -750, -1115, 381],
[ -65, 280, -777, 416],
[ 755, 82, -806, 1027],
[ -39, -170, -2134, 743],
[ -859, 780, 746, -133],
[ 762, 252, -450, -459],
[ -941, -202, 49, -202],
[ -54, 115, 455, 388],
[-1348, 1246, 1430, -480],
[ 229, -535, -1831, 1524],
[ -651, -167, 2116, 483],
[-1249, -1373, 888, -1092],
[ -75, -2162, 486, -496],
[ 2436, -1627, -1069, 162],
[ -63, 560, -601, 587],
[ -60, 1051, -277, 1323],
[ 1329, -1294, 68, 5],
[ 1532, -633, -923, 696],
[ 669, 895, -1762, -375],
[ 1129, -548, 2064, 609],
[ 1320, 573, 2119, 270],
[ -213, -412, -2517, 1685],
[ 73, -979, 1312, -1220],
[-1360, -2107, -237, 1522],
[ -645, 205, -543, -169],
[ -212, 1072, 543, -128],
[ -352, -129, -605, -904],
[ 511, 85, 167, -1914],
[ 1515, 1862, 942, 1622],
[ -465, 623, -495, -89],
[-1396, -979, 1758, 128],
[ -255, -47, 980, 501],
[-1282, -58, -49, -610],
[ -889, -1177, -492, 494],
[ 1415, 1146, 696, -722],
[ 1237, -224, -1609, -64],
[ -528, -1625, 231, 883],
[ -327, 1636, -476, -361],
[ -781, 793, 1882, 234],
[ -506, -561, 1988, -810],
[-1233, 1467, -261, 2164],
[ 53, 1069, 824, 2123],
[-1200, -441, -321, 339],
[ 1606, 298, -995, 1292],
[-1740, -672, -1628, -129],
[-1450, -354, 224, -657],
[-2556, 1006, -706, -1453],
[ -717, -463, 345, -1821],
[ 1056, -38, -420, -455],
[ -523, 565, 425, 1138],
[-1030, -187, 683, 78],
[ -214, -312, -1171, -528],
[ 819, 736, -265, 423],
[ 1339, 351, 1142, 579],
[ -387, -126, -1573, 2346],
[ 969, 2, 327, -134],
[ 163, 227, 90, 2021],
[ 1022, -1076, 174, 304],
[ 1042, 1317, 311, 880],
[ 2018, -840, 295, 2651],
[ -277, 566, 1147, -189],
[ 20, 467, 1262, 263],
[ -663, 1061, -1552, -1159],
[ 1830, 391, 2534, -199],
[ -487, 752, -1061, 351],
[-2138, -556, -367, -457],
[ -868, -411, -559, 726],
[ 1770, 819, -892, -363],
[ 553, -736, -169, -490],
[ 388, -503, 809, -821],
[ -516, -1452, -192, 483],
[ 493, 2904, 1318, 2591],
[ 175, 584, -1001, 1675],
[ 1316, -1596, -460, 1500],
[ 1212, 214, -644, -696],
[ -501, 338, 1197, -841],
[ -587, -469, -1101, 24],
[-1205, 1910, 659, 1232],
[ -150, 398, 594, 394],
[ 34, -663, 235, -334],
[-1580, 647, 239, -351],
[-2177, -345, 1215, -1494],
[ 1923, 329, -152, 1128]])
princomp1 = Holder()
princomp1.comment = 'mlab.princomp(x, nout=3)'
princomp1.factors = array([[-0.83487832815382, -1.75681522344645, -0.50882660928949, -0.59661466511045],
[-0.18695786699253, -0.10732909330422, 0.23971799542554, -0.75468286946853],
[-0.57403949255604, -0.39667006607544, -0.7927838094217 , 0.02652621881328],
[-0.60828125251513, -0.75979035898754, -0.20148864200404, -0.40278856050237],
[ 0.55997928601548, 0.88869370546643, -1.55474410845786, 0.23033958281961],
[-0.18023239851961, -0.72398923145328, -0.07056264751117, 0.29292391015376],
[-0.189029743271 , -0.05888596186903, -0.63882208368513, -0.05682951829677],
[ 0.94694345324739, -0.33448036234864, 0.16665867708366, -0.67190948646953],
[-1.355171899399 , 2.58899695901774, -1.53157119606928, 0.93743278678908],
[-1.06797676403358, -1.01894055566289, 0.29181722134698, -0.65261957826524],
[-1.08919199915725, -0.5395876105009 , 0.18846579824378, 0.61935728909742],
[-1.36598849770841, -1.00986627679465, -1.6090477073157 , -1.82708847399443],
[ 0.561511276285 , -0.74919011595195, 1.49872898209738, -0.80588545345232],
[ 0.04805787176428, -0.05522267212748, 0.82943784435024, 0.01537039050312],
[-1.12006939155398, 0.73462770352006, 0.58868274831601, -0.67786987413505],
[-0.26087838474316, -1.33362289066951, -1.02932517860259, 0.24865839951801],
[-0.24666198784909, -0.58247196399204, -1.78971960966265, 1.18908143657302],
[ 1.80675592845666, -0.73341258204636, -1.45012544705912, -0.44875329121288],
[ 0.4794281391435 , -0.57169295903913, 0.48557628591056, -0.11638075289238],
[ 1.39425263398653, -0.3665732682294 , 0.06937942447187, 0.06683559082703],
[ 1.11015707065101, -1.87631329249852, 0.48914958604867, 0.11096926802212],
[-0.85159530389901, 0.68543874135386, 0.86736021483251, -0.17641002537865],
[ 0.34109015314112, -0.25431311542374, -0.36804227540019, -0.95824474920131],
[-0.86253950274987, -0.28796613689709, 0.30820634958709, 0.27228599921917],
[ 0.01266190412089, 0.48559962017667, 0.14020630700546, 0.18517398749337],
[-1.56345869427724, 1.27917754070516, 1.25640847929385, -0.36055181722313],
[ 1.62834293379132, -1.51923809467869, 0.27754976407182, 0.79362967384835],
[-0.94400458067084, 1.77733054371289, 0.03595731772774, 0.96570688640992],
[-2.11906234438329, -0.13226430948321, -0.78992396115366, 0.66362103473975],
[-0.94372331181891, -0.37502966791165, -1.77907324401749, 0.97801542954941],
[ 1.76575198740032, -0.92309597844861, -2.3872195277998 , -0.21817018301121],
[ 0.57418226616373, -0.2925257318724 , 0.71180507312941, -0.13937750314467],
[ 1.01654397566275, 0.28855305878842, 1.25119859389106, 0.11257524396004],
[ 0.58979013567212, -0.06866577243092, -1.74447546690995, 0.13917953157575],
[ 1.62072087150051, -0.5835145063711 , -0.99029357957459, -0.06334029436682],
[ 0.893493925425 , -1.23995040005948, 0.40058503790479, -1.49029669097391],
[ 0.26990527585623, 2.03399854143898, -1.2335089890881 , 0.54010061879979],
[ 0.33504096277444, 2.42394994177782, -0.6643863358332 , -0.42471161848557],
[ 1.69952476943058, -2.1707037237448 , 0.79694026483866, 0.88177267205969],
[-1.41498253257895, 0.65248089992094, -1.40045976465378, -0.12045332880702],
[-0.22640706265253, -0.94114558124915, -0.18868114063537, 2.67652245892778],
[-0.37493712386529, -0.61985213642068, 0.5383582946365 , -0.17931524703276],
[-0.30437796317839, 0.74252786648649, 0.73255373596822, -0.64993745548429],
[-0.68788283675831, -0.84714762684627, -0.10721753874211, -0.59777382822281],
[-1.00667616522842, -0.06670525233919, -0.92973707141688, -1.60742284256649],
[ 1.95220512266515, 2.05751265066695, 0.79640648143073, -0.59608004229343],
[-0.15504464969388, -0.3882079443045 , 0.75049869361395, -0.44163703260023],
[-1.6686863460652 , 0.96325894557423, -0.16453379247258, 1.4560996746313 ],
[-0.25573631707529, 0.88265554068571, 0.08984550855664, 0.53561910563178],
[-1.29430028690793, -0.48042359291447, 0.49318558750269, 0.03689178852848],
[-0.34391235307349, -0.95154811896716, -0.09714022474353, 1.19792361047367],
[ 0.34367523316975, 1.16641214447854, -0.39528838072965, -1.72565643987406],
[ 1.23887392116229, -1.27474554996132, -0.65859544264097, -0.81757560038832],
[-0.17739006831099, -0.29057501559843, -0.62533324788504, 1.7092669546224 ],
[-0.08610919021307, -0.06524996994257, 1.3018284944661 , -1.28219607271255],
[-0.95717735853496, 1.79841555744597, 0.75799149339397, 0.23542916575208],
[-1.70175078442029, 1.33831900642462, -0.73979048943944, 0.26157699746442],
[ 0.84631686421106, 0.32029666775009, 2.51638540556813, 0.90367536744335],
[ 1.22693220256582, 1.45665385966518, 1.27480662666555, 0.78786331120259],
[-0.59251239046609, -0.660398245535 , 0.53258334042042, 0.81248748854679],
[ 2.22723057510913, -0.22856960444805, -0.15586801032885, -0.26957090658609],
[-0.83192612439183, -2.11983096548132, 0.75319973501664, 0.62196293266702],
[-1.577627210601 , -0.3747136286972 , 0.31736538266249, 0.30187577548949],
[-2.28230005998543, -1.17283119424281, 1.83780755209602, -0.75928026219594],
[-1.90574204329052, -0.34197417196464, -0.59978910354131, -0.68240235236779],
[ 0.48132729275936, -0.2524965456322 , -0.75271273075 , -0.89651237903089],
[ 0.26961427953002, 0.62968227134995, 0.99324664633985, 0.59917742452108],
[-0.95910506784013, 0.31907970712369, 0.35568397653203, 0.60155535679072],
[-0.18528259973205, -1.31831013869974, -0.09749195643548, -0.39885348684496],
[ 0.9608404103702 , 0.23727553971573, 0.20695289013955, -0.65281918968052],
[ 0.85302395609555, 1.5303724004181 , -0.56440186223081, -0.27348033453255],
[ 1.72786301913767, -1.14859994931789, 1.16222121440674, 1.39284961909257],
[ 0.37711527308989, 0.47231886947072, -0.69423676772182, -0.53515102147655],
[ 1.35642227654922, 0.53204130038923, 0.69844068787197, 1.04544871561741],
[ 0.57797880484094, 0.08044525072063, -1.32634695941334, 0.35179408060132],
[ 1.29437232500619, 1.07461562326311, 0.54545226737269, -0.6836610122092 ],
[ 2.74736726573105, 0.90881277479338, -0.98342785084735, 1.38171127911719],
[-0.67749479829901, 1.10093727650063, 0.28416704607992, -0.24984509303044],
[-0.24513961858774, 1.32098977907584, 0.16904762754153, 0.00886790270539],
[-0.5392290825383 , -1.43851802284774, 1.0064737206577 , -1.52649870396689],
[ 0.19486366400459, 2.77236000318994, -1.32201258472682, -0.75922390642504],
[ 0.33271229220962, -0.78464273816827, 1.09930224781861, -0.32184679755027],
[-1.72814706427698, -1.09275114767838, 0.7451569579997 , 0.72871211772761],
[-0.035506207751 , -0.72161367235521, 0.52828318684787, 0.87177739169758],
[ 1.31224955134141, -0.22742530984642, -0.44682270809773, -1.72769462581607],
[-0.07125058353119, -0.36850925227739, -1.01188688859296, -0.24962251325969],
[-0.69840680770104, 0.4925285516285 , -1.0255829922787 , -0.36214090052941],
[-0.2530614593082 , -0.68595709316063, -0.56882710610856, 1.25787365685572],
[ 1.93782484285419, 2.67095706598253, 2.4023579082791 , -0.09112046819432],
[ 1.57782156817208, -0.39819017512275, 1.01938038947667, 0.39718992194809],
[ 1.6839282738726 , -0.37808442385434, -1.36566197748227, 1.22029200163339],
[ 0.54652714502605, -0.38206797548206, -0.70554510441189, -1.31224358889695],
[-1.30026063006148, 0.90642495630747, 0.02711437433058, -0.44482098905042],
[-0.1239033493518 , -1.29112252171673, 0.18092802221218, 0.22673242779457],
[ 0.01152882540055, 1.13242883415094, 2.34980443084773, 0.17712319903618],
[-0.0505195424414 , 0.6807219067402 , 0.37771832345982, 0.0842510459176 ],
[-0.44230076745505, -0.07002728477811, -0.6716520563439 , 0.09637247949641],
[-1.31245480585229, -0.01674966464909, 1.21063252882651, -0.03927111631335],
[-2.94268586886381, 0.20925236551048, 0.30321714445262, 0.22027672852006],
[ 2.04121905977187, 0.58496246543101, -0.5192457175416 , -0.37212298770116]])
princomp1.values = array([[ 1.29489288337888],
[ 1.12722515391348],
[ 0.94682423958163],
[ 0.65890241090379]])
princomp1.name = 'princomp1'
princomp1.coef = array([[ 0.65989917631713, 0.22621848650964, -0.5882833472413 , -0.40899997165748],
[ 0.15824945056105, 0.3189419948895 , 0.71689623797385, -0.5994104597619 ],
[-0.3488766362785 , 0.90294049788532, -0.17151017930575, 0.1832151967827 ],
[ 0.64635538301471, 0.17832458477678, 0.33251578268108, 0.66321815082225]])
princomp2 = Holder()
princomp2.comment = 'mlab.princomp(x[:20,], nout=3)'
princomp2.factors = array([[ 0.74592631465403, -0.92093638563647, 1.10020213969681, -0.20234362115983],
[ 0.40379773814409, -0.23694214086306, -0.53526599590626, 0.48048423978257],
[-0.43826559396565, -0.26267383420164, 0.35939862515391, -0.15176605914773],
[ 0.29427656853499, -0.56363285386285, 0.19525662206552, -0.0384830001072 ],
[-1.4327917748351 , 1.18414191887856, 0.05435949672922, 0.46861687286613],
[ 0.23033214569426, -0.00452237842477, 0.00346120473054, -0.61483888402985],
[-0.40976419499281, 0.10137131352284, 0.02570805136468, 0.06798926306103],
[ 0.83201287149759, 0.82736894861103, -0.35298970920805, 0.49344802383821],
[-3.36634598435507, -0.18324521714611, -1.12118215528184, 0.2057949493723 ],
[ 0.70198992281665, -1.1856449495675 , 0.02465727900177, -0.08333428418838],
[-0.13789069679894, -0.79430992968357, -0.33106496391047, -1.01808298459082],
[-0.10779840884825, -1.41970796854378, 1.55590290358904, 1.34014813517248],
[ 1.8229340670437 , 0.13065838030104, -1.06152350166072, 0.11456488463131],
[ 0.51650051521229, 0.07999783864926, -1.08601194413786, -0.28255247881905],
[-0.24654203558433, -1.02895891025197, -1.34475655787845, 0.52240852619949],
[ 0.03542169335227, -0.01198903021187, 1.12649412049726, -0.60518306798831],
[-1.23945075955452, 0.48778599927278, 1.11522465483282, -0.994827967694 ],
[ 0.30661562766349, 1.91993049714024, 1.08834307939522, 0.61608892787963],
[ 0.8241280516035 , 0.43533554216801, -0.48261931874702, -0.22391158066897],
[ 0.6649139327178 , 1.44597315984982, -0.33359403032613, -0.094219894409 ]])
princomp2.values = array([[ 1.16965204468073],
[ 0.77687367815155],
[ 0.72297937656591],
[ 0.32548581375971]])
princomp2.name = 'princomp2'
princomp2.coef = array([[-0.13957162231397, 0.6561182967648 , 0.32256106777669, 0.66781951188167],
[ 0.49534264552989, -0.08241251099014, -0.6919444767593 , 0.51870674049413],
[-0.85614372781797, -0.11427402995055, -0.47665923729502, 0.16357058078438],
[ 0.04661912785591, 0.74138950947638, -0.43584764555793, -0.50813884128056]])
princomp3 = Holder()
princomp3.comment = 'mlab.princomp(x[:20,]-x[:20,].mean(0), nout=3)'
princomp3.factors = array([[ 0.74592631465403, -0.92093638563647, 1.10020213969681, -0.20234362115983],
[ 0.40379773814409, -0.23694214086306, -0.53526599590626, 0.48048423978257],
[-0.43826559396565, -0.26267383420164, 0.35939862515391, -0.15176605914773],
[ 0.29427656853499, -0.56363285386285, 0.19525662206552, -0.0384830001072 ],
[-1.4327917748351 , 1.18414191887856, 0.05435949672922, 0.46861687286613],
[ 0.23033214569426, -0.00452237842477, 0.00346120473054, -0.61483888402985],
[-0.40976419499281, 0.10137131352284, 0.02570805136468, 0.06798926306103],
[ 0.83201287149759, 0.82736894861103, -0.35298970920805, 0.49344802383821],
[-3.36634598435507, -0.18324521714611, -1.12118215528184, 0.2057949493723 ],
[ 0.70198992281665, -1.1856449495675 , 0.02465727900177, -0.08333428418838],
[-0.13789069679894, -0.79430992968357, -0.33106496391047, -1.01808298459082],
[-0.10779840884825, -1.41970796854378, 1.55590290358904, 1.34014813517248],
[ 1.8229340670437 , 0.13065838030104, -1.06152350166072, 0.11456488463131],
[ 0.51650051521229, 0.07999783864926, -1.08601194413786, -0.28255247881905],
[-0.24654203558433, -1.02895891025197, -1.34475655787845, 0.52240852619949],
[ 0.03542169335227, -0.01198903021187, 1.12649412049726, -0.60518306798831],
[-1.23945075955452, 0.48778599927278, 1.11522465483282, -0.994827967694 ],
[ 0.30661562766349, 1.91993049714024, 1.08834307939522, 0.61608892787963],
[ 0.8241280516035 , 0.43533554216801, -0.48261931874702, -0.22391158066897],
[ 0.6649139327178 , 1.44597315984982, -0.33359403032613, -0.094219894409 ]])
princomp3.values = array([[ 1.16965204468073],
[ 0.77687367815155],
[ 0.72297937656591],
[ 0.32548581375971]])
princomp3.name = 'princomp3'
princomp3.coef = array([[-0.13957162231397, 0.6561182967648 , 0.32256106777669, 0.66781951188167],
[ 0.49534264552989, -0.08241251099014, -0.6919444767593 , 0.51870674049413],
[-0.85614372781797, -0.11427402995055, -0.47665923729502, 0.16357058078438],
[ 0.04661912785591, 0.74138950947638, -0.43584764555793, -0.50813884128056]])
armarep = Holder()
armarep.comment = 'mlab.garchma(-res_armarep.ar[1:], res_armarep.ma[1:], 20)' +\
'mlab.garchar(-res_armarep.ar[1:], res_armarep.ma[1:], 20)'
armarep.marep = array([[-0.1 ],
[-0.77 ],
[-0.305 ],
[ 0.4635 ],
[ 0.47575 ],
[-0.132925 ],
[-0.4470625 ],
[-0.11719125 ],
[ 0.299054375 ],
[ 0.2432801875 ],
[-0.11760340625 ],
[-0.253425853125 ],
[-0.0326302015625 ],
[ 0.18642558171875],
[ 0.11931695210938],
[-0.08948198932031],
[-0.14019455634766],
[ 0.00148831328242],
[ 0.11289980171934],
[ 0.05525925023373]])
armarep.ar = array([ 1. , -0.5, 0.8])
armarep.ma = array([ 1. , -0.6 , 0.08])
armarep.name = 'armarep'
armarep.arrep = array([[ -1.00000000000000e-01],
[ -7.80000000000000e-01],
[ -4.60000000000000e-01],
[ -2.13600000000000e-01],
[ -9.13600000000000e-02],
[ -3.77280000000000e-02],
[ -1.53280000000000e-02],
[ -6.17856000000000e-03],
[ -2.48089600000000e-03],
[ -9.94252799999999e-04],
[ -3.98080000000000e-04],
[ -1.59307776000000e-04],
[ -6.37382655999999e-05],
[ -2.54983372800000e-05],
[ -1.01999411200000e-05],
[ -4.08009768959999e-06],
[ -1.63206332416000e-06],
[ -6.52830179327999e-07],
[ -2.61133041663999e-07],
[ -1.04453410652160e-07]])
|
|
import json
from httpretty import HTTPretty
from ...exceptions import AuthFailed
from .oauth import OAuth2Test
class GithubEnterpriseOAuth2Test(OAuth2Test):
    """Login tests for the GitHub Enterprise OAuth2 backend."""
    backend_path = 'social_core.backends.github_enterprise.GithubEnterpriseOAuth2'
    user_data_url = 'https://www.example.com/api/v3/user'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    user_data_body = json.dumps({
        'login': 'foobar',
        'id': 1,
        'avatar_url': 'https://www.example.com/images/error/foobar_happy.gif',
        'gravatar_id': 'somehexcode',
        'url': 'https://www.example.com/api/v3/users/foobar',
        'name': 'monalisa foobar',
        'company': 'GitHub',
        'blog': 'https://www.example.com/blog',
        'location': 'San Francisco',
        'email': 'foo@bar.com',
        'hireable': False,
        'bio': 'There once was...',
        'public_repos': 2,
        'public_gists': 1,
        'followers': 20,
        'following': 0,
        'html_url': 'https://www.example.com/foobar',
        'created_at': '2008-01-14T04:33:35Z',
        'type': 'User',
        'total_private_repos': 100,
        'owned_private_repos': 100,
        'private_gists': 81,
        'disk_usage': 10000,
        'collaborators': 8,
        'plan': {
            'name': 'Medium',
            'space': 400,
            'collaborators': 10,
            'private_repos': 20
        }
    })

    def _apply_enterprise_settings(self):
        # The backend reads both the instance URL and its API root.
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL': 'https://www.example.com/api/v3'}):
            self.strategy.set_settings(settings)

    def test_login(self):
        self._apply_enterprise_settings()
        self.do_login()

    def test_partial_pipeline(self):
        self._apply_enterprise_settings()
        self.do_partial_pipeline()
class GithubEnterpriseOAuth2NoEmailTest(GithubEnterpriseOAuth2Test):
    """Same flow as the base test, but the user profile carries no email."""
    user_data_body = json.dumps({
        'login': 'foobar',
        'id': 1,
        'avatar_url': 'https://www.example.com/images/error/foobar_happy.gif',
        'gravatar_id': 'somehexcode',
        'url': 'https://www.example.com/api/v3/users/foobar',
        'name': 'monalisa foobar',
        'company': 'GitHub',
        'blog': 'https://www.example.com/blog',
        'location': 'San Francisco',
        'email': '',
        'hireable': False,
        'bio': 'There once was...',
        'public_repos': 2,
        'public_gists': 1,
        'followers': 20,
        'following': 0,
        'html_url': 'https://www.example.com/foobar',
        'created_at': '2008-01-14T04:33:35Z',
        'type': 'User',
        'total_private_repos': 100,
        'owned_private_repos': 100,
        'private_gists': 81,
        'disk_usage': 10000,
        'collaborators': 8,
        'plan': {
            'name': 'Medium',
            'space': 400,
            'collaborators': 10,
            'private_repos': 20
        }
    })

    def _apply_enterprise_settings(self):
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_API_URL': 'https://www.example.com/api/v3'}):
            self.strategy.set_settings(settings)

    def _register_email_endpoint(self, payload):
        # Canned response for the /user/emails endpoint, which the backend
        # presumably queries when the profile itself has no email address.
        HTTPretty.register_uri(HTTPretty.GET,
                               'https://www.example.com/api/v3/user/emails',
                               status=200,
                               body=json.dumps(payload),
                               content_type='application/json')

    def test_login(self):
        self._apply_enterprise_settings()
        self._register_email_endpoint(['foo@bar.com'])
        self.do_login()

    def test_login_next_format(self):
        self._apply_enterprise_settings()
        # Newer API format: a list of objects rather than bare strings.
        self._register_email_endpoint([{'email': 'foo@bar.com'}])
        self.do_login()

    def test_partial_pipeline(self):
        self._apply_enterprise_settings()
        self.do_partial_pipeline()
class GithubEnterpriseOrganizationOAuth2Test(GithubEnterpriseOAuth2Test):
    """Login tests for the organization-restricted Enterprise backend."""
    backend_path = 'social_core.backends.github_enterprise.GithubEnterpriseOrganizationOAuth2'

    def _apply_org_settings(self):
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL': 'https://www.example.com/api/v3'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME': 'foobar'}):
            self.strategy.set_settings(settings)

    def auth_handlers(self, start_url):
        # 204 from the membership endpoint marks the user as an org member.
        url = 'https://www.example.com/api/v3/orgs/foobar/members/foobar'
        HTTPretty.register_uri(HTTPretty.GET, url, status=204, body='')
        return super(GithubEnterpriseOrganizationOAuth2Test, self).auth_handlers(
            start_url
        )

    def test_login(self):
        self._apply_org_settings()
        self.do_login()

    def test_partial_pipeline(self):
        self._apply_org_settings()
        self.do_partial_pipeline()
class GithubEnterpriseOrganizationOAuth2FailTest(GithubEnterpriseOAuth2Test):
    """Auth must fail when the user is not a member of the organization."""
    backend_path = 'social_core.backends.github_enterprise.GithubEnterpriseOrganizationOAuth2'

    def _apply_org_settings(self):
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_API_URL': 'https://www.example.com/api/v3'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_ORG_NAME': 'foobar'}):
            self.strategy.set_settings(settings)

    def auth_handlers(self, start_url):
        # 404 from the membership endpoint: user is not an org member.
        url = 'https://www.example.com/api/v3/orgs/foobar/members/foobar'
        HTTPretty.register_uri(HTTPretty.GET, url, status=404,
                               body='{"message": "Not Found"}',
                               content_type='application/json')
        return super(GithubEnterpriseOrganizationOAuth2FailTest, self).auth_handlers(
            start_url
        )

    def test_login(self):
        self._apply_org_settings()
        with self.assertRaises(AuthFailed):
            self.do_login()

    def test_partial_pipeline(self):
        self._apply_org_settings()
        with self.assertRaises(AuthFailed):
            self.do_partial_pipeline()
class GithubEnterpriseTeamOAuth2Test(GithubEnterpriseOAuth2Test):
    """Login tests for the team-restricted Enterprise backend."""
    backend_path = 'social_core.backends.github_enterprise.GithubEnterpriseTeamOAuth2'

    def _apply_team_settings(self):
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL': 'https://www.example.com/api/v3'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID': '123'}):
            self.strategy.set_settings(settings)

    def auth_handlers(self, start_url):
        # 204 from the membership endpoint marks the user as a team member.
        url = 'https://www.example.com/api/v3/teams/123/members/foobar'
        HTTPretty.register_uri(HTTPretty.GET, url, status=204, body='')
        return super(GithubEnterpriseTeamOAuth2Test, self).auth_handlers(
            start_url
        )

    def test_login(self):
        self._apply_team_settings()
        self.do_login()

    def test_partial_pipeline(self):
        self._apply_team_settings()
        self.do_partial_pipeline()
class GithubEnterpriseTeamOAuth2FailTest(GithubEnterpriseOAuth2Test):
    """Auth must fail when the user is not a member of the team."""
    backend_path = 'social_core.backends.github_enterprise.GithubEnterpriseTeamOAuth2'

    def _apply_team_settings(self):
        for settings in (
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_URL': 'https://www.example.com'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_API_URL': 'https://www.example.com/api/v3'},
                {'SOCIAL_AUTH_GITHUB_ENTERPRISE_TEAM_ID': '123'}):
            self.strategy.set_settings(settings)

    def auth_handlers(self, start_url):
        # 404 from the membership endpoint: user is not a team member.
        url = 'https://www.example.com/api/v3/teams/123/members/foobar'
        HTTPretty.register_uri(HTTPretty.GET, url, status=404,
                               body='{"message": "Not Found"}',
                               content_type='application/json')
        return super(GithubEnterpriseTeamOAuth2FailTest, self).auth_handlers(
            start_url
        )

    def test_login(self):
        self._apply_team_settings()
        with self.assertRaises(AuthFailed):
            self.do_login()

    def test_partial_pipeline(self):
        self._apply_team_settings()
        with self.assertRaises(AuthFailed):
            self.do_partial_pipeline()
|
|
"""
This is an implementation of the Linear Fascicle Evaluation (LiFE) algorithm
described in:
Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell B.A. (2014). Validation
and statistical inference in living connectomes. Nature Methods 11:
1058-1063. doi:10.1038/nmeth.3098
"""
import numpy as np
import scipy.sparse as sps
import scipy.linalg as la
from dipy.reconst.base import ReconstModel, ReconstFit
from dipy.utils.six.moves import range
from dipy.tracking.utils import unique_rows
from dipy.tracking.streamline import transform_streamlines
from dipy.tracking.vox2track import _voxel2streamline
import dipy.data as dpd
import dipy.core.optimize as opt
def gradient(f):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries. The returned gradient hence has
    the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.

    Returns
    -------
    gradient : ndarray
        N arrays of the same shape as `f` giving the derivative of `f` with
        respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
    >>> gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
    [array([[ 2.,  2., -1.],
           [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
           [ 1. ,  1. ,  1. ]])]

    Note
    ----
    This is a simplified implementation of gradient that is part of numpy
    1.8. In order to mitigate the effects of changes added to this
    implementation in version 1.9 of numpy, we include this implementation
    here.
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    dx = [1.0] * N
    # use central differences on interior and first differences on endpoints
    outvals = []
    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)] * N
    slice2 = [slice(None)] * N
    slice3 = [slice(None)] * N
    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.empty_like(f)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # NOTE: multi-dimensional indexing must use a *tuple* of slices;
        # indexing with a list was deprecated in numpy 1.15 and is an
        # error in current numpy, hence the tuple() conversions below.
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / 2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        # divide by step size
        outvals.append(out / dx[axis])
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
    if N == 1:
        return outvals[0]
    else:
        return outvals
def streamline_gradients(streamline):
    """
    Calculate the gradients of the streamline along the spatial dimension

    Parameters
    ----------
    streamline : array-like of shape (n, 3)
        The 3d coordinates of a single streamline

    Returns
    -------
    Array of shape (3, n): Spatial gradients along the length of the
    streamline.
    """
    coords = np.asarray(streamline)
    # gradient() returns one array per axis; the first one differentiates
    # along the node (length) axis, which is the spatial gradient we want.
    return np.array(gradient(coords)[0])
def grad_tensor(grad, evals):
    """
    Calculate the 3 by 3 tensor for a given spatial gradient, given a canonical
    tensor shape (also as a 3 by 3), pointing at [1,0,0]

    Parameters
    ----------
    grad : 1d array of shape (3,)
        The spatial gradient (e.g between two nodes of a streamline).
    evals: 1d array of shape (3,)
        The eigenvalues of a canonical tensor to be used as a response
        function.

    Returns
    -------
    T : ndarray of shape (3, 3)
        The canonical tensor rotated to point along `grad`:
        R * diag(evals) * R.T.
    """
    # This is the rotation matrix from [1, 0, 0] to this gradient of the sl.
    # np.matrix is deprecated (removed in numpy 2.0), so build a 2-D array
    # view instead; overwrite_a must then be False because atleast_2d may
    # alias the caller's data (np.matrix used to copy).
    R = la.svd(np.atleast_2d(grad), overwrite_a=False)[2]
    # This is the 3 by 3 tensor after rotation:
    T = np.dot(np.dot(R, np.diag(evals)), R.T)
    return T
def streamline_tensors(streamline, evals=[0.001, 0, 0]):
    """
    The tensors generated by this fiber.

    Parameters
    ----------
    streamline : array-like of shape (n, 3)
        The 3d coordinates of a single streamline
    evals : iterable with three entries
        The estimated eigenvalues of a single fiber tensor.
        (default: [0.001, 0, 0]).

    Returns
    -------
    An n_nodes by 3 by 3 array with the tensor for each node in the fiber.

    Note
    ----
    Estimates of the radial/axial diffusivities may rely on
    empirical measurements (for example, the AD in the Corpus Callosum), or
    may be based on a biophysical model of some kind.
    """
    gradients = streamline_gradients(streamline)
    # One 3x3 tensor per node, oriented along the local spatial gradient:
    return np.array([grad_tensor(g, evals) for g in gradients])
def streamline_signal(streamline, gtab, evals=[0.001, 0, 0]):
    """
    The signal from a single streamline estimate along each of its nodes.

    Parameters
    ----------
    streamline : a single streamline
    gtab : GradientTable class instance
    evals : list of length 3 (optional. Default: [0.001, 0, 0])
        The eigenvalues of the canonical tensor used as an estimate of the
        signal generated by each node of the streamline.
    """
    # One tensor per node of the streamline:
    tensors = streamline_tensors(streamline, evals)
    # Only the diffusion-weighted measurements contribute:
    dwi = ~gtab.b0s_mask
    bvecs = gtab.bvecs[dwi]
    bvals = gtab.bvals[dwi]
    sig = np.empty((len(streamline), np.sum(dwi)))
    for node_idx, tensor in enumerate(tensors):
        # Apparent diffusion coefficient along each gradient direction:
        ADC = np.diag(np.dot(np.dot(bvecs, tensor), bvecs.T))
        # Stejskal-Tanner equation with the ADC as input, and S0 = 1:
        sig[node_idx] = np.exp(-bvals * ADC)
    # Demean with the grand mean over all nodes and directions:
    return sig - np.mean(sig)
class LifeSignalMaker(object):
    """
    A class for generating signals from streamlines in an efficient and speedy
    manner.
    """
    def __init__(self, gtab, evals=[0.001, 0, 0], sphere=None):
        """
        Initialize a signal maker

        Parameters
        ----------
        gtab : GradientTable class instance
            The gradient table on which the signal is calculated.
        evals : list of 3 items
            The eigenvalues of the canonical tensor to use in calculating the
            signal.
        sphere : `dipy.core.Sphere` class instance, optional
            The discrete sphere to use as an approximation for the continuous
            sphere on which the signal is represented. If no sphere is
            provided, we use the :mod:`dipy.data` symmetric sphere with 724
            vertices.
        """
        if sphere is None:
            self.sphere = dpd.get_sphere('symmetric724')
        else:
            self.sphere = sphere
        self.gtab = gtab
        self.evals = evals
        # The signal only involves the diffusion-weighted directions, so
        # extract those once here instead of on every calc_signal call:
        self._bvecs = gtab.bvecs[~gtab.b0s_mask]
        self._bvals = gtab.bvals[~gtab.b0s_mask]
        # One (lazily computed) signal row per sphere vertex:
        self.signal = np.empty((self.sphere.vertices.shape[0],
                                np.sum(~gtab.b0s_mask)))
        # Vertex indices whose signal row has been filled in already.
        # A set gives O(1) membership tests (previously a list: O(n)).
        self._calculated = set()

    def calc_signal(self, xyz):
        """
        Return the demeaned signal for the sphere vertex closest to `xyz`,
        computing and caching it on first use.
        """
        idx = self.sphere.find_closest(xyz)
        if idx not in self._calculated:
            tensor = grad_tensor(self.sphere.vertices[idx], self.evals)
            # ADC along each gradient direction, then Stejskal-Tanner
            # with S0 = 1:
            ADC = np.diag(np.dot(np.dot(self._bvecs, tensor),
                                 self._bvecs.T))
            sig = np.exp(-self._bvals * ADC)
            self.signal[idx] = sig - np.mean(sig)
            self._calculated.add(idx)
        return self.signal[idx]

    def streamline_signal(self, streamline):
        """
        Approximate the signal for a given streamline
        """
        grad = streamline_gradients(streamline)
        sig_out = np.zeros((grad.shape[0], self.signal.shape[-1]))
        for ii, g in enumerate(grad):
            sig_out[ii] = self.calc_signal(g)
        return sig_out
def voxel2streamline(streamline, transformed=False, affine=None,
                     unique_idx=None):
    """
    Maps voxels to streamlines and streamlines to voxels, for setting up
    the LiFE equations matrix

    Parameters
    ----------
    streamline : list
        A collection of streamlines, each n by 3, with n being the number of
        nodes in the fiber.
    affine : 4 by 4 array (optional)
        Defines the spatial transformation from streamline to data.
        Default: np.eye(4)
    transformed : bool (optional)
        Whether the streamlines have been already transformed (in which case
        they don't need to be transformed in here).
    unique_idx : array (optional).
        The unique indices in the streamlines

    Returns
    -------
    v2f, v2fn : tuple of dicts
        The first dict in the tuple answers the question: Given a voxel (from
        the unique indices in this model), which fibers pass through it?
        The second answers the question: Given a streamline, for each voxel
        that this streamline passes through, which nodes of that streamline
        are in that voxel?
    """
    if transformed:
        sl_in_vox = streamline
    else:
        # Bring the streamlines into the data's voxel space first:
        sl_in_vox = transform_streamlines(
            streamline, np.eye(4) if affine is None else affine)
    if unique_idx is None:
        # Derive the unique voxel indices from the streamline coordinates:
        unique_idx = unique_rows(np.round(np.concatenate(sl_in_vox)))
    return _voxel2streamline(sl_in_vox, unique_idx.astype(np.intp))
class FiberModel(ReconstModel):
    """
    A class for representing and solving predictive models based on
    tractography solutions.

    Notes
    -----
    This is an implementation of the LiFE model described in [1]_

    [1] Pestilli, F., Yeatman, J, Rokem, A. Kay, K. and Wandell
        B.A. (2014). Validation and statistical inference in living
        connectomes. Nature Methods.
    """
    def __init__(self, gtab):
        """
        Parameters
        ----------
        gtab : a GradientTable class instance
        """
        # Initialize the super-class:
        ReconstModel.__init__(self, gtab)

    def setup(self, streamline, affine, evals=[0.001, 0, 0], sphere=None):
        """
        Set up the necessary components for the LiFE model: the matrix of
        fiber-contributions to the DWI signal, and the coordinates of voxels
        for which the equations will be solved

        Parameters
        ----------
        streamline : list
            Streamlines, each is an array of shape (n, 3)
        affine : 4 by 4 array
            Mapping from the streamline coordinates to the data
        evals : list (3 items, optional)
            The eigenvalues of the canonical tensor used as a response
            function. Default:[0.001, 0, 0].
        sphere : `dipy.core.Sphere` instance, or False
            Whether to approximate (and cache) the signal on a discrete
            sphere. This may confer a significant speed-up in setting up the
            problem, but is not as accurate. If `False`, we use the exact
            gradients along the streamlines to calculate the matrix, instead
            of an approximation. Defaults to use the 724-vertex symmetric
            sphere from :mod:`dipy.data`

        Returns
        -------
        life_matrix : scipy.sparse.csr_matrix
            The matrix of streamline contributions to the signal, with one
            row per voxel/direction combination and one column per fiber.
        vox_coords : ndarray
            The unique voxel coordinates traversed by the streamlines.
        """
        if sphere is not False:
            SignalMaker = LifeSignalMaker(self.gtab,
                                          evals=evals,
                                          sphere=sphere)
        if affine is None:
            affine = np.eye(4)
        streamline = transform_streamlines(streamline, affine)
        # Assign some local variables, for shorthand:
        all_coords = np.concatenate(streamline)
        vox_coords = unique_rows(np.round(all_coords).astype(np.intp))
        del all_coords
        # We only consider the diffusion-weighted signals:
        n_bvecs = self.gtab.bvals[~self.gtab.b0s_mask].shape[0]
        v2f, v2fn = voxel2streamline(streamline, transformed=True,
                                     affine=affine, unique_idx=vox_coords)
        # How many fibers in each voxel (this will determine how many
        # components are in the matrix). list() is needed because in
        # Python 3 dict.values() is a view, not a sequence:
        n_unique_f = len(np.hstack(list(v2f.values())))
        # Preallocate the flat arrays used to build the sparse matrix.
        # NB: `np.float` was a deprecated alias of the builtin `float`
        # (removed in NumPy 1.24); `np.float64` is the equivalent dtype:
        f_matrix_sig = np.zeros(n_unique_f * n_bvecs, dtype=np.float64)
        f_matrix_row = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)
        f_matrix_col = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)
        fiber_signal = []
        for s_idx, s in enumerate(streamline):
            if sphere is not False:
                fiber_signal.append(SignalMaker.streamline_signal(s))
            else:
                fiber_signal.append(streamline_signal(s, self.gtab, evals))
        del streamline
        if sphere is not False:
            del SignalMaker
        keep_ct = 0
        range_bvecs = np.arange(n_bvecs).astype(int)
        # In each voxel:
        for v_idx in range(vox_coords.shape[0]):
            mat_row_idx = (range_bvecs + v_idx * n_bvecs).astype(np.intp)
            # For each fiber in that voxel:
            for f_idx in v2f[v_idx]:
                # For each fiber-voxel combination, store the row/column
                # indices in the pre-allocated linear arrays
                f_matrix_row[keep_ct:keep_ct + n_bvecs] = mat_row_idx
                f_matrix_col[keep_ct:keep_ct + n_bvecs] = f_idx
                vox_fiber_sig = np.zeros(n_bvecs)
                for node_idx in v2fn[f_idx][v_idx]:
                    # Sum the signal from each node of the fiber in that
                    # voxel:
                    vox_fiber_sig += fiber_signal[f_idx][node_idx]
                # And add the summed thing into the corresponding rows:
                f_matrix_sig[keep_ct:keep_ct + n_bvecs] += vox_fiber_sig
                keep_ct = keep_ct + n_bvecs
        del v2f, v2fn
        # Allocate the sparse matrix, using the more memory-efficient 'csr'
        # format:
        life_matrix = sps.csr_matrix((f_matrix_sig,
                                      [f_matrix_row, f_matrix_col]))
        return life_matrix, vox_coords

    def _signals(self, data, vox_coords):
        """
        Helper function to extract and separate all the signals we need to fit
        and evaluate a fit of this model

        Parameters
        ----------
        data : 4D array
        vox_coords: n by 3 array
            The coordinates into the data array of the fiber nodes.
        """
        # Fitting is done on the S0-normalized-and-demeaned diffusion-weighted
        # signal:
        idx_tuple = (vox_coords[:, 0], vox_coords[:, 1], vox_coords[:, 2])
        # We'll look at a 2D array, extracting the data from the voxels:
        vox_data = data[idx_tuple]
        weighted_signal = vox_data[:, ~self.gtab.b0s_mask]
        b0_signal = np.mean(vox_data[:, self.gtab.b0s_mask], -1)
        relative_signal = (weighted_signal / b0_signal[:, None])
        # The mean of the relative signal across directions in each voxel:
        mean_sig = np.mean(relative_signal, -1)
        to_fit = (relative_signal - mean_sig[:, None]).ravel()
        return (to_fit, weighted_signal, b0_signal, relative_signal, mean_sig,
                vox_data)

    def fit(self, data, streamline, affine=None, evals=[0.001, 0, 0],
            sphere=None):
        """
        Fit the LiFE FiberModel for data and a set of streamlines associated
        with this data

        Parameters
        ----------
        data : 4D array
            Diffusion-weighted data
        streamline : list
            A bunch of streamlines
        affine: 4 by 4 array (optional)
            The affine to go from the streamline coordinates to the data
            coordinates. Defaults to use `np.eye(4)`
        evals : list (optional)
            The eigenvalues of the tensor response function used in
            constructing the model signal. Default: [0.001, 0, 0]
        sphere: `dipy.core.Sphere` instance, or False
            Whether to approximate (and cache) the signal on a discrete
            sphere. This may confer a significant speed-up in setting up the
            problem, but is not as accurate. If `False`, we use the exact
            gradients along the streamlines to calculate the matrix, instead
            of an approximation.

        Returns
        -------
        FiberFit class instance
        """
        if affine is None:
            affine = np.eye(4)
        life_matrix, vox_coords = \
            self.setup(streamline, affine, evals=evals, sphere=sphere)
        (to_fit, weighted_signal, b0_signal, relative_signal, mean_sig,
         vox_data) = self._signals(data, vox_coords)
        # Solve the non-negative least-squares problem for the fiber weights:
        beta = opt.sparse_nnls(to_fit, life_matrix)
        return FiberFit(self, life_matrix, vox_coords, to_fit, beta,
                        weighted_signal, b0_signal, relative_signal, mean_sig,
                        vox_data, streamline, affine, evals)
class FiberFit(ReconstFit):
    """
    A fit of the LiFE model to diffusion data
    """
    def __init__(self, fiber_model, life_matrix, vox_coords, to_fit, beta,
                 weighted_signal, b0_signal, relative_signal, mean_sig,
                 vox_data, streamline, affine, evals):
        """
        Parameters
        ----------
        fiber_model : A FiberModel class instance
        life_matrix : sparse matrix
            The matrix of fiber contributions produced by ``FiberModel.setup``.
        vox_coords : n by 3 array
            The voxel coordinates over which the fit was performed.
        to_fit : 1D array
            The demeaned relative signal that was fit.
        beta : 1D array
            The parameters derived from a fit of the model to the data:
            one non-negative weight per streamline.
        weighted_signal, b0_signal, relative_signal, mean_sig : arrays
            The signal components extracted by ``FiberModel._signals``.
        vox_data : 2D array
            The raw data extracted at ``vox_coords``.
        streamline : list
            The streamlines the model was set up with.
        affine : 4 by 4 array
            The streamline-to-data transformation used during the fit.
        evals : list of 3 items
            The eigenvalues of the canonical response tensor used.
        """
        ReconstFit.__init__(self, fiber_model, vox_data)
        # Keep everything needed to reproduce or extend the prediction:
        self.life_matrix = life_matrix
        self.vox_coords = vox_coords
        self.fit_data = to_fit
        self.beta = beta
        self.weighted_signal = weighted_signal
        self.b0_signal = b0_signal
        self.relative_signal = relative_signal
        self.mean_signal = mean_sig
        self.streamline = streamline
        self.affine = affine
        self.evals = evals
    def predict(self, gtab=None, S0=None):
        """
        Predict the signal

        Parameters
        ----------
        gtab : GradientTable
            Default: use self.gtab
        S0 : float or array
            The non-diffusion-weighted signal in the voxels for which a
            prediction is made. Default: use self.b0_signal

        Returns
        -------
        prediction : ndarray of shape (voxels, bvecs)
            An array with a prediction of the signal in each voxel/direction
        """
        # We generate the prediction and in each voxel, we add the
        # offset, according to the isotropic part of the signal, which was
        # removed prior to fitting:
        if gtab is None:
            # Reuse the design matrix from the original fit:
            _matrix = self.life_matrix
            gtab = self.model.gtab
        else:
            # Rebuild the design matrix for the new gradient table.
            # NOTE(review): `sphere` is left at its default here, so the
            # approximate cached-sphere signal is used — confirm intended.
            _model = FiberModel(gtab)
            _matrix, _ = _model.setup(self.streamline,
                                      self.affine,
                                      self.evals)
        # Matrix-vector product of the design matrix with the fitted
        # weights, reshaped to (n_voxels, n_diffusion-weighted directions):
        pred_weighted = np.reshape(opt.spdot(_matrix, self.beta),
                                   (self.vox_coords.shape[0],
                                    np.sum(~gtab.b0s_mask)))
        pred = np.empty((self.vox_coords.shape[0], gtab.bvals.shape[0]))
        if S0 is None:
            S0 = self.b0_signal
        # b0 measurements are predicted as S0 itself; diffusion-weighted
        # measurements add back the mean signal and rescale by S0:
        pred[..., gtab.b0s_mask] = S0[:, None]
        pred[..., ~gtab.b0s_mask] =\
            (pred_weighted + self.mean_signal[:, None]) * S0[:, None]
        return pred
|
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class CredentialList(ListResource):
    def __init__(self, version):
        """
        Initialize the CredentialList

        :param Version version: Version that contains the resource

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialList
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialList
        """
        super(CredentialList, self).__init__(version)

        # Path Solution
        self._solution = {}
        self._uri = '/Credentials'.format(**self._solution)

    def stream(self, limit=None, page_size=None):
        """
        Streams CredentialInstance records from the API as a generator stream.
        Records are loaded lazily, page by page, as the generator is
        consumed, so this operation is memory efficient.

        :param int limit: Upper limit for the number of records to return.
                          stream() guarantees to never return more than
                          limit. Default is no limit.
        :param int page_size: Number of records to fetch per request, when
                              not set will use the default value of 50
                              records. If no page_size is defined but a limit
                              is defined, stream() will attempt to read the
                              limit with the most efficient page size,
                              i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.ip_messaging.v1.credential.CredentialInstance]
        """
        limits = self._version.read_limits(limit, page_size)
        first_page = self.page(page_size=limits['page_size'])
        return self._version.stream(first_page, limits['limit'],
                                    limits['page_limit'])

    def list(self, limit=None, page_size=None):
        """
        Lists CredentialInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit`
        records into memory before returning.

        :param int limit: Upper limit for the number of records to return.
                          list() guarantees never to return more than limit.
                          Default is no limit.
        :param int page_size: Number of records to fetch per request, when
                              not set will use the default value of 50
                              records. If no page_size is defined but a limit
                              is defined, list() will attempt to read the
                              limit with the most efficient page size,
                              i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.ip_messaging.v1.credential.CredentialInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of CredentialInstance records from the API.
        Request is executed immediately.

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client
                                state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialPage
        """
        query = values.of({
            'PageToken': page_token,
            'Page': page_number,
            'PageSize': page_size,
        })
        response = self._version.page('GET', self._uri, params=query)
        return CredentialPage(self._version, response, self._solution)

    def create(self, type, friendly_name=values.unset, certificate=values.unset,
               private_key=values.unset, sandbox=values.unset, api_key=values.unset,
               secret=values.unset):
        """
        Create a new CredentialInstance

        :param CredentialInstance.PushService type: The type
        :param unicode friendly_name: The friendly_name
        :param unicode certificate: The certificate
        :param unicode private_key: The private_key
        :param bool sandbox: The sandbox
        :param unicode api_key: The api_key
        :param unicode secret: The secret

        :returns: Newly created CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        form = values.of({
            'Type': type,
            'FriendlyName': friendly_name,
            'Certificate': certificate,
            'PrivateKey': private_key,
            'Sandbox': sandbox,
            'ApiKey': api_key,
            'Secret': secret,
        })
        payload = self._version.create('POST', self._uri, data=form)
        return CredentialInstance(self._version, payload)

    def get(self, sid):
        """
        Constructs a CredentialContext

        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialContext
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialContext
        """
        return CredentialContext(self._version, sid=sid)

    def __call__(self, sid):
        """
        Constructs a CredentialContext

        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialContext
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialContext
        """
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.IpMessaging.V1.CredentialList>'
class CredentialPage(Page):
    def __init__(self, version, response, solution):
        """
        Initialize the CredentialPage

        :param Version version: Version that contains the resource
        :param Response response: Response from the API

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialPage
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialPage
        """
        super(CredentialPage, self).__init__(version, response)
        # Path solution carried over from the owning list resource:
        self._solution = solution

    def get_instance(self, payload):
        """
        Build an instance of CredentialInstance

        :param dict payload: Payload response from the API

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        return CredentialInstance(self._version, payload)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.IpMessaging.V1.CredentialPage>'
class CredentialContext(InstanceContext):
    def __init__(self, version, sid):
        """
        Initialize the CredentialContext

        :param Version version: Version that contains the resource
        :param sid: The sid

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialContext
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialContext
        """
        super(CredentialContext, self).__init__(version)

        # Path Solution
        self._solution = {'sid': sid}
        self._uri = '/Credentials/{sid}'.format(**self._solution)

    def fetch(self):
        """
        Fetch a CredentialInstance

        :returns: Fetched CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        payload = self._version.fetch('GET', self._uri, params=values.of({}))
        return CredentialInstance(self._version, payload,
                                  sid=self._solution['sid'])

    def update(self, friendly_name=values.unset, certificate=values.unset,
               private_key=values.unset, sandbox=values.unset, api_key=values.unset,
               secret=values.unset):
        """
        Update the CredentialInstance

        :param unicode friendly_name: The friendly_name
        :param unicode certificate: The certificate
        :param unicode private_key: The private_key
        :param bool sandbox: The sandbox
        :param unicode api_key: The api_key
        :param unicode secret: The secret

        :returns: Updated CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        form = values.of({
            'FriendlyName': friendly_name,
            'Certificate': certificate,
            'PrivateKey': private_key,
            'Sandbox': sandbox,
            'ApiKey': api_key,
            'Secret': secret,
        })
        payload = self._version.update('POST', self._uri, data=form)
        return CredentialInstance(self._version, payload,
                                  sid=self._solution['sid'])

    def delete(self):
        """
        Deletes the CredentialInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete('delete', self._uri)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        details = ' '.join(['{}={}'.format(k, v)
                            for k, v in self._solution.items()])
        return '<Twilio.IpMessaging.V1.CredentialContext {}>'.format(details)
class CredentialInstance(InstanceResource):
    """A single push-notification credential resource returned by the API."""
    class PushService(object):
        """Allowed values for the credential `type` field."""
        GCM = "gcm"
        APN = "apn"
        FCM = "fcm"
    def __init__(self, version, payload, sid=None):
        """
        Initialize the CredentialInstance

        :param Version version: Version that contains the resource
        :param dict payload: Raw API response body to marshal into properties
        :param sid: The sid (optional; falls back to the sid in `payload`)

        :returns: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        super(CredentialInstance, self).__init__(version)
        # Marshaled Properties: copy the API payload into a plain dict,
        # deserializing the ISO-8601 timestamp fields into datetimes.
        self._properties = {
            'sid': payload['sid'],
            'account_sid': payload['account_sid'],
            'friendly_name': payload['friendly_name'],
            'type': payload['type'],
            'sandbox': payload['sandbox'],
            'date_created': deserialize.iso8601_datetime(payload['date_created']),
            'date_updated': deserialize.iso8601_datetime(payload['date_updated']),
            'url': payload['url'],
        }
        # Context is created lazily in `_proxy` the first time an action
        # (fetch/update/delete) is performed:
        self._context = None
        self._solution = {
            'sid': sid or self._properties['sid'],
        }
    @property
    def _proxy(self):
        """
        Generate an instance context for the instance, the context is capable of
        performing various actions. All instance actions are proxied to the context

        :returns: CredentialContext for this CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialContext
        """
        if self._context is None:
            self._context = CredentialContext(
                self._version,
                sid=self._solution['sid'],
            )
        return self._context
    @property
    def sid(self):
        """
        :returns: The sid
        :rtype: unicode
        """
        return self._properties['sid']
    @property
    def account_sid(self):
        """
        :returns: The account_sid
        :rtype: unicode
        """
        return self._properties['account_sid']
    @property
    def friendly_name(self):
        """
        :returns: The friendly_name
        :rtype: unicode
        """
        return self._properties['friendly_name']
    @property
    def type(self):
        """
        :returns: The type
        :rtype: CredentialInstance.PushService
        """
        return self._properties['type']
    @property
    def sandbox(self):
        """
        :returns: The sandbox
        :rtype: unicode
        """
        return self._properties['sandbox']
    @property
    def date_created(self):
        """
        :returns: The date_created
        :rtype: datetime
        """
        return self._properties['date_created']
    @property
    def date_updated(self):
        """
        :returns: The date_updated
        :rtype: datetime
        """
        return self._properties['date_updated']
    @property
    def url(self):
        """
        :returns: The url
        :rtype: unicode
        """
        return self._properties['url']
    def fetch(self):
        """
        Fetch a CredentialInstance

        :returns: Fetched CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        # Delegate to the lazily-created context:
        return self._proxy.fetch()
    def update(self, friendly_name=values.unset, certificate=values.unset,
               private_key=values.unset, sandbox=values.unset, api_key=values.unset,
               secret=values.unset):
        """
        Update the CredentialInstance

        :param unicode friendly_name: The friendly_name
        :param unicode certificate: The certificate
        :param unicode private_key: The private_key
        :param bool sandbox: The sandbox
        :param unicode api_key: The api_key
        :param unicode secret: The secret

        :returns: Updated CredentialInstance
        :rtype: twilio.rest.ip_messaging.v1.credential.CredentialInstance
        """
        # Delegate to the lazily-created context:
        return self._proxy.update(
            friendly_name=friendly_name,
            certificate=certificate,
            private_key=private_key,
            sandbox=sandbox,
            api_key=api_key,
            secret=secret,
        )
    def delete(self):
        """
        Deletes the CredentialInstance

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        # Delegate to the lazily-created context:
        return self._proxy.delete()
    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.IpMessaging.V1.CredentialInstance {}>'.format(context)
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
def _make_entity_pb(dataset_id, kind, integer_id, name=None, str_val=None):
    """Build an Entity protobuf with a single path element, optionally
    carrying one string property (only when both `name` and `str_val`
    are given)."""
    from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
    pb = datastore_pb.Entity()
    pb.key.partition_id.dataset_id = dataset_id
    elem = pb.key.path_element.add()
    elem.kind = kind
    elem.id = integer_id
    if name is not None and str_val is not None:
        prop = pb.property.add()
        prop.name = name
        prop.value.string_value = str_val
    return pb
class Test__require_dataset_id(unittest2.TestCase):
    """Tests for `gcloud.datastore.api._require_dataset_id`, covering the
    precedence of explicit argument > first key > active batch/transaction >
    implicit default."""
    # Sentinel distinguishing "argument not passed" from any real value:
    _MARKER = object()
    def _callFUT(self, passed=_MARKER, first_key=None):
        # Invoke the function-under-test, omitting `dataset_id` entirely
        # when the caller did not pass one:
        from gcloud.datastore.api import _require_dataset_id
        if passed is self._MARKER:
            return _require_dataset_id(first_key=first_key)
        return _require_dataset_id(dataset_id=passed, first_key=first_key)
    def _monkey(self, dataset_id):
        # Context manager temporarily patching the module-level implicit
        # default dataset_id:
        from gcloud.datastore._testing import _monkey_defaults
        return _monkey_defaults(dataset_id=dataset_id)
    def test_implicit_unset_wo_keys(self):
        """No default, no keys, no batch -> error."""
        with self._monkey(None):
            with self.assertRaises(EnvironmentError):
                self._callFUT()
    def test_implicit_unset_w_keys(self):
        """No default: the first key's dataset_id wins."""
        from gcloud.datastore.test_batch import _Key
        ID = 'DATASET'
        with self._monkey(None):
            self.assertEqual(self._callFUT(first_key=_Key(ID)), ID)
    def test_implicit_unset_w_existing_batch_wo_keys(self):
        """No default, no keys: the active batch's dataset_id wins."""
        ID = 'DATASET'
        with self._monkey(None):
            with _NoCommitBatch(dataset_id=ID, connection=object()):
                self.assertEqual(self._callFUT(), ID)
    def test_implicit_unset_w_existing_batch_w_keys(self):
        """Active batch takes precedence over the first key."""
        from gcloud.datastore.test_batch import _Key
        ID = 'DATASET'
        OTHER = 'OTHER'
        with self._monkey(None):
            with _NoCommitBatch(dataset_id=ID, connection=object()):
                self.assertEqual(self._callFUT(first_key=_Key(OTHER)), ID)
    def test_implicit_unset_w_existing_transaction_wo_keys(self):
        """No default, no keys: the active transaction's dataset_id wins."""
        ID = 'DATASET'
        with self._monkey(None):
            with _NoCommitTransaction(dataset_id=ID, connection=object()):
                self.assertEqual(self._callFUT(), ID)
    def test_implicit_unset_w_existing_transaction_w_keys(self):
        """Active transaction takes precedence over the first key."""
        from gcloud.datastore.test_batch import _Key
        ID = 'DATASET'
        OTHER = 'OTHER'
        with self._monkey(None):
            with _NoCommitTransaction(dataset_id=ID, connection=object()):
                self.assertEqual(self._callFUT(first_key=_Key(OTHER)), ID)
    def test_implicit_unset_passed_explicitly_wo_keys(self):
        """An explicitly passed dataset_id is returned as-is."""
        ID = 'DATASET'
        with self._monkey(None):
            self.assertEqual(self._callFUT(ID), ID)
    def test_implicit_unset_passed_explicitly_w_keys(self):
        """An explicitly passed dataset_id beats the first key."""
        from gcloud.datastore.test_batch import _Key
        ID = 'DATASET'
        OTHER = 'OTHER'
        with self._monkey(None):
            self.assertEqual(self._callFUT(ID, first_key=_Key(OTHER)), ID)
    def test_id_implicit_set_wo_keys(self):
        """With only the implicit default set, it is returned (same object)."""
        IMPLICIT_ID = 'IMPLICIT'
        with self._monkey(IMPLICIT_ID):
            stored_id = self._callFUT()
            self.assertTrue(stored_id is IMPLICIT_ID)
    def test_id_implicit_set_w_keys(self):
        """The first key's dataset_id beats the implicit default."""
        from gcloud.datastore.test_batch import _Key
        IMPLICIT_ID = 'IMPLICIT'
        OTHER = 'OTHER'
        with self._monkey(IMPLICIT_ID):
            self.assertEqual(self._callFUT(first_key=_Key(OTHER)), OTHER)
    def test_id_implicit_set_passed_explicitly_wo_keys(self):
        """An explicit dataset_id beats the implicit default."""
        ID = 'DATASET'
        IMPLICIT_ID = 'IMPLICIT'
        with self._monkey(IMPLICIT_ID):
            self.assertEqual(self._callFUT(ID), ID)
    def test_id_implicit_set_passed_explicitly_w_keys(self):
        """An explicit dataset_id beats both the default and the first key."""
        from gcloud.datastore.test_batch import _Key
        ID = 'DATASET'
        IMPLICIT_ID = 'IMPLICIT'
        OTHER = 'OTHER'
        with self._monkey(IMPLICIT_ID):
            self.assertEqual(self._callFUT(ID, first_key=_Key(OTHER)), ID)
class Test__require_connection(unittest2.TestCase):
    """Tests for `gcloud.datastore.api._require_connection`."""

    # Sentinel distinguishing "argument not passed" from any real value:
    _MARKER = object()

    def _callFUT(self, passed=_MARKER):
        from gcloud.datastore.api import _require_connection
        if passed is not self._MARKER:
            return _require_connection(passed)
        return _require_connection()

    def _monkey(self, connection):
        # Temporarily patch the module-level implicit default connection:
        from gcloud.datastore._testing import _monkey_defaults
        return _monkey_defaults(connection=connection)

    def test_implicit_unset(self):
        with self._monkey(None):
            with self.assertRaises(EnvironmentError):
                self._callFUT()

    def test_implicit_unset_w_existing_batch(self):
        conn = object()
        with self._monkey(None):
            with _NoCommitBatch(dataset_id='DATASET', connection=conn):
                self.assertEqual(self._callFUT(), conn)

    def test_implicit_unset_w_existing_transaction(self):
        conn = object()
        with self._monkey(None):
            with _NoCommitTransaction(dataset_id='DATASET', connection=conn):
                self.assertEqual(self._callFUT(), conn)

    def test_implicit_unset_passed_explicitly(self):
        conn = object()
        with self._monkey(None):
            self.assertTrue(self._callFUT(conn) is conn)

    def test_implicit_set(self):
        implicit_conn = object()
        with self._monkey(implicit_conn):
            self.assertTrue(self._callFUT() is implicit_conn)

    def test_implicit_set_passed_explicitly(self):
        implicit_conn = object()
        conn = object()
        with self._monkey(implicit_conn):
            self.assertTrue(self._callFUT(conn) is conn)
class Test_get_multi_function(unittest2.TestCase):
def setUp(self):
from gcloud.datastore._testing import _setup_defaults
_setup_defaults(self)
def tearDown(self):
from gcloud.datastore._testing import _tear_down_defaults
_tear_down_defaults(self)
def _callFUT(self, keys, missing=None, deferred=None,
connection=None, dataset_id=None):
from gcloud.datastore.api import get_multi
return get_multi(keys, missing=missing, deferred=deferred,
connection=connection, dataset_id=dataset_id)
def test_wo_connection(self):
from gcloud.datastore.key import Key
DATASET_ID = 'DATASET'
key = Key('Kind', 1234, dataset_id=DATASET_ID)
self.assertRaises(EnvironmentError,
self._callFUT, [key], dataset_id=DATASET_ID)
def test_no_keys(self):
results = self._callFUT([])
self.assertEqual(results, [])
def test_miss(self):
from gcloud.datastore.key import Key
from gcloud.datastore.test_connection import _Connection
DATASET_ID = 'DATASET'
connection = _Connection()
key = Key('Kind', 1234, dataset_id=DATASET_ID)
results = self._callFUT([key], connection=connection,
dataset_id=DATASET_ID)
self.assertEqual(results, [])
def test_miss_wo_dataset_id(self):
from gcloud.datastore.key import Key
from gcloud.datastore.test_connection import _Connection
DATASET_ID = 'DATASET'
connection = _Connection()
key = Key('Kind', 1234, dataset_id=DATASET_ID)
results = self._callFUT([key], connection=connection)
self.assertEqual(results, [])
expected = {
'dataset_id': DATASET_ID,
'key_pbs': [key.to_protobuf()],
'transaction_id': None,
'eventual': False,
}
self.assertEqual(connection._called_with, expected)
def test_miss_w_missing(self):
from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
from gcloud.datastore.key import Key
from gcloud.datastore.test_connection import _Connection
DATASET_ID = 'DATASET'
KIND = 'Kind'
ID = 1234
# Make a missing entity pb to be returned from mock backend.
missed = datastore_pb.Entity()
missed.key.partition_id.dataset_id = DATASET_ID
path_element = missed.key.path_element.add()
path_element.kind = KIND
path_element.id = ID
# Set missing entity on mock connection.
connection = _Connection()
connection._missing = [missed]
key = Key(KIND, ID, dataset_id=DATASET_ID)
missing = []
entities = self._callFUT([key], connection=connection,
missing=missing, dataset_id=DATASET_ID)
self.assertEqual(entities, [])
self.assertEqual([missed.key.to_protobuf() for missed in missing],
[key.to_protobuf()])
def test_w_missing_non_empty(self):
from gcloud.datastore.key import Key
DATASET_ID = 'DATASET'
CONNECTION = object()
key = Key('Kind', 1234, dataset_id=DATASET_ID)
missing = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, self._callFUT,
[key], connection=CONNECTION,
missing=missing)
def test_w_deferred_non_empty(self):
from gcloud.datastore.key import Key
DATASET_ID = 'DATASET'
CONNECTION = object()
key = Key('Kind', 1234, dataset_id=DATASET_ID)
deferred = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, self._callFUT,
[key], connection=CONNECTION,
deferred=deferred)
def test_miss_w_deferred(self):
from gcloud.datastore.key import Key
from gcloud.datastore.test_connection import _Connection
DATASET_ID = 'DATASET'
key = Key('Kind', 1234, dataset_id=DATASET_ID)
# Set deferred entity on mock connection.
connection = _Connection()
connection._deferred = [key.to_protobuf()]
deferred = []
entities = self._callFUT([key], connection=connection,
deferred=deferred, dataset_id=DATASET_ID)
self.assertEqual(entities, [])
self.assertEqual([def_key.to_protobuf() for def_key in deferred],
[key.to_protobuf()])
def _verifyProtobufCall(self, called_with, URI, conn):
self.assertEqual(called_with['uri'], URI)
self.assertEqual(called_with['method'], 'POST')
self.assertEqual(called_with['headers']['Content-Type'],
'application/x-protobuf')
self.assertEqual(called_with['headers']['User-Agent'],
conn.USER_AGENT)
    def test_w_deferred_from_backend_but_not_passed(self):
        """get_multi retries keys the backend defers until they resolve.

        The first lookup response returns entity1 and defers ``key2``; a
        second lookup must then be issued for just ``key2``.  Both entities
        are returned and nothing lands in ``missing``.
        """
        from gcloud.datastore import _datastore_v1_pb2 as datastore_pb
        from gcloud.datastore.connection import Connection
        from gcloud.datastore.key import Key
        from gcloud.datastore import test_connection
        # Shortening name, import line above was too long.
        cmp_key_after_req = test_connection._compare_key_pb_after_request
        DATASET_ID = 'DATASET'
        key1 = Key('Kind', dataset_id=DATASET_ID)
        key2 = Key('Kind', 2345, dataset_id=DATASET_ID)
        key_pb1 = key1.to_protobuf()
        key_pb2 = key2.to_protobuf()
        # Build mock first response.
        rsp_pb1 = datastore_pb.LookupResponse()
        entity1 = datastore_pb.Entity()
        entity1.key.CopyFrom(key_pb1)
        # Add the entity to the "found" part of the response.
        rsp_pb1.found.add(entity=entity1)
        # Add the second key to the "deferred" part of the response.
        rsp_pb1.deferred.add().CopyFrom(key_pb2)
        # Build mock second response.
        rsp_pb2 = datastore_pb.LookupResponse()
        # Add in entity that was deferred.
        entity2 = datastore_pb.Entity()
        entity2.key.CopyFrom(key_pb2)
        rsp_pb2.found.add(entity=entity2)
        conn = Connection()
        # Add mock http object to connection with response from above.
        http = conn._http = _HttpMultiple(
            ({'status': '200'}, rsp_pb1.SerializeToString()),
            ({'status': '200'}, rsp_pb2.SerializeToString()),
        )
        missing = []
        found = self._callFUT([key1, key2], missing=missing, connection=conn)
        self.assertEqual(len(found), 2)
        self.assertEqual(len(missing), 0)
        # Check the actual contents on the response.
        self.assertEqual(found[0].key.path, key1.path)
        self.assertEqual(found[0].key.dataset_id, key1.dataset_id)
        self.assertEqual(found[1].key.path, key2.path)
        self.assertEqual(found[1].key.dataset_id, key2.dataset_id)
        # Check that our http object was called correctly.
        cw = http._called_with
        rq_class = datastore_pb.LookupRequest
        request = rq_class()
        self.assertEqual(len(cw), 2)
        # Make URI to check for requests.
        URI = '/'.join([
            conn.api_base_url,
            'datastore',
            conn.API_VERSION,
            'datasets',
            DATASET_ID,
            'lookup',
        ])
        # Make sure the first called with argument checks out.
        self._verifyProtobufCall(cw[0], URI, conn)
        request.ParseFromString(cw[0]['body'])
        keys = list(request.key)
        self.assertEqual(len(keys), 2)
        cmp_key_after_req(self, key_pb1, keys[0])
        cmp_key_after_req(self, key_pb2, keys[1])
        # Make sure the second called with argument checks out.
        self._verifyProtobufCall(cw[1], URI, conn)
        request.ParseFromString(cw[1]['body'])
        keys = list(request.key)
        # Second round only retries the deferred key.
        self.assertEqual(len(keys), 1)
        cmp_key_after_req(self, key_pb2, keys[0])
def test_hit(self):
from gcloud.datastore.key import Key
from gcloud.datastore.test_connection import _Connection
DATASET_ID = 'DATASET'
KIND = 'Kind'
ID = 1234
PATH = [{'kind': KIND, 'id': ID}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(DATASET_ID, KIND, ID, 'foo', 'Foo')
# Make a connection to return the entity pb.
connection = _Connection(entity_pb)
key = Key(KIND, ID, dataset_id=DATASET_ID)
result, = self._callFUT([key], connection=connection,
dataset_id=DATASET_ID)
new_key = result.key
# Check the returned value is as expected.
self.assertFalse(new_key is key)
self.assertEqual(new_key.dataset_id, DATASET_ID)
self.assertEqual(new_key.path, PATH)
self.assertEqual(list(result), ['foo'])
self.assertEqual(result['foo'], 'Foo')
    def test_hit_multiple_keys_same_dataset(self):
        """Two keys in one dataset return two entities, in request order."""
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        KIND = 'Kind'
        ID1 = 1234
        ID2 = 2345
        # Make a found entity pb to be returned from mock backend.
        entity_pb1 = _make_entity_pb(DATASET_ID, KIND, ID1)
        entity_pb2 = _make_entity_pb(DATASET_ID, KIND, ID2)
        # Make a connection to return the entity pbs.
        connection = _Connection(entity_pb1, entity_pb2)
        key1 = Key(KIND, ID1, dataset_id=DATASET_ID)
        key2 = Key(KIND, ID2, dataset_id=DATASET_ID)
        retrieved1, retrieved2 = self._callFUT(
            [key1, key2], connection=connection, dataset_id=DATASET_ID)
        # Check values match.  Neither stored pb had properties, so the
        # retrieved entities are empty mappings.
        self.assertEqual(retrieved1.key.path, key1.path)
        self.assertEqual(dict(retrieved1), {})
        self.assertEqual(retrieved2.key.path, key2.path)
        self.assertEqual(dict(retrieved2), {})
def test_hit_multiple_keys_different_dataset(self):
from gcloud.datastore.key import Key
DATASET_ID1 = 'DATASET'
DATASET_ID2 = 'DATASET-ALT'
# Make sure our IDs are actually different.
self.assertNotEqual(DATASET_ID1, DATASET_ID2)
key1 = Key('KIND', 1234, dataset_id=DATASET_ID1)
key2 = Key('KIND', 1234, dataset_id=DATASET_ID2)
with self.assertRaises(ValueError):
self._callFUT([key1, key2], connection=object(),
dataset_id=DATASET_ID1)
    def test_implicit_wo_transaction(self):
        """Monkeypatched defaults supply connection/dataset_id implicitly.

        Outside any transaction, ``transaction_id`` passed to the
        connection must be None.
        """
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        KIND = 'Kind'
        ID = 1234
        PATH = [{'kind': KIND, 'id': ID}]
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(DATASET_ID, KIND, ID, 'foo', 'Foo')
        # Make a connection to return the entity pb.
        CUSTOM_CONNECTION = _Connection(entity_pb)
        key = Key(KIND, ID, dataset_id=DATASET_ID)
        with _monkey_defaults(connection=CUSTOM_CONNECTION,
                              dataset_id=DATASET_ID):
            result, = self._callFUT([key])
        # The implicit defaults were forwarded to Connection.lookup.
        expected_called_with = {
            'dataset_id': DATASET_ID,
            'key_pbs': [key.to_protobuf()],
            'transaction_id': None,
            'eventual': False,
        }
        self.assertEqual(CUSTOM_CONNECTION._called_with, expected_called_with)
        new_key = result.key
        # Check the returned value is as expected.
        self.assertFalse(new_key is key)
        self.assertEqual(new_key.dataset_id, DATASET_ID)
        self.assertEqual(new_key.path, PATH)
        self.assertEqual(list(result), ['foo'])
        self.assertEqual(result['foo'], 'Foo')
    def test_w_transaction(self):
        """Inside an active transaction, its id is sent with the lookup."""
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        KIND = 'Kind'
        ID = 1234
        PATH = [{'kind': KIND, 'id': ID}]
        TRANSACTION = 'TRANSACTION'
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(DATASET_ID, KIND, ID, 'foo', 'Foo')
        # Make a connection to return the entity pb.
        CUSTOM_CONNECTION = _Connection(entity_pb)
        key = Key(KIND, ID, dataset_id=DATASET_ID)
        # Push a non-committing transaction onto the batch stack so the
        # lookup picks up its transaction id.
        with _NoCommitTransaction(DATASET_ID, CUSTOM_CONNECTION, TRANSACTION):
            result, = self._callFUT([key], connection=CUSTOM_CONNECTION,
                                    dataset_id=DATASET_ID)
        expected_called_with = {
            'dataset_id': DATASET_ID,
            'key_pbs': [key.to_protobuf()],
            'transaction_id': TRANSACTION,
            'eventual': False,
        }
        self.assertEqual(CUSTOM_CONNECTION._called_with, expected_called_with)
        new_key = result.key
        # Check the returned value is as expected.
        self.assertFalse(new_key is key)
        self.assertEqual(new_key.dataset_id, DATASET_ID)
        self.assertEqual(new_key.path, PATH)
        self.assertEqual(list(result), ['foo'])
        self.assertEqual(result['foo'], 'Foo')
    def test_max_loops(self):
        """With ``_MAX_LOOPS`` patched to -1, get_multi never issues a
        request, so nothing is found, missing, or deferred."""
        from gcloud._testing import _Monkey
        from gcloud.datastore import api
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        KIND = 'Kind'
        ID = 1234
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(DATASET_ID, KIND, ID, 'foo', 'Foo')
        # Make a connection to return the entity pb.
        connection = _Connection(entity_pb)
        key = Key(KIND, ID, dataset_id=DATASET_ID)
        deferred = []
        missing = []
        with _Monkey(api, _MAX_LOOPS=-1):
            result = self._callFUT([key], missing=missing, deferred=deferred,
                                   connection=connection,
                                   dataset_id=DATASET_ID)
        # Make sure we have no results, even though the connection has been
        # set up as in `test_hit` to return a single result.
        self.assertEqual(result, [])
        self.assertEqual(missing, [])
        self.assertEqual(deferred, [])
class Test_get_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.get`` (single-key convenience)."""
    def setUp(self):
        # Reset implicit connection/dataset defaults before each test.
        from gcloud.datastore._testing import _setup_defaults
        _setup_defaults(self)
    def tearDown(self):
        from gcloud.datastore._testing import _tear_down_defaults
        _tear_down_defaults(self)
    def _callFUT(self, key, missing=None, deferred=None,
                 connection=None, dataset_id=None):
        # Delegate to the function under test.
        from gcloud.datastore.api import get
        return get(key, missing=missing, deferred=deferred,
                   connection=connection, dataset_id=dataset_id)
    def test_hit(self):
        """A found key returns an Entity with a copied key and its data."""
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        KIND = 'Kind'
        ID = 1234
        PATH = [{'kind': KIND, 'id': ID}]
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(DATASET_ID, KIND, ID, 'foo', 'Foo')
        # Make a connection to return the entity pb.
        connection = _Connection(entity_pb)
        key = Key(KIND, ID, dataset_id=DATASET_ID)
        result = self._callFUT(key, connection=connection,
                               dataset_id=DATASET_ID)
        new_key = result.key
        # Check the returned value is as expected.
        self.assertFalse(new_key is key)
        self.assertEqual(new_key.dataset_id, DATASET_ID)
        self.assertEqual(new_key.path, PATH)
        self.assertEqual(list(result), ['foo'])
        self.assertEqual(result['foo'], 'Foo')
    def test_miss(self):
        """A key the backend does not find yields None (not an error)."""
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        connection = _Connection()
        key = Key('Kind', 1234, dataset_id=DATASET_ID)
        result = self._callFUT(key, connection=connection,
                               dataset_id=DATASET_ID)
        self.assertTrue(result is None)
class Test_put_multi_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.put_multi``."""
    def setUp(self):
        # Reset implicit connection/dataset defaults before each test.
        from gcloud.datastore._testing import _setup_defaults
        _setup_defaults(self)
    def tearDown(self):
        from gcloud.datastore._testing import _tear_down_defaults
        _tear_down_defaults(self)
    def _callFUT(self, entities, connection=None, dataset_id=None):
        # Delegate to the function under test.
        from gcloud.datastore.api import put_multi
        return put_multi(entities, connection=connection,
                         dataset_id=dataset_id)
    def test_no_connection(self):
        """Without an explicit or implicit connection, put_multi fails."""
        from gcloud.datastore import _implicit_environ
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        entity = _Entity(foo=u'bar')
        entity.key = _Key(_DATASET)
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        with self.assertRaises(EnvironmentError):
            self._callFUT([entity], dataset_id=_DATASET)
    def test_no_dataset_id(self):
        """The dataset id is inferred from the entity's key when omitted."""
        from gcloud.datastore import _implicit_environ
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        entity = _Entity(foo=u'bar')
        entity.key = _Key(_DATASET)
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        result = self._callFUT([entity], connection=connection)
        self.assertEqual(result, None)
        # One commit, carrying a single upsert with the entity's property.
        self.assertEqual(len(connection._committed), 1)
        dataset_id, mutation, transaction_id = connection._committed[0]
        self.assertEqual(dataset_id, _DATASET)
        upserts = list(mutation.upsert)
        self.assertEqual(len(upserts), 1)
        self.assertEqual(upserts[0].key, entity.key.to_protobuf())
        properties = list(upserts[0].property)
        self.assertEqual(properties[0].name, 'foo')
        self.assertEqual(properties[0].value.string_value, u'bar')
        self.assertTrue(transaction_id is None)
    def test_no_entities(self):
        """An empty list is a no-op and returns None."""
        from gcloud.datastore import _implicit_environ
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        result = self._callFUT([])
        self.assertEqual(result, None)
    def test_w_single_empty_entity(self):
        """A bare Entity (not a list) is rejected with ValueError."""
        # https://github.com/GoogleCloudPlatform/gcloud-python/issues/649
        from gcloud.datastore.entity import Entity
        self.assertRaises(ValueError, self._callFUT, Entity())
    def test_no_batch_w_partial_key(self):
        """A partial key routes to insert_auto_id instead of upsert."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        entity = _Entity(foo=u'bar')
        key = entity.key = _Key(_DATASET)
        key._id = None
        result = self._callFUT([entity], connection=connection,
                               dataset_id=_DATASET)
        self.assertEqual(result, None)
        self.assertEqual(len(connection._committed), 1)
        dataset_id, mutation, transaction_id = connection._committed[0]
        self.assertEqual(dataset_id, _DATASET)
        inserts = list(mutation.insert_auto_id)
        self.assertEqual(len(inserts), 1)
        self.assertEqual(inserts[0].key, key.to_protobuf())
        properties = list(inserts[0].property)
        self.assertEqual(properties[0].name, 'foo')
        self.assertEqual(properties[0].value.string_value, u'bar')
        self.assertTrue(transaction_id is None)
    def test_existing_batch_w_completed_key(self):
        """With a batch on the stack, mutations accrue there, uncommitted."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        entity = _Entity(foo=u'bar')
        key = entity.key = _Key(_DATASET)
        # Set up Batch on stack so we can check it is used.
        with _NoCommitBatch(_DATASET, connection) as CURR_BATCH:
            result = self._callFUT([entity], connection=connection)
        self.assertEqual(result, None)
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        upserts = list(CURR_BATCH.mutation.upsert)
        self.assertEqual(len(upserts), 1)
        self.assertEqual(upserts[0].key, key.to_protobuf())
        properties = list(upserts[0].property)
        self.assertEqual(properties[0].name, 'foo')
        self.assertEqual(properties[0].value.string_value, u'bar')
        self.assertEqual(len(CURR_BATCH.mutation.delete), 0)
    def test_implicit_connection(self):
        """The monkeypatched default connection is used when none given."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        entity = _Entity(foo=u'bar')
        key = entity.key = _Key(_DATASET)
        with _monkey_defaults(connection=connection):
            # Set up Batch on stack so we can check it is used.
            with _NoCommitBatch(_DATASET, connection) as CURR_BATCH:
                result = self._callFUT([entity])
        self.assertEqual(result, None)
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        self.assertEqual(len(CURR_BATCH.mutation.upsert), 1)
        upserts = list(CURR_BATCH.mutation.upsert)
        self.assertEqual(len(upserts), 1)
        self.assertEqual(upserts[0].key, key.to_protobuf())
        properties = list(upserts[0].property)
        self.assertEqual(properties[0].name, 'foo')
        self.assertEqual(properties[0].value.string_value, u'bar')
        self.assertEqual(len(CURR_BATCH.mutation.delete), 0)
class Test_put_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.put`` (single-entity convenience)."""
    def setUp(self):
        # Reset implicit connection/dataset defaults before each test.
        from gcloud.datastore._testing import _setup_defaults
        _setup_defaults(self)
    def tearDown(self):
        from gcloud.datastore._testing import _tear_down_defaults
        _tear_down_defaults(self)
    def _callFUT(self, entity, connection=None, dataset_id=None):
        # Delegate to the function under test.
        from gcloud.datastore.api import put
        return put(entity, connection=connection, dataset_id=dataset_id)
    def test_implicit_connection(self):
        """put uses the implicit connection and the batch on the stack."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Entity
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        entity = _Entity(foo=u'bar')
        key = entity.key = _Key(_DATASET)
        with _monkey_defaults(connection=connection):
            # Set up Batch on stack so we can check it is used.
            with _NoCommitBatch(_DATASET, connection) as CURR_BATCH:
                result = self._callFUT(entity)
        self.assertEqual(result, None)
        # The entity accrued as an upsert on the stacked batch.
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        self.assertEqual(len(CURR_BATCH.mutation.upsert), 1)
        upserts = list(CURR_BATCH.mutation.upsert)
        self.assertEqual(len(upserts), 1)
        self.assertEqual(upserts[0].key, key.to_protobuf())
        properties = list(upserts[0].property)
        self.assertEqual(properties[0].name, 'foo')
        self.assertEqual(properties[0].value.string_value, u'bar')
        self.assertEqual(len(CURR_BATCH.mutation.delete), 0)
class Test_delete_multi_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.delete_multi``."""
    def setUp(self):
        # Reset implicit connection/dataset defaults before each test.
        from gcloud.datastore._testing import _setup_defaults
        _setup_defaults(self)
    def tearDown(self):
        from gcloud.datastore._testing import _tear_down_defaults
        _tear_down_defaults(self)
    def _callFUT(self, keys, connection=None, dataset_id=None):
        # Delegate to the function under test.
        from gcloud.datastore.api import delete_multi
        return delete_multi(keys, connection=connection, dataset_id=dataset_id)
    def test_no_connection(self):
        """Without an explicit or implicit connection, delete_multi fails."""
        from gcloud.datastore import _implicit_environ
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        key = _Key(_DATASET)
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        with self.assertRaises(EnvironmentError):
            self._callFUT([key], dataset_id=_DATASET)
    def test_no_dataset_id(self):
        """The dataset id is inferred from the key when omitted."""
        from gcloud.datastore import _implicit_environ
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        result = self._callFUT([key], connection=connection)
        self.assertEqual(result, None)
        self.assertEqual(len(connection._committed), 1)
        dataset_id, mutation, transaction_id = connection._committed[0]
        self.assertEqual(dataset_id, _DATASET)
        self.assertEqual(list(mutation.delete), [key.to_protobuf()])
        self.assertTrue(transaction_id is None)
    def test_no_keys(self):
        """An empty key list is a no-op and returns None."""
        from gcloud.datastore import _implicit_environ
        self.assertEqual(_implicit_environ.get_default_connection(), None)
        result = self._callFUT([])
        self.assertEqual(result, None)
    def test_no_batch(self):
        """Without a batch on the stack, delete_multi commits immediately."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        result = self._callFUT([key], connection=connection,
                               dataset_id=_DATASET)
        self.assertEqual(result, None)
        self.assertEqual(len(connection._committed), 1)
        dataset_id, mutation, transaction_id = connection._committed[0]
        self.assertEqual(dataset_id, _DATASET)
        self.assertEqual(list(mutation.delete), [key.to_protobuf()])
        self.assertTrue(transaction_id is None)
    def test_wo_batch_w_key_different_than_default_dataset_id(self):
        """The key's own dataset id wins over the implicit default."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DEFAULT_DATASET = 'DEFAULT'
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        with _monkey_defaults(connection=connection,
                              dataset_id=_DEFAULT_DATASET):
            result = self._callFUT([key])
        self.assertEqual(result, None)
        self.assertEqual(len(connection._committed), 1)
        dataset_id, mutation, transaction_id = connection._committed[0]
        # Committed against the key's dataset, not the default.
        self.assertEqual(dataset_id, _DATASET)
        self.assertEqual(list(mutation.delete), [key.to_protobuf()])
        self.assertTrue(transaction_id is None)
    def test_w_existing_batch(self):
        """With a batch on the stack, deletes accrue there, uncommitted."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        # Set up Batch on stack so we can check it is used.
        with _NoCommitBatch(_DATASET, connection) as CURR_BATCH:
            result = self._callFUT([key])
        self.assertEqual(result, None)
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        self.assertEqual(len(CURR_BATCH.mutation.upsert), 0)
        deletes = list(CURR_BATCH.mutation.delete)
        self.assertEqual(len(deletes), 1)
        self.assertEqual(deletes[0], key._key)
        self.assertEqual(len(connection._committed), 0)
    def test_w_existing_transaction(self):
        """With a transaction on the stack, deletes accrue there too."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        # Set up Batch on stack so we can check it is used.
        with _NoCommitTransaction(_DATASET, connection) as CURR_BATCH:
            result = self._callFUT([key])
        self.assertEqual(result, None)
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        self.assertEqual(len(CURR_BATCH.mutation.upsert), 0)
        deletes = list(CURR_BATCH.mutation.delete)
        self.assertEqual(len(deletes), 1)
        self.assertEqual(deletes[0], key._key)
        self.assertEqual(len(connection._committed), 0)
    def test_implicit_connection_and_dataset_id(self):
        """Both connection and dataset id may come from implicit defaults."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        # Build basic mocks needed to delete.
        _DATASET = 'DATASET'
        connection = _Connection()
        key = _Key(_DATASET)
        with _monkey_defaults(connection=connection, dataset_id=_DATASET):
            # Set up Batch on stack so we can check it is used.
            with _NoCommitBatch(_DATASET, connection) as CURR_BATCH:
                result = self._callFUT([key])
        self.assertEqual(result, None)
        self.assertEqual(len(CURR_BATCH.mutation.insert_auto_id), 0)
        self.assertEqual(len(CURR_BATCH.mutation.upsert), 0)
        deletes = list(CURR_BATCH.mutation.delete)
        self.assertEqual(len(deletes), 1)
        self.assertEqual(deletes[0], key._key)
        self.assertEqual(len(connection._committed), 0)
class Test_delete_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.delete`` (single-key convenience)."""

    def setUp(self):
        from gcloud.datastore._testing import _setup_defaults
        _setup_defaults(self)

    def tearDown(self):
        from gcloud.datastore._testing import _tear_down_defaults
        _tear_down_defaults(self)

    def _callFUT(self, key, connection=None, dataset_id=None):
        from gcloud.datastore.api import delete
        return delete(key, connection=connection, dataset_id=dataset_id)

    def test_no_batch(self):
        """Without a batch on the stack, delete commits immediately."""
        from gcloud.datastore.test_batch import _Connection
        from gcloud.datastore.test_batch import _Key
        dataset = 'DATASET'
        connection = _Connection()
        key = _Key(dataset)
        outcome = self._callFUT(key, connection=connection,
                                dataset_id=dataset)
        self.assertEqual(outcome, None)
        # Exactly one commit, carrying a single delete mutation and no
        # transaction.
        committed = connection._committed
        self.assertEqual(len(committed), 1)
        committed_dataset, mutation, xact_id = committed[0]
        self.assertEqual(committed_dataset, dataset)
        self.assertEqual(list(mutation.delete), [key.to_protobuf()])
        self.assertTrue(xact_id is None)
class Test_allocate_ids_function(unittest2.TestCase):
    """Tests for ``gcloud.datastore.api.allocate_ids``."""
    def _callFUT(self, incomplete_key, num_ids, connection=None):
        # Delegate to the function under test.
        from gcloud.datastore.api import allocate_ids
        return allocate_ids(incomplete_key, num_ids, connection=connection)
    def test_w_explicit_connection(self):
        """The explicit connection is asked for num_ids key allocations."""
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        DATASET_ID = 'DATASET'
        INCOMPLETE_KEY = Key('KIND', dataset_id=DATASET_ID)
        CONNECTION = _Connection()
        NUM_IDS = 2
        result = self._callFUT(INCOMPLETE_KEY, NUM_IDS, connection=CONNECTION)
        # Check the IDs returned match.
        self.assertEqual([key.id for key in result], list(range(NUM_IDS)))
        # Check connection is called correctly.
        self.assertEqual(CONNECTION._called_dataset_id, DATASET_ID)
        self.assertEqual(len(CONNECTION._called_key_pbs), NUM_IDS)
    def test_w_implicit_connection(self):
        """Monkeypatched defaults supply connection and dataset id."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        CUSTOM_CONNECTION = _Connection()
        NUM_IDS = 2
        with _monkey_defaults(connection=CUSTOM_CONNECTION,
                              dataset_id='DATASET'):
            INCOMPLETE_KEY = Key('KIND')
            result = self._callFUT(INCOMPLETE_KEY, NUM_IDS)
        # Check the IDs returned.
        self.assertEqual([key.id for key in result], list(range(NUM_IDS)))
    def test_with_already_completed_key(self):
        """Allocating ids for a key that already has one is an error."""
        from gcloud.datastore._testing import _monkey_defaults
        from gcloud.datastore.key import Key
        from gcloud.datastore.test_connection import _Connection
        CUSTOM_CONNECTION = _Connection()
        with _monkey_defaults(connection=CUSTOM_CONNECTION,
                              dataset_id='DATASET'):
            COMPLETE_KEY = Key('KIND', 1234)
            self.assertRaises(ValueError, self._callFUT,
                              COMPLETE_KEY, 2)
class _NoCommitBatch(object):
    """Context manager pushing a ``Batch`` that is never committed."""

    def __init__(self, dataset_id, connection):
        from gcloud.datastore.batch import Batch
        self._staged = Batch(dataset_id, connection)

    def __enter__(self):
        from gcloud.datastore.batch import _BATCHES
        batch = self._staged
        _BATCHES.push(batch)
        return batch

    def __exit__(self, *ignored):
        # Pop without committing, so tests can inspect the mutation.
        from gcloud.datastore.batch import _BATCHES
        _BATCHES.pop()
class _NoCommitTransaction(object):
    """Context manager pushing a ``Transaction`` that is never committed."""

    def __init__(self, dataset_id, connection, transaction_id='TRANSACTION'):
        from gcloud.datastore.transaction import Transaction
        transaction = Transaction(dataset_id, connection)
        # Pretend the transaction has already begun with this id.
        transaction._id = transaction_id
        self._transaction = transaction

    def __enter__(self):
        from gcloud.datastore.batch import _BATCHES
        _BATCHES.push(self._transaction)
        return self._transaction

    def __exit__(self, *ignored):
        # Pop without committing, so tests can inspect the mutation.
        from gcloud.datastore.batch import _BATCHES
        _BATCHES.pop()
class _HttpMultiple(object):
def __init__(self, *responses):
self._called_with = []
self._responses = list(responses)
def request(self, **kw):
self._called_with.append(kw)
result, self._responses = self._responses[0], self._responses[1:]
return result
# ---------------------------------------------------------------------------
# (chunk boundary — the following tests belong to a separate suite)
from datetime import datetime
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db.models import (
CharField, DateTimeField, F, Max, OuterRef, Subquery, Value,
)
from django.db.models.functions import Upper
from django.test import TestCase
from .models import Article, Author, ChildArticle, OrderedByFArticle, Reference
class OrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 26))
cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27))
cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27))
cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28))
cls.author_1 = Author.objects.create(name="Name 1")
cls.author_2 = Author.objects.create(name="Name 2")
for i in range(2):
Author.objects.create()
def test_default_ordering(self):
"""
By default, Article.objects.all() orders by pub_date descending, then
headline ascending.
"""
self.assertQuerysetEqual(
Article.objects.all(), [
"Article 4",
"Article 2",
"Article 3",
"Article 1",
],
attrgetter("headline")
)
# Getting a single item should work too:
self.assertEqual(Article.objects.all()[0], self.a4)
def test_default_ordering_override(self):
"""
Override ordering with order_by, which is in the same format as the
ordering attribute in models.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("pub_date", "-headline"), [
"Article 1",
"Article 3",
"Article 2",
"Article 4",
],
attrgetter("headline")
)
def test_order_by_override(self):
"""
Only the last order_by has any effect (since they each override any
previous ordering).
"""
self.assertQuerysetEqual(
Article.objects.order_by("id"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("id").order_by("-headline"), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_nulls_first_and_last(self):
msg = "nulls_first and nulls_last are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True))
def assertQuerysetEqualReversible(self, queryset, sequence):
self.assertSequenceEqual(queryset, sequence)
self.assertSequenceEqual(queryset.reverse(), list(reversed(sequence)))
def test_order_by_nulls_last(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_last.
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").desc(nulls_last=True), 'headline'),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").asc(nulls_last=True), 'headline'),
[self.a3, self.a4, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(Upper("author__name").desc(nulls_last=True), 'headline'),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(Upper("author__name").asc(nulls_last=True), 'headline'),
[self.a3, self.a4, self.a1, self.a2],
)
def test_order_by_nulls_first(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_first.
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").asc(nulls_first=True), 'headline'),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(F("author").desc(nulls_first=True), 'headline'),
[self.a1, self.a2, self.a4, self.a3],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(Upper("author__name").asc(nulls_first=True), 'headline'),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertQuerysetEqualReversible(
Article.objects.order_by(Upper("author__name").desc(nulls_first=True), 'headline'),
[self.a1, self.a2, self.a4, self.a3],
)
    def test_orders_nulls_first_on_filtered_subquery(self):
        """Ordering by a subquery annotation honors nulls_first.

        author_3 has no articles, so the subquery yields NULL for it and
        it must sort first.
        """
        Article.objects.filter(headline='Article 1').update(author=self.author_1)
        Article.objects.filter(headline='Article 2').update(author=self.author_1)
        Article.objects.filter(headline='Article 4').update(author=self.author_2)
        # Drop the anonymous authors created in setUpTestData; only named
        # authors matter here.
        Author.objects.filter(name__isnull=True).delete()
        author_3 = Author.objects.create(name='Name 3')
        # Latest pub_date per author, as a correlated subquery.
        article_subquery = Article.objects.filter(
            author=OuterRef('pk'),
            headline__icontains='Article',
        ).order_by().values('author').annotate(
            last_date=Max('pub_date'),
        ).values('last_date')
        self.assertQuerysetEqualReversible(
            Author.objects.annotate(
                last_date=Subquery(article_subquery, output_field=DateTimeField())
            ).order_by(
                F('last_date').asc(nulls_first=True)
            ).distinct(),
            [author_3, self.author_1, self.author_2],
        )
def test_stop_slicing(self):
"""
Use the 'stop' part of slicing notation to limit the results.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[:2], [
"Article 1",
"Article 2",
],
attrgetter("headline")
)
def test_stop_start_slicing(self):
"""
Use the 'stop' and 'start' parts of slicing notation to offset the
result list.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[1:3], [
"Article 2",
"Article 3",
],
attrgetter("headline")
)
def test_random_ordering(self):
"""
Use '?' to order randomly.
"""
self.assertEqual(
len(list(Article.objects.order_by("?"))), 4
)
def test_reversed_ordering(self):
"""
Ordering can be reversed using the reverse() method on a queryset.
This allows you to extract things like "the last two items" (reverse
and then take the first two).
"""
self.assertQuerysetEqual(
Article.objects.all().reverse()[:2], [
"Article 1",
"Article 3",
],
attrgetter("headline")
)
def test_reverse_ordering_pure(self):
qs1 = Article.objects.order_by(F('headline').asc())
qs2 = qs1.reverse()
self.assertQuerysetEqual(
qs2, [
'Article 4',
'Article 3',
'Article 2',
'Article 1',
],
attrgetter('headline'),
)
self.assertQuerysetEqual(
qs1, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
    def test_reverse_meta_ordering_pure(self):
        """reverse() applies to the Meta ordering as well.

        Two articles share headline and pub_date, so ordering falls
        through to the later Meta ordering terms (author name); reverse()
        must flip that tiebreaker too.
        """
        Article.objects.create(
            headline='Article 5',
            pub_date=datetime(2005, 7, 30),
            author=self.author_1,
            second_author=self.author_2,
        )
        Article.objects.create(
            headline='Article 5',
            pub_date=datetime(2005, 7, 30),
            author=self.author_2,
            second_author=self.author_1,
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline='Article 5').reverse(),
            ['Name 2', 'Name 1'],
            attrgetter('author.name'),
        )
        self.assertQuerysetEqual(
            Article.objects.filter(headline='Article 5'),
            ['Name 1', 'Name 2'],
            attrgetter('author.name'),
        )
def test_no_reordering_after_slicing(self):
msg = 'Cannot reverse a query once a slice has been taken.'
qs = Article.objects.all()[0:2]
with self.assertRaisesMessage(TypeError, msg):
qs.reverse()
with self.assertRaisesMessage(TypeError, msg):
qs.last()
def test_extra_ordering(self):
"""
Ordering can be based on fields included from an 'extra' clause
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_quoting(self):
    """
    If the extra clause uses an SQL keyword for a name, it will be
    protected by quoting.
    """
    # "order" is an SQL reserved word; the backend must quote it.
    qs = Article.objects.extra(
        select={"order": "pub_date"},
        order_by=["order", "headline"],
    )
    self.assertQuerysetEqual(
        qs,
        ["Article 1", "Article 2", "Article 3", "Article 4"],
        attrgetter("headline"),
    )
def test_extra_ordering_with_table_name(self):
    """extra(order_by=...) accepts explicitly table-qualified columns."""
    cases = (
        (
            ['ordering_article.headline'],
            ["Article 1", "Article 2", "Article 3", "Article 4"],
        ),
        (
            ['-ordering_article.headline'],
            ["Article 4", "Article 3", "Article 2", "Article 1"],
        ),
    )
    for order_by, expected in cases:
        self.assertQuerysetEqual(
            Article.objects.extra(order_by=order_by),
            expected,
            attrgetter("headline"),
        )
def test_order_by_pk(self):
    """
    'pk' works as an ordering option in Meta.
    """
    # Authors are expected in descending-pk order per Meta.ordering.
    expected_pks = list(reversed(range(1, Author.objects.count() + 1)))
    self.assertQuerysetEqual(
        Author.objects.all(),
        expected_pks,
        attrgetter("pk"),
    )
def test_order_by_fk_attname(self):
    """
    ordering by a foreign key by its attribute name prevents the query
    from inheriting its related model ordering option (#19195).
    """
    # Pair authors with articles in opposite order (author 1 -> a4, ...,
    # author 4 -> a1) so that ordering by the raw 'author_id' column and
    # ordering through the related model's Meta.ordering would differ.
    for i in range(1, 5):
        author = Author.objects.get(pk=i)
        article = getattr(self, "a%d" % (5 - i))
        article.author = author
        article.save(update_fields={'author'})
    # 'author_id' (the attname) must sort by the FK column itself and
    # must NOT pull in Author's default ordering.
    self.assertQuerysetEqual(
        Article.objects.order_by('author_id'), [
            "Article 4",
            "Article 3",
            "Article 2",
            "Article 1",
        ],
        attrgetter("headline")
    )
def test_order_by_f_expression(self):
    """order_by() accepts F() expressions, bare or with asc()/desc()."""
    ascending = ["Article 1", "Article 2", "Article 3", "Article 4"]
    descending = list(reversed(ascending))
    for ordering, expected in (
        (F('headline'), ascending),
        (F('headline').asc(), ascending),
        (F('headline').desc(), descending),
    ):
        self.assertQuerysetEqual(
            Article.objects.order_by(ordering),
            expected,
            attrgetter("headline"),
        )
def test_order_by_f_expression_duplicates(self):
    """
    A column may only be included once (the first occurrence) so we check
    to ensure there are no duplicates by inspecting the SQL.
    """
    cases = (
        (
            (F('headline').asc(), F('headline').desc()),
            ["Article 1", "Article 2", "Article 3", "Article 4"],
        ),
        (
            (F('headline').desc(), F('headline').asc()),
            ["Article 4", "Article 3", "Article 2", "Article 1"],
        ),
    )
    for orderings, expected in cases:
        qs = Article.objects.order_by(*orderings)
        sql = str(qs.query).upper()
        # Only the first occurrence of the column should survive in
        # the ORDER BY clause.
        order_by_clause = sql[sql.find('ORDER BY'):]
        self.assertEqual(order_by_clause.count('HEADLINE'), 1)
        self.assertQuerysetEqual(qs, expected, attrgetter("headline"))
def test_order_by_constant_value(self):
    """Constant Value() expressions are usable as ordering terms."""
    expected = [self.a4, self.a3, self.a2, self.a1]
    # Order by annotated constant from selected columns.
    annotated = Article.objects.annotate(
        constant=Value('1', output_field=CharField()),
    ).order_by('constant', '-headline')
    self.assertSequenceEqual(annotated, expected)
    # Order by annotated constant which is out of selected columns.
    self.assertSequenceEqual(
        annotated.values_list('headline', flat=True),
        ['Article 4', 'Article 3', 'Article 2', 'Article 1'],
    )
    # Order by constant passed directly to order_by().
    direct = Article.objects.order_by(
        Value('1', output_field=CharField()), '-headline',
    )
    self.assertSequenceEqual(direct, expected)
def test_order_by_constant_value_without_output_field(self):
    # A Value() with no output_field cannot be type-resolved, so any
    # evaluation of a queryset ordered by it must raise FieldError.
    msg = 'Cannot resolve expression type, unknown output_field'
    qs = Article.objects.annotate(constant=Value('1')).order_by('constant')
    for ordered_qs in (
        qs,
        qs.values('headline'),
        Article.objects.order_by(Value('1')),
    ):
        with self.subTest(ordered_qs=ordered_qs), self.assertRaisesMessage(FieldError, msg):
            # first() forces evaluation, triggering expression resolution.
            ordered_qs.first()
def test_related_ordering_duplicate_table_reference(self):
    """
    An ordering referencing a model with an ordering referencing a model
    multiple time no circular reference should be detected (#24654).
    """
    first_author = Author.objects.create()
    second_author = Author.objects.create()
    # Cross-link the two articles' author pairs.
    for article, author, second in (
        (self.a1, first_author, second_author),
        (self.a2, second_author, first_author),
    ):
        article.author = author
        article.second_author = second
        article.save()
    r1 = Reference.objects.create(article_id=self.a1.pk)
    r2 = Reference.objects.create(article_id=self.a2.pk)
    self.assertSequenceEqual(Reference.objects.all(), [r2, r1])
def test_default_ordering_by_f_expression(self):
    """F expressions can be used in Meta.ordering."""
    articles = OrderedByFArticle.objects.all()
    # Reassign authors so the F()-based default ordering is observable.
    for headline, author in (
        ('Article 2', self.author_2),
        ('Article 3', self.author_1),
    ):
        articles.filter(headline=headline).update(author=author)
    self.assertQuerysetEqual(
        articles,
        ['Article 1', 'Article 4', 'Article 3', 'Article 2'],
        attrgetter('headline'),
    )
def test_order_by_ptr_field_with_default_ordering_by_expression(self):
    """Ordering by the parent-link field of a child model works when the
    parent's default ordering is expression-based."""
    shared_date = datetime(2005, 7, 27)
    ca1 = ChildArticle.objects.create(
        headline='h2', pub_date=shared_date, author=self.author_2,
    )
    ca2 = ChildArticle.objects.create(
        headline='h2', pub_date=shared_date, author=self.author_1,
    )
    ca3 = ChildArticle.objects.create(
        headline='h3', pub_date=shared_date, author=self.author_1,
    )
    ca4 = ChildArticle.objects.create(
        headline='h1', pub_date=datetime(2005, 7, 28),
    )
    ordered = ChildArticle.objects.order_by('article_ptr')
    self.assertSequenceEqual(ordered, [ca4, ca2, ca1, ca3])
|
|
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the M2M join table backing the
    ``available_timeslots`` field on ``reviews.ProposalVersion``.
    """

    def forwards(self, orm):
        """Create the join table and its uniqueness constraint."""
        # Adding M2M table for field available_timeslots on 'ProposalVersion'
        db.create_table('reviews_proposalversion_available_timeslots', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('proposalversion', models.ForeignKey(orm['reviews.proposalversion'], null=False)),
            ('timeslot', models.ForeignKey(orm['proposals.timeslot'], null=False))
        ))
        # Each (proposalversion, timeslot) pair may appear at most once.
        db.create_unique('reviews_proposalversion_available_timeslots', ['proposalversion_id', 'timeslot_id'])

    def backwards(self, orm):
        """Undo :meth:`forwards` by dropping the join table."""
        # Removing M2M table for field available_timeslots on 'ProposalVersion'
        db.delete_table('reviews_proposalversion_available_timeslots')

    # Frozen ORM snapshot used by South to reconstruct model state as of
    # this migration; these are NOT the live model definitions.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'conference.audiencelevel': {
            'Meta': {'object_name': 'AudienceLevel'},
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'conference.conference': {
            'Meta': {'object_name': 'Conference'},
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reviews_active': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'reviews_end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'reviews_start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('timezones.fields.TimeZoneField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'conference.section': {
            'Meta': {'object_name': 'Section'},
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['conference.Conference']"}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
        },
        'conference.sessionduration': {
            'Meta': {'object_name': 'SessionDuration'},
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'minutes': ('django.db.models.fields.IntegerField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        'conference.sessionkind': {
            'Meta': {'object_name': 'SessionKind'},
            'closed': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'conference.track': {
            'Meta': {'ordering': "['order']", 'object_name': 'Track'},
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'proposals.proposal': {
            'Meta': {'object_name': 'Proposal'},
            'abstract': ('django.db.models.fields.TextField', [], {}),
            'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'proposal_participations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['speakers.Speaker']"}),
            'audience_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.AudienceLevel']"}),
            'available_timeslots': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['proposals.TimeSlot']", 'null': 'True', 'blank': 'True'}),
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
            'duration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionDuration']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionKind']"}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'proposals'", 'to': "orm['speakers.Speaker']"}),
            'submission_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Track']", 'null': 'True', 'blank': 'True'})
        },
        'proposals.timeslot': {
            'Meta': {'unique_together': "((u'date', u'slot', u'section'),)", 'object_name': 'TimeSlot'},
            'date': ('django.db.models.fields.DateField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Section']"}),
            'slot': ('django.db.models.fields.IntegerField', [], {})
        },
        'reviews.comment': {
            'Meta': {'object_name': 'Comment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
            'deleted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'deleted_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['proposals.Proposal']"}),
            'proposal_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reviews.ProposalVersion']", 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
        },
        'reviews.proposalmetadata': {
            'Meta': {'object_name': 'ProposalMetaData'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_activity_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'latest_comment_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'latest_proposalversion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reviews.ProposalVersion']", 'null': 'True', 'blank': 'True'}),
            'latest_review_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'latest_version_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'num_reviews': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'proposal': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'review_metadata'", 'unique': 'True', 'to': "orm['proposals.Proposal']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
        },
        'reviews.proposalversion': {
            'Meta': {'object_name': 'ProposalVersion'},
            'abstract': ('django.db.models.fields.TextField', [], {}),
            'additional_speakers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "u'proposalversion_participations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['speakers.Speaker']"}),
            'audience_level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.AudienceLevel']"}),
            'available_timeslots': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['proposals.TimeSlot']", 'null': 'True', 'blank': 'True'}),
            'conference': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Conference']"}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {'max_length': '400'}),
            'duration': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionDuration']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.SessionKind']"}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'original': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'versions'", 'to': "orm['proposals.Proposal']"}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {}),
            'speaker': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'proposalversions'", 'to': "orm['speakers.Speaker']"}),
            'submission_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.utcnow'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'track': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['conference.Track']", 'null': 'True', 'blank': 'True'})
        },
        'reviews.review': {
            'Meta': {'unique_together': "(('user', 'proposal'),)", 'object_name': 'Review'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'proposal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviews'", 'to': "orm['proposals.Proposal']"}),
            'proposal_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reviews.ProposalVersion']", 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'rating': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'summary': ('django.db.models.fields.TextField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'speakers.speaker': {
            'Meta': {'object_name': 'Speaker'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'speaker_profile'", 'unique': 'True', 'to': "orm['auth.User']"})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }

    # Tell South which app(s) this frozen model state covers.
    complete_apps = ['reviews']
|
|
import io
import json
import warnings
from .core import url_to_fs
from .utils import merge_offset_ranges
# Parquet-Specific Utilities for fsspec
#
# Most of the functions defined in this module are NOT
# intended for public consumption. The only exception
# to this is `open_parquet_file`, which should be used in
# place of `fs.open()` to open parquet-formatted files
# on remote file systems.
def open_parquet_file(
    path,
    fs=None,
    columns=None,
    row_groups=None,
    storage_options=None,
    engine="auto",
    max_gap=64_000,
    max_block=256_000_000,
    footer_sample_size=1_000_000,
    **kwargs,
):
    """
    Return a file-like object for a single Parquet file.

    The selected parquet `engine` parses the footer metadata to work out
    which byte ranges of the file are actually needed, and the file is
    then opened with the "parts" (`KnownPartsOfAFile`) caching strategy
    so those ranges are served from memory. Intended for remote file
    systems; unlikely to help on local storage.

    Parameters
    ----------
    path: str
        Target file path.
    fs: AbstractFileSystem, optional
        Filesystem object to use for opening the file. If nothing is
        specified, an `AbstractFileSystem` object will be inferred.
    engine : str, default "auto"
        Parquet engine used for metadata parsing: "fastparquet",
        "pyarrow", or "auto" (which prefers "fastparquet" when both
        are installed). The engine must be importable.
    columns: list, optional
        List of all column names that may be read from the file.
    row_groups : list, optional
        List of all row-group indices that may be read from the file.
    storage_options : dict, optional
        Used to generate an `AbstractFileSystem` object if `fs` was
        not specified.
    max_gap : int, optional
        Neighboring byte ranges are merged when the gap between them
        is <= `max_gap`. Default is 64KB.
    max_block : int, optional
        Neighboring byte ranges are merged only while the aggregated
        range stays <= `max_block`. Default is 256MB.
    footer_sample_size : int, optional
        Number of bytes sampled from the end of the file to find the
        footer metadata. If the footer is larger, a second read is
        issued and performance suffers. Default is 1MB.
    **kwargs :
        Optional key-word arguments to pass to `fs.open`
    """
    # Infer a filesystem from the URL when the caller did not supply one.
    if fs is None:
        fs = url_to_fs(path, storage_options=(storage_options or {}))[0]

    # Ask the engine which byte ranges are required for the requested
    # columns/row-groups of this single path.
    known_parts = _get_parquet_byte_ranges(
        [path],
        fs,
        columns=columns,
        row_groups=row_groups,
        engine=engine,
        max_gap=max_gap,
        max_block=max_block,
        footer_sample_size=footer_sample_size,
    )[path]

    # Open with "parts" caching, merging any caller-provided cache
    # options with the pre-fetched data (without mutating the caller's
    # dict).
    cache_options = dict(kwargs.pop("cache_options", {}), data=known_parts)
    return fs.open(
        path,
        mode="rb",
        cache_type="parts",
        cache_options=cache_options,
        **kwargs,
    )
def _get_parquet_byte_ranges(
    paths,
    fs,
    columns=None,
    row_groups=None,
    max_gap=64_000,
    max_block=256_000_000,
    footer_sample_size=1_000_000,
    engine="auto",
):
    """Get a dictionary of the known byte ranges needed
    to read a specific column/row-group selection from a
    Parquet dataset. Each value in the output dictionary
    is intended for use as the `data` argument for the
    `KnownPartsOfAFile` caching strategy of a single path.
    """
    # Set the engine
    engine = _set_engine(engine)

    # Get file sizes asynchronously
    file_sizes = fs.sizes(paths)

    # Populate global paths, starts, & ends
    result = {}
    data_paths = []
    data_starts = []
    data_ends = []
    add_header_magic = True
    if columns is None and row_groups is None:
        # We are NOT selecting specific columns or row-groups.
        #
        # We can avoid sampling the footers, and just transfer
        # all file data with cat_ranges
        for i, path in enumerate(paths):
            result[path] = {}
            # Split each whole-file transfer into max_block-sized chunks.
            for b in range(0, file_sizes[i], max_block):
                data_paths.append(path)
                data_starts.append(b)
                data_ends.append(min(b + max_block, file_sizes[i]))
        add_header_magic = False  # "Magic" should already be included
    else:
        # We ARE selecting specific columns or row-groups.
        #
        # Gather file footers.
        # We just take the last `footer_sample_size` bytes of each
        # file (or the entire file if it is smaller than that)
        footer_starts = []
        footer_ends = []
        for i, path in enumerate(paths):
            footer_ends.append(file_sizes[i])
            sample_size = max(0, file_sizes[i] - footer_sample_size)
            footer_starts.append(sample_size)
        footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)

        # Check our footer samples and re-sample if necessary.
        missing_footer_starts = footer_starts.copy()
        missing_footer_ends = footer_starts.copy()
        large_footer = 0
        for i, path in enumerate(paths):
            # Parquet trailer: 4-byte little-endian footer length,
            # followed by the 4-byte b"PAR1" magic.
            footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
            real_footer_start = file_sizes[i] - (footer_size + 8)
            if real_footer_start < footer_starts[i]:
                # Sample missed part of the footer; remember the gap
                # [real_footer_start, sampled start) to re-fetch.
                missing_footer_starts[i] = real_footer_start
                missing_footer_ends[i] = footer_starts[i]
                large_footer = max(large_footer, (footer_size + 8))
        if large_footer:
            warnings.warn(
                f"Not enough data was used to sample the parquet footer. "
                f"Try setting footer_sample_size >= {large_footer}."
            )
            for i, block in enumerate(
                fs.cat_ranges(
                    paths,
                    missing_footer_starts,
                    missing_footer_ends,
                )
            ):
                # Prepend the re-fetched bytes so the sample now covers
                # the entire footer.
                footer_samples[i] = block + footer_samples[i]

        # Calculate required byte ranges for each path
        for i, path in enumerate(paths):

            # Deal with small-file case.
            # Just include all remaining bytes of the file
            # in a single range.
            if file_sizes[i] < max_block:
                if footer_starts[i] > 0:
                    # Only need to transfer the data if the
                    # footer sample isn't already the whole file
                    data_paths.append(path)
                    data_starts.append(0)
                    data_ends.append(footer_starts[i])
                continue

            # Use "engine" to collect data byte ranges
            path_data_starts, path_data_ends = engine._parquet_byte_ranges(
                footer_samples[i], columns, row_groups, footer_starts[i]
            )

            data_paths += [path] * len(path_data_starts)
            data_starts += path_data_starts
            data_ends += path_data_ends

        # Merge adjacent offset ranges
        data_paths, data_starts, data_ends = merge_offset_ranges(
            data_paths,
            data_starts,
            data_ends,
            max_gap=max_gap,
            max_block=max_block,
            sort=False,  # Should already be sorted
        )

        # Start by populating `result` with footer samples
        for i, path in enumerate(paths):
            result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}

    # Use cat_ranges to gather the data byte_ranges
    for i, data in enumerate(fs.cat_ranges(data_paths, data_starts, data_ends)):
        if data_ends[i] > data_starts[i]:
            result[data_paths[i]][(data_starts[i], data_ends[i])] = data

    # Add b"PAR1" to header if necessary
    if add_header_magic:
        for i, path in enumerate(paths):
            add_magic = True
            # Skip when some cached range already covers bytes [0, 4).
            for k in result[path].keys():
                if k[0] == 0 and k[1] >= 4:
                    add_magic = False
                    break
            if add_magic:
                result[path][(0, 4)] = b"PAR1"

    # Add b"" for reads beyond end of file
    for i, path in enumerate(paths):
        result[path][(file_sizes[i], 2 * file_sizes[i])] = b""

    return result
def _set_engine(engine_str):
# Define a list of parquet engines to try
if engine_str == "auto":
try_engines = ("fastparquet", "pyarrow")
elif not isinstance(engine_str, str):
raise ValueError(
"Failed to set parquet engine! "
"Please pass 'fastparquet', 'pyarrow', or 'auto'"
)
elif engine_str not in ("fastparquet", "pyarrow"):
raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`")
else:
try_engines = [engine_str]
# Try importing the engines in `try_engines`,
# and choose the first one that succeeds
for engine in try_engines:
try:
if engine == "fastparquet":
return FastparquetEngine()
elif engine == "pyarrow":
return PyarrowEngine()
except ImportError:
pass
# Raise an error if a supported parquet engine
# was not found
raise ImportError(
f"The following parquet engines are not installed "
f"in your python environment: {try_engines}."
f"Please install 'fastparquert' or 'pyarrow' to "
f"utilize the `fsspec.parquet` module."
)
class FastparquetEngine:
    # The purpose of the FastparquetEngine class is
    # to check if fastparquet can be imported (on initialization)
    # and to define a `_parquet_byte_ranges` method. In the
    # future, this class may also be used to define other
    # methods/logic that are specific to fastparquet.

    def __init__(self):
        # Raises ImportError when fastparquet is unavailable;
        # `_set_engine` relies on this to fall back to another engine.
        import fastparquet as fp

        self.fp = fp

    def _parquet_byte_ranges(self, footer, columns, row_groups, footer_start):
        """Return ``(starts, ends)`` lists of byte ranges needed to read
        the selected ``columns``/``row_groups``, given the raw ``footer``
        bytes and the file offset ``footer_start`` where they begin.
        """
        data_starts, data_ends = [], []
        pf = self.fp.ParquetFile(io.BytesIO(footer))

        # Convert columns to a set and add any index columns
        # specified in the pandas metadata (just in case)
        column_set = None if columns is None else set(columns)
        if column_set is not None and hasattr(pf, "pandas_metadata"):
            column_set |= set(pf.pandas_metadata.get("index_columns", []))

        # Loop through column chunks to add required byte ranges
        for r, row_group in enumerate(pf.row_groups):
            # Skip this row-group if we are targeting
            # specific row-groups
            if row_groups is None or r in row_groups:
                for column in row_group.columns:
                    name = column.meta_data.path_in_schema[0]
                    # Skip this column if we are targeting
                    # specific columns
                    if column_set is None or name in column_set:
                        # Prefer the dictionary page offset when present;
                        # it precedes the data pages of the chunk.
                        file_offset0 = column.meta_data.dictionary_page_offset
                        if file_offset0 is None:
                            file_offset0 = column.meta_data.data_page_offset
                        num_bytes = column.meta_data.total_compressed_size
                        # Clamp to the data region: bytes at or beyond
                        # footer_start are already in the footer sample.
                        if file_offset0 < footer_start:
                            data_starts.append(file_offset0)
                            data_ends.append(
                                min(file_offset0 + num_bytes, footer_start)
                            )
        return data_starts, data_ends
class PyarrowEngine:
    # The purpose of the PyarrowEngine class is
    # to check if pyarrow can be imported (on initialization)
    # and to define a `_parquet_byte_ranges` method. In the
    # future, this class may also be used to define other
    # methods/logic that are specific to pyarrow.

    def __init__(self):
        # Raises ImportError when pyarrow is unavailable;
        # `_set_engine` relies on this to fall back to another engine.
        import pyarrow.parquet as pq

        self.pq = pq

    def _parquet_byte_ranges(self, footer, columns, row_groups, footer_start):
        """Return ``(starts, ends)`` lists of byte ranges needed to read
        the selected ``columns``/``row_groups``, given the raw ``footer``
        bytes and the file offset ``footer_start`` where they begin.
        """
        data_starts, data_ends = [], []
        md = self.pq.ParquetFile(io.BytesIO(footer)).metadata

        # Convert columns to a set and add any index columns
        # specified in the pandas metadata (just in case)
        column_set = None if columns is None else set(columns)
        if column_set is not None:
            schema = md.schema.to_arrow_schema()
            has_pandas_metadata = (
                schema.metadata is not None and b"pandas" in schema.metadata
            )
            if has_pandas_metadata:
                column_set |= set(
                    json.loads(schema.metadata[b"pandas"].decode("utf8")).get(
                        "index_columns", []
                    )
                )

        # Loop through column chunks to add required byte ranges
        for r in range(md.num_row_groups):
            # Skip this row-group if we are targeting
            # specific row-groups
            if row_groups is None or r in row_groups:
                row_group = md.row_group(r)
                for c in range(row_group.num_columns):
                    column = row_group.column(c)
                    name = column.path_in_schema
                    # Skip this column if we are targeting
                    # specific columns.  Match either the full dotted
                    # path or its root field name (nested columns).
                    split_name = name.split(".")[0]
                    if (
                        column_set is None
                        or name in column_set
                        or split_name in column_set
                    ):
                        # Prefer the dictionary page offset when present;
                        # it precedes the data pages of the chunk.
                        file_offset0 = column.dictionary_page_offset
                        if file_offset0 is None:
                            file_offset0 = column.data_page_offset
                        num_bytes = column.total_compressed_size
                        # Clamp to the data region: bytes at or beyond
                        # footer_start are already in the footer sample.
                        if file_offset0 < footer_start:
                            data_starts.append(file_offset0)
                            data_ends.append(
                                min(file_offset0 + num_bytes, footer_start)
                            )
        return data_starts, data_ends
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Exposes the Python wrapper conversion to trt_graph."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import platform
import tempfile
import six as _six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.util import nest
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# TF-TRT is only built for Linux; fail fast rather than at first TRT call.
if platform.system() == "Windows":
  raise RuntimeError("Windows platform is not supported")
# Lazily load the op, since it's not available in cpu-only builds. Importing
# this at top will cause tests that imports TF-TRT fail when they're built
# and run without CUDA/GPU.
gen_trt_ops = LazyLoader(
    "gen_trt_ops", globals(),
    "tensorflow.compiler.tf2tensorrt.ops.gen_trt_ops")
# Lazily loaded native helpers for querying linked/loaded TensorRT versions.
wrap_py_utils = LazyLoader(
    "wrap_py_utils", globals(),
    "tensorflow.compiler.tf2tensorrt.wrap_py_utils")
# Register TRT ops in python, so that when users import this module they can
# execute a TRT-converted graph without calling any of the methods in this
# module.
#
# This will call register_op_list() in
# tensorflow/python/framework/op_def_registry.py, but it doesn't register
# the op or the op kernel in C++ runtime.
try:
  gen_trt_ops.trt_engine_op  # pylint: disable=pointless-statement
except AttributeError:
  # Op is unavailable (e.g. CPU-only build); graph execution of converted
  # models will simply not be possible, but importing this module still works.
  pass
def _to_bytes(s):
  """Return `s` encoded as UTF-8 bytes if it is a text string, else as-is."""
  if not isinstance(s, _six.text_type):
    return s
  return s.encode("utf-8", errors="surrogateescape")
def _to_string(s):
  """Return `s` decoded from UTF-8 if it is a byte string, else as-is."""
  if not isinstance(s, _six.binary_type):
    return s
  return s.decode("utf-8")
class TrtPrecisionMode(object):
  """Namespace holding the supported TF-TRT precision-mode strings."""
  FP32 = "FP32"
  FP16 = "FP16"
  INT8 = "INT8"

  @staticmethod
  def supported_precision_modes():
    """Returns every accepted precision-mode spelling (upper then lower case)."""
    upper_modes = [
        TrtPrecisionMode.FP32, TrtPrecisionMode.FP16, TrtPrecisionMode.INT8
    ]
    lower_modes = [mode.lower() for mode in upper_modes]
    return upper_modes + lower_modes
# Use a large enough number as the default max_workspace_size for TRT engines,
# so it can produce reasonable performance results with the default.
DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES = 1 << 30
# TrtConversionParams encapsulates the parameters that are used for TF-TRT
# conversion.
TrtConversionParams = collections.namedtuple(
    "TrtConversionParams",
    [
        # A template RewriterConfig proto used to create a TRT-enabled
        # RewriterConfig. If None, it will use a default one.
        "rewriter_config_template",
        # The maximum GPU temporary memory which the TRT engine can use at
        # execution time. This corresponds to the 'workspaceSize' parameter of
        # nvinfer1::IBuilder::setMaxWorkspaceSize().
        "max_workspace_size_bytes",
        # One of TrtPrecisionMode.supported_precision_modes().
        "precision_mode",
        # The minimum number of nodes required for a subgraph to be replaced by
        # TRTEngineOp.
        "minimum_segment_size",
        # Whether to generate dynamic TRT ops which will build the TRT network
        # and engine at run time.
        # i.e. Since TensorRT version < 6.0 does not support dynamic dimensions
        # other than the batch dimension, when the TensorFlow graph has a
        # non-batch dimension of dynamic size, we would need to enable this
        # option. This option should be set to True in TF 2.0.
        "is_dynamic_op",
        # Max number of cached TRT engines for dynamic TRT ops.
        # Created TRT engines for a dynamic dimension are cached.
        # This is the maximum number of engines that can be cached.
        # If the number of cached engines is already at max but none of them
        # supports the input shapes, the TRTEngineOp will fall back to run the
        # original TF subgraph that corresponds to the TRTEngineOp.
        "maximum_cached_engines",
        # This argument is ignored if precision_mode is not INT8. If set to
        # True, a calibration graph will be created to calibrate the missing
        # ranges. The calibration graph must be converted to an inference graph
        # by running calibration with calibrate(). If set to False, quantization
        # nodes will be expected for every tensor in the graph (excluding those
        # which will be fused). If a range is missing, an error will occur.
        # Please note that accuracy may be negatively affected if there is a
        # mismatch between which tensors TRT quantizes and which tensors were
        # trained with fake quantization.
        "use_calibration",
        # Max size for the input batch.
        # This option is deprecated in TF 2.0.
        "max_batch_size",
    ])
# Conservative defaults: FP32 (no precision loss), dynamic op mode (required
# in TF 2.0), a single cached engine, and calibration enabled for INT8.
DEFAULT_TRT_CONVERSION_PARAMS = TrtConversionParams(
    rewriter_config_template=None,
    max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
    precision_mode=TrtPrecisionMode.FP32,
    minimum_segment_size=3,
    is_dynamic_op=True,
    maximum_cached_engines=1,
    use_calibration=True,
    max_batch_size=1)
# Op type name used to identify TRT-converted nodes in a GraphDef.
_TRT_ENGINE_OP_NAME = "TRTEngineOp"
def _check_conversion_params(conversion_params):
  """Validate the provided TrtConversionParams.

  Args:
    conversion_params: a TrtConversionParams instance.

  Raises:
    TypeError: if any of the parameters are of unexpected type.
    ValueError: if any of the parameters are of unexpected value.
  """
  supported_precision_modes = TrtPrecisionMode.supported_precision_modes()
  if conversion_params.precision_mode not in supported_precision_modes:
    raise ValueError(
        # Note the trailing space: without it the two concatenated fragments
        # would render as "supported.It should".
        ("precision mode '{}' is not supported. "
         "It should be one of {}").format(conversion_params.precision_mode,
                                          supported_precision_modes))
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Compares the TensorRT version TensorFlow was linked against with the one
  loaded at runtime, logging the outcome.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  linked_version = wrap_py_utils.get_linked_tensorrt_version()
  loaded_version = wrap_py_utils.get_loaded_tensorrt_version()
  # Both are expected to be (major, minor, patch) tuples.
  assert isinstance(linked_version, tuple)
  assert isinstance(loaded_version, tuple)
  assert len(linked_version) == 3
  assert len(loaded_version) == 3
  tf_logging.info("Linked TensorRT version: %s" % str(linked_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  loaded_str = ".".join([str(x) for x in loaded_version])
  linked_str = ".".join([str(x) for x in linked_version])
  if loaded_version < linked_version:
    # Loaded library is older than the one we compiled against: the runtime
    # cannot provide forward compatibility.
    tf_logging.error(
        "Loaded TensorRT %s but linked TensorFlow against TensorRT %s. " %
        (loaded_str, linked_str) +
        "TensorRT does not support forward compatibility. " +
        "It is also required to use the same major version of TensorRT " +
        "during compilation and runtime.")
    raise RuntimeError("Incompatible TensorRT versions")
  if loaded_version[0] > linked_version[0]:
    # Newer major version at runtime is also not allowed.
    tf_logging.error(
        "Loaded TensorRT %s but linked TensorFlow against TensorRT %s. " %
        (loaded_str, linked_str) +
        "It is required to use the same major version " +
        "of TensorRT during compilation and runtime.")
    raise RuntimeError("Incompatible TensorRT major version")
  if loaded_version != linked_version:
    # Same major, newer minor/patch: fine, but worth recording.
    tf_logging.info(
        "Loaded TensorRT %s and linked TensorFlow against TensorRT %s. " %
        (loaded_str, linked_str) +
        "This is supported because TensorRT " +
        " minor/patch upgrades are backward compatible")
def get_tensorrt_rewriter_config(conversion_params, is_v2=False):
  """Returns a RewriterConfig proto for TRT transformation.

  Args:
    conversion_params: a TrtConversionParams instance.
    is_v2: whether we're getting a RewriterConfig for TF 2.0.

  Returns:
    A RewriterConfig proto which sets a TensorRTOptimizer to run Grappler.

  Raises:
    TypeError: if any of the parameters are of unexpected type.
    ValueError: if any of the parameters are of unexpected value.
  """
  if conversion_params.rewriter_config_template is not None and not isinstance(
      conversion_params.rewriter_config_template,
      rewriter_config_pb2.RewriterConfig):
    raise TypeError(
        "rewriter_config_template should be a RewriterConfig proto.")
  _check_conversion_params(conversion_params)
  rewriter_config_with_trt = rewriter_config_pb2.RewriterConfig()
  if conversion_params.rewriter_config_template is None:
    # Layout optimizer may add Const nodes followed by Reshape nodes, thus we
    # need to run constant folding again.
    rewriter_config_with_trt.optimizers.extend(
        ["constfold", "layout", "constfold"])
    rewriter_config_with_trt.meta_optimizer_iterations = (
        rewriter_config_pb2.RewriterConfig.ONE)
  else:
    rewriter_config_with_trt.CopyFrom(
        conversion_params.rewriter_config_template)
  # NOTE: custom optimizers run in list order, so the TensorRTOptimizer added
  # here executes before the trailing constfold added on the next line.
  optimizer = rewriter_config_with_trt.custom_optimizers.add()
  # Add a constfold optimizer to cleanup the unused Const nodes.
  rewriter_config_with_trt.custom_optimizers.add().name = "constfold"
  optimizer.name = "TensorRTOptimizer"
  optimizer.parameter_map[
      "minimum_segment_size"].i = conversion_params.minimum_segment_size
  optimizer.parameter_map[
      "max_workspace_size_bytes"].i = conversion_params.max_workspace_size_bytes
  optimizer.parameter_map["precision_mode"].s = _to_bytes(
      conversion_params.precision_mode)
  optimizer.parameter_map[
      "maximum_cached_engines"].i = conversion_params.maximum_cached_engines
  optimizer.parameter_map[
      "use_calibration"].b = conversion_params.use_calibration
  if is_v2:
    # Static mode (building TRT engine without executing the op) is deprecated
    # in TF 2.0. See TrtGraphConverterV2 for more details.
    if not conversion_params.is_dynamic_op:
      raise ValueError("Option is_dynamic_op=False is not supported in TF 2.0, "
                       "please set it to True instead.")
    optimizer.parameter_map["is_dynamic_op"].b = True
  else:
    # TF 1.x keeps max_batch_size and honors the caller's is_dynamic_op choice.
    optimizer.parameter_map[
        "max_batch_size"].i = conversion_params.max_batch_size
    optimizer.parameter_map["is_dynamic_op"].b = conversion_params.is_dynamic_op
  return rewriter_config_with_trt
# Remove all scope prefixes in the node name. In TF 2.0, the same concrete
# function can be initialized multiple times with different prefixes, and
# this will result in the same TRTEngineOp being initialized multiple times
# with different cache and duplicate TRT engines.
# TODO(laigd): this may be caused by the fact that TRTEngineOp is not
# stateful, need to investigate.
# TODO(laigd): we rely on the fact that all functions are fully inlined
# before TF-TRT optimizer is called, as otherwise it may generate the same
# name when optimizing a different function graph. Fix this.
def _get_canonical_engine_name(name):
return name.split("/")[-1]
class TrtGraphConverter(object):
  """A converter for TF-TRT transformation for TF 1.x GraphDef/SavedModels.

  To run the conversion without quantization calibration (e.g. for FP32/FP16
  precision modes):

  ```python
  converter = TrtGraphConverter(
      input_saved_model_dir="my_dir",
      precision_mode=TrtPrecisionMode.FP16)
  converted_graph_def = converter.convert()
  converter.save(output_saved_model_dir)
  ```

  To run the conversion with quantization calibration:

  ```python
  converter = TrtGraphConverter(
      input_saved_model_dir="my_dir",
      precision_mode=TrtPrecisionMode.INT8)
  converter.convert()
  # Run calibration 10 times.
  converted_graph_def = converter.calibrate(
      fetch_names=['output:0'],
      num_runs=10,
      feed_dict_fn=lambda: {'input:0': my_next_data()})
  converter.save(output_saved_model_dir)
  ```
  """
  def __init__(self,
               input_saved_model_dir=None,
               input_saved_model_tags=None,
               input_saved_model_signature_key=None,
               input_graph_def=None,
               nodes_blacklist=None,
               session_config=None,
               max_batch_size=1,
               max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
               precision_mode=TrtPrecisionMode.FP32,
               minimum_segment_size=3,
               is_dynamic_op=False,
               maximum_cached_engines=1,
               use_calibration=True):
    """Initialize the converter.

    Args:
      input_saved_model_dir: the directory to load the SavedModel which contains
        the input graph to transforms. Used only when input_graph_def is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      input_graph_def: a GraphDef object containing a model to be transformed.
        If set to None, the graph will be read from the SavedModel loaded from
        input_saved_model_dir.
      nodes_blacklist: list of node names to prevent the converter from
        touching.
      session_config: the ConfigProto used to create a Session. It's also used
        as a template to create a TRT-enabled ConfigProto for conversion. If not
        specified, a default ConfigProto will be used.
      max_batch_size: max size for the input batch.
      max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
        engine can use at execution time. This corresponds to the
        'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
      precision_mode: one of TrtPrecisionMode.supported_precision_modes().
      minimum_segment_size: the minimum number of nodes required for a subgraph
        to be replaced by TRTEngineOp.
      is_dynamic_op: whether to generate dynamic TRT ops which will build the
        TRT network and engine at run time.
      maximum_cached_engines: max number of cached TRT engines in dynamic TRT
        ops. If the number of cached engines is already at max but none of them
        can serve the input, the TRTEngineOp will fall back to run the TF
        function based on which the TRTEngineOp is created.
      use_calibration: this argument is ignored if precision_mode is not INT8.
        If set to True, a calibration graph will be created to calibrate the
        missing ranges. The calibration graph must be converted to an inference
        graph by running calibration with calibrate(). If set to False,
        quantization nodes will be expected for every tensor in the graph
        (excluding those which will be fused). If a range is missing, an error
        will occur. Please note that accuracy may be negatively affected if
        there is a mismatch between which tensors TRT quantizes and which
        tensors were trained with fake quantization.

    Raises:
      ValueError: if the combination of the parameters is invalid.
      RuntimeError: if this class is used in TF 2.0.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          "Please use tf.experimental.tensorrt.Converter in TF 2.0.")
    # Exactly one of input_graph_def / input_saved_model_dir must be given.
    if input_graph_def and input_saved_model_dir:
      raise ValueError(
          "Can only specify one of input_graph_def and input_saved_model_dir")
    if not input_graph_def and not input_saved_model_dir:
      raise ValueError("Must specify one of input_graph_def and "
                       "input_saved_model_dir")
    _check_trt_version_compatibility()
    self._input_graph_def = input_graph_def
    self._nodes_blacklist = nodes_blacklist
    self._input_saved_model_dir = input_saved_model_dir
    self._converted = False
    self._grappler_meta_graph_def = None
    self._input_saved_model_tags = (
        input_saved_model_tags or [tag_constants.SERVING])
    self._input_saved_model_signature_key = (
        input_saved_model_signature_key or
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    self._session_config = session_config or config_pb2.ConfigProto()
    # For calibration usage.
    self._calibration_graph = None
    self._calibration_data_collected = False
    self._need_calibration = (
        precision_mode == TrtPrecisionMode.INT8 and use_calibration)
    # Calibration requires dynamic TRT ops; silently force them on.
    if self._need_calibration and not is_dynamic_op:
      tf_logging.warn(
          "INT8 precision mode with calibration is supported with "
          "dynamic TRT ops only. Disregarding is_dynamic_op parameter.")
      is_dynamic_op = True
    # TODO(laigd):
    # - Verify in int8 mode that maximum_cached_engines is set properly.
    # - If it fails to build the int8 engine it should return error.
    rewriter_config_template = None
    if (session_config and session_config.HasField("graph_options") and
        session_config.graph_options.HasField("rewrite_options")):
      rewriter_config_template = session_config.graph_options.rewrite_options
    self._conversion_params = TrtConversionParams(
        rewriter_config_template=rewriter_config_template,
        max_workspace_size_bytes=max_workspace_size_bytes,
        precision_mode=precision_mode,
        minimum_segment_size=minimum_segment_size,
        is_dynamic_op=is_dynamic_op,
        maximum_cached_engines=maximum_cached_engines,
        use_calibration=use_calibration,
        max_batch_size=max_batch_size)
    _check_conversion_params(self._conversion_params)
  def _run_conversion(self):
    """Run Grappler's OptimizeGraph() tool to convert the graph."""
    # Create custom ConfigProto for Grappler.
    grappler_session_config = config_pb2.ConfigProto()
    grappler_session_config.CopyFrom(self._session_config)
    custom_rewriter_config = get_tensorrt_rewriter_config(
        conversion_params=self._conversion_params)
    grappler_session_config.graph_options.rewrite_options.CopyFrom(
        custom_rewriter_config)
    # Run Grappler.
    self._converted_graph_def = tf_optimizer.OptimizeGraph(
        grappler_session_config,
        self._grappler_meta_graph_def,
        graph_id=b"tf_graph")
    self._converted = True
  def _add_nodes_blacklist(self):
    """Record blacklisted nodes in the meta graph so Grappler preserves them."""
    if self._nodes_blacklist:
      # Nodes in the "train_op" collection are treated as fetches and thus
      # not folded away or rewritten by the optimizers.
      collection_def = self._grappler_meta_graph_def.collection_def["train_op"]
      blacklist = collection_def.node_list.value
      for i in self._nodes_blacklist:
        if isinstance(i, ops.Tensor):
          blacklist.append(_to_bytes(i.name))
        else:
          blacklist.append(_to_bytes(i))
  def _convert_graph_def(self):
    """Convert the input GraphDef."""
    graph = ops.Graph()
    with graph.as_default():
      importer.import_graph_def(self._input_graph_def, name="")
    self._grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True), graph=graph)
    self._add_nodes_blacklist()
    self._run_conversion()
  def _collections_to_keep(self, collection_keys):
    """Filter out collection keys that reference variable/control-flow ops."""
    # TODO(laigd): currently we use the collection key to filter out
    # collections that depend on variable ops, but this may miss some
    # other user-defined collections. A better way would be to use
    # CollectionDef::NodeList for the filtering.
    collections_to_remove = (
        ops.GraphKeys._VARIABLE_COLLECTIONS + [
            ops.GraphKeys.TRAIN_OP, ops.GraphKeys.WHILE_CONTEXT,
            ops.GraphKeys.COND_CONTEXT
        ])
    return [key for key in collection_keys if key not in collections_to_remove]
  def _convert_saved_model(self):
    """Convert the input SavedModel."""
    graph = ops.Graph()
    with session.Session(graph=graph, config=self._session_config) as sess:
      input_meta_graph_def = loader.load(sess, self._input_saved_model_tags,
                                         self._input_saved_model_dir)
      input_signature_def = input_meta_graph_def.signature_def[
          self._input_saved_model_signature_key]
      def _gather_names(tensor_info):
        """Get the node names from a TensorInfo."""
        return set([tensor_info[key].name.split(":")[0] for key in tensor_info])
      # Get input and outputs from all SignatureDef.
      output_node_names = _gather_names(input_signature_def.inputs).union(
          _gather_names(input_signature_def.outputs))
      # Preserve nodes in collection
      for collection_key in self._collections_to_keep(
          input_meta_graph_def.collection_def):
        for op in sess.graph.get_collection(collection_key):
          if isinstance(op, ops.Operation):
            output_node_names.add(op.name.split(":")[0])
      # Freeze the variables in the SavedModel graph and copy the frozen
      # graph over.
      frozen_graph_def = graph_util.convert_variables_to_constants(
          sess, sess.graph.as_graph_def(add_shapes=True),
          list(output_node_names))
      self._grappler_meta_graph_def = meta_graph_pb2.MetaGraphDef()
      self._grappler_meta_graph_def.graph_def.CopyFrom(frozen_graph_def)
      # Copy the collections that are not variables.
      for collection_key in self._collections_to_keep(
          input_meta_graph_def.collection_def):
        self._grappler_meta_graph_def.collection_def[collection_key].CopyFrom(
            input_meta_graph_def.collection_def[collection_key])
      self._add_nodes_blacklist()
      # Copy other information.
      self._grappler_meta_graph_def.meta_info_def.CopyFrom(
          input_meta_graph_def.meta_info_def)
      self._grappler_meta_graph_def.signature_def[
          self._input_saved_model_signature_key].CopyFrom(input_signature_def)
      # TODO(laigd): maybe add back AssetFileDef.
    self._run_conversion()
  def convert(self):
    """Run the TF-TRT conversion.

    Returns:
      The converted GraphDef for TF 1.x.
    """
    assert not self._converted
    if self._input_graph_def:
      self._convert_graph_def()
    else:
      self._convert_saved_model()
    return self._converted_graph_def
  def calibrate(self,
                fetch_names,
                num_runs,
                feed_dict_fn=None,
                input_map_fn=None):
    """Run the calibration and return the calibrated GraphDef.

    Args:
      fetch_names: a list of output tensor name to fetch during calibration.
      num_runs: number of runs of the graph during calibration.
      feed_dict_fn: a function that returns a dictionary mapping input names (as
        strings) in the GraphDef to be calibrated to values (e.g. Python list,
        numpy arrays, etc). One and only one of `feed_dict_fn` and
        `input_map_fn` should be specified.
      input_map_fn: a function that returns a dictionary mapping input names (as
        strings) in the GraphDef to be calibrated to Tensor objects. The values
        of the named input tensors in the GraphDef to be calibrated will be
        re-mapped to the respective `Tensor` values during calibration. One and
        only one of `feed_dict_fn` and `input_map_fn` should be specified.

    Raises:
      ValueError: if the input combination is invalid.
      RuntimeError: if this method is called in eager mode.

    Returns:
      The GraphDef after the calibration.
    """
    assert self._converted
    assert self._need_calibration
    assert not self._calibration_data_collected
    if (feed_dict_fn and input_map_fn) or (not feed_dict_fn and
                                           not input_map_fn):
      raise ValueError(
          "Should specify one and only one of feed_dict_fn and input_map_fn.")
    if input_map_fn:
      for k, v in input_map_fn().items():
        if not isinstance(k, str):
          raise ValueError("Keys of input_map_fn must be of type str")
        if not isinstance(v, ops.Tensor):
          raise ValueError("Values of input_map_fn must be of type tf.Tensor")
    self._calibration_graph = ops.Graph()
    with self._calibration_graph.as_default():
      fetches = importer.import_graph_def(
          self._converted_graph_def,
          input_map=input_map_fn() if input_map_fn else None,
          return_elements=fetch_names,
          name="")
    with session.Session(
        graph=self._calibration_graph,
        config=self._session_config) as calibration_sess:
      # Each run feeds one batch of calibration data through the graph so the
      # TRTEngineOps can collect range statistics.
      for _ in range(num_runs):
        calibration_sess.run(
            fetches, feed_dict=feed_dict_fn() if feed_dict_fn else None)
      # Maps device name to the corresponding get_calibration_data.
      #
      # TODO(laigd): a better way would be to use calibration_sess to list
      # all the devices, add one get_calibration_data for each device, and
      # fetch each such op for every resource until its found. This can work
      # even when the device of the TRTEngineOp is empty or not fully specified.
      device_to_get_resource_op_map = {}
      with self._calibration_graph.as_default():
        resource_name_input = array_ops.placeholder(dtypes.string)
        for node in self._converted_graph_def.node:
          if node.op == _TRT_ENGINE_OP_NAME:
            # Adds the get_calibration_data op for the device if not done
            # before. We only add one such op for each device.
            # TODO(laigd): What if the device is empty?????
            if node.device not in device_to_get_resource_op_map:
              with self._calibration_graph.device(node.device):
                serialized_resources_output = (
                    gen_trt_ops.get_calibration_data_op(resource_name_input))
              device_to_get_resource_op_map[node.device] = (
                  serialized_resources_output)
            # Get the calibration resource.
            calibration_result = calibration_sess.run(
                device_to_get_resource_op_map[node.device],
                feed_dict={
                    resource_name_input: _get_canonical_engine_name(node.name)
                })
            # Embed the collected calibration table directly in the node so it
            # is carried along with the converted GraphDef.
            node.attr["calibration_data"].s = calibration_result
    self._calibration_data_collected = True
    return self._converted_graph_def
  def save(self, output_saved_model_dir):
    """Save the converted graph as a SavedModel.

    Args:
      output_saved_model_dir: construct a SavedModel using the converted
        GraphDef and save it to the specified directory. This option only works
        when the input graph is loaded from a SavedModel, i.e. when
        input_saved_model_dir is specified and input_graph_def is None in
        __init__().

    Raises:
      ValueError: if the input to the converter is a GraphDef instead of a
        SavedModel.
    """
    assert self._converted
    if self._need_calibration:
      assert self._calibration_data_collected
    if self._input_graph_def:
      raise ValueError(
          "Not able to save to a SavedModel since input is a GraphDef")
    def _restore_collections(dest_graph, src_meta_graph_def, collection_keys):
      """Restores collections that we need to keep."""
      scope = ""
      for key in collection_keys:
        collection_def = src_meta_graph_def.collection_def[key]
        kind = collection_def.WhichOneof("kind")
        if kind is None:
          tf_logging.error(
              "Cannot identify data type for collection %s. Skipping.", key)
          continue
        from_proto = ops.get_from_proto_function(key)
        if from_proto and kind == "bytes_list":
          proto_type = ops.get_collection_proto_type(key)
          # It is assumed that there are no Variables Keys in collections
          for value in collection_def.bytes_list.value:
            proto = proto_type()
            proto.ParseFromString(value)
            try:
              new_value = from_proto(proto, import_scope=scope)
            # NOTE(review): bare except silently skips any entry that fails
            # to deserialize -- deliberate best-effort restore, but it also
            # hides unexpected errors.
            except:
              continue
            dest_graph.add_to_collection(key, new_value)
        else:
          field = getattr(collection_def, kind)
          if kind == "node_list":
            for value in field.value:
              name = ops.prepend_name_scope(value, scope)
              # Since the graph has been optimized, the node may no longer
              # exists
              try:
                col_op = dest_graph.as_graph_element(name)
              except (TypeError, ValueError, KeyError) as e:
                continue
              dest_graph.add_to_collection(key, col_op)
          elif kind == "int64_list":
            # NOTE(opensource): This force conversion is to work around the
            # fact that Python2 distinguishes between int and long, while
            # Python3 has only int.
            for value in field.value:
              dest_graph.add_to_collection(key, int(value))
          else:
            for value in field.value:
              dest_graph.add_to_collection(key,
                                           ops.prepend_name_scope(value, scope))
    # Write the transformed graphdef as SavedModel.
    saved_model_builder = builder.SavedModelBuilder(output_saved_model_dir)
    with ops.Graph().as_default():
      importer.import_graph_def(self._converted_graph_def, name="")
      _restore_collections(
          ops.get_default_graph(), self._grappler_meta_graph_def,
          self._collections_to_keep(
              self._grappler_meta_graph_def.collection_def))
      # We don't use any specific converter here.
      with session.Session(config=self._session_config) as sess:
        saved_model_builder.add_meta_graph_and_variables(
            sess,
            self._input_saved_model_tags,
            signature_def_map=self._grappler_meta_graph_def.signature_def)
    # Ignore other meta graphs from the input SavedModel.
    saved_model_builder.save()
def _get_resource_handle(name, device):
  """Creates an op on `device` yielding the handle of the named TRT resource."""
  with ops.device(device):
    handle = gen_trt_ops.create_trt_resource_handle(resource_name=name)
  return handle
class _TRTEngineResourceDeleter(tracking.CapturableResourceDeleter):
  """Deleter that tears down a TRT engine cache resource on a given device."""

  def __init__(self, resource_name, device):
    super(_TRTEngineResourceDeleter, self).__init__()
    self._resource_name = resource_name
    self._device = device

  def destroy_resource(self):
    """Emits the op destroying the tracked resource, ignoring missing entries."""
    resource_handle = _get_resource_handle(self._resource_name, self._device)
    with ops.device(self._device):
      gen_resource_variable_ops.destroy_resource_op(
          resource_handle, ignore_lookup_error=True)
class _TRTEngineResource(tracking.TrackableResource):
  """Class to track the serialized engines resource."""
  def __init__(self,
               resource_name,
               filename,
               maximum_cached_engines,
               device="GPU"):
    # `device` is forwarded to the base class; _create_resource() below reads
    # it back as `self._resource_device`, which is presumably set by
    # tracking.TrackableResource.__init__ -- TODO confirm.
    super(_TRTEngineResource, self).__init__(
        device=device, deleter=_TRTEngineResourceDeleter(resource_name, device))
    self._resource_name = resource_name
    # Track the serialized engine file in the SavedModel.
    self._filename = self._track_trackable(
        tracking.Asset(filename), "_serialized_trt_resource_filename")
    self._maximum_cached_engines = maximum_cached_engines
  def _create_resource(self):
    # Creates the handle op for this resource on the construction-time device.
    return _get_resource_handle(self._resource_name, self._resource_device)
  def _initialize(self):
    # Populates the engine cache from the tracked serialized-engine file.
    gen_trt_ops.initialize_trt_resource(
        self.resource_handle,
        self._filename,
        max_cached_engines_count=self._maximum_cached_engines)
@tf_export("experimental.tensorrt.Converter", v1=[])
class TrtGraphConverterV2(object):
"""An offline converter for TF-TRT transformation for TF 2.0 SavedModels.
Currently this is not available on Windows platform.
Note that in V2, is_dynamic_op=False is not supported, meaning TRT engines
will be built only when the corresponding TRTEngineOp is executed. But we
still provide a way to avoid the cost of building TRT engines during inference
(see more below).
There are several ways to run the conversion:
1. FP32/FP16 precision
```python
params = DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode='FP16')
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
converter.convert()
converter.save(output_saved_model_dir)
```
In this case, no TRT engines will be built or saved in the converted
SavedModel. But if input data is available during conversion, we can still
build and save the TRT engines to reduce the cost during inference (see
option 2 below).
2. FP32/FP16 precision with pre-built engines
```python
params = DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode='FP16',
# Set this to a large enough number so it can cache all the engines.
maximum_cached_engines=16)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
converter.convert()
# Define a generator function that yields input data, and use it to execute
# the graph to build TRT engines.
# With TensorRT 5.1, different engines will be built (and saved later) for
# different input shapes to the TRTEngineOp.
def my_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.build(input_fn=my_input_fn) # Generate corresponding TRT engines
converter.save(output_saved_model_dir) # Generated engines will be saved.
```
In this way, one engine will be built/saved for each unique input shapes of
the TRTEngineOp. This is good for applications that cannot afford building
engines during inference but have access to input data that is similar to
the one used in production (for example, that has the same input shapes).
Also, the generated TRT engines is platform dependent, so we need to run
`build()` in an environment that is similar to production (e.g. with
same type of GPU).
3. INT8 precision and calibration with pre-built engines
```python
params = DEFAULT_TRT_CONVERSION_PARAMS._replace(
precision_mode='INT8',
# Currently only one INT8 engine is supported in this mode.
maximum_cached_engines=1,
use_calibration=True)
converter = tf.experimental.tensorrt.Converter(
input_saved_model_dir="my_dir", conversion_params=params)
# Define a generator function that yields input data, and run INT8
# calibration with the data. All input data should have the same shape.
# At the end of convert(), the calibration stats (e.g. range information)
# will be saved and can be used to generate more TRT engines with different
# shapes. Also, one TRT engine will be generated (with the same shape as
# the calibration data) for save later.
def my_calibration_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.convert(calibration_input_fn=my_calibration_input_fn)
# (Optional) Generate more TRT engines offline (same as the previous
# option), to avoid the cost of generating them during inference.
def my_input_fn():
for _ in range(num_runs):
inp1, inp2 = ...
yield inp1, inp2
converter.build(input_fn=my_input_fn)
# Save the TRT engine and the engines.
converter.save(output_saved_model_dir)
```
"""
def __init__(self,
             input_saved_model_dir=None,
             input_saved_model_tags=None,
             input_saved_model_signature_key=None,
             conversion_params=DEFAULT_TRT_CONVERSION_PARAMS):
    """Initialize the converter.

    Args:
      input_saved_model_dir: the directory to load the SavedModel which
        contains the input graph to transform. Used only when input_graph_def
        is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      conversion_params: a TrtConversionParams instance.

    Raises:
      ValueError: if the combination of the parameters is invalid.
    """
    # This V2 converter only supports eager mode.
    assert context.executing_eagerly()
    _check_trt_version_compatibility()
    _check_conversion_params(conversion_params)
    self._conversion_params = conversion_params
    self._input_saved_model_dir = input_saved_model_dir
    # Fall back to the default serving tag / signature key when unspecified.
    self._input_saved_model_tags = (
        input_saved_model_tags or [tag_constants.SERVING])
    self._input_saved_model_signature_key = (
        input_saved_model_signature_key or
        signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)
    # INT8 + use_calibration requires a calibration pass inside convert().
    self._need_calibration = (
        conversion_params.precision_mode == TrtPrecisionMode.INT8 and
        conversion_params.use_calibration)
    if (self._need_calibration and not conversion_params.is_dynamic_op):
        raise ValueError("INT8 precision mode with calibration is not supported "
                         "with static TensorRT ops. Set is_dynamic_op to True.")
    # Set to True by convert(); save() asserts on it.
    self._converted = False
def _run_conversion(self, meta_graph_def):
    """Run Grappler's OptimizeGraph() tool to convert the graph.

    Args:
      meta_graph_def: the MetaGraphDef instance to run the optimizations on.

    Returns:
      The optimized GraphDef.
    """
    trt_rewriter_config = get_tensorrt_rewriter_config(
        conversion_params=self._conversion_params, is_v2=True)
    session_config = config_pb2.ConfigProto()
    session_config.graph_options.rewrite_options.CopyFrom(trt_rewriter_config)
    return tf_optimizer.OptimizeGraph(
        session_config, meta_graph_def, graph_id=b"tf_graph")
def _for_each_trt_node(self, graph_def, fn):
    """Helper method to manipulate all TRTEngineOps in a GraphDef."""

    def _apply_to_trt_nodes(nodes):
        # Invoke `fn` on every node whose op type is TRTEngineOp.
        for candidate in nodes:
            if candidate.op == _TRT_ENGINE_OP_NAME:
                fn(candidate)

    # Cover both the main graph and every function in the library.
    _apply_to_trt_nodes(graph_def.node)
    for func in graph_def.library.function:
        _apply_to_trt_nodes(func.node_def)
# TODO(laigd): provide a utility function to optimize a ConcreteFunction and
# use it here (b/124792963).
def convert(self, calibration_input_fn=None):
    """Convert the input SavedModel in 2.0 format.

    Args:
      calibration_input_fn: a generator function that yields input data as a
        list or tuple, which will be used to execute the converted signature
        for calibration. All the returned input data should have the same
        shape.
        Example:
        ```
        def input_fn():
          yield input1, input2, input3
        ```

    Raises:
      ValueError: if the input combination is invalid.

    Returns:
      The TF-TRT converted Function.
    """
    assert not self._converted
    # calibration_input_fn is required iff INT8 calibration was requested.
    if (self._need_calibration and not calibration_input_fn):
        raise ValueError("Should specify calibration_input_fn because INT8 "
                         "calibration is needed")
    if (not self._need_calibration and calibration_input_fn):
        raise ValueError("Should not specify calibration_input_fn because INT8 "
                         "calibration is not needed")

    self._saved_model = load.load(self._input_saved_model_dir,
                                  self._input_saved_model_tags)
    func = self._saved_model.signatures[self._input_saved_model_signature_key]
    # Freeze variables into constants so Grappler can rewrite the graph.
    frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
    grappler_meta_graph_def = saver.export_meta_graph(
        graph_def=frozen_func.graph.as_graph_def(), graph=frozen_func.graph)

    # Add a collection 'train_op' so that Grappler knows the outputs.
    fetch_collection = meta_graph_pb2.CollectionDef()
    for array in frozen_func.inputs + frozen_func.outputs:
        fetch_collection.node_list.value.append(array.name)
    grappler_meta_graph_def.collection_def["train_op"].CopyFrom(
        fetch_collection)

    # Run TRT optimizer in Grappler to convert the graph.
    self._converted_graph_def = self._run_conversion(grappler_meta_graph_def)
    self._converted_func = wrap_function.function_from_graph_def(
        self._converted_graph_def,
        [tensor.name for tensor in frozen_func.inputs],
        [tensor.name for tensor in frozen_func.outputs])
    # Reconstruct the output signatures using the ones from original model.
    self._converted_func.graph.structured_outputs = nest.pack_sequence_as(
        func.graph.structured_outputs,
        self._converted_func.graph.structured_outputs)

    if self._need_calibration:
        # Execute the converted function on the calibration data so the
        # TRTEngineOps collect range information.
        for inp in calibration_input_fn():
            self._converted_func(*map(ops.convert_to_tensor, inp))

        def _save_calibration_table(node):
            # Pull the calibration stats out of the TRT resource and embed
            # them in the node attr so they survive serialization.
            calibration_table = gen_trt_ops.get_calibration_data_op(
                _get_canonical_engine_name(node.name))
            node.attr["calibration_data"].s = calibration_table.numpy()

        self._for_each_trt_node(self._converted_graph_def,
                                _save_calibration_table)

        # Rebuild the function since calibration has changed the graph.
        calibrated_func = wrap_function.function_from_graph_def(
            self._converted_graph_def,
            [tensor.name for tensor in self._converted_func.inputs],
            [tensor.name for tensor in self._converted_func.outputs])
        calibrated_func.graph.structured_outputs = nest.pack_sequence_as(
            self._converted_func.graph.structured_outputs,
            calibrated_func.graph.structured_outputs)
        self._converted_func = calibrated_func

    self._converted = True
    # BUG FIX: the docstring promises the converted Function, but the original
    # body never returned it, so callers always got None.
    return self._converted_func
def build(self, input_fn):
    """Run inference with converted graph in order to build TensorRT engines.

    Args:
      input_fn: a generator function that yields input data as a list or
        tuple, which will be used to execute the converted signature to
        generate TRT engines.
        Example:
        ```
        def input_fn():
          yield input1, input2, input3
        ```
    """
    # Each yielded sample triggers (and caches) an engine build for its shape.
    for sample in input_fn():
        tensors = [ops.convert_to_tensor(item) for item in sample]
        self._converted_func(*tensors)
def save(self, output_saved_model_dir):
    """Save the converted SavedModel.

    Args:
      output_saved_model_dir: directory to save the converted SavedModel.
    """
    assert self._converted
    # Serialize the TRT engines in the cache if any, and create trackable
    # resource to track them.
    engine_asset_dir = tempfile.mkdtemp()
    resource_map = {}

    def _serialize_and_track_engine(node):
        """Serialize TRT engines in the cache and track them."""
        # Don't dump the same cache twice.
        canonical_engine_name = _get_canonical_engine_name(node.name)
        if canonical_engine_name in resource_map:
            return

        filename = os.path.join(engine_asset_dir,
                                "trt-serialized-engine." + canonical_engine_name)
        try:
            gen_trt_ops.serialize_trt_resource(
                resource_name=canonical_engine_name,
                filename=filename,
                delete_resource=True)
        except errors.NotFoundError:
            # Nothing cached yet (e.g. build() was never called); engines will
            # be built and cached at runtime instead.
            tf_logging.info("Could not find %s in TF-TRT cache. "
                            "This can happen if build() is not called, "
                            "which means TensorRT engines will be built "
                            "and cached at runtime." % canonical_engine_name)
            return

        # TODO(laigd): add an option for the user to choose the device.
        resource_map[canonical_engine_name] = _TRTEngineResource(
            canonical_engine_name, filename,
            self._conversion_params.maximum_cached_engines)

    self._for_each_trt_node(self._converted_graph_def,
                            _serialize_and_track_engine)
    # Attach the resources to the SavedModel object so they are tracked.
    self._saved_model.trt_engine_resources = resource_map

    # Rewrite the signature map using the optimized ConcreteFunction.
    signatures = {
        key: value for key, value in self._saved_model.signatures.items()
    }
    signatures[self._input_saved_model_signature_key] = self._converted_func
    save.save(self._saved_model, output_saved_model_dir, signatures)
# TODO(laigd): use TrtConversionParams here.
def create_inference_graph(
        input_graph_def,
        outputs,
        max_batch_size=1,
        max_workspace_size_bytes=DEFAULT_TRT_MAX_WORKSPACE_SIZE_BYTES,
        precision_mode=TrtPrecisionMode.FP32,
        minimum_segment_size=3,
        is_dynamic_op=False,
        maximum_cached_engines=1,
        input_saved_model_dir=None,
        input_saved_model_tags=None,
        input_saved_model_signature_key=None,
        output_saved_model_dir=None,
        session_config=None):
    """Python wrapper for the TRT transformation.

    Args:
      input_graph_def: a GraphDef object containing a model to be transformed.
        If set to None, the graph will be read from the SavedModel loaded from
        input_saved_model_dir.
      outputs: list of tensors or node names for the model outputs. Only used
        when input_graph_def is not None.
      max_batch_size: max size for the input batch.
      max_workspace_size_bytes: the maximum GPU temporary memory which the TRT
        engine can use at execution time. This corresponds to the
        'workspaceSize' parameter of nvinfer1::IBuilder::setMaxWorkspaceSize().
      precision_mode: one of TrtPrecisionMode.supported_precision_modes().
      minimum_segment_size: the minimum number of nodes required for a subgraph
        to be replaced by TRTEngineOp.
      is_dynamic_op: whether to generate dynamic TRT ops which will build the
        TRT network and engine at run time.
      maximum_cached_engines: max number of cached TRT engines in dynamic TRT
        ops. If the number of cached engines is already at max but none of them
        can serve the input, the TRTEngineOp will fall back to run the TF
        function based on which the TRTEngineOp is created.
      input_saved_model_dir: the directory to load the SavedModel which
        contains the input graph to transform. Used only when input_graph_def
        is None.
      input_saved_model_tags: list of tags to load the SavedModel.
      input_saved_model_signature_key: the key of the signature to optimize the
        graph for.
      output_saved_model_dir: if not None, construct a SavedModel using the
        returned GraphDef and save it to the specified directory. This option
        only works when the input graph is loaded from a SavedModel, i.e. when
        input_saved_model_dir is specified and input_graph_def is None.
      session_config: the ConfigProto used to create a Session. It's also used
        as a template to create a TRT-enabled ConfigProto for conversion. If
        not specified, a default ConfigProto will be used.

    Returns:
      A GraphDef transformed from input_graph_def (or the SavedModel graph def
      loaded from input_saved_model_dir, if input_graph_def is not present),
      where all TRT compatible subgraphs are replaced with TRTEngineOps, and a
      TF function is added for each of the subgraphs.

      If is_dynamic_op is True, each TRTEngineOp will contain a serialized
      subgraph GraphDef, which will be converted to a TRT engine at execution
      time and the TRT engine will be cached for future usage. A new TRT engine
      will be created each time when none of the cached engines match the input
      shapes. If it fails to execute the TRT engine or the number of cached
      engines reaches maximum_cached_engines, the op will fall back to call the
      corresponding TF function.

      If is_dynamic_op is False, each TRTEngineOp will contain a serialized TRT
      engine created from the corresponding subgraph. No more engines will be
      created on the fly, and the op will fall back to call the corresponding
      TF function when it fails to execute the engine.

    Raises:
      ValueError: if the combination of the parameters is invalid.
    """
    # Thin wrapper: delegate all the work to the V1 TrtGraphConverter.
    # NOTE(review): calibration is disabled here (use_calibration=False), so
    # INT8 requires pre-computed quantization ranges in this code path.
    trt_converter = TrtGraphConverter(
        input_saved_model_dir=input_saved_model_dir,
        input_saved_model_tags=input_saved_model_tags,
        input_saved_model_signature_key=input_saved_model_signature_key,
        input_graph_def=input_graph_def,
        nodes_blacklist=outputs,
        session_config=session_config,
        max_batch_size=max_batch_size,
        max_workspace_size_bytes=max_workspace_size_bytes,
        precision_mode=precision_mode,
        minimum_segment_size=minimum_segment_size,
        is_dynamic_op=is_dynamic_op,
        maximum_cached_engines=maximum_cached_engines,
        use_calibration=False)
    converted_graph_def = trt_converter.convert()
    if output_saved_model_dir:
        trt_converter.save(output_saved_model_dir)
    return converted_graph_def
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import addresses
from . import neighbors
from . import unnumbered
from . import config
from . import state
class ipv4(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-interfaces - based on the path /interfaces/interface/routed-vlan/ipv4. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Parameters for the IPv4 address family.
    """

    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__addresses",
        "__neighbors",
        "__unnumbered",
        "__config",
        "__state",
    )

    _yang_name = "ipv4"
    _pybind_generated_by = "container"

    def _new_child(self, yang_name, base, *value):
        """Construct a YANGDynClass child container of this node.

        All five children of this container share the exact same YANGDynClass
        options; building them in one place removes the 15 duplicated call
        sites of the generated code.

        Args:
          yang_name: YANG name of the child container.
          base: generated class backing the container.
          *value: optionally a single initial value to validate/coerce; leave
            empty to default-construct the child (as __init__ and the
            _unset_* methods do).

        Raises:
          TypeError/ValueError: propagated from YANGDynClass when *value is
            not compatible with the container type.
        """
        options = dict(
            base=base,
            is_container="container",
            yang_name=yang_name,
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/interfaces/ip",
            defining_module="openconfig-if-ip",
            yang_type="container",
            is_config=True,
        )
        return YANGDynClass(*value, **options)

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        self.__addresses = self._new_child("addresses", addresses.addresses)
        self.__neighbors = self._new_child("neighbors", neighbors.neighbors)
        self.__unnumbered = self._new_child("unnumbered", unnumbered.unnumbered)
        self.__config = self._new_child("config", config.config)
        self.__state = self._new_child("state", state.state)

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # The source object must expose every element of this container
            # before anything is copied across.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            # Copy over only the elements that were changed on the source.
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached to a tree; otherwise report the
        # absolute schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return ["interfaces", "interface", "routed-vlan", "ipv4"]

    def _get_addresses(self):
        """
        Getter method for addresses, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/addresses (container)

        YANG Description: Enclosing container for address list
        """
        return self.__addresses

    def _set_addresses(self, v, load=False):
        """
        Setter method for addresses, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/addresses (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_addresses is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_addresses() directly.

        YANG Description: Enclosing container for address list
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._new_child("addresses", addresses.addresses, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """addresses must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=addresses.addresses, is_container='container', yang_name="addresses", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
                }
            )
        self.__addresses = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_addresses(self):
        self.__addresses = self._new_child("addresses", addresses.addresses)

    def _get_neighbors(self):
        """
        Getter method for neighbors, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/neighbors (container)

        YANG Description: Enclosing container for neighbor list
        """
        return self.__neighbors

    def _set_neighbors(self, v, load=False):
        """
        Setter method for neighbors, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/neighbors (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_neighbors is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_neighbors() directly.

        YANG Description: Enclosing container for neighbor list
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._new_child("neighbors", neighbors.neighbors, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """neighbors must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=neighbors.neighbors, is_container='container', yang_name="neighbors", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
                }
            )
        self.__neighbors = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_neighbors(self):
        self.__neighbors = self._new_child("neighbors", neighbors.neighbors)

    def _get_unnumbered(self):
        """
        Getter method for unnumbered, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/unnumbered (container)

        YANG Description: Top-level container for setting unnumbered interfaces.
        Includes reference the interface that provides the
        address information
        """
        return self.__unnumbered

    def _set_unnumbered(self, v, load=False):
        """
        Setter method for unnumbered, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/unnumbered (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_unnumbered is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_unnumbered() directly.

        YANG Description: Top-level container for setting unnumbered interfaces.
        Includes reference the interface that provides the
        address information
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._new_child("unnumbered", unnumbered.unnumbered, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """unnumbered must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=unnumbered.unnumbered, is_container='container', yang_name="unnumbered", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
                }
            )
        self.__unnumbered = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_unnumbered(self):
        self.__unnumbered = self._new_child("unnumbered", unnumbered.unnumbered)

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/config (container)

        YANG Description: Top-level IPv4 configuration data for the interface
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Top-level IPv4 configuration data for the interface
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._new_child("config", config.config, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        self.__config = self._new_child("config", config.config)

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state (container)

        YANG Description: Top level IPv4 operational state data
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /interfaces/interface/routed_vlan/ipv4/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: Top level IPv4 operational state data
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = self._new_child("state", state.state, v)
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        self.__state = self._new_child("state", state.state)

    addresses = __builtin__.property(_get_addresses, _set_addresses)
    neighbors = __builtin__.property(_get_neighbors, _set_neighbors)
    unnumbered = __builtin__.property(_get_unnumbered, _set_unnumbered)
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    _pyangbind_elements = OrderedDict(
        [
            ("addresses", addresses),
            ("neighbors", neighbors),
            ("unnumbered", unnumbered),
            ("config", config),
            ("state", state),
        ]
    )
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from cloudfiles.errors import ContainerNotEmpty
from django import http
from django.core.urlresolvers import reverse
from mox import IsA
from horizon import api
from horizon import test
from .tables import ContainersTable, ObjectsTable
# Resolved once at import time; the container-level tests GET/POST this URL.
CONTAINER_INDEX_URL = reverse('horizon:nova:containers:index')
class ContainerViewTests(test.BaseViewTests):
    """View tests for the Swift container index/delete/create pages."""

    def setUp(self):
        super(ContainerViewTests, self).setUp()
        # Single fake container shared by the tests below.
        self.container = api.Container(None)
        self.container.name = 'containerName'
        self.container.size_used = 128
        self.containers = (self.container,)

    def test_index(self):
        """The index view lists the containers returned by the Swift API."""
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest), marker=None).AndReturn(
                ([self.container], False))
        self.mox.ReplayAll()
        res = self.client.get(CONTAINER_INDEX_URL)
        self.assertTemplateUsed(res, 'nova/containers/index.html')
        self.assertIn('table', res.context)
        containers = res.context['table'].data
        self.assertEqual(len(containers), 1)
        self.assertEqual(containers[0].name, 'containerName')

    def test_delete_container(self):
        """The table delete action calls the delete API and redirects."""
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        api.swift_delete_container(IsA(http.HttpRequest),
                                   'containerName')
        self.mox.ReplayAll()
        # Table actions are posted as "<table>__<action>__<object id>".
        action_string = "containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_delete_container_nonempty(self):
        """A ContainerNotEmpty error still redirects back to the index."""
        self.mox.StubOutWithMock(api, 'swift_delete_container')
        exception = ContainerNotEmpty('containerNotEmpty')
        api.swift_delete_container(
            IsA(http.HttpRequest),
            'containerName').AndRaise(exception)
        self.mox.ReplayAll()
        action_string = "containers__delete__%s" % self.container.name
        form_data = {"action": action_string}
        req = self.factory.post(CONTAINER_INDEX_URL, form_data)
        table = ContainersTable(req, self.containers)
        handled = table.maybe_handle()
        self.assertEqual(handled['location'], CONTAINER_INDEX_URL)

    def test_create_container_get(self):
        """GET on the create view renders the create-container template."""
        res = self.client.get(reverse('horizon:nova:containers:create'))
        self.assertTemplateUsed(res, 'nova/containers/create.html')

    def test_create_container_post(self):
        """Posting the create form calls the create API and redirects."""
        formData = {'name': 'containerName',
                    'method': 'CreateContainer'}
        self.mox.StubOutWithMock(api, 'swift_create_container')
        api.swift_create_container(
            IsA(http.HttpRequest), u'containerName')
        self.mox.ReplayAll()
        res = self.client.post(reverse('horizon:nova:containers:create'),
                               formData)
        self.assertRedirectsNoFollow(res, CONTAINER_INDEX_URL)
class ObjectViewTests(test.BaseViewTests):
CONTAINER_NAME = 'containerName'
def setUp(self):
    """Build one fake SwiftObject wrapping a stub cloudfiles object."""
    class FakeCloudFile(object):
        """Stand-in for a cloudfiles object; just enough API for these tests."""
        def __init__(self):
            self.metadata = {}

        def sync_metadata(self):
            pass

    super(ObjectViewTests, self).setUp()
    swift_object = api.swift.SwiftObject(FakeCloudFile())
    swift_object.name = u"test_object"
    swift_object.size = '128'
    swift_object.container = api.swift.Container(None)
    swift_object.container.name = self.CONTAINER_NAME
    self.swift_objects = [swift_object]
def test_index(self):
    """The object index lists the objects returned for the container."""
    self.mox.StubOutWithMock(api, 'swift_get_objects')
    api.swift_get_objects(
        IsA(http.HttpRequest),
        self.CONTAINER_NAME,
        marker=None).AndReturn((self.swift_objects, False))
    self.mox.ReplayAll()
    res = self.client.get(reverse('horizon:nova:containers:object_index',
                                  args=[self.CONTAINER_NAME]))
    self.assertTemplateUsed(res, 'nova/objects/index.html')
    self.assertItemsEqual(res.context['table'].data, self.swift_objects)
def test_upload_index(self):
    """GET on the upload view should render the upload template."""
    upload_url = reverse('horizon:nova:containers:object_upload',
                         args=[self.CONTAINER_NAME])
    response = self.client.get(upload_url)
    self.assertTemplateUsed(response, 'nova/objects/upload.html')
def test_upload(self):
    """Posting the upload form sends the file contents to the API."""
    OBJECT_DATA = 'objectData'
    # Write the payload to a real temp file so the form sees a file upload.
    OBJECT_FILE = tempfile.TemporaryFile()
    OBJECT_FILE.write(OBJECT_DATA)
    OBJECT_FILE.flush()
    OBJECT_FILE.seek(0)
    OBJECT_NAME = 'objectName'
    formData = {'method': 'UploadObject',
                'container_name': self.CONTAINER_NAME,
                'name': OBJECT_NAME,
                'object_file': OBJECT_FILE}
    self.mox.StubOutWithMock(api, 'swift_upload_object')
    api.swift_upload_object(IsA(http.HttpRequest),
                            unicode(self.CONTAINER_NAME),
                            unicode(OBJECT_NAME),
                            OBJECT_DATA).AndReturn(self.swift_objects[0])
    self.mox.ReplayAll()
    # The GET must render a multipart form for the file field to work.
    res = self.client.get(reverse('horizon:nova:containers:object_upload',
                                  args=[self.CONTAINER_NAME]))
    self.assertContains(res, 'enctype="multipart/form-data"')
    res = self.client.post(reverse('horizon:nova:containers:object_upload',
                                   args=[self.CONTAINER_NAME]),
                           formData)
    self.assertRedirectsNoFollow(res,
                                 reverse('horizon:nova:containers:object_index',
                                         args=[self.CONTAINER_NAME]))
def test_delete(self):
    """The table delete action removes the object and redirects to index."""
    self.mox.StubOutWithMock(api, 'swift_delete_object')
    api.swift_delete_object(
        IsA(http.HttpRequest),
        self.CONTAINER_NAME, self.swift_objects[0].name)
    self.mox.ReplayAll()
    OBJECT_INDEX_URL = reverse('horizon:nova:containers:object_index',
                               args=[self.CONTAINER_NAME])
    # Table actions are posted as "<table>__<action>__<object id>".
    action_string = "objects__delete__%s" % self.swift_objects[0].name
    form_data = {"action": action_string}
    req = self.factory.post(OBJECT_INDEX_URL, form_data)
    kwargs = {"container_name": self.CONTAINER_NAME}
    table = ObjectsTable(req, self.swift_objects, **kwargs)
    handled = table.maybe_handle()
    self.assertEqual(handled['location'], OBJECT_INDEX_URL)
    def test_download(self):
        """Downloading an object returns its raw data with a
        Content-Disposition header so browsers save it as a file."""
        OBJECT_DATA = 'objectData'
        OBJECT_NAME = 'objectName'
        # The view first fetches object metadata, then its data.
        self.mox.StubOutWithMock(api, 'swift_get_object_data')
        self.mox.StubOutWithMock(api.swift, 'swift_get_object')
        api.swift.swift_get_object(IsA(http.HttpRequest),
                                   unicode(self.CONTAINER_NAME),
                                   unicode(OBJECT_NAME)) \
                                   .AndReturn(self.swift_objects[0])
        api.swift_get_object_data(IsA(http.HttpRequest),
                                  unicode(self.CONTAINER_NAME),
                                  unicode(OBJECT_NAME)).AndReturn(OBJECT_DATA)
        self.mox.ReplayAll()
        res = self.client.get(reverse(
                                'horizon:nova:containers:object_download',
                                args=[self.CONTAINER_NAME, OBJECT_NAME]))
        self.assertEqual(res.content, OBJECT_DATA)
        self.assertTrue(res.has_header('Content-Disposition'))
    def test_copy_index(self):
        """The object-copy form renders; it fetches the container list to
        populate the destination dropdown."""
        OBJECT_NAME = 'objectName'
        container = self.mox.CreateMock(api.Container)
        container.name = self.CONTAINER_NAME
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest)).AndReturn(([container], False))
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:nova:containers:object_copy',
                                      args=[self.CONTAINER_NAME,
                                            OBJECT_NAME]))
        self.assertTemplateUsed(res, 'nova/objects/copy.html')
    def test_copy(self):
        """Copying an object calls swift_copy_object with the source and
        destination locations and redirects to the destination index."""
        NEW_CONTAINER_NAME = self.CONTAINER_NAME
        NEW_OBJECT_NAME = 'newObjectName'
        ORIG_CONTAINER_NAME = 'origContainerName'
        ORIG_OBJECT_NAME = 'origObjectName'
        formData = {'method': 'CopyObject',
                    'new_container_name': NEW_CONTAINER_NAME,
                    'new_object_name': NEW_OBJECT_NAME,
                    'orig_container_name': ORIG_CONTAINER_NAME,
                    'orig_object_name': ORIG_OBJECT_NAME}
        # The form re-fetches the container list when rebuilding choices.
        container = self.mox.CreateMock(api.Container)
        container.name = self.CONTAINER_NAME
        self.mox.StubOutWithMock(api, 'swift_get_containers')
        api.swift_get_containers(
            IsA(http.HttpRequest)).AndReturn(([container], False))
        self.mox.StubOutWithMock(api, 'swift_copy_object')
        api.swift_copy_object(IsA(http.HttpRequest),
                              ORIG_CONTAINER_NAME,
                              ORIG_OBJECT_NAME,
                              NEW_CONTAINER_NAME,
                              NEW_OBJECT_NAME)
        self.mox.ReplayAll()
        res = self.client.post(reverse('horizon:nova:containers:object_copy',
                                       args=[ORIG_CONTAINER_NAME,
                                             ORIG_OBJECT_NAME]),
                               formData)
        self.assertRedirectsNoFollow(res,
                            reverse('horizon:nova:containers:object_index',
                                    args=[NEW_CONTAINER_NAME]))
|
|
class SpecialFunctions(object):
    """
    This class implements special functions using high-level code.
    Elementary and some other functions (e.g. gamma function, basecase
    hypergeometric series) are assumed to be predefined by the context as
    "builtins" or "low-level" functions.
    """
    # Registry mapping name -> (function, wrap_flag), populated at import
    # time by the defun/defun_wrapped decorators below and installed onto
    # the concrete context class by __init__ via _wrap_specfun.
    # Shared by all instances (class attribute).
    defined_functions = {}
    # The series for the Jacobi theta functions converge for |q| < 1;
    # in the current implementation they throw a ValueError for
    # abs(q) > THETA_Q_LIM
    THETA_Q_LIM = 1 - 10**-7
    def __init__(self):
        # Install every registered special function on the concrete class,
        # then cache a few frequently used small rationals.
        cls = self.__class__
        for name in cls.defined_functions:
            f, wrap = cls.defined_functions[name]
            cls._wrap_specfun(name, f, wrap)
        # NOTE(review): self._mpq is assumed to be provided by the concrete
        # context class -- it is not defined in this base class.
        self.mpq_1 = self._mpq((1,1))
        self.mpq_0 = self._mpq((0,1))
        self.mpq_1_2 = self._mpq((1,2))
        self.mpq_3_2 = self._mpq((3,2))
        self.mpq_1_4 = self._mpq((1,4))
        self.mpq_1_16 = self._mpq((1,16))
        self.mpq_3_16 = self._mpq((3,16))
        self.mpq_5_2 = self._mpq((5,2))
        self.mpq_3_4 = self._mpq((3,4))
        self.mpq_7_4 = self._mpq((7,4))
        self.mpq_5_4 = self._mpq((5,4))
        # Alternative spellings for public function names.
        # NOTE(review): self._aliases is expected to exist on the context
        # before this __init__ runs -- confirm against subclasses.
        self._aliases.update({
            'phase' : 'arg',
            'conjugate' : 'conj',
            'nthroot' : 'root',
            'polygamma' : 'psi',
            'hurwitz' : 'zeta',
            #'digamma' : 'psi0',
            #'trigamma' : 'psi1',
            #'tetragamma' : 'psi2',
            #'pentagamma' : 'psi3',
            'fibonacci' : 'fib',
            'factorial' : 'fac',
        })
    # Default installation: attach f verbatim. Subclasses may override to
    # add argument wrapping, precision management, etc.
    @classmethod
    def _wrap_specfun(cls, name, f, wrap):
        setattr(cls, name, f)
    # Optional fast versions of common functions in common cases.
    # If not overridden, default (generic hypergeometric series)
    # implementations will be used
    def _besselj(ctx, n, z): raise NotImplementedError
    def _erf(ctx, z): raise NotImplementedError
    def _erfc(ctx, z): raise NotImplementedError
    def _gamma_upper_int(ctx, z, a): raise NotImplementedError
    def _expint_int(ctx, n, z): raise NotImplementedError
    def _zeta(ctx, s): raise NotImplementedError
    def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError
    def _ei(ctx, z): raise NotImplementedError
    def _e1(ctx, z): raise NotImplementedError
    def _ci(ctx, z): raise NotImplementedError
    def _si(ctx, z): raise NotImplementedError
    def _altzeta(ctx, s): raise NotImplementedError
def defun_wrapped(f):
    """Register *f* as a special function whose arguments the context
    should automatically convert/wrap (wrap flag True)."""
    SpecialFunctions.defined_functions[f.__name__] = (f, True)
def defun(f):
    """Register *f* as a special function installed without argument
    wrapping (wrap flag False)."""
    SpecialFunctions.defined_functions[f.__name__] = (f, False)
# Attach f directly to SpecialFunctions as a plain method, bypassing the
# defined_functions registry and _wrap_specfun machinery.
def defun_static(f):
    setattr(SpecialFunctions, f.__name__, f)
# Reciprocal trigonometric/hyperbolic functions, each defined in terms of
# the corresponding primary function.
@defun_wrapped
def cot(ctx, z): return ctx.one / ctx.tan(z)
@defun_wrapped
def sec(ctx, z): return ctx.one / ctx.cos(z)
@defun_wrapped
def csc(ctx, z): return ctx.one / ctx.sin(z)
@defun_wrapped
def coth(ctx, z): return ctx.one / ctx.tanh(z)
@defun_wrapped
def sech(ctx, z): return ctx.one / ctx.cosh(z)
@defun_wrapped
def csch(ctx, z): return ctx.one / ctx.sinh(z)
# Inverses of the above, via the identity acot(z) = atan(1/z), etc.
@defun_wrapped
def acot(ctx, z): return ctx.atan(ctx.one / z)
@defun_wrapped
def asec(ctx, z): return ctx.acos(ctx.one / z)
@defun_wrapped
def acsc(ctx, z): return ctx.asin(ctx.one / z)
@defun_wrapped
def acoth(ctx, z): return ctx.atanh(ctx.one / z)
@defun_wrapped
def asech(ctx, z): return ctx.acosh(ctx.one / z)
@defun_wrapped
def acsch(ctx, z): return ctx.asinh(ctx.one / z)
@defun
def sign(ctx, x):
    """
    Return the sign of x: zero/nan inputs are returned unchanged, nonzero
    reals give -1 or +1, and complex x gives x/|x| (a point on the unit
    circle).
    """
    x = ctx.convert(x)
    if not x or ctx.isnan(x):
        return x
    if ctx._is_real_type(x):
        # (x > 0) - (x < 0) is the portable spelling of cmp(x, 0); the
        # builtin cmp() does not exist on Python 3 and the result is
        # identical (-1, 0 or 1) on Python 2.
        return ctx.mpf((x > 0) - (x < 0))
    return x / abs(x)
@defun
def agm(ctx, a, b=1):
    # Arithmetic-geometric mean of a and b. The default b=1 dispatches to
    # the specialized single-argument routine agm1.
    if b == 1:
        return ctx.agm1(a)
    a = ctx.convert(a)
    b = ctx.convert(b)
    return ctx._agm(a, b)
@defun_wrapped
def sinc(ctx, x):
    # Unnormalized sinc: sin(x)/x, with sinc(0) = 1.
    if ctx.isinf(x):
        # sin(x)/x -> 0 at +-inf; 1/x produces the correctly signed zero.
        return 1/x
    if not x:
        # x + 1 (rather than a literal 1) preserves the type of x.
        return x+1
    return ctx.sin(x)/x
@defun_wrapped
def sincpi(ctx, x):
    # Normalized sinc: sin(pi*x)/(pi*x), with sincpi(0) = 1.
    if ctx.isinf(x):
        # Vanishes at +-inf; 1/x produces the correctly signed zero.
        return 1/x
    if not x:
        # x + 1 (rather than a literal 1) preserves the type of x.
        return x+1
    return ctx.sinpi(x)/(ctx.pi*x)
# TODO: tests; improve implementation
@defun_wrapped
def expm1(ctx, x):
    # exp(x) - 1, computed accurately for small x where the direct
    # subtraction would cancel catastrophically.
    if not x:
        return ctx.zero
    # exp(x) - 1 ~ x
    if ctx.mag(x) < -ctx.prec:
        # Below working precision a two-term Taylor series suffices.
        return x + 0.5*x**2
    # TODO: accurately eval the smaller of the real/imag parts
    return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
@defun_wrapped
def powm1(ctx, x, y):
    """
    Compute x**y - 1, accurately even when x**y is very close to 1
    (where naive subtraction would cancel).
    """
    mag = ctx.mag
    one = ctx.one
    w = x**y - one
    M = mag(w)
    # Only moderate cancellation
    if M > -8:
        return w
    # Check for the only possible exact cases
    if not w:
        if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
            return w
    # (removed unused local: x1 = x - one was never referenced)
    magy = mag(y)
    lnx = ctx.ln(x)
    # Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
    if magy + mag(lnx) < -ctx.prec:
        return lnx*y + (lnx*y)**2/2
    # TODO: accurately eval the smaller of the real/imag part
    return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
@defun
def _rootof1(ctx, k, n):
    # Return exp(2*pi*i*k/n), the k-th n-th root of unity, with exact
    # values (1, -1, +-i) for the special angles.
    k = int(k)
    n = int(n)
    k %= n
    if not k:
        return ctx.one
    elif 2*k == n:
        return -ctx.one
    elif 4*k == n:
        return ctx.j
    elif 4*k == 3*n:
        return -ctx.j
    return ctx.expjpi(2*ctx.mpf(k)/n)
@defun
def root(ctx, x, n, k=0):
    # k-th branch of the n-th root of x (k=0 is the principal root).
    n = int(n)
    x = ctx.convert(x)
    if k:
        # Special case: there is an exact real root
        if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0):
            return -ctx.root(-x, n)
        # Multiply by root of unity, at slightly raised working precision
        # so the final rounding (+v) is accurate.
        prec = ctx.prec
        try:
            ctx.prec += 10
            v = ctx.root(x, n, 0) * ctx._rootof1(k, n)
        finally:
            ctx.prec = prec
        return +v
    return ctx._nthroot(x, n)
@defun
def unitroots(ctx, n, primitive=False):
    # Return the n-th roots of unity; with primitive=True, only those
    # whose index is coprime to n (the primitive roots).
    gcd = ctx._gcd
    prec = ctx.prec
    try:
        ctx.prec += 10
        if primitive:
            v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1]
        else:
            # TODO: this can be done *much* faster
            v = [ctx._rootof1(k,n) for k in range(n)]
    finally:
        ctx.prec = prec
    # Round each value back to the caller's precision.
    return [+x for x in v]
@defun
def arg(ctx, x):
    """Return the complex argument (phase) of x, via atan2(im, re)."""
    z = ctx.convert(x)
    return ctx.atan2(ctx._im(z), ctx._re(z))
@defun
def fabs(ctx, x):
    """Return the absolute value of x as a context number."""
    value = ctx.convert(x)
    return abs(value)
@defun
def re(ctx, x):
    # Real part of x; objects without a .real attribute are treated as
    # purely real and returned unchanged.
    x = ctx.convert(x)
    if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers
        return x.real
    return x
@defun
def im(ctx, x):
    # Imaginary part of x; objects without a .imag attribute are treated
    # as purely real, so their imaginary part is zero.
    x = ctx.convert(x)
    if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers
        return x.imag
    return ctx.zero
@defun
def conj(ctx, x):
    """Return the complex conjugate of x."""
    z = ctx.convert(x)
    return z.conjugate()
@defun
def polar(ctx, z):
    """Return the polar decomposition (r, phi) of z."""
    r = ctx.fabs(z)
    phi = ctx.arg(z)
    return (r, phi)
@defun_wrapped
def rect(ctx, r, phi):
    # Convert polar coordinates (r, phi) to the complex number
    # r*(cos(phi) + i*sin(phi)).
    return r * ctx.mpc(*ctx.cos_sin(phi))
@defun
def log(ctx, x, b=None):
    # Logarithm of x to base b; natural logarithm when b is None.
    if b is None:
        return ctx.ln(x)
    # Extra working precision guards the quotient of the two logarithms.
    wp = ctx.prec + 20
    return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp)
@defun
def log10(ctx, x):
    """Return the base-10 logarithm of x."""
    return ctx.log(x, b=10)
@defun
def modf(ctx, x, y):
    # Remainder x mod y. NOTE(review): despite the name, this is not
    # math.modf (fractional/integer split); it simply computes x % y.
    return ctx.convert(x) % ctx.convert(y)
@defun
def degrees(ctx, x):
    # Convert the angle x from radians to degrees (ctx.degree = pi/180).
    return x / ctx.degree
@defun
def radians(ctx, x):
    # Convert the angle x from degrees to radians (ctx.degree = pi/180).
    return x * ctx.degree
@defun_wrapped
def lambertw(ctx, z, k=0):
    """
    Branch k of the Lambert W function, solving w*exp(w) = z.
    A branch-appropriate starting value is chosen, then polished with
    Halley's iteration.
    """
    k = int(k)
    if ctx.isnan(z):
        return z
    ctx.prec += 20
    mag = ctx.mag(z)
    # Start from fp approximation
    if ctx is ctx._mp and abs(mag) < 900 and abs(k) < 10000 and \
        abs(z+0.36787944117144) > 0.01:
        w = ctx._fp.lambertw(z, k)
    else:
        absz = abs(z)
        # We must be extremely careful near the singularities at -1/e and 0
        u = ctx.exp(-1)
        if absz <= u:
            if not z:
                # w(0,0) = 0; for all other branches we hit the pole
                if not k:
                    return z
                return ctx.ninf
            if not k:
                w = z
            # For small real z < 0, the -1 branch behaves roughly like log(-z)
            elif k == -1 and not ctx.im(z) and ctx.re(z) < 0:
                w = ctx.ln(-z)
            # Use a simple asymptotic approximation.
            else:
                w = ctx.ln(z)
                # The branches are roughly logarithmic. This approximation
                # gets better for large |k|; need to check that this always
                # works for k ~= -1, 0, 1.
                if k: w += k * 2*ctx.pi*ctx.j
        elif k == 0 and ctx.im(z) and absz <= 0.7:
            # Both the W(z) ~= z and W(z) ~= ln(z) approximations break
            # down around z ~= -0.5 (converging to the wrong branch), so patch
            # with a constant approximation (adjusted for sign)
            if abs(z+0.5) < 0.1:
                if ctx.im(z) > 0:
                    w = ctx.mpc(0.7+0.7j)
                else:
                    w = ctx.mpc(0.7-0.7j)
            else:
                w = z
        else:
            if z == ctx.inf:
                if k == 0:
                    return z
                else:
                    return z + 2*k*ctx.pi*ctx.j
            if z == ctx.ninf:
                return (-z) + (2*k+1)*ctx.pi*ctx.j
            # Simple asymptotic approximation as above
            w = ctx.ln(z)
            if k:
                w += k * 2*ctx.pi*ctx.j
    # Use Halley iteration to solve w*exp(w) = z
    two = ctx.mpf(2)
    weps = ctx.ldexp(ctx.eps, 15)
    for i in xrange(100):
        ew = ctx.exp(w)
        wew = w*ew
        wewz = wew-z
        wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two))
        # Converged once the update is within ~2^15*eps relative tolerance.
        if abs(wn-w) < weps*abs(wn):
            return wn
        else:
            w = wn
    ctx.warn("Lambert W iteration failed to converge for %s" % z)
    return wn
@defun_wrapped
def bell(ctx, n, x=1):
    # Bell polynomial B_n(x) (Bell numbers for x=1), evaluated via the
    # exponentially weighted series in _polyexp.
    x = ctx.convert(x)
    if not n:
        if ctx.isnan(x):
            return x
        # B_0 = 1, returned in the type of x.
        return type(x)(1)
    if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n):
        return x**n
    if n == 1: return x
    if n == 2: return x*(x+1)
    if x == 0: return ctx.sincpi(n)
    return _polyexp(ctx, n, x, True) / ctx.exp(x)
def _polyexp(ctx, n, x, extra=False):
    # Helper shared by bell() and polyexp(): accurately sums
    # sum_{k>=1} k**n * x**k / k!, optionally prepending sincpi(n)
    # (the extra term used by bell()).
    def _terms():
        if extra:
            yield ctx.sincpi(n)
        # t tracks x**k / k! incrementally.
        t = x
        k = 1
        while 1:
            yield k**n * t
            k += 1
            t = t*x/k
    return ctx.sum_accurately(_terms, check_step=4)
@defun_wrapped
def polyexp(ctx, s, z):
    # Polyexponential function E_s(z) = sum_{k>=1} k**s * z**k / k!,
    # with closed-form fast paths for s = 0, 1, 2.
    if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s):
        return z**s
    if z == 0: return z*s
    if s == 0: return ctx.expm1(z)
    if s == 1: return ctx.exp(z)*z
    if s == 2: return ctx.exp(z)*z*(z+1)
    return _polyexp(ctx, s, z)
@defun_wrapped
def cyclotomic(ctx, n, z):
    """
    Evaluate the n-th cyclotomic polynomial at z, using the Moebius
    divisor-product representation.
    """
    n = int(n)
    assert n >= 0
    p = ctx.one
    if n == 0:
        return p
    if n == 1:
        return z - p
    if n == 2:
        return z + p
    # Use divisor product representation. Unfortunately, this sometimes
    # includes singularities for roots of unity, which we have to cancel out.
    # Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1).
    a_prod = 1
    b_prod = 1
    num_zeros = 0
    num_poles = 0
    for d in range(1,n+1):
        if not n % d:
            w = ctx.moebius(n//d)
            # Use powm1 because it is important that we get 0 only
            # if it really is exactly 0
            b = -ctx.powm1(z, d)
            if b:
                p *= b**w
            else:
                # Exactly at a root of unity: defer this vanishing or
                # singular factor d and cancel zeros against poles below.
                if w == 1:
                    a_prod *= d
                    num_zeros += 1
                elif w == -1:
                    b_prod *= d
                    num_poles += 1
    if num_zeros:
        if num_zeros > num_poles:
            # Uncancelled zeros remain: the product vanishes.
            p *= 0
        else:
            p *= a_prod
        p /= b_prod
    return p
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.