code
stringlengths 1
199k
|
|---|
"""The WebDriver implementation."""
from ..common.exceptions import ErrorInResponseException
from ..common.exceptions import InvalidSwitchToTargetException
from ..common.exceptions import NoSuchElementException
import utils
from webelement import WebElement
from remote_connection import RemoteConnection
class WebDriver(object):
    """Controls a browser by sending commands to a remote WebDriver server.

    Commands are relayed over HTTP through a RemoteConnection; responses
    are unwrapped into plain values or WebElement instances.
    """

    def __init__(self, remote_server_addr, browser_name, platform):
        # One connection per driver; the remote end starts a browser
        # session for the requested browser/platform.
        self._conn = RemoteConnection(
            remote_server_addr, browser_name, platform)

    def get(self, url):
        """Loads a web page in the current browser."""
        self._post("url", url)

    def get_title(self):
        """Gets the title of the current page."""
        resp = self._get("title")
        return resp

    def find_element_by_id(self, id_):
        """Finds element by id."""
        return self._find_element_by("id", id_)

    def find_elements_by_xpath(self, xpath):
        """Finds multiple elements by xpath."""
        resp = self._post("elements", "xpath", xpath)
        elems = []
        # Each token in the response identifies one element on the server.
        for token in resp:
            elems.append(self._get_elem(token))
        return elems

    def find_element_by_xpath(self, xpath):
        """Finds an element by xpath."""
        return self._find_element_by("xpath", xpath)

    def find_element_by_link_text(self, link_text):
        """Finds an element by its link text.

        Returns None if the element is not a link.
        """
        return self._find_element_by("link text", link_text)

    def find_element_by_name(self, name):
        """Finds an element by its name."""
        return self._find_element_by("name", name)

    def execute_script(self, script, *args):
        """Executes JavaScript in the browser and returns the result.

        WebElement arguments are sent to the server as ELEMENT references;
        all other arguments are sent as strings. An ELEMENT response is
        converted back into a WebElement; a NULL response yields None.
        """
        converted_args = []
        for arg in args:
            if type(arg) == WebElement:
                converted_args.append({"type": "ELEMENT", "value": arg.id})
            else:
                converted_args.append({"type": "STRING", "value": arg})
        resp = self._post("execute", script, converted_args)
        if "NULL" == resp["type"]:
            # Script returned no value; fall through and return None.
            pass
        elif "ELEMENT" == resp["type"]:
            return self._get_elem(resp["value"])
        else:
            return resp["value"]

    def get_current_url(self):
        """Gets the current url."""
        return self._get("url")

    def get_page_source(self):
        """Gets the page source."""
        return self._get("source")

    def close(self):
        """Closes the current window.

        Quit the browser if it's the last window open.
        """
        self._delete("window")

    def quit(self):
        """Quits the driver and close every associated window."""
        self._conn.quit()

    def switch_to_window(self, window_name):
        """Switches focus to a window.

        Raises:
            InvalidSwitchToTargetException: if no such window exists.
        """
        resp = self._post("window/%s" % window_name)
        # The server reports a missing window inside the response body.
        if resp and "No window found" in resp:
            raise InvalidSwitchToTargetException(
                "Window %s not found" % window_name)

    def switch_to_frame(self, index_or_name):
        """Switches focus to a frame by index or name."""
        self._post("frame/%s" % str(index_or_name))

    def back(self):
        """Goes back in browser history."""
        self._post("back")

    def forward(self):
        """Goes forward in browser history."""
        self._post("forward")

    # Options
    def get_cookie(self):
        """Gets all the cookies. Return a set of dicts."""
        return self._get("cookie")

    def delete_cookie(self, name):
        """Delete a cookie with the given name."""
        self._delete("cookie/%s" % name)

    def delete_all_cookies(self):
        """Delete all the cookies."""
        self._delete("cookie")

    def add_cookie(self, cookie_dict):
        """Adds a cookie described by a dict to the current session."""
        self._post("addCookie", cookie_dict)

    @property
    def conn(self):
        """The underlying RemoteConnection used to reach the server."""
        return self._conn

    def _get_elem(self, resp_value):
        """Creates a WebElement from a response token."""
        return WebElement(self, resp_value.split("/")[1])

    def _find_element_by(self, by, value):
        """Finds a single element using the given strategy and value.

        Raises:
            NoSuchElementException: if the server returns an empty response.
        """
        try:
            resp = self._post("element", by, value)
            if not resp:
                raise NoSuchElementException(resp)
            return self._get_elem(resp[0])
        except ErrorInResponseException, ex:
            # Translate server-side errors into friendlier exceptions.
            utils.handle_find_element_exception(ex)

    def _get(self, path, *params):
        """Sends a command to the server using http GET method."""
        return utils.return_value_if_exists(
            self._conn.get(path, *params))

    def _post(self, path, *params):
        """Sends a command to the server using http POST method."""
        return utils.return_value_if_exists(
            self._conn.post(path, *params))

    def _delete(self, path):
        """Sends a command to the server using http DELETE method."""
        return utils.return_value_if_exists(
            self._conn.delete(path))
|
"""Helper classes to implement caching."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import datetime
import logging
import sys
import threading
import unittest
import appengine_config
from models.counters import PerfCounter
def iter_all(query, batch_size=100):
    """Yields query results iterator. Proven method for large datasets.

    Re-issues the query from the last cursor position after each batch and
    stops after the first pass that produces no entities.
    """
    cursor = None
    while True:
        query = query.with_cursor(cursor)
        seen_any = False
        for entity in query.run(batch_size=batch_size):
            seen_any = True
            yield entity
        if not seen_any:
            return
        cursor = query.cursor()
class AbstractScopedSingleton(object):
    """A singleton object bound to and managed by a container.

    This singleton stores its instance inside the container. When container is
    wiped, the singleton instance is garbage collected and destroyed. You can
    use a dict as a container and then wipe it yourself. You can use
    threading.local as a container and it will be wiped automatically when
    thread exits.
    """

    # Subclasses must set this to a dict-like object that owns the instances.
    CONTAINER = None

    @classmethod
    def _instances(cls):
        """Returns the {cls: instance} map stored inside CONTAINER."""
        assert cls.CONTAINER is not None
        if 'instances' not in cls.CONTAINER:
            cls.CONTAINER['instances'] = {}
        return cls.CONTAINER['instances']

    @classmethod
    def instance(cls, *args, **kwargs):
        """Creates new or returns existing instance of the object.

        Raises:
            AssertionError: if an instance already exists but was created
                with different constructor arguments.
        """
        # pylint: disable=protected-access
        _instance = cls._instances().get(cls)
        if not _instance:
            try:
                _instance = cls(*args, **kwargs)
            except:
                # Log the failing constructor arguments, then re-raise.
                logging.exception(
                    'Failed to instantiate %s: %s, %s', cls, args, kwargs)
                raise
            appengine_config.log_appstats_event('%s.create' % cls.__name__, {})
            # Remember the init args so later calls can detect mismatches.
            _instance._init_args = (args, kwargs)
            cls._instances()[cls] = _instance
        else:
            _before = _instance._init_args
            _now = (args, kwargs)
            if _now != _before:
                raise AssertionError(
                    'Singleton initiated with %s already exists. '
                    'Failed to re-initialized it with %s.' % (_before, _now))
        return _instance

    @classmethod
    def clear_all(cls):
        """Clear all active instances."""
        if cls._instances():
            # clear() each live instance first, then drop the whole map
            # from the container.
            for _instance in list(cls._instances().values()):
                _instance.clear()
            del cls.CONTAINER['instances']

    @classmethod
    def clear_instance(cls):
        """Destroys the instance of this cls."""
        appengine_config.log_appstats_event(
            '%s.destroy' % cls.__name__, {})
        _instance = cls._instances().get(cls)
        if _instance:
            del cls._instances()[cls]

    def clear(self):
        """Destroys this object and its content."""
        appengine_config.log_appstats_event(
            '%s.destroy' % self.__class__.__name__, {})
        _instance = self._instances().get(self.__class__)
        if _instance:
            del self._instances()[self.__class__]
# Containers backing the two singleton scopes below: one plain dict shared by
# the whole process, and one threading.local whose contents are discarded
# automatically when the owning thread exits.
_process_scoped_singleton = {}
_request_scoped_singleton = threading.local()
class ProcessScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the process."""

    # One shared container for every thread in this process.
    CONTAINER = _process_scoped_singleton
class RequestScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the request scope."""

    # threading.local().__dict__ is per-thread, so instances never leak
    # across concurrently handled requests.
    CONTAINER = _request_scoped_singleton.__dict__
class LRUCache(object):
    """A dict that supports capped size and LRU eviction of items.

    Capacity may be bounded by item count, by total byte size, or both;
    at least one bound is required. Reads via get() refresh an item's
    recency; contains() does not.
    """

    def __init__(
        self, max_item_count=None,
        max_size_bytes=None, max_item_size_bytes=None):
        # At least one capacity bound is required, and bounds must be > 0.
        assert max_item_count or max_size_bytes
        if max_item_count:
            assert max_item_count > 0
        if max_size_bytes:
            assert max_size_bytes > 0
        # Running byte total of stored entries; only maintained when
        # max_size_bytes is set.
        self.total_size = 0
        self.max_item_count = max_item_count
        self.max_size_bytes = max_size_bytes
        self.max_item_size_bytes = max_item_size_bytes
        # Insertion/access ordered; eviction pops the oldest entry first.
        self.items = collections.OrderedDict([])

    def get_entry_size(self, key, value):
        """Computes item size. Override and compute properly for your items."""
        return sys.getsizeof(key) + sys.getsizeof(value)

    def _compute_current_size(self):
        """Recomputes total size from scratch; diagnostic check of total_size."""
        total = 0
        for key, value in self.items.iteritems():
            # Bug fix: this called self.get_item_size(), which does not
            # exist; get_entry_size() already accounts for the key size.
            total += self.get_entry_size(key, value)
        return total

    def _allocate_space(self, key, value):
        """Remove items in FIFO order until size constraints are met.

        Returns:
            True if the entry fits (after any evictions) and its size was
            charged to total_size; False if it can never fit.
        """
        entry_size = self.get_entry_size(key, value)
        # An entry over the per-item limit is rejected outright.
        if self.max_item_size_bytes and entry_size > self.max_item_size_bytes:
            return False
        while True:
            over_count = False
            over_size = False
            if self.max_item_count:
                over_count = len(self.items) >= self.max_item_count
            if self.max_size_bytes:
                over_size = self.total_size + entry_size >= self.max_size_bytes
            if not (over_count or over_size):
                if self.max_size_bytes:
                    self.total_size += entry_size
                    assert self.total_size < self.max_size_bytes
                return True
            if self.items:
                # Evict the least recently used entry and refund its size.
                _key, _value = self.items.popitem(last=False)
                if self.max_size_bytes:
                    self.total_size -= self.get_entry_size(_key, _value)
                    assert self.total_size >= 0
            else:
                # Nothing left to evict; the entry can never fit.
                break
        return False

    def _record_access(self, key):
        """Pop and re-add the item so it becomes the most recently used."""
        item = self.items.pop(key)
        self.items[key] = item

    def contains(self, key):
        """Checks if item is contained without accessing it."""
        assert key
        return key in self.items

    def put(self, key, value):
        """Stores an item, evicting older items if needed.

        Returns:
            True if stored; False if the item could not fit.
        """
        assert key
        # Bug fix: replacing an existing key used to charge the new entry's
        # size without refunding the old one, drifting total_size upward.
        if key in self.items:
            self.delete(key)
        if self._allocate_space(key, value):
            self.items[key] = value
            return True
        return False

    def get(self, key):
        """Accessing item makes it less likely to be evicted.

        Returns:
            (True, value) on a hit, (False, None) on a miss.
        """
        assert key
        if key in self.items:
            self._record_access(key)
            return True, self.items[key]
        return False, None

    def delete(self, key):
        """Removes an item; returns True if it was present."""
        assert key
        if key in self.items:
            # Bug fix: keep the running byte total in sync on explicit
            # deletes, not just on evictions.
            if self.max_size_bytes:
                self.total_size -= self.get_entry_size(key, self.items[key])
            del self.items[key]
            return True
        return False
class NoopCacheConnection(object):
    """Connection to no-op cache that provides no caching.

    Drop-in stand-in for a real cache connection: put() and delete() are
    no-ops and get() always reports a miss.
    """

    def put(self, *unused_args, **unused_kwargs):
        """Discards the value; nothing is ever stored."""
        return None

    def get(self, *unused_args, **unused_kwargs):
        """Always a cache miss: (found=False, value=None)."""
        return (False, None)

    def delete(self, *unused_args, **unused_kwargs):
        """Nothing is stored, so there is nothing to delete."""
        return None
class AbstractCacheEntry(object):
    """Object representation while in cache."""

    # Deletions are not tracked, so a deleted item can keep being served
    # from the cache for up to this many seconds before it expires.
    CACHE_ENTRY_TTL_SEC = 5 * 60

    @classmethod
    def internalize(cls, unused_key, *args, **kwargs):
        """Converts incoming objects into cache entry object."""
        return (args, kwargs)

    @classmethod
    def externalize(cls, unused_key, *args, **kwargs):
        """Converts cache entry into external object."""
        return (args, kwargs)

    def has_expired(self):
        """True when this entry is older than CACHE_ENTRY_TTL_SEC."""
        now = datetime.datetime.utcnow()
        age = (now - self.created_on).total_seconds()
        return age > self.CACHE_ENTRY_TTL_SEC

    def is_up_to_date(self, unused_key, unused_update):
        """Compare entry and the update object to decide if entry is fresh."""
        raise NotImplementedError()

    def updated_on(self):
        """Return last update time for entity."""
        raise NotImplementedError()
class AbstractCacheConnection(object):
    """Namespaced cache connection backed by a persistent datastore entity.

    Subclasses must set PERSISTENT_ENTITY (the authoritative datastore
    model) and CACHE_ENTRY (an AbstractCacheEntry subclass) and must
    instantiate self.cache in __init__.
    """

    PERSISTENT_ENTITY = None
    CACHE_ENTRY = None

    @classmethod
    def init_counters(cls):
        """Creates the performance counters for this connection class."""
        name = cls.__name__
        cls.CACHE_RESYNC = PerfCounter(
            'gcb-models-%s-cache-resync' % name,
            'A number of times an vfs cache was updated.')
        cls.CACHE_PUT = PerfCounter(
            'gcb-models-%s-cache-put' % name,
            'A number of times an object was put into cache.')
        cls.CACHE_GET = PerfCounter(
            'gcb-models-%s-cache-get' % name,
            'A number of times an object was pulled from cache.')
        cls.CACHE_DELETE = PerfCounter(
            'gcb-models-%s-cache-delete' % name,
            'A number of times an object was deleted from cache.')
        cls.CACHE_HIT = PerfCounter(
            'gcb-models-%s-cache-hit' % name,
            'A number of times an object was found cache.')
        cls.CACHE_HIT_NONE = PerfCounter(
            'gcb-models-%s-cache-hit-none' % name,
            'A number of times an object was found cache, but it was None.')
        cls.CACHE_MISS = PerfCounter(
            'gcb-models-%s-cache-miss' % name,
            'A number of times an object was not found in the cache.')
        cls.CACHE_NOT_FOUND = PerfCounter(
            'gcb-models-%s-cache-not-found' % name,
            'A number of times an object was requested, but was not found in '
            'the cache or underlying provider.')
        cls.CACHE_UPDATE_COUNT = PerfCounter(
            'gcb-models-%s-cache-update-count' % name,
            'A number of update objects received.')
        cls.CACHE_EVICT = PerfCounter(
            'gcb-models-%s-cache-evict' % name,
            'A number of times an object was evicted from cache because it was '
            'changed.')
        cls.CACHE_EXPIRE = PerfCounter(
            'gcb-models-%s-cache-expire' % name,
            'A number of times an object has expired from cache because it was '
            'too old.')

    @classmethod
    def make_key_prefix(cls, ns):
        """Key prefix for all entries of this class within namespace ns."""
        return '%s:%s' % (cls.__name__, ns)

    @classmethod
    def make_key(cls, ns, entry_key):
        """Full cache key for entry_key within namespace ns."""
        return '%s:%s' % (cls.make_key_prefix(ns), entry_key)

    @classmethod
    def is_enabled(cls):
        """Subclasses decide whether caching is turned on."""
        raise NotImplementedError()

    @classmethod
    def new_connection(cls, *args, **kwargs):
        """Factory: returns a live connection, or a no-op one when disabled."""
        if not cls.is_enabled():
            return NoopCacheConnection()
        conn = cls(*args, **kwargs)
        # pylint: disable=protected-access
        # Evict anything that changed since the cache was last current.
        conn.apply_updates(conn._get_incremental_updates())
        return conn

    def __init__(self, namespace):
        """Override this method and properly instantiate self.cache."""
        self.namespace = namespace
        self.cache = None
        appengine_config.log_appstats_event(
            '%s.connect' % self.__class__.__name__, {'namespace': namespace})

    def apply_updates(self, updates):
        """Applies a list of global changes to the local cache.

        Each stale, changed, or expired cached entry is simply evicted;
        it will be re-fetched from the persistent entity on next access.
        """
        self.CACHE_RESYNC.inc()
        for key, update in updates.iteritems():
            _key = self.make_key(self.namespace, key)
            found, entry = self.cache.get(_key)
            if not found:
                continue
            if entry is None:
                # A cached negative result; the object now exists, so evict.
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if not entry.is_up_to_date(key, update):
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if entry.has_expired():
                self.CACHE_EXPIRE.inc()
                self.cache.delete(_key)
                continue

    def _get_most_recent_updated_on(self):
        """Get the most recent item cached. Datastore deletions are missed...

        Returns:
            (has_items, max_updated_on): whether any entry in this
            namespace exists, and the newest updated_on among them.
        """
        has_items = False
        max_updated_on = datetime.datetime.fromtimestamp(0)
        prefix = self.make_key_prefix(self.namespace)
        for key, entry in self.cache.items.iteritems():
            # Only consider entries belonging to this namespace/class.
            if not key.startswith(prefix):
                continue
            has_items = True
            if not entry:
                continue
            updated_on = entry.updated_on()
            if not updated_on:  # old entities may be missing this field
                updated_on = datetime.datetime.fromtimestamp(0)
            if updated_on > max_updated_on:
                max_updated_on = updated_on
        return has_items, max_updated_on

    def get_updates_when_empty(self):
        """Override this method to pre-load cache when it's completely empty."""
        return {}

    def _get_incremental_updates(self):
        """Gets a list of global changes older than the most recent item cached.

        WARNING!!! We fetch the updates since the timestamp of the oldest item
        we have cached so far. This will bring all objects that have changed or
        were created since that time.

        This will NOT bring the notifications about object deletions. Thus cache
        will continue to serve deleted objects until they expire.

        Returns:
            an dict of {key: update} objects that represent recent updates
        """
        has_items, updated_on = self._get_most_recent_updated_on()
        if not has_items:
            return self.get_updates_when_empty()
        q = self.PERSISTENT_ENTITY.all()
        if updated_on:
            q.filter('updated_on > ', updated_on)
        result = {
            entity.key().name(): entity for entity in iter_all(q)}
        self.CACHE_UPDATE_COUNT.inc(len(result.keys()))
        return result

    def put(self, key, *args):
        """Stores an object, converted to its cache-entry form."""
        self.CACHE_PUT.inc()
        self.cache.put(
            self.make_key(self.namespace, key),
            self.CACHE_ENTRY.internalize(key, *args))

    def get(self, key):
        """Fetches an object from the cache.

        Returns:
            (True, value) on a hit -- value may be a cached None --
            or (False, None) on a miss or an expired entry.
        """
        self.CACHE_GET.inc()
        _key = self.make_key(self.namespace, key)
        found, entry = self.cache.get(_key)
        if not found:
            self.CACHE_MISS.inc()
            return False, None
        if not entry:
            # Cached negative result: the object is known not to exist.
            self.CACHE_HIT_NONE.inc()
            return True, None
        if entry.has_expired():
            self.CACHE_EXPIRE.inc()
            self.cache.delete(_key)
            return False, None
        self.CACHE_HIT.inc()
        return True, self.CACHE_ENTRY.externalize(key, entry)

    def delete(self, key):
        """Removes an object from the cache."""
        self.CACHE_DELETE.inc()
        self.cache.delete(self.make_key(self.namespace, key))
class LRUCacheTests(unittest.TestCase):
    """Unit tests for LRUCache eviction and size accounting."""

    def test_ordereddict_works(self):
        # Sanity-check the OrderedDict popitem() semantics the cache
        # relies on: last=False pops oldest, last=True pops newest.
        _dict = collections.OrderedDict([])
        _dict['a'] = '1'
        _dict['b'] = '2'
        _dict['c'] = '3'
        self.assertEqual(('a', '1'), _dict.popitem(last=False))
        self.assertEqual(('c', '3'), _dict.popitem(last=True))

    def test_initialization(self):
        # At least one capacity bound is required and bounds must be > 0.
        with self.assertRaises(AssertionError):
            LRUCache()
        with self.assertRaises(AssertionError):
            LRUCache(max_item_count=-1)
        with self.assertRaises(AssertionError):
            LRUCache(max_size_bytes=-1)
        LRUCache(max_item_count=1)
        LRUCache(max_size_bytes=1)

    def test_evict_by_count(self):
        cache = LRUCache(max_item_count=3)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # contains() must not refresh recency, so 'a' stays oldest and
        # is evicted by the next put.
        self.assertTrue(cache.contains('a'))
        self.assertTrue(cache.put('d', '4'))
        self.assertFalse(cache.contains('a'))
        self.assertEquals(cache.get('a'), (False, None))

    def test_evict_by_count_lru(self):
        cache = LRUCache(max_item_count=3)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # get() refreshes recency, so 'b' becomes the eviction candidate.
        self.assertEquals(cache.get('a'), (True, '1'))
        self.assertTrue(cache.put('d', '4'))
        self.assertTrue(cache.contains('a'))
        self.assertFalse(cache.contains('b'))

    def test_evict_by_size(self):
        # Budget roughly three small items beyond the empty dict overhead.
        min_size = sys.getsizeof(LRUCache(max_item_count=1).items)
        item_size = sys.getsizeof('a1')
        cache = LRUCache(max_size_bytes=min_size + 3 * item_size)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # An entry bigger than the whole cache can never be stored.
        self.assertFalse(cache.put('d', bytearray(1000)))

    def test_evict_by_size_lru(self):
        cache = LRUCache(max_size_bytes=5000)
        self.assertTrue(cache.put('a', bytearray(4500)))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        self.assertTrue(cache.contains('a'))
        # Storing another large item forces eviction of the oldest ('a').
        self.assertTrue(cache.put('d', bytearray(1000)))
        self.assertFalse(cache.contains('a'))
        self.assertTrue(cache.contains('b'))

    def test_max_item_size(self):
        cache = LRUCache(max_size_bytes=5000, max_item_size_bytes=1000)
        # Items above max_item_size_bytes are rejected outright.
        self.assertFalse(cache.put('a', bytearray(4500)))
        self.assertEquals(cache.get('a'), (False, None))
        self.assertTrue(cache.put('a', bytearray(500)))
        found, _ = cache.get('a')
        self.assertTrue(found)
class SingletonTests(unittest.TestCase):
    """Unit tests for the scoped-singleton lifecycle."""

    def test_singleton(self):
        # Two distinct singleton classes sharing the same request scope.
        class A(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        class B(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        # TODO(psimakov): prevent direct instantiation
        A('aaa')
        B('bbb')

        # using instance() creates and returns the same instance
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = A.instance('bar')
        assert a.data == 'bar'
        assert b.data == 'bar'
        assert a is b

        # re-initialization fails if arguments differ
        RequestScopedSingleton.clear_all()
        a = A.instance('dog')
        try:
            b = A.instance('cat')
            raise Exception('Expected to fail.')
        except AssertionError:
            pass

        # clearing one keep others
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        a.clear()
        c = B.instance('cat')
        assert c is b

        # clearing all clears all
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        RequestScopedSingleton.clear_all()
        c = A.instance('bar')
        d = B.instance('cat')
        assert a is not c
        assert b is not d
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    loader = unittest.TestLoader()
    suites = [
        loader.loadTestsFromTestCase(test_class)
        for test_class in [LRUCacheTests, SingletonTests]]
    unittest.TextTestRunner().run(unittest.TestSuite(suites))


if __name__ == '__main__':
    run_all_unit_tests()
|
"""The tests for the time_pattern automation."""
from datetime import timedelta
from unittest.mock import patch
import pytest
import voluptuous as vol
import homeassistant.components.automation as automation
import homeassistant.components.homeassistant.triggers.time_pattern as time_pattern
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service, mock_component
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Every fired automation appends its service call to the returned list.
    return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components."""
    # The automation integration expects the group component to exist.
    mock_component(hass, "group")
async def test_if_fires_when_hour_matches(hass, calls):
    """Test for firing if hour is matching."""
    now = dt_util.utcnow()
    # Freeze "now" a year ahead at hour 3 so the hours==0 trigger cannot
    # fire the instant the automation is set up.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, hour=3
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": 0,
                        "minutes": "*",
                        "seconds": "*",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    # A time with a matching hour fires the automation once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, hour=0))
    await hass.async_block_till_done()
    assert len(calls) == 1

    # After turning the automation off, a matching time must not fire again.
    await hass.services.async_call(
        automation.DOMAIN,
        SERVICE_TURN_OFF,
        {ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
        blocking=True,
    )
    async_fire_time_changed(hass, now.replace(year=now.year + 1, hour=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_minute_matches(hass, calls):
    """Test for firing if minutes are matching."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 30 so the minutes==0 trigger cannot fire
    # during setup.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, minute=30
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": "*",
                        "minutes": 0,
                        "seconds": "*",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    # A time with a matching minute fires the automation once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, minute=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_second_matches(hass, calls):
    """Test for firing if seconds are matching."""
    now = dt_util.utcnow()
    # Freeze "now" at second 30 so the seconds==0 trigger cannot fire
    # during setup.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, second=30
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": "*",
                        "minutes": "*",
                        "seconds": 0,
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    # A time with a matching second fires the automation once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, second=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_second_as_string_matches(hass, calls):
    """Test for firing if seconds are matching."""
    now = dt_util.utcnow()
    # Freeze "now" at second 15; the trigger matches second 30, given
    # below as a string to exercise string-to-int coercion.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, second=15
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": "*",
                        "minutes": "*",
                        "seconds": "30",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    # 15 seconds later the time hits second 30 and the automation fires.
    async_fire_time_changed(
        hass, time_that_will_not_match_right_away + timedelta(seconds=15)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_all_matches(hass, calls):
    """Test for firing if everything matches."""
    now = dt_util.utcnow()
    # Freeze "now" at hour 4 so the 01:02:03 trigger cannot fire during
    # setup.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, hour=4
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": 1,
                        "minutes": 2,
                        "seconds": 3,
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    # Fires only when hour, minute AND second all match.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=3)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_periodic_seconds(hass, calls):
    """Test for firing periodically every second."""
    now = dt_util.utcnow()
    # Freeze "now" at second 1, which does not match the /10 pattern.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, second=1
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": "*",
                        "minutes": "*",
                        # "/10" means every second divisible by 10.
                        "seconds": "/10",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=0, minute=0, second=10)
    )
    await hass.async_block_till_done()
    assert len(calls) >= 1
async def test_if_fires_periodic_minutes(hass, calls):
    """Test for firing periodically every minute."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 1, which does not match the /2 pattern.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, minute=1
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        "platform": "time_pattern",
                        "hours": "*",
                        # "/2" means every minute divisible by 2.
                        "minutes": "/2",
                        "seconds": "*",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=0, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_periodic_hours(hass, calls):
    """Test for firing periodically every hour."""
    now = dt_util.utcnow()
    # Freeze "now" at hour 1, which does not match the /2 pattern.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, hour=1
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    "trigger": {
                        # "/2" means every hour divisible by 2.
                        "hours": "/2",
                        "platform": "time_pattern",
                        "minutes": "*",
                        "seconds": "*",
                    },
                    "action": {"service": "test.automation"},
                }
            },
        )

    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=2, minute=0, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_default_values(hass, calls):
    """Test for firing at 2 minutes every hour."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 1 so the minutes==2 trigger cannot fire
    # during setup.
    time_that_will_not_match_right_away = dt_util.utcnow().replace(
        year=now.year + 1, minute=1
    )
    with patch(
        "homeassistant.util.dt.utcnow", return_value=time_that_will_not_match_right_away
    ):
        assert await async_setup_component(
            hass,
            automation.DOMAIN,
            {
                automation.DOMAIN: {
                    # Only "minutes" given: hours default to "*" (any) and
                    # seconds default to 0.
                    "trigger": {"platform": "time_pattern", "minutes": "2"},
                    "action": {"service": "test.automation"},
                }
            },
        )

    # Fires at hh:02:00 ...
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1

    # ... but not at hh:02:01, since seconds defaulted to 0.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=1)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1

    # Any hour matches, so the next hour's hh:02:00 fires again.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=2, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
async def test_invalid_schemas(hass, calls):
    """Test invalid schemas."""
    schemas = (
        None,
        {},
        {"platform": "time_pattern"},  # at least one time field is required
        {"platform": "time_pattern", "minutes": "/"},  # divisor missing
        {"platform": "time_pattern", "minutes": "*/5"},  # wrong interval syntax
        {"platform": "time_pattern", "minutes": "/90"},  # divisor out of range
        {"platform": "time_pattern", "hours": 12, "minutes": 0, "seconds": 100},
    )
    for value in schemas:
        with pytest.raises(vol.Invalid):
            time_pattern.TRIGGER_SCHEMA(value)
|
"""
PlugIn for Nexus OS driver
"""
import logging
from neutron.common import exceptions as exc
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_credentials_v2 as cred
from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc
from neutron.plugins.cisco.common import config as conf
from neutron.plugins.cisco.db import nexus_db_v2 as nxos_db
from neutron.plugins.cisco.l2device_plugin_base import L2DevicePluginBase
LOG = logging.getLogger(__name__)
class NexusPlugin(L2DevicePluginBase):
"""Nexus PlugIn Main Class."""
_networks = {}
    def __init__(self):
        """Extract configuration parameters from the configuration file."""
        # The concrete Nexus driver class is configurable; load it by name.
        self._client = importutils.import_object(conf.CISCO.nexus_driver)
        LOG.debug(_("Loaded driver %s"), conf.CISCO.nexus_driver)
        # Dict keyed by (switch_ip, attribute) tuples; values are ports or
        # per-switch settings such as 'ssh_port'.
        self._nexus_switches = conf.get_nexus_dictionary()
        # Per-switch credential cache, filled lazily by get_credential().
        self.credentials = {}
def get_credential(self, nexus_ip):
if nexus_ip not in self.credentials:
_nexus_username = cred.Store.get_username(nexus_ip)
_nexus_password = cred.Store.get_password(nexus_ip)
self.credentials[nexus_ip] = {
'username': _nexus_username,
'password': _nexus_password
}
return self.credentials[nexus_ip]
    def get_all_networks(self, tenant_id):
        """Get all networks.

        Returns a dictionary containing all <network_uuid, network_name> for
        the specified tenant.
        """
        LOG.debug(_("NexusPlugin:get_all_networks() called"))
        # NOTE(review): tenant_id is currently ignored; every network in the
        # class-level cache is returned regardless of tenant.
        return self._networks.values()
    def create_network(self, tenant_id, net_name, net_id, vlan_name, vlan_id,
                       host, instance):
        """Create network.

        Create a VLAN in the appropriate switch/port, and configure the
        appropriate interfaces for this VLAN.

        Raises:
            NexusComputeHostNotConfigured: if no switch/port is configured
                for the given compute host.
        """
        LOG.debug(_("NexusPlugin:create_network() called"))
        # Grab the switch IP and port for this host
        # (_nexus_switches is keyed by (switch_ip, attribute) tuples).
        for switch_ip, attr in self._nexus_switches:
            if str(attr) == str(host):
                port_id = self._nexus_switches[switch_ip, attr]
                break
        else:
            raise cisco_exc.NexusComputeHostNotConfigured(host=host)
        # Check if this network is already in the DB
        vlan_created = False
        vlan_enabled = False
        try:
            nxos_db.get_port_vlan_switch_binding(port_id, vlan_id, switch_ip)
        except cisco_exc.NexusPortBindingNotFound:
            _nexus_ip = switch_ip
            _nexus_ports = (port_id,)
            _nexus_ssh_port = \
                self._nexus_switches[switch_ip, 'ssh_port']
            _nexus_creds = self.get_credential(_nexus_ip)
            _nexus_username = _nexus_creds['username']
            _nexus_password = _nexus_creds['password']
            # Check for vlan/switch binding
            try:
                nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
            except cisco_exc.NexusPortBindingNotFound:
                # Create vlan and trunk vlan on the port
                self._client.create_vlan(
                    vlan_name, str(vlan_id), _nexus_ip,
                    _nexus_username, _nexus_password,
                    _nexus_ports, _nexus_ssh_port, vlan_id)
                vlan_created = True
            else:
                # Only trunk vlan on the port
                man = self._client.nxos_connect(_nexus_ip,
                                                int(_nexus_ssh_port),
                                                _nexus_username,
                                                _nexus_password)
                self._client.enable_vlan_on_trunk_int(man,
                                                      _nexus_ip,
                                                      port_id,
                                                      vlan_id)
                vlan_enabled = True
            try:
                nxos_db.add_nexusport_binding(port_id, str(vlan_id),
                                              switch_ip, instance)
            except Exception:
                with excutils.save_and_reraise_exception():
                    # Add binding failed, roll back any vlan creation/enabling
                    if vlan_created:
                        self._client.delete_vlan(
                            str(vlan_id), _nexus_ip,
                            _nexus_username, _nexus_password,
                            _nexus_ports, _nexus_ssh_port)
                    if vlan_enabled:
                        self._client.disable_vlan_on_trunk_int(man,
                                                               port_id,
                                                               vlan_id)
        # Record the network in the in-memory cache and return its dict.
        new_net_dict = {const.NET_ID: net_id,
                        const.NET_NAME: net_name,
                        const.NET_PORTS: {},
                        const.NET_VLAN_NAME: vlan_name,
                        const.NET_VLAN_ID: vlan_id}
        self._networks[net_id] = new_net_dict
        return new_net_dict
    def add_router_interface(self, vlan_name, vlan_id, subnet_id,
                             gateway_ip, router_id):
        """Create VLAN SVI on the Nexus switch.

        Raises:
            NoNexusSwitch: if no switch is available for the SVI.
            SubnetInterfacePresent: if this subnet already has a router
                interface.
        """
        # Find a switch to create the SVI on
        switch_ip = self._find_switch_for_svi()
        if not switch_ip:
            raise cisco_exc.NoNexusSwitch()
        _nexus_ip = switch_ip
        _nexus_ssh_port = self._nexus_switches[switch_ip, 'ssh_port']
        _nexus_creds = self.get_credential(_nexus_ip)
        _nexus_username = _nexus_creds['username']
        _nexus_password = _nexus_creds['password']
        # Check if this vlan exists on the switch already
        try:
            nxos_db.get_nexusvlan_binding(vlan_id, switch_ip)
        except cisco_exc.NexusPortBindingNotFound:
            # Create vlan and trunk vlan on the port
            self._client.create_vlan(
                vlan_name, str(vlan_id), _nexus_ip,
                _nexus_username, _nexus_password,
                [], _nexus_ssh_port, vlan_id)
        # Check if a router interface has already been created
        try:
            nxos_db.get_nexusvm_binding(vlan_id, router_id)
            # A binding found means an SVI already serves this subnet.
            raise cisco_exc.SubnetInterfacePresent(subnet_id=subnet_id,
                                                   router_id=router_id)
        except cisco_exc.NexusPortBindingNotFound:
            # No binding yet: create the SVI and record it under the
            # reserved 'router' port name.
            self._client.create_vlan_svi(vlan_id, _nexus_ip, _nexus_username,
                                         _nexus_password, _nexus_ssh_port,
                                         gateway_ip)
            nxos_db.add_nexusport_binding('router', str(vlan_id),
                                          switch_ip, router_id)
            return True
    def remove_router_interface(self, vlan_id, router_id):
        """Remove VLAN SVI from the Nexus Switch.

        Returns:
            the result of delete_port(), which removes the binding row and
            deletes the vlan if it is no longer used.
        """
        # Grab switch_ip from database
        row = nxos_db.get_nexusvm_binding(vlan_id, router_id)
        # Delete the SVI interface from the switch
        _nexus_ip = row['switch_ip']
        _nexus_ssh_port = self._nexus_switches[_nexus_ip, 'ssh_port']
        _nexus_creds = self.get_credential(_nexus_ip)
        _nexus_username = _nexus_creds['username']
        _nexus_password = _nexus_creds['password']
        self._client.delete_vlan_svi(vlan_id, _nexus_ip, _nexus_username,
                                     _nexus_password, _nexus_ssh_port)
        # Invoke delete_port to delete this row
        # And delete vlan if required
        return self.delete_port(router_id, vlan_id)
def _find_switch_for_svi(self):
"""Get a switch to create the SVI on."""
LOG.debug(_("Grabbing a switch to create SVI"))
if conf.CISCO.svi_round_robin:
LOG.debug(_("Using round robin to create SVI"))
switch_dict = dict(
(switch_ip, 0) for switch_ip, _ in self._nexus_switches)
try:
bindings = nxos_db.get_nexussvi_bindings()
# Build a switch dictionary with weights
for binding in bindings:
switch_ip = binding.switch_ip
if switch_ip not in switch_dict:
switch_dict[switch_ip] = 1
else:
switch_dict[switch_ip] += 1
# Search for the lowest value in the dict
if switch_dict:
switch_ip = min(switch_dict.items(), key=switch_dict.get)
return switch_ip[0]
except cisco_exc.NexusPortBindingNotFound:
pass
LOG.debug(_("No round robin or zero weights, using first switch"))
# Return the first switch in the config
for switch_ip, attr in self._nexus_switches:
return switch_ip
    def delete_network(self, tenant_id, net_id, **kwargs):
        """Delete network.

        Deletes the VLAN in all switches, and removes the VLAN configuration
        from the relevant interfaces.
        """
        # NOTE(review): currently only logs; the teardown described in the
        # docstring is not implemented in this method.
        LOG.debug(_("NexusPlugin:delete_network() called"))
    def get_network_details(self, tenant_id, net_id, **kwargs):
        """Return the details of a particular network."""
        LOG.debug(_("NexusPlugin:get_network_details() called"))
        # NOTE(review): _get_network below is defined with four positional
        # parameters (tenant_id, network_id, context, base_plugin_ref) but
        # is called here with only two -- this call would raise TypeError.
        # Confirm intended signature before relying on this method.
        network = self._get_network(tenant_id, net_id)
        return network
    def update_network(self, tenant_id, net_id, **kwargs):
        """Update the properties of a particular Virtual Network."""
        # No-op on the Nexus side: only logs the call.
        LOG.debug(_("NexusPlugin:update_network() called"))
    def get_all_ports(self, tenant_id, net_id, **kwargs):
        """Get all ports.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:get_all_ports() called"))
    def create_port(self, tenant_id, net_id, port_state, port_id, **kwargs):
        """Create port.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:create_port() called"))
    def delete_port(self, device_id, vlan_id):
        """Delete port.

        Delete port bindings from the database and scan whether the network
        is still required on the interfaces trunked.

        Returns:
            The instance_id of the removed binding, or None when no binding
            exists for (vlan_id, device_id).
        """
        LOG.debug(_("NexusPlugin:delete_port() called"))
        # Delete DB row for this port
        try:
            row = nxos_db.get_nexusvm_binding(vlan_id, device_id)
        except cisco_exc.NexusPortBindingNotFound:
            # Nothing bound: treat as already deleted.
            return
        nxos_db.remove_nexusport_binding(row['port_id'], row['vlan_id'],
                                         row['switch_ip'],
                                         row['instance_id'])
        # Check for any other bindings with the same vlan_id and switch_ip
        try:
            nxos_db.get_nexusvlan_binding(row['vlan_id'], row['switch_ip'])
        except cisco_exc.NexusPortBindingNotFound:
            # This was the last binding for the VLAN on that switch, so the
            # VLAN itself can be removed from the device.
            try:
                # Delete this vlan from this switch
                _nexus_ip = row['switch_ip']
                _nexus_ports = ()
                # 'router' is the sentinel port_id used for SVI bindings;
                # there is no physical port to untrunk in that case.
                if row['port_id'] != 'router':
                    _nexus_ports = (row['port_id'],)
                _nexus_ssh_port = (self._nexus_switches[_nexus_ip,
                                                        'ssh_port'])
                _nexus_creds = self.get_credential(_nexus_ip)
                _nexus_username = _nexus_creds['username']
                _nexus_password = _nexus_creds['password']
                self._client.delete_vlan(
                    str(row['vlan_id']), _nexus_ip,
                    _nexus_username, _nexus_password,
                    _nexus_ports, _nexus_ssh_port)
            except Exception:
                # The delete vlan operation on the Nexus failed,
                # so this delete_port request has failed. For
                # consistency, roll back the Nexus database to what
                # it was before this request.
                with excutils.save_and_reraise_exception():
                    nxos_db.add_nexusport_binding(row['port_id'],
                                                  row['vlan_id'],
                                                  row['switch_ip'],
                                                  row['instance_id'])
        return row['instance_id']
    def update_port(self, tenant_id, net_id, port_id, port_state, **kwargs):
        """Update port.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:update_port() called"))
    def get_port_details(self, tenant_id, net_id, port_id, **kwargs):
        """Get port details.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:get_port_details() called"))
    def plug_interface(self, tenant_id, net_id, port_id, remote_interface_id,
                       **kwargs):
        """Plug interfaces.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:plug_interface() called"))
    def unplug_interface(self, tenant_id, net_id, port_id, **kwargs):
        """Unplug interface.

        This is probably not applicable to the Nexus plugin.
        Delete if not required.
        """
        # No-op: only logs the call.
        LOG.debug(_("NexusPlugin:unplug_interface() called"))
    def _get_network(self, tenant_id, network_id, context, base_plugin_ref):
        """Return a dict describing the network, via the base plugin.

        Raises:
            exc.NetworkNotFound: the base plugin has no such network.

        NOTE(review): the caller in get_network_details() passes only
        (tenant_id, net_id); context and base_plugin_ref would be missing
        there -- confirm which signature is intended.
        """
        network = base_plugin_ref._get_network(context, network_id)
        if not network:
            raise exc.NetworkNotFound(net_id=network_id)
        return {const.NET_ID: network_id, const.NET_NAME: network.name,
                const.NET_PORTS: network.ports}
|
""" Provider info for CloudStack
"""
from perfkitbenchmarker import providers
from perfkitbenchmarker import provider_info
class CloudStackProviderInfo(provider_info.BaseProviderInfo):
    """Provider metadata for CloudStack (unsupported benchmarks, cloud id)."""
    # Benchmarks that cannot run on CloudStack.
    UNSUPPORTED_BENCHMARKS = ['mysql_service']
    CLOUD = providers.CLOUDSTACK
|
import mock
from st2actions.runners import announcementrunner
from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED
from st2common.models.api.trace import TraceContext
from base import RunnerTestCase
import st2tests.config as tests_config
mock_dispatcher = mock.Mock()  # NOTE(review): appears unused in this module.


# Class-level patch: every test method receives the patched dispatch mock
# as its last injected argument (before any method-level patches).
@mock.patch('st2common.transport.announcement.AnnouncementDispatcher.dispatch')
class AnnouncementRunnerTestCase(RunnerTestCase):
    """Tests for the announcement runner with dispatch mocked out."""

    @classmethod
    def setUpClass(cls):
        tests_config.parse_args()

    def test_runner_creation(self, dispatch):
        """get_runner() returns an AnnouncementRunner wired to the dispatcher."""
        runner = announcementrunner.get_runner()
        self.assertTrue(runner is not None, 'Creation failed. No instance.')
        self.assertEqual(type(runner), announcementrunner.AnnouncementRunner,
                         'Creation failed. No instance.')
        self.assertEqual(runner._dispatcher.dispatch, dispatch)

    def test_announcement(self, dispatch):
        """A successful run dispatches the payload on the configured route."""
        runner = announcementrunner.get_runner()
        runner.runner_parameters = {
            'experimental': True,
            'route': 'general'
        }
        runner.liveaction = mock.Mock(context={})
        runner.pre_run()
        (status, result, _) = runner.run({'test': 'passed'})
        self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
        self.assertTrue(result is not None)
        self.assertEqual(result['test'], 'passed')
        # No trace_context in the liveaction context -> None is dispatched.
        dispatch.assert_called_once_with('general', payload={'test': 'passed'},
                                         trace_context=None)

    def test_announcement_no_experimental(self, dispatch):
        """pre_run() rejects actions missing the 'experimental' flag."""
        runner = announcementrunner.get_runner()
        runner.action = mock.Mock(ref='some.thing')
        runner.runner_parameters = {
            'route': 'general'
        }
        runner.liveaction = mock.Mock(context={})
        expected_msg = 'Experimental flag is missing for action some.thing'
        self.assertRaisesRegexp(Exception, expected_msg, runner.pre_run)

    # Method-level patch is injected before the class-level one, so the
    # argument order here is (self, context, dispatch).
    @mock.patch('st2common.models.api.trace.TraceContext.__new__')
    def test_announcement_with_trace(self, context, dispatch):
        """A trace_context in the liveaction context is forwarded to dispatch."""
        runner = announcementrunner.get_runner()
        runner.runner_parameters = {
            'experimental': True,
            'route': 'general'
        }
        runner.liveaction = mock.Mock(context={
            'trace_context': {
                'id_': 'a',
                'trace_tag': 'b'
            }
        })
        runner.pre_run()
        (status, result, _) = runner.run({'test': 'passed'})
        self.assertEqual(status, LIVEACTION_STATUS_SUCCEEDED)
        self.assertTrue(result is not None)
        self.assertEqual(result['test'], 'passed')
        context.assert_called_once_with(TraceContext,
                                        **runner.liveaction.context['trace_context'])
        dispatch.assert_called_once_with('general', payload={'test': 'passed'},
                                         trace_context=context.return_value)
|
"""Utils for use from the console.
Includes functions that are used by interactive console utilities such as
approval or token handling.
"""
import getpass
import os
import time
import logging
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import data_store
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import search
from grr.lib import type_info
from grr.lib import utils
from grr.lib.flows.general import memory
def FormatISOTime(t):
  """Render a microsecond epoch timestamp as an ISO-style UTC string."""
  seconds_since_epoch = t / 1e6
  return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(seconds_since_epoch))
def SearchClients(query_str, token=None, limit=1000):
  """Search indexes for clients. Returns list (client, hostname, os version)."""
  schema = aff4.AFF4Object.classes["VFSGRRClient"].SchemaCls
  urns = search.SearchClients(query_str, max_results=limit, token=token)
  # Open all matches at once and project the attributes of interest.
  return [(client,
           str(client.Get(schema.HOSTNAME)),
           str(client.Get(schema.OS_VERSION)),
           str(client.Get(schema.PING)))
          for client in aff4.FACTORY.MultiOpen(urns, token=token)]
def DownloadDir(aff4_path, output_dir, bufsize=8192, preserve_path=True):
  """Take an aff4 path and download all files in it to output_dir.

  Args:
    aff4_path: Any aff4 path as a string
    output_dir: A local directory to write to, will be created if not there.
    bufsize: Buffer size to use.
    preserve_path: If set all paths will be created.

  Note that this works for collections as well. It will download all
  files in the collection.

  This only downloads files that are already in the datastore, it doesn't
  queue anything on the client.
  """
  if not os.path.isdir(output_dir):
    os.makedirs(output_dir)
  fd = aff4.FACTORY.Open(aff4_path)
  for child in fd.OpenChildren():
    if preserve_path:
      # Get a full path without the aff4:
      full_dir = utils.JoinPath(output_dir, child.urn.Path())
      full_dir = os.path.dirname(full_dir)
      if not os.path.isdir(full_dir):
        os.makedirs(full_dir)
      outfile = os.path.join(full_dir, child.urn.Basename())
    else:
      outfile = os.path.join(output_dir, child.urn.Basename())
    logging.info(u"Downloading %s to %s", child.urn, outfile)
    with open(outfile, "wb") as out_fd:
      try:
        # Stream in bufsize chunks so large files are not read into
        # memory at once.
        buf = child.Read(bufsize)
        while buf:
          out_fd.write(buf)
          buf = child.Read(bufsize)
      except IOError as e:
        # Best-effort: log and continue with the remaining children.
        logging.error("Failed to read %s. Err: %s", child.urn, e)
def ListDrivers():
  """Return all objects stored under the memory driver base URN."""
  driver_token = access_control.ACLToken(username="test")
  driver_urn = aff4.ROOT_URN.Add(memory.DRIVER_BASE)
  driver_fd = aff4.FACTORY.Open(driver_urn, mode="r", token=driver_token)
  return list(driver_fd.Query())
def OpenClient(client_id=None):
  """Opens the client, getting potential approval tokens.

  Args:
    client_id: The client id the approval should be revoked for.

  Returns:
    tuple containing (client, token) objects or (None, None) on if
    no appropriate aproval tokens were found.
  """
  token = access_control.ACLToken(username="test")
  try:
    # Prefer an existing approval token for this client, if any.
    token = ApprovalFind(client_id, token=token)
  except access_control.UnauthorizedAccess as e:
    logging.warn("No authorization found for access to client: %s", e)

  try:
    # Try and open with the token we managed to retrieve or the default.
    client = aff4.FACTORY.Open(rdfvalue.RDFURN(client_id), mode="r",
                               token=token)
    return client, token
  except access_control.UnauthorizedAccess:
    logging.warning("Unable to find a valid reason for client %s. You may need "
                    "to request approval.", client_id)
    return None, None
def GetNotifications(user=None, token=None):
  """Return the pending notifications for a user (current user by default)."""
  username = user or getpass.getuser()
  user_urn = aff4.ROOT_URN.Add("users").Add(username)
  user_obj = aff4.FACTORY.Open(user_urn, token=token)
  return list(user_obj.Get(user_obj.Schema.PENDING_NOTIFICATIONS))
def ApprovalRequest(client_id, reason, approvers, token=None):
  """Request approval to access a host."""
  # Kick off the approval-request flow and return its id.
  return flow.GRRFlow.StartFlow(client_id=client_id,
                                reason=reason,
                                approver=approvers,
                                flow_name="RequestClientApprovalFlow",
                                token=token)
def ApprovalGrant(token=None):
"""Iterate through requested access approving or not."""
user = getpass.getuser()
notifications = GetNotifications(user=user, token=token)
requests = [n for n in notifications if n.type == "GrantAccess"]
for request in requests:
_, client_id, user, reason = rdfvalue.RDFURN(request.subject).Split()
reason = utils.DecodeReasonString(reason)
print request
print "Reason: %s" % reason
if raw_input("Do you approve this request? [y/N] ").lower() == "y":
flow_id = flow.GRRFlow.StartFlow(client_id=client_id,
flow_name="GrantClientApprovalFlow",
reason=reason, delegate=user,
token=token)
# TODO(user): Remove the notification.
else:
print "skipping request"
print "Approval sent: %s" % flow_id
def ApprovalFind(object_id, token=None):
  """Find approvals issued for a specific client.

  Args:
    object_id: Client URN (or string convertible to one) to look up.
    token: Access token used for the lookup.

  Returns:
    The approved token, or None (implicitly) when access is unauthorized.
  """
  user = getpass.getuser()
  object_id = rdfvalue.RDFURN(object_id)
  try:
    approved_token = aff4.Approval.GetApprovalForObject(
        object_id, token=token, username=user)
    print "Found token %s" % str(approved_token)
    return approved_token
  except access_control.UnauthorizedAccess:
    print "No token available for access to %s" % object_id
def ApprovalCreateRaw(aff4_path, reason="", expire_in=60*60*24*7,
                      token=None, approval_type="ClientApproval"):
  """Creates an approval with raw access.

  This method requires raw datastore access to manipulate approvals directly.

  This currently doesn't work for hunt or cron approvals, because they check
  that each approver has the admin label. Since the fake users don't exist the
  check fails.

  Args:
    aff4_path: The aff4_path or client id the approval should be created for.
    reason: The reason to put in the token.
    expire_in: Expiry in seconds to use in the token.
    token: The token that will be used. If this is specified reason and expiry
        are ignored.
    approval_type: The type of the approval to create.

  Returns:
    The token.

  Raises:
    RuntimeError: On bad token.
  """
  if approval_type == "ClientApproval":
    urn = rdfvalue.ClientURN(aff4_path)
  else:
    urn = rdfvalue.RDFURN(aff4_path)

  if not token:
    # Build a token from the given reason/expiry when none was supplied.
    expiry = time.time() + expire_in
    token = rdfvalue.ACLToken(reason=reason, expiry=expiry)

  if not token.reason:
    raise RuntimeError("Cannot create approval with empty reason")
  if not token.username:
    token.username = getpass.getuser()
  approval_urn = flow.GRRFlow.RequestApprovalWithReasonFlow.ApprovalUrnBuilder(
      urn.Path(), token.username, token.reason)
  # A supervisor token bypasses the ACL checks so the approval object can
  # be written directly.
  super_token = access_control.ACLToken(username="raw-approval-superuser")
  super_token.supervisor = True
  approval_request = aff4.FACTORY.Create(approval_urn, approval_type,
                                         mode="rw", token=super_token)

  # Add approvals indicating they were approved by fake "raw" mode users.
  approval_request.AddAttribute(
      approval_request.Schema.APPROVER("%s1-raw" % token.username))
  approval_request.AddAttribute(
      approval_request.Schema.APPROVER("%s-raw2" % token.username))
  approval_request.Close(sync=True)
def ApprovalRevokeRaw(aff4_path, token, remove_from_cache=False):
  """Revokes an approval for a given token.

  This method requires raw datastore access to manipulate approvals directly.

  Args:
    aff4_path: The aff4_path or client id the approval should be created for.
    token: The token that should be revoked.
    remove_from_cache: If True, also remove the approval from the
        security_manager cache.
  """
  try:
    urn = rdfvalue.ClientURN(aff4_path)
  except type_info.TypeValueError:
    # Not a client id; treat the path as a generic URN.
    urn = rdfvalue.RDFURN(aff4_path)

  approval_urn = aff4.ROOT_URN.Add("ACL").Add(urn.Path()).Add(
      token.username).Add(utils.EncodeReasonString(token.reason))

  # A supervisor token bypasses ACL checks to edit the approval directly.
  super_token = access_control.ACLToken(username="test")
  super_token.supervisor = True

  approval_request = aff4.FACTORY.Open(approval_urn, mode="rw",
                                       token=super_token)
  # Dropping the APPROVER attribute invalidates the approval.
  approval_request.DeleteAttribute(approval_request.Schema.APPROVER)
  approval_request.Close()

  if remove_from_cache:
    data_store.DB.security_manager.acl_cache.ExpireObject(
        utils.SmartUnicode(approval_urn))
def MigrateObjectsLabels(root_urn, obj_type, label_suffix=None, token=None):
  """Migrates labels of object under given root (non-recursive).

  Args:
    root_urn: URN of the volume whose direct children are migrated.
    obj_type: Only children that are instances of this class are migrated.
    label_suffix: If set, the labels are read from child_urn/label_suffix
        and written back to the parent object.
    token: Access token.
  """
  root = aff4.FACTORY.Create(root_urn, "AFF4Volume", mode="r", token=token)
  children_urns = list(root.ListChildren())
  if label_suffix:
    children_urns = [urn.Add(label_suffix) for urn in children_urns]
  print "Found %d children." % len(children_urns)

  updated_objects = 0
  ignored_objects = 0
  for child in aff4.FACTORY.MultiOpen(
      children_urns, mode="rw", token=token, age=aff4.NEWEST_TIME):
    if isinstance(child, obj_type):
      print "Current state: %d updated, %d ignored." % (updated_objects,
                                                        ignored_objects)

      old_labels = child.Get(child.Schema.DEPRECATED_LABEL, [])
      if not old_labels:
        ignored_objects += 1
        continue

      if label_suffix:
        # Labels live on a sub-object; write them to the parent instead.
        child = aff4.FACTORY.Open(child.urn.Dirname(), mode="rw", token=token)

      labels = [utils.SmartStr(label) for label in old_labels]
      child.AddLabels(*labels, owner="GRR")
      child.Close(sync=False)
      updated_objects += 1

  # All Close(sync=False) writes are flushed in one batch here.
  aff4.FACTORY.Flush()
def MigrateClientsAndUsersLabels(token=None):
  """Migrates clients and users labels."""
  print "Migrating clients."
  # Clients keep labels on the object itself.
  MigrateObjectsLabels(aff4.ROOT_URN, aff4.VFSGRRClient, token=token)

  print "\nMigrating users."
  # Users keep labels on a "labels" sub-object, hence the suffix.
  MigrateObjectsLabels(aff4.ROOT_URN.Add("users"), aff4.GRRUser,
                       label_suffix="labels", token=token)
def MigrateHuntFinishedAndErrors(hunt_or_urn, token=None):
  """Migrates given hunt to collection-stored clients/errors lists.

  Args:
    hunt_or_urn: Either an already opened hunt object (must have been
        opened with age=aff4.ALL_TIMES) or a hunt URN to open.
    token: Access token (used only when hunt_or_urn is a URN).

  Raises:
    RuntimeError: the supplied hunt object was not opened with ALL_TIMES.
  """
  # Duck-typing: an opened hunt object has a Schema attribute, a URN doesn't.
  if hasattr(hunt_or_urn, "Schema"):
    hunt = hunt_or_urn
    if hunt.age_policy != aff4.ALL_TIMES:
      raise RuntimeError("Hunt object should have ALL_TIMES age policy.")
  else:
    hunt = aff4.FACTORY.Open(hunt_or_urn, aff4_type="GRRHunt", token=token,
                             age=aff4.ALL_TIMES)

  print "Migrating hunt %s." % hunt.urn

  print "Processing all clients list."
  # Recreate the collection from scratch to avoid duplicate entries.
  aff4.FACTORY.Delete(hunt.all_clients_collection_urn, token=token)
  with aff4.FACTORY.Create(hunt.all_clients_collection_urn,
                           aff4_type="PackedVersionedCollection",
                           mode="w", token=token) as all_clients_collection:
    clients = set(hunt.GetValuesForAttribute(hunt.Schema.DEPRECATED_CLIENTS))
    # Oldest first: reversed sort on the attribute age.
    for client in reversed(sorted(clients, key=lambda x: x.age)):
      all_clients_collection.Add(client)

  print "Processing completed clients list."
  aff4.FACTORY.Delete(hunt.completed_clients_collection_urn, token=token)
  with aff4.FACTORY.Create(hunt.completed_clients_collection_urn,
                           aff4_type="PackedVersionedCollection",
                           mode="w", token=token) as comp_clients_collection:
    clients = set(hunt.GetValuesForAttribute(hunt.Schema.DEPRECATED_FINISHED))
    for client in reversed(sorted(clients, key=lambda x: x.age)):
      comp_clients_collection.Add(client)

  print "Processing errors list."
  aff4.FACTORY.Delete(hunt.clients_errors_collection_urn, token=token)
  with aff4.FACTORY.Create(hunt.clients_errors_collection_urn,
                           aff4_type="PackedVersionedCollection",
                           mode="w", token=token) as errors_collection:
    for error in hunt.GetValuesForAttribute(hunt.Schema.DEPRECATED_ERRORS):
      errors_collection.Add(error)
def MigrateAllHuntsFinishedAndError(token=None):
"""Migrates all hunts to collection-stored clients/errors lists."""
hunts_list = list(aff4.FACTORY.Open("aff4:/hunts",
token=token).ListChildren())
all_hunts = aff4.FACTORY.MultiOpen(hunts_list, aff4_type="GRRHunt", mode="r",
age=aff4.ALL_TIMES, token=token)
index = 0
for hunt in all_hunts:
MigrateHuntFinishedAndErrors(hunt, token=token)
index += 1
print ""
print "Done %d out of %d hunts." % (index, len(hunts_list))
def MigrateLabelsSeparator(token=None):
  """Migrates labels from '.' to '|' separator.

  Rebuilds the urns_index and used_labels_index under each label root,
  rewriting only the first '.' of each label into '|'.

  Args:
    token: Access token.
  """
  processed_set_labels = 0
  processed_mapped_labels = 0

  for urn in ["aff4:/index/labels/generic",
              "aff4:/index/labels/clients",
              "aff4:/index/labels/users"]:
    urn = rdfvalue.RDFURN(urn)
    print "Processing %s" % urn

    # Snapshot the existing label -> urns mapping before deleting the index.
    urns_index = aff4.FACTORY.Create(urn.Add("urns_index"),
                                     "AFF4Index", mode="r", token=token)
    query_results = list(urns_index.MultiQuery(
        [aff4.AFF4Object.SchemaCls.LABELS], [".+"]).iteritems())

    aff4.FACTORY.Delete(urn.Add("urns_index"), token=token)
    with aff4.FACTORY.Create(urn.Add("urns_index"),
                             "AFF4Index", mode="w",
                             token=token) as new_urns_index:
      for label, target_urns in query_results:
        # Only the first '.' is the old owner/name separator.
        label = label.replace(".", "|", 1)
        for target_urn in target_urns:
          print "Remapping: %s -> %s" % (utils.SmartStr(label),
                                         utils.SmartStr(target_urn))
          new_urns_index.Add(target_urn,
                             aff4.AFF4Object.SchemaCls.LABELS, label)
          processed_mapped_labels += 1

    # Same snapshot/delete/recreate dance for the set of used labels.
    used_labels_index = aff4.FACTORY.Create(urn.Add("used_labels_index"),
                                            "AFF4IndexSet", mode="r",
                                            token=token)
    used_labels = list(used_labels_index.ListValues(limit=None))

    aff4.FACTORY.Delete(urn.Add("used_labels_index"), token=token)
    with aff4.FACTORY.Create(urn.Add("used_labels_index"),
                             "AFF4IndexSet", mode="w",
                             token=token) as new_used_labels:
      for value in used_labels:
        value = value.replace(".", "|", 1)
        print "Resetting: %s" % (utils.SmartStr(value))
        new_used_labels.Add(value)
        processed_set_labels += 1

  print "Processed set labels: %d, mapped labels: %d" % (
      processed_set_labels,
      processed_mapped_labels)
|
import logging
import json
import re
from webob import Response
from ryu.app import conf_switch_key as cs_key
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.base import app_manager
from ryu.controller import conf_switch
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import set_ev_cls
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.exception import OFPUnknownVersion
from ryu.lib import dpid as dpid_lib
from ryu.lib import mac
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.lib.ovs import bridge
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ether
from ryu.ofproto import inet
# URL path patterns for the REST routes below.
SWITCHID_PATTERN = dpid_lib.DPID_PATTERN + r'|all'
VLANID_PATTERN = r'[0-9]{1,4}|all'

QOS_TABLE_ID = 0

REST_ALL = 'all'
REST_SWITCHID = 'switch_id'
REST_COMMAND_RESULT = 'command_result'
REST_PRIORITY = 'priority'
REST_VLANID = 'vlan_id'
REST_DL_VLAN = 'dl_vlan'
REST_PORT_NAME = 'port_name'
REST_QUEUE_TYPE = 'type'
REST_QUEUE_MAX_RATE = 'max_rate'
REST_QUEUE_MIN_RATE = 'min_rate'
REST_QUEUES = 'queues'
REST_QOS = 'qos'
REST_QOS_ID = 'qos_id'
REST_COOKIE = 'cookie'

# Match-field keys accepted in QoS rule requests.
REST_MATCH = 'match'
REST_IN_PORT = 'in_port'
REST_SRC_MAC = 'dl_src'
REST_DST_MAC = 'dl_dst'
REST_DL_TYPE = 'dl_type'
REST_DL_TYPE_ARP = 'ARP'
REST_DL_TYPE_IPV4 = 'IPv4'
REST_DL_TYPE_IPV6 = 'IPv6'
# NOTE: REST_DL_VLAN was redundantly re-assigned here with the same value;
# the duplicate definition has been removed (it is defined once above).
REST_SRC_IP = 'nw_src'
REST_DST_IP = 'nw_dst'
REST_SRC_IPV6 = 'ipv6_src'
REST_DST_IPV6 = 'ipv6_dst'
REST_NW_PROTO = 'nw_proto'
REST_NW_PROTO_TCP = 'TCP'
REST_NW_PROTO_UDP = 'UDP'
REST_NW_PROTO_ICMP = 'ICMP'
REST_NW_PROTO_ICMPV6 = 'ICMPv6'
REST_TP_SRC = 'tp_src'
REST_TP_DST = 'tp_dst'
REST_DSCP = 'ip_dscp'

# Action keys accepted in QoS rule requests.
REST_ACTION = 'actions'
REST_ACTION_QUEUE = 'queue'
REST_ACTION_MARK = 'mark'
REST_ACTION_METER = 'meter'

# Meter configuration keys.
REST_METER_ID = 'meter_id'
REST_METER_BURST_SIZE = 'burst_size'
REST_METER_RATE = 'rate'
REST_METER_PREC_LEVEL = 'prec_level'
REST_METER_BANDS = 'bands'
REST_METER_ACTION_DROP = 'drop'
REST_METER_ACTION_REMARK = 'remark'

DEFAULT_FLOW_PRIORITY = 0
QOS_PRIORITY_MAX = ofproto_v1_3_parser.UINT16_MAX - 1
QOS_PRIORITY_MIN = 1

VLANID_NONE = 0
VLANID_MIN = 2
VLANID_MAX = 4094
COOKIE_SHIFT_VLANID = 32

BASE_URL = '/qos'
REQUIREMENTS = {'switchid': SWITCHID_PATTERN,
                'vlanid': VLANID_PATTERN}

LOG = logging.getLogger(__name__)
class RestQoSAPI(app_manager.RyuApp):
    """Ryu app exposing the QoS REST API and wiring switch events to it."""

    OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
                    ofproto_v1_2.OFP_VERSION,
                    ofproto_v1_3.OFP_VERSION]

    _CONTEXTS = {
        'dpset': dpset.DPSet,
        'conf_switch': conf_switch.ConfSwitchSet,
        'wsgi': WSGIApplication}

    def __init__(self, *args, **kwargs):
        super(RestQoSAPI, self).__init__(*args, **kwargs)

        # logger configure
        QoSController.set_logger(self.logger)
        self.cs = kwargs['conf_switch']
        self.dpset = kwargs['dpset']
        wsgi = kwargs['wsgi']
        self.waiters = {}
        self.data = {}
        self.data['dpset'] = self.dpset
        self.data['waiters'] = self.waiters
        wsgi.registory['QoSController'] = self.data
        wsgi.register(QoSController, self.data)

    def stats_reply_handler(self, ev):
        """Collect a stats reply and wake the waiting REST request.

        Replies are appended to the (lock, msgs) entry registered in
        self.waiters; the lock is released only once the last fragment
        (no REPLY_MORE flag) has arrived.
        """
        msg = ev.msg
        dp = msg.datapath

        # Ignore replies nobody is waiting for.
        if dp.id not in self.waiters:
            return
        if msg.xid not in self.waiters[dp.id]:
            return
        lock, msgs = self.waiters[dp.id][msg.xid]
        msgs.append(msg)

        # The "more replies follow" flag is version-specific.
        flags = 0
        if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION or \
                dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
            flags = dp.ofproto.OFPSF_REPLY_MORE
        elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
            flags = dp.ofproto.OFPMPF_REPLY_MORE

        if msg.flags & flags:
            return
        del self.waiters[dp.id][msg.xid]
        lock.set()

    @set_ev_cls(conf_switch.EventConfSwitchSet)
    def conf_switch_set_handler(self, ev):
        # Only the OVSDB address key is of interest here.
        if ev.key == cs_key.OVSDB_ADDR:
            QoSController.set_ovsdb_addr(ev.dpid, ev.value)
        else:
            QoSController._LOGGER.debug("unknown event: %s", ev)

    @set_ev_cls(conf_switch.EventConfSwitchDel)
    def conf_switch_del_handler(self, ev):
        if ev.key == cs_key.OVSDB_ADDR:
            QoSController.delete_ovsdb_addr(ev.dpid)
        else:
            QoSController._LOGGER.debug("unknown event: %s", ev)

    @set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
    def handler_datapath(self, ev):
        # Register/unregister switches as they join or leave.
        if ev.enter:
            QoSController.regist_ofs(ev.dp, self.CONF)
        else:
            QoSController.unregist_ofs(ev.dp)

    # for OpenFlow version1.0
    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def stats_reply_handler_v1_0(self, ev):
        self.stats_reply_handler(ev)

    # for OpenFlow version1.2 or later
    @set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
    def stats_reply_handler_v1_2(self, ev):
        self.stats_reply_handler(ev)

    # for OpenFlow version1.2 or later
    @set_ev_cls(ofp_event.EventOFPQueueStatsReply, MAIN_DISPATCHER)
    def queue_stats_reply_handler_v1_2(self, ev):
        self.stats_reply_handler(ev)

    # for OpenFlow version1.2 or later
    @set_ev_cls(ofp_event.EventOFPMeterStatsReply, MAIN_DISPATCHER)
    def meter_stats_reply_handler_v1_2(self, ev):
        self.stats_reply_handler(ev)
class QoSOfsList(dict):
    """Mapping of datapath id -> registered QoS switch object."""

    def __init__(self):
        super(QoSOfsList, self).__init__()

    def get_ofs(self, dp_id):
        """Return {dpid: QoS} for one switch, or self for REST_ALL.

        Args:
            dp_id: A dpid string, or REST_ALL for every registered switch.

        Raises:
            ValueError: no switch is connected, or dp_id is invalid or
                refers to an unregistered switch.
        """
        if len(self) == 0:
            raise ValueError('qos sw is not connected.')

        dps = {}
        if dp_id == REST_ALL:
            dps = self
        else:
            try:
                dpid = dpid_lib.str_to_dpid(dp_id)
            except Exception:
                # BUG FIX: this was a bare "except:", which would also
                # swallow SystemExit/KeyboardInterrupt.
                raise ValueError('Invalid switchID.')
            if dpid in self:
                dps = {dpid: self[dpid]}
            else:
                msg = 'qos sw is not connected. : switchID=%s' % dp_id
                raise ValueError(msg)

        return dps
class QoSController(ControllerBase):
    """REST controller exposing queue, QoS-rule and meter operations."""

    _OFS_LIST = QoSOfsList()
    _LOGGER = None

    def __init__(self, req, link, data, **config):
        super(QoSController, self).__init__(req, link, data, **config)
        self.dpset = data['dpset']
        self.waiters = data['waiters']

    @classmethod
    def set_logger(cls, logger):
        # Dedicated handler/format so QoS messages are recognizable.
        cls._LOGGER = logger
        cls._LOGGER.propagate = False
        hdlr = logging.StreamHandler()
        fmt_str = '[QoS][%(levelname)s] %(message)s'
        hdlr.setFormatter(logging.Formatter(fmt_str))
        cls._LOGGER.addHandler(hdlr)

    @staticmethod
    def regist_ofs(dp, CONF):
        """Register a newly joined datapath as a QoS switch."""
        if dp.id in QoSController._OFS_LIST:
            return

        dpid_str = dpid_lib.dpid_to_str(dp.id)
        try:
            f_ofs = QoS(dp, CONF)
            f_ofs.set_default_flow()
        except OFPUnknownVersion as message:
            QoSController._LOGGER.info('dpid=%s: %s',
                                       dpid_str, message)
            return

        QoSController._OFS_LIST.setdefault(dp.id, f_ofs)
        QoSController._LOGGER.info('dpid=%s: Join qos switch.',
                                   dpid_str)

    @staticmethod
    def unregist_ofs(dp):
        """Forget a datapath that left the network."""
        if dp.id in QoSController._OFS_LIST:
            del QoSController._OFS_LIST[dp.id]
            QoSController._LOGGER.info('dpid=%s: Leave qos switch.',
                                       dpid_lib.dpid_to_str(dp.id))

    @staticmethod
    def set_ovsdb_addr(dpid, value):
        ofs = QoSController._OFS_LIST.get(dpid, None)
        if ofs is not None:
            ofs.set_ovsdb_addr(dpid, value)

    @staticmethod
    def delete_ovsdb_addr(dpid):
        ofs = QoSController._OFS_LIST.get(dpid, None)
        # BUG FIX: previously ofs was dereferenced unconditionally, raising
        # AttributeError for unregistered dpids (set_ovsdb_addr above
        # already guards against None; this now matches it).
        if ofs is not None:
            ofs.set_ovsdb_addr(dpid, None)

    @route('qos_switch', BASE_URL + '/queue/{switchid}',
           methods=['GET'], requirements=REQUIREMENTS)
    def get_queue(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'get_queue', None)

    @route('qos_switch', BASE_URL + '/queue/{switchid}',
           methods=['POST'], requirements=REQUIREMENTS)
    def set_queue(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'set_queue', None)

    @route('qos_switch', BASE_URL + '/queue/{switchid}',
           methods=['DELETE'], requirements=REQUIREMENTS)
    def delete_queue(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'delete_queue', None)

    @route('qos_switch', BASE_URL + '/queue/status/{switchid}',
           methods=['GET'], requirements=REQUIREMENTS)
    def get_status(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'get_status', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}',
           methods=['GET'], requirements=REQUIREMENTS)
    def get_qos(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'get_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}/{vlanid}',
           methods=['GET'], requirements=REQUIREMENTS)
    def get_vlan_qos(self, req, switchid, vlanid, **_kwargs):
        return self._access_switch(req, switchid, vlanid,
                                   'get_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}',
           methods=['POST'], requirements=REQUIREMENTS)
    def set_qos(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'set_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}/{vlanid}',
           methods=['POST'], requirements=REQUIREMENTS)
    def set_vlan_qos(self, req, switchid, vlanid, **_kwargs):
        return self._access_switch(req, switchid, vlanid,
                                   'set_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}',
           methods=['DELETE'], requirements=REQUIREMENTS)
    def delete_qos(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'delete_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/rules/{switchid}/{vlanid}',
           methods=['DELETE'], requirements=REQUIREMENTS)
    def delete_vlan_qos(self, req, switchid, vlanid, **_kwargs):
        return self._access_switch(req, switchid, vlanid,
                                   'delete_qos', self.waiters)

    @route('qos_switch', BASE_URL + '/meter/{switchid}',
           methods=['GET'], requirements=REQUIREMENTS)
    def get_meter(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'get_meter', self.waiters)

    @route('qos_switch', BASE_URL + '/meter/{switchid}',
           methods=['POST'], requirements=REQUIREMENTS)
    def set_meter(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'set_meter', self.waiters)

    @route('qos_switch', BASE_URL + '/meter/{switchid}',
           methods=['DELETE'], requirements=REQUIREMENTS)
    def delete_meter(self, req, switchid, **_kwargs):
        return self._access_switch(req, switchid, VLANID_NONE,
                                   'delete_meter', self.waiters)

    def _access_switch(self, req, switchid, vlan_id, func, waiters):
        """Dispatch a REST request to the per-switch QoS handler(s)."""
        # SECURITY NOTE: eval() of the request body executes arbitrary
        # Python supplied by the client. json.loads would be safer, but is
        # not substituted here to avoid changing the accepted request
        # format; do not expose this API to untrusted networks.
        try:
            rest = eval(req.body) if req.body else {}
        except SyntaxError:
            QoSController._LOGGER.debug('invalid syntax %s', req.body)
            return Response(status=400)

        try:
            dps = self._OFS_LIST.get_ofs(switchid)
            vid = QoSController._conv_toint_vlanid(vlan_id)
        except ValueError as message:
            return Response(status=400, body=str(message))

        msgs = []
        for f_ofs in dps.values():
            function = getattr(f_ofs, func)
            try:
                if waiters is not None:
                    msg = function(rest, vid, waiters)
                else:
                    msg = function(rest, vid)
            except ValueError as message:
                return Response(status=400, body=str(message))
            msgs.append(msg)

        body = json.dumps(msgs)
        return Response(content_type='application/json', body=body)

    @staticmethod
    def _conv_toint_vlanid(vlan_id):
        """Convert a vlan id string to int, validating its range.

        Raises:
            ValueError: the id is outside [VLANID_MIN, VLANID_MAX] and is
                not VLANID_NONE.
        """
        if vlan_id != REST_ALL:
            vlan_id = int(vlan_id)
            if (vlan_id != VLANID_NONE and
                    (vlan_id < VLANID_MIN or VLANID_MAX < vlan_id)):
                msg = 'Invalid {vlan_id} value. Set [%d-%d]' % (VLANID_MIN,
                                                                VLANID_MAX)
                raise ValueError(msg)
        return vlan_id
class QoS(object):
_OFCTL = {ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
ofproto_v1_3.OFP_VERSION: ofctl_v1_3}
    def __init__(self, dp, CONF):
        """Bind a datapath and select the ofctl module for its OF version.

        Raises:
            OFPUnknownVersion: the datapath speaks an unsupported version.
        """
        super(QoS, self).__init__()
        # vlan_id -> per-VLAN rule sequence counter (used for cookies).
        self.vlan_list = {}
        self.vlan_list[VLANID_NONE] = 0  # for VLAN=None
        self.dp = dp
        self.version = dp.ofproto.OFP_VERSION
        # Cached queue configuration, reported by get_queue().
        self.queue_list = {}
        self.CONF = CONF
        self.ovsdb_addr = None
        self.ovs_bridge = None
        if self.version not in self._OFCTL:
            raise OFPUnknownVersion(version=self.version)
        self.ofctl = self._OFCTL[self.version]
def set_default_flow(self):
if self.version == ofproto_v1_0.OFP_VERSION:
return
cookie = 0
priority = DEFAULT_FLOW_PRIORITY
actions = [{'type': 'GOTO_TABLE',
'table_id': QOS_TABLE_ID + 1}]
flow = self._to_of_flow(cookie=cookie,
priority=priority,
match={},
actions=actions)
cmd = self.dp.ofproto.OFPFC_ADD
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
def set_ovsdb_addr(self, dpid, ovsdb_addr):
# easy check if the address format valid
_proto, _host, _port = ovsdb_addr.split(':')
old_address = self.ovsdb_addr
if old_address == ovsdb_addr:
return
if ovsdb_addr is None:
if self.ovs_bridge:
self.ovs_bridge.del_controller()
self.ovs_bridge = None
return
self.ovsdb_addr = ovsdb_addr
if self.ovs_bridge is None:
ovs_bridge = bridge.OVSBridge(self.CONF, dpid, ovsdb_addr)
self.ovs_bridge = ovs_bridge
try:
ovs_bridge.init()
except:
raise ValueError('ovsdb addr is not available.')
def _update_vlan_list(self, vlan_list):
for vlan_id in self.vlan_list.keys():
if vlan_id is not VLANID_NONE and vlan_id not in vlan_list:
del self.vlan_list[vlan_id]
def _get_cookie(self, vlan_id):
if vlan_id == REST_ALL:
vlan_ids = self.vlan_list.keys()
else:
vlan_ids = [vlan_id]
cookie_list = []
for vlan_id in vlan_ids:
self.vlan_list.setdefault(vlan_id, 0)
self.vlan_list[vlan_id] += 1
self.vlan_list[vlan_id] &= ofproto_v1_3_parser.UINT32_MAX
cookie = (vlan_id << COOKIE_SHIFT_VLANID) + \
self.vlan_list[vlan_id]
cookie_list.append([cookie, vlan_id])
return cookie_list
@staticmethod
def _cookie_to_qosid(cookie):
return cookie & ofproto_v1_3_parser.UINT32_MAX
# REST command template
def rest_command(func):
def _rest_command(*args, **kwargs):
key, value = func(*args, **kwargs)
switch_id = dpid_lib.dpid_to_str(args[0].dp.id)
return {REST_SWITCHID: switch_id,
key: value}
return _rest_command
@rest_command
def get_status(self, req, vlan_id, waiters):
if self.version == ofproto_v1_0.OFP_VERSION:
raise ValueError('get_status operation is not supported')
msgs = self.ofctl.get_queue_stats(self.dp, waiters)
return REST_COMMAND_RESULT, msgs
@rest_command
def get_queue(self, rest, vlan_id):
if len(self.queue_list):
msg = {'result': 'success',
'details': self.queue_list}
else:
msg = {'result': 'failure',
'details': 'Queue is not exists.'}
return REST_COMMAND_RESULT, msg
@rest_command
def set_queue(self, rest, vlan_id):
if self.ovs_bridge is None:
msg = {'result': 'failure',
'details': 'ovs_bridge is not exists'}
return REST_COMMAND_RESULT, msg
self.queue_list.clear()
queue_type = rest.get(REST_QUEUE_TYPE, 'linux-htb')
parent_max_rate = rest.get(REST_QUEUE_MAX_RATE, None)
queues = rest.get(REST_QUEUES, [])
queue_id = 0
queue_config = []
for queue in queues:
max_rate = queue.get(REST_QUEUE_MAX_RATE, None)
min_rate = queue.get(REST_QUEUE_MIN_RATE, None)
if max_rate is None and min_rate is None:
raise ValueError('Required to specify max_rate or min_rate')
config = {}
if max_rate is not None:
config['max-rate'] = max_rate
if min_rate is not None:
config['min-rate'] = min_rate
if len(config):
queue_config.append(config)
self.queue_list[queue_id] = {'config': config}
queue_id += 1
port_name = rest.get(REST_PORT_NAME, None)
vif_ports = self.ovs_bridge.get_port_name_list()
if port_name is not None:
if port_name not in vif_ports:
raise ValueError('%s port is not exists' % port_name)
vif_ports = [port_name]
for port_name in vif_ports:
try:
self.ovs_bridge.set_qos(port_name, type=queue_type,
max_rate=parent_max_rate,
queues=queue_config)
except Exception, msg:
raise ValueError(msg)
msg = {'result': 'success',
'details': self.queue_list}
return REST_COMMAND_RESULT, msg
def _delete_queue(self):
if self.ovs_bridge is None:
return False
vif_ports = self.ovs_bridge.get_external_ports()
for port in vif_ports:
self.ovs_bridge.del_qos(port.port_name)
return True
@rest_command
def delete_queue(self, rest, vlan_id):
self.queue_list.clear()
if self._delete_queue():
msg = 'success'
else:
msg = 'failure'
return REST_COMMAND_RESULT, msg
@rest_command
def set_qos(self, rest, vlan_id, waiters):
msgs = []
cookie_list = self._get_cookie(vlan_id)
for cookie, vid in cookie_list:
msg = self._set_qos(cookie, rest, waiters, vid)
msgs.append(msg)
return REST_COMMAND_RESULT, msgs
def _set_qos(self, cookie, rest, waiters, vlan_id):
match_value = rest[REST_MATCH]
if vlan_id:
match_value[REST_DL_VLAN] = vlan_id
priority = int(match_value.get(REST_PRIORITY, QOS_PRIORITY_MIN))
if (QOS_PRIORITY_MAX < priority):
raise ValueError('Invalid priority value. Set [%d-%d]'
% (QOS_PRIORITY_MIN, QOS_PRIORITY_MAX))
match = Match.to_openflow(match_value)
actions = []
action = rest.get(REST_ACTION, None)
if action is not None:
if REST_ACTION_MARK in action:
actions.append({'type': 'SET_FIELD',
'field': REST_DSCP,
'value': int(action[REST_ACTION_MARK])})
if REST_ACTION_METER in action:
actions.append({'type': 'METER',
'meter_id': action[REST_ACTION_METER]})
if REST_ACTION_QUEUE in action:
actions.append({'type': 'SET_QUEUE',
'queue_id': action[REST_ACTION_QUEUE]})
else:
actions.append({'type': 'SET_QUEUE',
'queue_id': 0})
actions.append({'type': 'GOTO_TABLE',
'table_id': QOS_TABLE_ID + 1})
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
cmd = self.dp.ofproto.OFPFC_ADD
try:
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
except:
raise ValueError('Invalid rule parameter.')
qos_id = QoS._cookie_to_qosid(cookie)
msg = {'result': 'success',
'details': 'QoS added. : qos_id=%d' % qos_id}
if vlan_id != VLANID_NONE:
msg.setdefault(REST_VLANID, vlan_id)
return msg
@rest_command
def get_qos(self, rest, vlan_id, waiters):
rules = {}
msgs = self.ofctl.get_flow_stats(self.dp, waiters)
if str(self.dp.id) in msgs:
flow_stats = msgs[str(self.dp.id)]
for flow_stat in flow_stats:
if flow_stat['table_id'] != QOS_TABLE_ID:
continue
priority = flow_stat[REST_PRIORITY]
if priority != DEFAULT_FLOW_PRIORITY:
vid = flow_stat[REST_MATCH].get(REST_DL_VLAN, VLANID_NONE)
if vlan_id == REST_ALL or vlan_id == vid:
rule = self._to_rest_rule(flow_stat)
rules.setdefault(vid, [])
rules[vid].append(rule)
get_data = []
for vid, rule in rules.items():
if vid == VLANID_NONE:
vid_data = {REST_QOS: rule}
else:
vid_data = {REST_VLANID: vid, REST_QOS: rule}
get_data.append(vid_data)
return REST_COMMAND_RESULT, get_data
@rest_command
def delete_qos(self, rest, vlan_id, waiters):
try:
if rest[REST_QOS_ID] == REST_ALL:
qos_id = REST_ALL
else:
qos_id = int(rest[REST_QOS_ID])
except:
raise ValueError('Invalid qos id.')
vlan_list = []
delete_list = []
msgs = self.ofctl.get_flow_stats(self.dp, waiters)
if str(self.dp.id) in msgs:
flow_stats = msgs[str(self.dp.id)]
for flow_stat in flow_stats:
cookie = flow_stat[REST_COOKIE]
ruleid = QoS._cookie_to_qosid(cookie)
priority = flow_stat[REST_PRIORITY]
dl_vlan = flow_stat[REST_MATCH].get(REST_DL_VLAN, VLANID_NONE)
if priority != DEFAULT_FLOW_PRIORITY:
if ((qos_id == REST_ALL or qos_id == ruleid) and
(vlan_id == dl_vlan or vlan_id == REST_ALL)):
match = Match.to_mod_openflow(flow_stat[REST_MATCH])
delete_list.append([cookie, priority, match])
else:
if dl_vlan not in vlan_list:
vlan_list.append(dl_vlan)
self._update_vlan_list(vlan_list)
if len(delete_list) == 0:
msg_details = 'QoS rule is not exist.'
if qos_id != REST_ALL:
msg_details += ' : QoS ID=%d' % qos_id
msg = {'result': 'failure',
'details': msg_details}
else:
cmd = self.dp.ofproto.OFPFC_DELETE_STRICT
actions = []
delete_ids = {}
for cookie, priority, match in delete_list:
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
vid = match.get(REST_DL_VLAN, VLANID_NONE)
rule_id = QoS._cookie_to_qosid(cookie)
delete_ids.setdefault(vid, '')
delete_ids[vid] += (('%d' if delete_ids[vid] == ''
else ',%d') % rule_id)
msg = []
for vid, rule_ids in delete_ids.items():
del_msg = {'result': 'success',
'details': ' deleted. : QoS ID=%s' % rule_ids}
if vid != VLANID_NONE:
del_msg.setdefault(REST_VLANID, vid)
msg.append(del_msg)
return REST_COMMAND_RESULT, msg
@rest_command
def set_meter(self, rest, vlan_id, waiters):
if self.version == ofproto_v1_0.OFP_VERSION:
raise ValueError('set_meter operation is not supported')
msgs = []
msg = self._set_meter(rest, waiters)
msgs.append(msg)
return REST_COMMAND_RESULT, msgs
def _set_meter(self, rest, waiters):
cmd = self.dp.ofproto.OFPMC_ADD
try:
self.ofctl.mod_meter_entry(self.dp, rest, cmd)
except:
raise ValueError('Invalid meter parameter.')
msg = {'result': 'success',
'details': 'Meter added. : Meter ID=%s' %
rest[REST_METER_ID]}
return msg
@rest_command
def get_meter(self, rest, vlan_id, waiters):
if (self.version == ofproto_v1_0.OFP_VERSION or
self.version == ofproto_v1_2.OFP_VERSION):
raise ValueError('get_meter operation is not supported')
msgs = self.ofctl.get_meter_stats(self.dp, waiters)
return REST_COMMAND_RESULT, msgs
@rest_command
def delete_meter(self, rest, vlan_id, waiters):
if (self.version == ofproto_v1_0.OFP_VERSION or
self.version == ofproto_v1_2.OFP_VERSION):
raise ValueError('delete_meter operation is not supported')
cmd = self.dp.ofproto.OFPMC_DELETE
try:
self.ofctl.mod_meter_entry(self.dp, rest, cmd)
except:
raise ValueError('Invalid meter parameter.')
msg = {'result': 'success',
'details': 'Meter deleted. : Meter ID=%s' %
rest[REST_METER_ID]}
return REST_COMMAND_RESULT, msg
def _to_of_flow(self, cookie, priority, match, actions):
flow = {'cookie': cookie,
'priority': priority,
'flags': 0,
'idle_timeout': 0,
'hard_timeout': 0,
'match': match,
'actions': actions}
return flow
def _to_rest_rule(self, flow):
ruleid = QoS._cookie_to_qosid(flow[REST_COOKIE])
rule = {REST_QOS_ID: ruleid}
rule.update({REST_PRIORITY: flow[REST_PRIORITY]})
rule.update(Match.to_rest(flow))
rule.update(Action.to_rest(flow))
return rule
class Match(object):
    """Translate between REST-level match dicts and ofctl match dicts."""

    # REST symbolic values -> OpenFlow numeric values for the fields
    # that need conversion.
    _CONVERT = {REST_DL_TYPE:
                {REST_DL_TYPE_ARP: ether.ETH_TYPE_ARP,
                 REST_DL_TYPE_IPV4: ether.ETH_TYPE_IP,
                 REST_DL_TYPE_IPV6: ether.ETH_TYPE_IPV6},
                REST_NW_PROTO:
                {REST_NW_PROTO_TCP: inet.IPPROTO_TCP,
                 REST_NW_PROTO_UDP: inet.IPPROTO_UDP,
                 REST_NW_PROTO_ICMP: inet.IPPROTO_ICMP,
                 REST_NW_PROTO_ICMPV6: inet.IPPROTO_ICMPV6}}

    @staticmethod
    def to_openflow(rest):
        """Validate a REST match dict and convert it for ofctl.

        Rejects inconsistent field combinations (e.g. ARP with IPv6
        addresses, IPv4 with ICMPv6) and infers a missing dl_type from
        the address family / nw_proto.  NOTE: mutates ``rest`` in place
        when it fills in REST_DL_TYPE.

        Raises:
            ValueError: on an invalid combination or unknown value.
        """
        # Helpers that raise ValueError naming the conflicting fields;
        # the numeric suffixes encode how many of the two operands carry
        # an explicit value in the message.
        def __inv_combi(msg):
            raise ValueError('Invalid combination: [%s]' % msg)

        def __inv_2and1(*args):
            __inv_combi('%s=%s and %s' % (args[0], args[1], args[2]))

        def __inv_2and2(*args):
            __inv_combi('%s=%s and %s=%s' % (
                args[0], args[1], args[2], args[3]))

        def __inv_1and1(*args):
            __inv_combi('%s and %s' % (args[0], args[1]))

        def __inv_1and2(*args):
            __inv_combi('%s and %s=%s' % (args[0], args[1], args[2]))

        match = {}

        # error check
        dl_type = rest.get(REST_DL_TYPE)
        nw_proto = rest.get(REST_NW_PROTO)
        if dl_type is not None:
            if dl_type == REST_DL_TYPE_ARP:
                # ARP carries no IP payload: no v6 addresses, DSCP or
                # transport protocol allowed.
                if REST_SRC_IPV6 in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_ARP, REST_SRC_IPV6)
                if REST_DST_IPV6 in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_ARP, REST_DST_IPV6)
                if REST_DSCP in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_ARP, REST_DSCP)
                if nw_proto:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_ARP, REST_NW_PROTO)
            elif dl_type == REST_DL_TYPE_IPV4:
                if REST_SRC_IPV6 in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_IPV4, REST_SRC_IPV6)
                if REST_DST_IPV6 in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_IPV4, REST_DST_IPV6)
                if nw_proto == REST_NW_PROTO_ICMPV6:
                    __inv_2and2(
                        REST_DL_TYPE, REST_DL_TYPE_IPV4,
                        REST_NW_PROTO, REST_NW_PROTO_ICMPV6)
            elif dl_type == REST_DL_TYPE_IPV6:
                if REST_SRC_IP in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_IPV6, REST_SRC_IP)
                if REST_DST_IP in rest:
                    __inv_2and1(
                        REST_DL_TYPE, REST_DL_TYPE_IPV6, REST_DST_IP)
                if nw_proto == REST_NW_PROTO_ICMP:
                    __inv_2and2(
                        REST_DL_TYPE, REST_DL_TYPE_IPV6,
                        REST_NW_PROTO, REST_NW_PROTO_ICMP)
            else:
                raise ValueError('Unknown dl_type : %s' % dl_type)
        else:
            # No explicit dl_type: derive it from whichever address
            # family (or nw_proto) is present, rejecting mixed families.
            if REST_SRC_IP in rest:
                if REST_SRC_IPV6 in rest:
                    __inv_1and1(REST_SRC_IP, REST_SRC_IPV6)
                if REST_DST_IPV6 in rest:
                    __inv_1and1(REST_SRC_IP, REST_DST_IPV6)
                if nw_proto == REST_NW_PROTO_ICMPV6:
                    __inv_1and2(
                        REST_SRC_IP, REST_NW_PROTO, REST_NW_PROTO_ICMPV6)
                rest[REST_DL_TYPE] = REST_DL_TYPE_IPV4
            elif REST_DST_IP in rest:
                if REST_SRC_IPV6 in rest:
                    __inv_1and1(REST_DST_IP, REST_SRC_IPV6)
                if REST_DST_IPV6 in rest:
                    __inv_1and1(REST_DST_IP, REST_DST_IPV6)
                if nw_proto == REST_NW_PROTO_ICMPV6:
                    __inv_1and2(
                        REST_DST_IP, REST_NW_PROTO, REST_NW_PROTO_ICMPV6)
                rest[REST_DL_TYPE] = REST_DL_TYPE_IPV4
            elif REST_SRC_IPV6 in rest:
                if nw_proto == REST_NW_PROTO_ICMP:
                    __inv_1and2(
                        REST_SRC_IPV6, REST_NW_PROTO, REST_NW_PROTO_ICMP)
                rest[REST_DL_TYPE] = REST_DL_TYPE_IPV6
            elif REST_DST_IPV6 in rest:
                if nw_proto == REST_NW_PROTO_ICMP:
                    __inv_1and2(
                        REST_DST_IPV6, REST_NW_PROTO, REST_NW_PROTO_ICMP)
                rest[REST_DL_TYPE] = REST_DL_TYPE_IPV6
            elif REST_DSCP in rest:
                # Apply dl_type ipv4, if doesn't specify dl_type
                rest[REST_DL_TYPE] = REST_DL_TYPE_IPV4
            else:
                if nw_proto == REST_NW_PROTO_ICMP:
                    rest[REST_DL_TYPE] = REST_DL_TYPE_IPV4
                elif nw_proto == REST_NW_PROTO_ICMPV6:
                    rest[REST_DL_TYPE] = REST_DL_TYPE_IPV6
                elif nw_proto == REST_NW_PROTO_TCP or \
                        nw_proto == REST_NW_PROTO_UDP:
                    raise ValueError('no dl_type was specified')
                else:
                    raise ValueError('Unknown nw_proto: %s' % nw_proto)

        # Copy fields over, mapping symbolic values through _CONVERT.
        for key, value in rest.items():
            if key in Match._CONVERT:
                if value in Match._CONVERT[key]:
                    match.setdefault(key, Match._CONVERT[key][value])
                else:
                    raise ValueError('Invalid rule parameter. : key=%s' % key)
            else:
                match.setdefault(key, value)

        return match

    @staticmethod
    def to_rest(openflow):
        """Convert a flow-stats match back to REST form, dropping
        wildcard ("don't care") fields and mapping numeric values back
        to their symbolic names."""
        of_match = openflow[REST_MATCH]

        # Wildcard sentinels emitted by ofctl for unset fields.
        mac_dontcare = mac.haddr_to_str(mac.DONTCARE)
        ip_dontcare = '0.0.0.0'
        ipv6_dontcare = '::'

        match = {}
        for key, value in of_match.items():
            if key == REST_SRC_MAC or key == REST_DST_MAC:
                if value == mac_dontcare:
                    continue
            elif key == REST_SRC_IP or key == REST_DST_IP:
                if value == ip_dontcare:
                    continue
            elif key == REST_SRC_IPV6 or key == REST_DST_IPV6:
                if value == ipv6_dontcare:
                    continue
            elif value == 0:
                continue

            if key in Match._CONVERT:
                conv = Match._CONVERT[key]
                # Reverse the symbolic->numeric mapping.
                conv = dict((value, key) for key, value in conv.items())
                match.setdefault(key, conv[value])
            else:
                match.setdefault(key, value)

        return match

    @staticmethod
    def to_mod_openflow(of_match):
        """Strip wildcard fields from a flow-stats match so it can be
        used in a strict flow-mod (values stay numeric)."""
        mac_dontcare = mac.haddr_to_str(mac.DONTCARE)
        ip_dontcare = '0.0.0.0'
        ipv6_dontcare = '::'

        match = {}
        for key, value in of_match.items():
            if key == REST_SRC_MAC or key == REST_DST_MAC:
                if value == mac_dontcare:
                    continue
            elif key == REST_SRC_IP or key == REST_DST_IP:
                if value == ip_dontcare:
                    continue
            elif key == REST_SRC_IPV6 or key == REST_DST_IPV6:
                if value == ipv6_dontcare:
                    continue
            elif value == 0:
                continue

            match.setdefault(key, value)

        return match
class Action(object):
    """Convert flow-stats action strings into REST-level action dicts."""

    @staticmethod
    def to_rest(openflow):
        """Extract mark/meter/queue actions from a flow-stats entry.

        ``openflow[REST_ACTION]`` is expected to be a list of ofctl
        action strings such as 'SET_FIELD: {ip_dscp:26}', 'METER:1' or
        'SET_QUEUE:2'.  Returns {REST_ACTION: [...]} with one dict per
        recognized action, or an 'Unknown action type.' marker when the
        key is absent.
        """
        # Guard clause: no actions recorded for this flow.
        if REST_ACTION not in openflow:
            return {REST_ACTION: 'Unknown action type.'}

        actions = []
        for action in openflow[REST_ACTION]:
            # Raw strings: '\d' in a non-raw literal is an invalid
            # escape sequence on modern Python.
            field_value = re.search(r'SET_FIELD: {ip_dscp:(\d+)', action)
            if field_value:
                actions.append({REST_ACTION_MARK: field_value.group(1)})
            meter_value = re.search(r'METER:(\d+)', action)
            if meter_value:
                actions.append({REST_ACTION_METER: meter_value.group(1)})
            queue_value = re.search(r'SET_QUEUE:(\d+)', action)
            if queue_value:
                actions.append({REST_ACTION_QUEUE: queue_value.group(1)})
        return {REST_ACTION: actions}
|
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from cloudbaseinit import conf as cloudbaseinit_conf
from cloudbaseinit.plugins.common.userdataplugins import cloudconfig
from cloudbaseinit.tests import testutils
CONF = cloudbaseinit_conf.CONF
class CloudConfigPluginTests(unittest.TestCase):
    """Unit tests for the cloud-config userdata plugin."""

    @classmethod
    def setUpClass(cls):
        cls.plugin = cloudconfig.CloudConfigPlugin()

    def test_priority(self):
        """Configured plugins keep their order; unknown ones sort last."""
        saved_plugins = CONF.cloud_config_plugins
        CONF.cloud_config_plugins = ['write_file', 'dummy', 'dummy1']
        try:
            executor = cloudconfig.CloudConfigPluginExecutor(
                dummy=1,
                dummy1=2,
                invalid=3,
                write_file=0)
            self.assertEqual(
                [('write_file', 0),
                 ('dummy', 1),
                 ('dummy1', 2),
                 ('invalid', 3)],
                executor._expected_plugins)
        finally:
            CONF.cloud_config_plugins = saved_plugins

    def test_executor_from_yaml(self):
        """from_yaml rejects non-mapping payloads and accepts a mapping."""
        for bad_input in (mock.sentinel.yaml, None, 1, int, '{}'):
            with self.assertRaises(cloudconfig.CloudConfigError):
                cloudconfig.CloudConfigPluginExecutor.from_yaml(bad_input)

        executor = cloudconfig.CloudConfigPluginExecutor.from_yaml('{f: 1}')
        self.assertIsInstance(executor, cloudconfig.CloudConfigPluginExecutor)

    def _test_invalid_type(self, part, err_msg):
        """Process *part* and verify the logged failure message."""
        logger_name = ('cloudbaseinit.plugins.common.'
                       'userdataplugins.cloudconfig')
        with testutils.LogSnatcher(logger_name) as snatcher:
            self.plugin.process_non_multipart(part)

        expected = ("Could not process part type %(type)r: %(err)r"
                    % {'type': type(part), 'err': err_msg})
        self.assertEqual([expected], snatcher.output)

    def test_invalid_type(self):
        self._test_invalid_type({'unsupported'},
                                "Invalid yaml stream provided.")

    def test_invalid_type_empty(self):
        self._test_invalid_type('#comment',
                                'Empty yaml stream provided.')
|
import mock
from oslo.config import cfg
from neutron.common import constants as n_const
import neutron.db.api as ndb
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers.arista import db
from neutron.plugins.ml2.drivers.arista import exceptions as arista_exc
from neutron.plugins.ml2.drivers.arista import mechanism_arista as arista
from neutron.tests import base
def setup_arista_wrapper_config(value=''):
    """Prime the Arista ML2 config options with *value* for testing."""
    cfg.CONF.keystone_authtoken = fake_keystone_info_class()
    for option in ('eapi_host', 'eapi_username'):
        cfg.CONF.set_override(option, value, "ml2_arista")
def setup_valid_config():
    """Install a configuration that passes validation.

    An option left empty would be treated as unset, so a non-empty
    placeholder is used for every required value.
    """
    setup_arista_wrapper_config('value')
class AristaProvisionedVlansStorageTestCase(base.BaseTestCase):
    """Test storing and retrieving functionality of Arista mechanism driver.

    Tests all methods of this class by invoking them separately as well
    as a group.
    """

    def setUp(self):
        super(AristaProvisionedVlansStorageTestCase, self).setUp()
        ndb.configure_db()
        self.addCleanup(ndb.clear_db)

    def test_tenant_is_remembered(self):
        """A remembered tenant shows up as provisioned."""
        tenant_id = 'test'

        db.remember_tenant(tenant_id)
        net_provisioned = db.is_tenant_provisioned(tenant_id)
        self.assertTrue(net_provisioned, 'Tenant must be provisioned')

    def test_tenant_is_removed(self):
        """A forgotten tenant no longer shows up as provisioned."""
        tenant_id = 'test'

        db.remember_tenant(tenant_id)
        db.forget_tenant(tenant_id)
        net_provisioned = db.is_tenant_provisioned(tenant_id)
        self.assertFalse(net_provisioned, 'The Tenant should be deleted')

    def test_network_is_remembered(self):
        """A remembered network shows up as provisioned."""
        tenant_id = 'test'
        network_id = '123'
        segmentation_id = 456

        db.remember_network(tenant_id, network_id, segmentation_id)
        net_provisioned = db.is_network_provisioned(tenant_id,
                                                    network_id)
        self.assertTrue(net_provisioned, 'Network must be provisioned')

    def test_network_is_removed(self):
        """A forgotten network no longer shows up as provisioned."""
        tenant_id = 'test'
        network_id = '123'

        db.remember_network(tenant_id, network_id, '123')
        db.forget_network(tenant_id, network_id)
        net_provisioned = db.is_network_provisioned(tenant_id, network_id)
        self.assertFalse(net_provisioned, 'The network should be deleted')

    def test_vm_is_remembered(self):
        """A remembered VM shows up as provisioned."""
        vm_id = 'VM-1'
        tenant_id = 'test'
        network_id = '123'
        port_id = 456
        host_id = 'ubuntu1'

        db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
        vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
                                              network_id, tenant_id)
        self.assertTrue(vm_provisioned, 'VM must be provisioned')

    def test_vm_is_removed(self):
        """A forgotten VM no longer shows up as provisioned."""
        vm_id = 'VM-1'
        tenant_id = 'test'
        network_id = '123'
        port_id = 456
        host_id = 'ubuntu1'

        db.remember_vm(vm_id, host_id, port_id, network_id, tenant_id)
        db.forget_vm(vm_id, host_id, port_id, network_id, tenant_id)
        vm_provisioned = db.is_vm_provisioned(vm_id, host_id, port_id,
                                              network_id, tenant_id)
        self.assertFalse(vm_provisioned, 'The vm should be deleted')

    def test_remembers_multiple_networks(self):
        """All remembered networks are counted for the tenant."""
        tenant_id = 'test'
        expected_num_nets = 100
        nets = ['id%s' % n for n in range(expected_num_nets)]
        for net_id in nets:
            db.remember_network(tenant_id, net_id, 123)

        num_nets_provisioned = db.num_nets_provisioned(tenant_id)
        self.assertEqual(expected_num_nets, num_nets_provisioned,
                         'There should be %d nets, not %d' %
                         (expected_num_nets, num_nets_provisioned))

    def test_removes_all_networks(self):
        """Forgetting every remembered network restores the old count."""
        tenant_id = 'test'
        num_nets = 100
        old_nets = db.num_nets_provisioned(tenant_id)
        nets = ['id_%s' % n for n in range(num_nets)]
        for net_id in nets:
            db.remember_network(tenant_id, net_id, 123)
        for net_id in nets:
            db.forget_network(tenant_id, net_id)

        num_nets_provisioned = db.num_nets_provisioned(tenant_id)
        expected = old_nets
        self.assertEqual(expected, num_nets_provisioned,
                         'There should be %d nets, not %d' %
                         (expected, num_nets_provisioned))

    def test_remembers_multiple_tenants(self):
        """All remembered tenants are counted."""
        expected_num_tenants = 100
        tenants = ['id%s' % n for n in range(expected_num_tenants)]
        for tenant_id in tenants:
            db.remember_tenant(tenant_id)

        num_tenants_provisioned = db.num_provisioned_tenants()
        self.assertEqual(expected_num_tenants, num_tenants_provisioned,
                         'There should be %d tenants, not %d' %
                         (expected_num_tenants, num_tenants_provisioned))

    def test_removes_multiple_tenants(self):
        """Forgetting every remembered tenant leaves a count of zero."""
        num_tenants = 100
        tenants = ['id%s' % n for n in range(num_tenants)]
        for tenant_id in tenants:
            db.remember_tenant(tenant_id)
        for tenant_id in tenants:
            db.forget_tenant(tenant_id)

        num_tenants_provisioned = db.num_provisioned_tenants()
        expected = 0
        self.assertEqual(expected, num_tenants_provisioned,
                         'There should be %d tenants, not %d' %
                         (expected, num_tenants_provisioned))

    def test_num_vm_is_valid(self):
        """The VM count reflects remembered minus forgotten VMs."""
        tenant_id = 'test'
        network_id = '123'
        port_id = 456
        host_id = 'ubuntu1'

        vm_to_remember = ['vm1', 'vm2', 'vm3']
        vm_to_forget = ['vm2', 'vm1']

        for vm in vm_to_remember:
            db.remember_vm(vm, host_id, port_id, network_id, tenant_id)
        for vm in vm_to_forget:
            db.forget_vm(vm, host_id, port_id, network_id, tenant_id)

        num_vms = len(db.get_vms(tenant_id))
        expected = len(vm_to_remember) - len(vm_to_forget)

        self.assertEqual(expected, num_vms,
                         'There should be %d records, '
                         'got %d records' % (expected, num_vms))
        # clean up afterwards
        db.forget_vm('vm3', host_id, port_id, network_id, tenant_id)

    def test_get_network_list_returns_eos_compatible_data(self):
        """get_networks() returns data in the EOS-expected shape."""
        tenant = u'test-1'
        segm_type = 'vlan'
        network_id = u'123'
        network2_id = u'1234'
        vlan_id = 123
        vlan2_id = 1234
        expected_eos_net_list = {network_id: {u'networkId': network_id,
                                              u'segmentationTypeId': vlan_id,
                                              u'segmentationType': segm_type},
                                 network2_id: {u'networkId': network2_id,
                                               u'segmentationTypeId': vlan2_id,
                                               u'segmentationType': segm_type}}

        db.remember_network(tenant, network_id, vlan_id)
        db.remember_network(tenant, network2_id, vlan2_id)

        net_list = db.get_networks(tenant)
        # Fixed: the original called assertNotEqual(net_list != expected, msg),
        # which compared a boolean against the message string and could never
        # fail.  Assert the intended equality instead.
        self.assertEqual(net_list, expected_eos_net_list, ('%s != %s' %
                         (net_list, expected_eos_net_list)))
class PositiveRPCWrapperValidConfigTestCase(base.BaseTestCase):
    """Test cases to test the RPC between Arista Driver and EOS.

    Tests all methods used to send commands between Arista Driver and EOS
    """

    def setUp(self):
        super(PositiveRPCWrapperValidConfigTestCase, self).setUp()
        setup_valid_config()
        self.drv = arista.AristaRPCWrapper()
        self.region = 'RegionOne'
        # Mock the EOS JSON-RPC endpoint so commands can be asserted.
        self.drv._server = mock.MagicMock()

    def _get_exit_mode_cmds(self, modes):
        """Return one 'exit' command per CLI mode to unwind."""
        return ['exit'] * len(modes)

    def test_no_exception_on_correct_configuration(self):
        self.assertIsNotNone(self.drv)

    def test_plug_host_into_network(self):
        """Plugging a VM port emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        vm_id = 'vm-1'
        port_id = 123
        network_id = 'net-id'
        host = 'host'
        port_name = '123-port'

        self.drv.plug_host_into_network(vm_id, host, port_id,
                                        network_id, tenant_id, port_name)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'vm id vm-1 hostid host',
                'port id 123 name "123-port" network-id net-id',
                'exit', 'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_plug_dhcp_port_into_network(self):
        """Plugging a DHCP port emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        vm_id = 'vm-1'
        port_id = 123
        network_id = 'net-id'
        host = 'host'
        port_name = '123-port'

        self.drv.plug_dhcp_port_into_network(vm_id, host, port_id,
                                             network_id, tenant_id, port_name)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'network id net-id',
                'dhcp id vm-1 hostid host port-id 123 name "123-port"',
                'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_unplug_host_from_network(self):
        """Unplugging a VM port emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        vm_id = 'vm-1'
        port_id = 123
        network_id = 'net-id'
        host = 'host'
        self.drv.unplug_host_from_network(vm_id, host, port_id,
                                          network_id, tenant_id)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'vm id vm-1 hostid host',
                'no port id 123',
                'exit', 'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_unplug_dhcp_port_from_network(self):
        """Unplugging a DHCP port emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        vm_id = 'vm-1'
        port_id = 123
        network_id = 'net-id'
        host = 'host'

        self.drv.unplug_dhcp_port_from_network(vm_id, host, port_id,
                                               network_id, tenant_id)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'network id net-id',
                'no dhcp id vm-1 port-id 123',
                'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_create_network(self):
        """Creating a network emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        network = {
            'network_id': 'net-id',
            'network_name': 'net-name',
            'segmentation_id': 123}
        self.drv.create_network(tenant_id, network)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'network id net-id name "net-name"',
                'segment 1 type vlan id 123',
                'exit', 'exit', 'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_create_network_bulk(self):
        """Bulk network creation emits one CLI pair per network."""
        tenant_id = 'ten-2'
        num_networks = 10
        networks = [{
            'network_id': 'net-id-%d' % net_id,
            'network_name': 'net-name-%d' % net_id,
            'segmentation_id': net_id} for net_id in range(1, num_networks)
        ]

        self.drv.create_network_bulk(tenant_id, networks)
        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'region RegionOne',
                'tenant ten-2']
        for net_id in range(1, num_networks):
            cmds.append('network id net-id-%d name "net-name-%d"' %
                        (net_id, net_id))
            cmds.append('segment 1 type vlan id %d' % net_id)

        cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
                                              'cvx', 'configure', 'enable']))
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_network(self):
        """Deleting a network emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        network_id = 'net-id'
        self.drv.delete_network(tenant_id, network_id)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'no network id net-id',
                'exit', 'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_network_bulk(self):
        """Bulk network deletion emits one 'no network' per network."""
        tenant_id = 'ten-2'
        num_networks = 10
        # Fixed: the original first built a list of network dicts here and
        # immediately overwrote it with the id list below (dead code).
        networks = ['net-id-%d' % net_id for net_id in range(1, num_networks)]
        self.drv.delete_network_bulk(tenant_id, networks)
        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'region RegionOne',
                'tenant ten-2']
        for net_id in range(1, num_networks):
            cmds.append('no network id net-id-%d' % net_id)

        cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
                                              'cvx', 'configure']))
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_vm(self):
        """Deleting a VM emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        vm_id = 'vm-id'
        self.drv.delete_vm(tenant_id, vm_id)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne',
                'tenant ten-1', 'no vm id vm-id',
                'exit', 'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_vm_bulk(self):
        """Bulk VM deletion emits one 'no vm' per VM."""
        tenant_id = 'ten-2'
        num_vms = 10
        vm_ids = ['vm-id-%d' % vm_id for vm_id in range(1, num_vms)]
        self.drv.delete_vm_bulk(tenant_id, vm_ids)

        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'region RegionOne',
                'tenant ten-2']

        for vm_id in range(1, num_vms):
            cmds.append('no vm id vm-id-%d' % vm_id)

        cmds.extend(self._get_exit_mode_cmds(['tenant', 'region', 'openstack',
                                              'cvx', 'configure']))
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_create_vm_port_bulk(self):
        """Bulk port creation alternates DHCP and compute port commands."""
        tenant_id = 'ten-3'
        num_vms = 10
        num_ports_per_vm = 2

        vms = dict(
            ('vm-id-%d' % vm_id, {
                'vmId': 'vm-id-%d' % vm_id,
                'host': 'host_%d' % vm_id,
            }
            ) for vm_id in range(1, num_vms)
        )

        devices = [n_const.DEVICE_OWNER_DHCP, 'compute']
        vm_port_list = []

        net_count = 1
        for vm_id in range(1, num_vms):
            for port_id in range(1, num_ports_per_vm):
                port = {
                    'id': 'port-id-%d-%d' % (vm_id, port_id),
                    'device_id': 'vm-id-%d' % vm_id,
                    'device_owner': devices[(vm_id + port_id) % 2],
                    'network_id': 'network-id-%d' % net_count,
                    'name': 'port-%d-%d' % (vm_id, port_id)
                }
                vm_port_list.append(port)
                net_count += 1

        self.drv.create_vm_port_bulk(tenant_id, vm_port_list, vms)
        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'region RegionOne',
                'tenant ten-3']

        net_count = 1
        for vm_count in range(1, num_vms):
            host = 'host_%s' % vm_count
            for port_count in range(1, num_ports_per_vm):
                vm_id = 'vm-id-%d' % vm_count
                device_owner = devices[(vm_count + port_count) % 2]
                port_name = '"port-%d-%d"' % (vm_count, port_count)
                network_id = 'network-id-%d' % net_count
                port_id = 'port-id-%d-%d' % (vm_count, port_count)
                if device_owner == 'network:dhcp':
                    cmds.append('network id %s' % network_id)
                    cmds.append('dhcp id %s hostid %s port-id %s name %s' % (
                        vm_id, host, port_id, port_name))
                elif device_owner == 'compute':
                    cmds.append('vm id %s hostid %s' % (vm_id, host))
                    cmds.append('port id %s name %s network-id %s' % (
                        port_id, port_name, network_id))
                net_count += 1

        cmds.extend(self._get_exit_mode_cmds(['tenant', 'region',
                                              'openstack', 'cvx']))

        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_tenant(self):
        """Deleting a tenant emits the expected EOS CLI sequence."""
        tenant_id = 'ten-1'
        self.drv.delete_tenant(tenant_id)
        cmds = ['enable', 'configure', 'cvx', 'service openstack',
                'region RegionOne', 'no tenant ten-1',
                'exit', 'exit', 'exit', 'exit']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_delete_tenant_bulk(self):
        """Bulk tenant deletion emits one 'no tenant' per tenant."""
        num_tenants = 10
        tenant_list = ['ten-%d' % t_id for t_id in range(1, num_tenants)]
        self.drv.delete_tenant_bulk(tenant_list)
        cmds = ['enable',
                'configure',
                'cvx',
                'service openstack',
                'region RegionOne']
        for ten_id in range(1, num_tenants):
            cmds.append('no tenant ten-%d' % ten_id)

        cmds.extend(self._get_exit_mode_cmds(['region', 'openstack',
                                              'cvx', 'configure']))
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)

    def test_get_network_info_returns_none_when_no_such_net(self):
        """An unknown network yields an empty tenant list."""
        expected = []
        self.drv.get_tenants = mock.MagicMock()
        self.drv.get_tenants.return_value = []

        net_info = self.drv.get_tenants()

        self.drv.get_tenants.assert_called_once_with()
        self.assertEqual(net_info, expected, ('Network info must be "None"'
                                              'for unknown network'))

    def test_get_network_info_returns_info_for_available_net(self):
        """A known network's info is returned untouched."""
        valid_network_id = '12345'
        valid_net_info = {'network_id': valid_network_id,
                          'some_info': 'net info'}
        known_nets = valid_net_info

        self.drv.get_tenants = mock.MagicMock()
        self.drv.get_tenants.return_value = known_nets

        net_info = self.drv.get_tenants()
        self.assertEqual(net_info, valid_net_info,
                         ('Must return network info for a valid net'))

    def test_check_cli_commands(self):
        """The CLI capability probe issues the timestamp query."""
        self.drv.check_cli_commands()
        cmds = ['show openstack config region RegionOne timestamp']
        self.drv._server.runCmds.assert_called_once_with(version=1, cmds=cmds)
class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase):
    """Negative test cases to test the Arista Driver configuration."""

    def setUp(self):
        super(AristaRPCWrapperInvalidConfigTestCase, self).setUp()
        # Leave the required options unset so validation must fail.
        self.setup_invalid_config()

    def setup_invalid_config(self):
        """Install an empty (hence invalid) Arista configuration."""
        setup_arista_wrapper_config('')

    def test_raises_exception_on_wrong_configuration(self):
        """AristaRPCWrapper must refuse to start without EAPI options."""
        self.assertRaises(arista_exc.AristaConfigError,
                          arista.AristaRPCWrapper)
class NegativeRPCWrapperTestCase(base.BaseTestCase):
    """Negative test cases to test the RPC between Arista Driver and EOS."""

    def setUp(self):
        super(NegativeRPCWrapperTestCase, self).setUp()
        setup_valid_config()

    def test_exception_is_raised_on_json_server_error(self):
        """A failing EOS JSON-RPC call surfaces as AristaRpcError."""
        wrapper = arista.AristaRPCWrapper()

        wrapper._server = mock.MagicMock()
        wrapper._server.runCmds.side_effect = Exception('server error')
        self.assertRaises(arista_exc.AristaRpcError, wrapper.get_tenants)
class RealNetStorageAristaDriverTestCase(base.BaseTestCase):
"""Main test cases for Arista Mechanism driver.
Tests all mechanism driver APIs supported by Arista Driver. It invokes
all the APIs as they would be invoked in real world scenarios and
verifies the functionality.
"""
def setUp(self):
super(RealNetStorageAristaDriverTestCase, self).setUp()
self.fake_rpc = mock.MagicMock()
ndb.configure_db()
self.drv = arista.AristaDriver(self.fake_rpc)
    def tearDown(self):
        # Stop the driver's synchronization thread started by AristaDriver.
        super(RealNetStorageAristaDriverTestCase, self).tearDown()
        self.drv.stop_synchronization_thread()
def test_create_and_delete_network(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
net_provisioned = db.is_network_provisioned(tenant_id, network_id)
self.assertTrue(net_provisioned, 'The network should be created')
expected_num_nets = 1
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
#Now test the delete network
self.drv.delete_network_precommit(network_context)
net_provisioned = db.is_network_provisioned(tenant_id, network_id)
self.assertFalse(net_provisioned, 'The network should be created')
expected_num_nets = 0
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
def test_create_and_delete_multiple_networks(self):
tenant_id = 'ten-1'
expected_num_nets = 100
segmentation_id = 1001
nets = ['id%s' % n for n in range(expected_num_nets)]
for net_id in nets:
network_context = self._get_network_context(tenant_id,
net_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
#now test the delete networks
for net_id in nets:
network_context = self._get_network_context(tenant_id,
net_id,
segmentation_id)
self.drv.delete_network_precommit(network_context)
num_nets_provisioned = db.num_nets_provisioned(tenant_id)
expected_num_nets = 0
self.assertEqual(expected_num_nets, num_nets_provisioned,
'There should be %d nets, not %d' %
(expected_num_nets, num_nets_provisioned))
def test_create_and_delete_ports(self):
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vms = ['vm1', 'vm2', 'vm3']
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id)
self.drv.create_network_precommit(network_context)
for vm_id in vms:
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
self.drv.create_port_precommit(port_context)
vm_list = db.get_vms(tenant_id)
provisioned_vms = len(vm_list)
expected_vms = len(vms)
self.assertEqual(expected_vms, provisioned_vms,
'There should be %d '
'hosts, not %d' % (expected_vms, provisioned_vms))
# Now test the delete ports
for vm_id in vms:
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
self.drv.delete_port_precommit(port_context)
vm_list = db.get_vms(tenant_id)
provisioned_vms = len(vm_list)
expected_vms = 0
self.assertEqual(expected_vms, provisioned_vms,
'There should be %d '
'VMs, not %d' % (expected_vms, provisioned_vms))
def _get_network_context(self, tenant_id, net_id, seg_id):
network = {'id': net_id,
'tenant_id': tenant_id}
network_segments = [{'segmentation_id': seg_id}]
return FakeNetworkContext(network, network_segments, network)
def _get_port_context(self, tenant_id, net_id, vm_id, network):
port = {'device_id': vm_id,
'device_owner': 'compute',
'binding:host_id': 'ubuntu1',
'tenant_id': tenant_id,
'id': 101,
'network_id': net_id
}
return FakePortContext(port, port, network)
class fake_keystone_info_class(object):
    """To generate fake Keystone authentication token information.

    Arista Driver expects Keystone auth info. This fake information
    is for testing only.
    """
    auth_protocol = 'abc'
    auth_host = 'host'
    auth_port = 5000
    admin_user = 'neutron'
    admin_password = 'fun'
class FakeNetworkContext(object):
    """Minimal stand-in for an ml2 NetworkContext, for testing only."""

    def __init__(self, network, segments=None, original_network=None):
        self._network = network
        self._segments = segments
        self._original_network = original_network

    @property
    def current(self):
        """The network dict as it is now."""
        return self._network

    @property
    def original(self):
        """The network dict before the current operation."""
        return self._original_network

    @property
    def network_segments(self):
        """The list of segment dicts for this network."""
        return self._segments
class FakePortContext(object):
    """Minimal stand-in for an ml2 PortContext, for testing only."""

    def __init__(self, port, original_port, network):
        self._port = port
        self._original_port = original_port
        self._network_context = network

    @property
    def current(self):
        """The port dict as it is now."""
        return self._port

    @property
    def original(self):
        """The port dict before the current operation."""
        return self._original_port

    @property
    def network(self):
        """The associated (fake) network context."""
        return self._network_context

    @property
    def host(self):
        """Binding host of the current port, if any."""
        return self._port.get(portbindings.HOST_ID)

    @property
    def original_host(self):
        """Binding host of the original port, if any."""
        return self._original_port.get(portbindings.HOST_ID)
|
"""Unit tests to cover Logger."""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import logging
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.dfa.v1_19 import client
from tests.adspygoogle.dfa.v1_19 import HTTP_PROXY
from tests.adspygoogle.dfa.v1_19 import SERVER_V1_19
from tests.adspygoogle.dfa.v1_19 import VERSION_V1_19
class DfaLoggerTestV1_19(unittest.TestCase):
  """Unittest suite for Logger using v1_19."""
  SERVER = SERVER_V1_19
  VERSION = VERSION_V1_19
  # Log file shared by the suite; purged and removed by the test itself.
  TMP_LOG = os.path.join('..', '..', '..', '..', 'logs', 'logger_unittest.log')
  DEBUG_MSG1 = 'Message before call to an API method.'
  DEBUG_MSG2 = 'Message after call to an API method.'
  # NOTE: runs at class-creation time and disables debug output on the
  # shared test client module for every test in this file.
  client.debug = False
  def setUp(self):
    """Prepare unittest."""
    print self.id()
  def testUpperStackLogging(self):
    """Tests whether we can define logger at client level and log before and
    after the API request is made.
    """
    logger = logging.getLogger(self.__class__.__name__)
    logger.setLevel(logging.DEBUG)
    fh = logging.FileHandler(self.__class__.TMP_LOG)
    fh.setLevel(logging.DEBUG)
    logger.addHandler(fh)
    # Clean up temporary log file.
    Utils.PurgeLog(self.__class__.TMP_LOG)
    logger.debug(self.__class__.DEBUG_MSG1)
    advertiser_service = client.GetAdvertiserService(
        self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
    advertiser_service.GetAdvertisers({})
    logger.debug(self.__class__.DEBUG_MSG2)
    # MSG1 must be the first line of the log file and MSG2 the second
    # (offset by len(MSG1) plus the trailing newline).
    data = Utils.ReadFile(self.__class__.TMP_LOG)
    self.assertEqual(data.find(self.__class__.DEBUG_MSG1), 0)
    self.assertEqual(data.find(self.__class__.DEBUG_MSG2),
                     len(self.__class__.DEBUG_MSG1) + 1)
    # Clean up and remove temporary log file.
    Utils.PurgeLog(self.__class__.TMP_LOG)
    os.remove(self.__class__.TMP_LOG)
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
|
import copy
from tempest import auth
from tempest import exceptions
from tempest.services.identity.v2.json import token_client as v2_client
from tempest.services.identity.v3.json import token_client as v3_client
from tempest.tests import base
from tempest.tests import fake_identity
class CredentialsTests(base.TestCase):
    """Base tests for the generic auth.Credentials container."""

    attributes = {}
    credentials_class = auth.Credentials

    def _get_credentials(self, attributes=None):
        """Build a credentials object (class attributes by default)."""
        attrs = self.attributes if attributes is None else attributes
        return self.credentials_class(**attrs)

    def _check(self, credentials, credentials_class, filled):
        """Assert type and that id attributes are (un)set per 'filled'."""
        # Check the right version of credentials has been returned.
        self.assertIsInstance(credentials, credentials_class)
        # Every '*_id' attribute except domain_id is set iff 'filled'.
        check = self.assertIsNotNone if filled else self.assertIsNone
        for attr in credentials.ATTRIBUTES:
            if '_id' in attr and attr != 'domain_id':
                check(getattr(credentials, attr))

    def test_create(self):
        creds = self._get_credentials()
        self.assertEqual(self.attributes, creds._initial)

    def test_create_invalid_attr(self):
        self.assertRaises(exceptions.InvalidCredentials,
                          self._get_credentials,
                          attributes=dict(invalid='fake'))

    def test_is_valid(self):
        # The abstract base class leaves is_valid unimplemented.
        creds = self._get_credentials()
        self.assertRaises(NotImplementedError, creds.is_valid)
class KeystoneV2CredentialsTests(CredentialsTests):
    """Tests for Keystone v2 credentials, backed by a fake identity API."""
    attributes = {
        'username': 'fake_username',
        'password': 'fake_password',
        'tenant_name': 'fake_tenant_name'
    }
    identity_response = fake_identity._fake_v2_response
    credentials_class = auth.KeystoneV2Credentials
    tokenclient_class = v2_client.TokenClientJSON
    identity_version = 'v2'
    def setUp(self):
        super(KeystoneV2CredentialsTests, self).setUp()
        # Route all token-client HTTP traffic to the canned fake response.
        self.stubs.Set(self.tokenclient_class, 'raw_request',
                       self.identity_response)
    def _verify_credentials(self, credentials_class, creds_dict, filled=True):
        """Get credentials via auth.get_credentials and _check the result."""
        creds = auth.get_credentials(fake_identity.FAKE_AUTH_URL,
                                     fill_in=filled,
                                     identity_version=self.identity_version,
                                     **creds_dict)
        self._check(creds, credentials_class, filled)
    def test_get_credentials(self):
        self._verify_credentials(credentials_class=self.credentials_class,
                                 creds_dict=self.attributes)
    def test_get_credentials_not_filled(self):
        self._verify_credentials(credentials_class=self.credentials_class,
                                 creds_dict=self.attributes,
                                 filled=False)
    def test_is_valid(self):
        creds = self._get_credentials()
        self.assertTrue(creds.is_valid())
    def _test_is_not_valid(self, ignore_key):
        """Removing any single attribute (except ignore_key) invalidates.

        Each attribute is removed, checked, and restored in turn, so the
        credentials object ends the loop unchanged.
        """
        creds = self._get_credentials()
        for attr in self.attributes.keys():
            if attr == ignore_key:
                continue
            temp_attr = getattr(creds, attr)
            delattr(creds, attr)
            self.assertFalse(creds.is_valid(),
                             "Credentials should be invalid without %s" % attr)
            setattr(creds, attr, temp_attr)
    def test_is_not_valid(self):
        # NOTE(mtreinish): A KeystoneV2 credential object is valid without
        # a tenant_name. So skip that check. See tempest.auth for the valid
        # credential requirements
        self._test_is_not_valid('tenant_name')
    def test_reset_all_attributes(self):
        creds = self._get_credentials()
        initial_creds = copy.deepcopy(creds)
        set_attr = creds.__dict__.keys()
        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
        # Set all unset attributes, then reset
        for attr in missing_attr:
            setattr(creds, attr, 'fake' + attr)
        creds.reset()
        # Check reset credentials are same as initial ones
        self.assertEqual(creds, initial_creds)
    def test_reset_single_attribute(self):
        creds = self._get_credentials()
        initial_creds = copy.deepcopy(creds)
        set_attr = creds.__dict__.keys()
        missing_attr = set(creds.ATTRIBUTES).difference(set_attr)
        # Set one unset attributes, then reset
        for attr in missing_attr:
            setattr(creds, attr, 'fake' + attr)
            creds.reset()
            # Check reset credentials are same as initial ones
            self.assertEqual(creds, initial_creds)
class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
    """Tests for Keystone v3 credentials (reusing the v2 suite)."""
    attributes = {
        'username': 'fake_username',
        'password': 'fake_password',
        'project_name': 'fake_project_name',
        'user_domain_name': 'fake_domain_name'
    }
    credentials_class = auth.KeystoneV3Credentials
    identity_response = fake_identity._fake_v3_response
    tokenclient_class = v3_client.V3TokenClientJSON
    identity_version = 'v3'

    def test_is_not_valid(self):
        # NOTE(mtreinish) For a Keystone V3 credential object a project name
        # is not required to be valid, so we skip that check. See tempest.auth
        # for the valid credential requirements
        self._test_is_not_valid('project_name')

    def test_synced_attributes(self):
        """project/tenant and user_domain/project_domain stay in sync."""
        # Bug fix: work on a copy. 'self.attributes' resolves to the
        # class-level dict, so mutating/deleting keys on it would corrupt
        # the attributes shared by every other test in this class.
        attributes = dict(self.attributes)
        # Create V3 credentials with tenant instead of project, and user_domain
        for attr in ['project_id', 'user_domain_id']:
            attributes[attr] = 'fake_' + attr
        creds = self._get_credentials(attributes)
        self.assertEqual(creds.project_name, creds.tenant_name)
        self.assertEqual(creds.project_id, creds.tenant_id)
        self.assertEqual(creds.user_domain_name, creds.project_domain_name)
        self.assertEqual(creds.user_domain_id, creds.project_domain_id)
        # Replace user_domain with project_domain
        del attributes['user_domain_name']
        del attributes['user_domain_id']
        del attributes['project_name']
        del attributes['project_id']
        for attr in ['project_domain_name', 'project_domain_id',
                     'tenant_name', 'tenant_id']:
            attributes[attr] = 'fake_' + attr
        # Bug fix: rebuild the credentials so the assertions below exercise
        # the tenant->project / project_domain->user_domain direction; the
        # previous object would only have compared already-synced
        # attributes to themselves.
        creds = self._get_credentials(attributes)
        self.assertEqual(creds.tenant_name, creds.project_name)
        self.assertEqual(creds.tenant_id, creds.project_id)
        self.assertEqual(creds.project_domain_name, creds.user_domain_name)
        self.assertEqual(creds.project_domain_id, creds.user_domain_id)
|
from oslo_config import cfg
from nova import db
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.api.openstack import fakes
# Global oslo.config handle; the legacy v2 extension-list option must be
# registered before _get_flags() below can copy and extend it.
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
                'nova.api.openstack.compute.legacy_v2.extensions')
class ExtendedVolumesSampleJsonTests(test_servers.ServersSampleBase):
    """API-sample tests for the os-extended-volumes server extension."""
    extension_name = "os-extended-volumes"
    def _get_flags(self):
        """Enable the extensions these samples depend on."""
        f = super(ExtendedVolumesSampleJsonTests, self)._get_flags()
        # Copy (not alias) the configured list before appending to it.
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_volumes.'
            'Extended_volumes')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.keypairs.'
            'Keypairs')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_ips_mac.'
            'Extended_ips_mac')
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.extended_ips.'
            'Extended_ips')
        return f
    def test_show(self):
        """GET of a single server matches the stored sample template."""
        uuid = self._post_server()
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance_uuids',
                       fakes.stub_bdm_get_all_by_instance_uuids)
        response = self._do_get('servers/%s' % uuid)
        subs = self._get_regexes()
        # Values that vary per run are matched by regex/fixed substitution.
        subs['hostid'] = '[a-f0-9]+'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        self._verify_response('server-get-resp', subs, response, 200)
    def test_detail(self):
        """GET of the server detail list matches the stored sample."""
        uuid = self._post_server()
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance_uuids',
                       fakes.stub_bdm_get_all_by_instance_uuids)
        response = self._do_get('servers/detail')
        subs = self._get_regexes()
        subs['id'] = uuid
        subs['hostid'] = '[a-f0-9]+'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        self._verify_response('servers-detail-resp', subs, response, 200)
|
"""Tests for the image export front-end."""
import os
import shutil
import tempfile
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.frontend import image_export
from tests.frontend import test_lib
class DateTimeFileEntryFilter(test_lib.FrontendTestCase):
    """Tests for the date time file entry filter."""
    def testAddDateTimeRange(self):
        """Tests the AddDateTimeRange function."""
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'ctime', start_time_string=u'2012-05-25 15:59:20',
            end_time_string=u'2012-05-25 15:59:25')
        # Testing adding a badly formatter filter: unsupported time value.
        with self.assertRaises(ValueError):
            date_filter.AddDateTimeRange(
                u'foobar', start_time_string=u'2012-02-01 01:01:01')
        # Testing adding a badly formatter filter, no date set.
        with self.assertRaises(ValueError):
            date_filter.AddDateTimeRange(u'atime')
    def testMatches(self):
        """Tests the Matches function against the test image fixture."""
        # NOTE(review): relies on the 'ímynd.dd' test image fixture and the
        # timestamps of inode 16 inside it — confirm if the fixture changes.
        test_path = self._GetTestFilePath([u'ímynd.dd'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        tsk_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, inode=16,
            location=u'/a_directory/another_file', parent=os_path_spec)
        file_entry = path_spec_resolver.Resolver.OpenFileEntry(tsk_path_spec)
        # Timestamps of file:
        #   Modified: 2012-05-25T15:59:23+00:00
        #   Accessed: 2012-05-25T15:59:23+00:00
        #   Created: 2012-05-25T15:59:23+00:00
        date_filter = image_export.DateTimeFileEntryFilter()
        # When no date time ranges are specified the filter returns None.
        self.assertEqual(date_filter.Matches(file_entry), None)
        # Add a date to the date filter.
        date_filter.AddDateTimeRange(
            u'ctime', start_time_string=u'2012-05-25 15:59:20',
            end_time_string=u'2012-05-25 15:59:25')
        self.assertTrue(date_filter.Matches(file_entry))
        # A ctime range that excludes the file's timestamp must not match.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'ctime', start_time_string=u'2012-05-25 15:59:24',
            end_time_string=u'2012-05-25 15:59:55')
        self.assertFalse(date_filter.Matches(file_entry))
        # Testing a timestamp that does not exist in the stat object.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'bkup', start_time_string=u'2012-02-02 12:12:12')
        self.assertTrue(date_filter.Matches(file_entry))
        # Just end date set.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'mtime', end_time_string=u'2012-05-25 15:59:55')
        self.assertTrue(date_filter.Matches(file_entry))
        # Just with a start date but within range.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'atime', start_time_string=u'2012-03-25 15:59:55')
        self.assertTrue(date_filter.Matches(file_entry))
        # And now with a start date, but out of range.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'ctime', start_time_string=u'2012-05-25 15:59:55')
        self.assertFalse(date_filter.Matches(file_entry))
        # Test with more than one date filter: a single failing range is
        # enough for the combined filter not to match.
        date_filter = image_export.DateTimeFileEntryFilter()
        date_filter.AddDateTimeRange(
            u'ctime', start_time_string=u'2012-05-25 15:59:55',
            end_time_string=u'2012-05-25 17:34:12')
        date_filter.AddDateTimeRange(
            u'atime', start_time_string=u'2012-05-25 15:59:20',
            end_time_string=u'2012-05-25 15:59:25')
        date_filter.AddDateTimeRange(
            u'mtime', start_time_string=u'2012-05-25 15:59:24',
            end_time_string=u'2012-05-25 15:59:55')
        self.assertFalse(date_filter.Matches(file_entry))
        # pylint: disable=protected-access
        self.assertEqual(len(date_filter._date_time_ranges), 3)
class ImageExportFrontendTest(test_lib.FrontendTestCase):
    """Tests for the image export front-end."""
    def _RecursiveList(self, path):
        """Recursively lists a file or directory.

        Args:
          path: the path of the file or directory to list.

        Returns:
          A list of files and sub directories within the path.
        """
        results = []
        for sub_path, _, files in os.walk(path):
            # The top-level path itself is excluded from the listing.
            if sub_path != path:
                results.append(sub_path)
            for file_entry in files:
                results.append(os.path.join(sub_path, file_entry))
        return results
    def setUp(self):
        """Sets up the objects used by an individual test."""
        self._temp_directory = tempfile.mkdtemp()
        # TODO: do not use a class attribute here.
        # We need to flush the MD5 dict in FileSaver before each test.
        image_export.FileSaver.md5_dict = {}
    def tearDown(self):
        """Cleans up the objects used an individual test."""
        # ignore_errors=True: best-effort removal of the temp directory.
        shutil.rmtree(self._temp_directory, True)
        self._temp_directory = None
    def _GetTestScanNode(self, scan_context):
        """Retrieves the scan node for testing.

        Retrieves the first scan node, from the root upwards, with more or less
        than 1 sub node.

        Args:
          scan_context: scan context (instance of dfvfs.ScanContext).

        Returns:
          A scan node (instance of dfvfs.ScanNode).
        """
        scan_node = scan_context.GetRootScanNode()
        while len(scan_node.sub_nodes) == 1:
            scan_node = scan_node.sub_nodes[0]
        return scan_node
    def testProcessSourcesExtractWithDateTimeFilter(self):
        """Tests the ProcessSources function with a date time filter."""
        test_front_end = image_export.ImageExportFrontend()
        test_front_end.ParseDateFilters([
            u'ctime, 2012-05-25 15:59:00, 2012-05-25 15:59:20'])
        # TODO: move to corresponding CLI test.
        output_writer = test_lib.StringIOOutputWriter()
        test_front_end.PrintFilterCollection(output_writer)
        expected_value = (
            u'Filters:\n'
            u'\tctime between 2012-05-25T15:59:00+00:00 and '
            u'2012-05-25T15:59:20+00:00\n')
        value = output_writer.GetValue()
        self.assertEqual(value, expected_value)
        test_path = self._GetTestFilePath([u'image.qcow2'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
            parent=qcow_path_spec)
        test_front_end.ProcessSources([path_spec], self._temp_directory)
        # NOTE(review): expected listings depend on the contents of the
        # image.qcow2 test fixture — confirm if the fixture changes.
        expected_extracted_files = sorted([
            os.path.join(self._temp_directory, u'a_directory'),
            os.path.join(self._temp_directory, u'a_directory', u'a_file')])
        extracted_files = self._RecursiveList(self._temp_directory)
        self.assertEqual(sorted(extracted_files), expected_extracted_files)
    def testProcessSourcesExtractWithExtensionsFilter(self):
        """Tests the ProcessSources function with an extensions filter."""
        test_front_end = image_export.ImageExportFrontend()
        test_front_end.ParseExtensionsString(u'txt')
        test_path = self._GetTestFilePath([u'image.qcow2'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
            parent=qcow_path_spec)
        test_front_end.ProcessSources([path_spec], self._temp_directory)
        expected_extracted_files = sorted([
            os.path.join(self._temp_directory, u'passwords.txt')])
        extracted_files = self._RecursiveList(self._temp_directory)
        self.assertEqual(sorted(extracted_files), expected_extracted_files)
    def testProcessSourcesExtractWithNamesFilter(self):
        """Tests the ProcessSources function with a names filter."""
        test_front_end = image_export.ImageExportFrontend()
        test_front_end.ParseNamesString(u'another_file')
        test_path = self._GetTestFilePath([u'image.qcow2'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
            parent=qcow_path_spec)
        test_front_end.ProcessSources([path_spec], self._temp_directory)
        expected_extracted_files = sorted([
            os.path.join(self._temp_directory, u'a_directory'),
            os.path.join(self._temp_directory, u'a_directory', u'another_file')])
        extracted_files = self._RecursiveList(self._temp_directory)
        self.assertEqual(sorted(extracted_files), expected_extracted_files)
    def testProcessSourcesExtractWithFilter(self):
        """Tests the ProcessSources function with a filter file."""
        test_front_end = image_export.ImageExportFrontend()
        # The filter file is written into the extraction target directory,
        # so it also appears in the expected listing below.
        filter_file = os.path.join(self._temp_directory, u'filter.txt')
        with open(filter_file, 'wb') as file_object:
            file_object.write(b'/a_directory/.+_file\n')
        test_path = self._GetTestFilePath([u'image.qcow2'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        qcow_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_QCOW, parent=os_path_spec)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
            parent=qcow_path_spec)
        test_front_end.ProcessSources(
            [path_spec], self._temp_directory, filter_file=filter_file)
        expected_extracted_files = sorted([
            os.path.join(self._temp_directory, u'filter.txt'),
            os.path.join(self._temp_directory, u'a_directory'),
            os.path.join(self._temp_directory, u'a_directory', u'another_file'),
            os.path.join(self._temp_directory, u'a_directory', u'a_file')])
        extracted_files = self._RecursiveList(self._temp_directory)
        self.assertEqual(sorted(extracted_files), expected_extracted_files)
    def testProcessSourcesExtractWithSignaturesFilter(self):
        """Tests the ProcessSources function with a signatures filter."""
        test_front_end = image_export.ImageExportFrontend()
        test_front_end.ParseSignatureIdentifiers(self._DATA_PATH, u'gzip')
        test_path = self._GetTestFilePath([u'syslog_image.dd'])
        os_path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_OS, location=test_path)
        path_spec = path_spec_factory.Factory.NewPathSpec(
            dfvfs_definitions.TYPE_INDICATOR_TSK, location=u'/',
            parent=os_path_spec)
        test_front_end.ProcessSources([path_spec], self._temp_directory)
        expected_extracted_files = sorted([
            os.path.join(self._temp_directory, u'logs'),
            os.path.join(self._temp_directory, u'logs', u'sys.tgz')])
        extracted_files = self._RecursiveList(self._temp_directory)
        self.assertEqual(sorted(extracted_files), expected_extracted_files)
    # TODO: add test with remove duplicates disabled.
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
|
from django.core.urlresolvers import reverse
from django import forms
from django import http
from django import shortcuts
from django.template import defaultfilters
from mox import IsA # noqa
from horizon import tables
from horizon.tables import formset as table_formset
from horizon.tables import views as table_views
from horizon.test import helpers as test
class FakeObject(object):
    """Simple value holder used as row data for the table tests."""

    def __init__(self, id, name, value, status, optional=None, excluded=None):
        self.id = id
        self.name = name
        self.value = value
        self.status = status
        self.optional = optional
        self.excluded = excluded
        # Attribute deliberately not declared as a table column.
        self.extra = "extra"

    def __repr__(self):
        return "<%s: %s>" % (type(self).__name__, self.name)
# Baseline rows: full row, a 'down' row whose value contains markup
# (exercises escaping), and a row with optional/excluded unset.
TEST_DATA = (
    FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
    FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'),
    FakeObject('3', 'object_3', 'value_3', 'up'),
)
# Single 'down' row (used e.g. for AJAX row-update tests).
TEST_DATA_2 = (
    FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'),
)
# Single 'up' row.
TEST_DATA_3 = (
    FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
)
# Numeric values (used for column summation tests).
TEST_DATA_4 = (
    FakeObject('1', 'object_1', 2, 'up'),
    FakeObject('2', 'object_2', 4, 'up'),
)
# Status longer than the 35-char truncation limit used by MyTable.
TEST_DATA_5 = (
    FakeObject('1', 'object_1', 'value_1',
               'A Status that is longer than 35 characters!', 'optional_1'),
)
# Mixed row states including a non-selectable 'DELETED' value.
TEST_DATA_6 = (
    FakeObject('1', 'object_1', 'DELETED', 'down'),
    FakeObject('2', 'object_2', 'CREATED', 'up'),
    FakeObject('3', 'object_3', 'STANDBY', 'standby'),
)
# Row for wrap_list column rendering tests.
TEST_DATA_7 = (
    FakeObject('1', 'wrapped name', 'wrapped value', 'status',
               'not wrapped optional'),
)
class MyLinkAction(tables.LinkAction):
    """Link-style action that resolves its URL via Django's reverse()."""
    name = "login"
    verbose_name = "Log In"
    url = "login"
    attrs = {
        "class": "ajax-modal",
    }
    def get_link_url(self, datum=None, *args, **kwargs):
        return reverse(self.url)
class MyAction(tables.Action):
    """Batch-capable action; disallowed for rows whose status is 'down'."""
    name = "delete"
    verbose_name = "Delete Me"
    verbose_name_plural = "Delete Them"
    def allowed(self, request, obj=None):
        # Also allowed when obj is None (table-level invocation).
        return getattr(obj, 'status', None) != 'down'
    def handle(self, data_table, request, object_ids):
        # Redirect carrying the selected ids, so tests can assert on them.
        return shortcuts.redirect('http://example.com/?ids=%s'
                                  % ",".join(object_ids))
class MyColumn(tables.Column):
    """Column subclass used to verify custom column_class handling."""
    pass
class MyRowSelectable(tables.Row):
    """AJAX row that refuses selection for rows whose value is 'DELETED'."""
    ajax = True
    def can_be_selected(self, datum):
        return datum.value != 'DELETED'
class MyRow(tables.Row):
    """AJAX row whose refresh data is always the TEST_DATA_2 row."""
    ajax = True
    @classmethod
    def get_data(cls, request, obj_id):
        # obj_id is ignored on purpose; tests always get the same row back.
        return TEST_DATA_2[0]
class MyBatchAction(tables.BatchAction):
    """No-op batch action used to exercise batch-action plumbing."""
    name = "batch"
    action_present = "Batch"
    action_past = "Batched"
    data_type_singular = "Item"
    data_type_plural = "Items"
    def action(self, request, object_ids):
        pass
class MyBatchActionWithHelpText(MyBatchAction):
    """Batch action variant that also carries help text."""
    name = "batch_help"
    help_text = "this is help."
    action_present = "BatchHelp"
    action_past = "BatchedHelp"
class MyToggleAction(tables.BatchAction):
    """Two-state batch action whose labels depend on the row's status.

    NOTE: allowed() stores per-row state on self (self.down and the
    current_*_action indices), so allowed() must run before action() for
    the same row — the framework presumably guarantees this ordering.
    """
    name = "toggle"
    action_present = ("Down", "Up")
    action_past = ("Downed", "Upped")
    data_type_singular = "Item"
    data_type_plural = "Items"
    def allowed(self, request, obj=None):
        if not obj:
            return False
        self.down = getattr(obj, 'status', None) == 'down'
        if self.down:
            # Select the second ("Up") label for rows that are down.
            self.current_present_action = 1
        return self.down or getattr(obj, 'status', None) == 'up'
    def action(self, request, object_ids):
        if self.down:
            # up it
            self.current_past_action = 1
class MyDisabledAction(MyToggleAction):
    """Toggle action that is never allowed (for disabled-action tests)."""
    def allowed(self, request, obj=None):
        return False
class MyFilterAction(tables.FilterAction):
    """Case-insensitive substring filter on each object's name."""

    def filter(self, table, objs, filter_string):
        needle = filter_string.lower()
        # Keep the filter() builtin (rather than a list comprehension) so
        # the result type for tuple/list inputs stays as before.
        return filter(lambda obj: needle in obj.name.lower(), objs)
class MyServerFilterAction(tables.FilterAction):
    """Server-side filter action offering 'name' and 'status' fields."""

    filter_type = 'server'
    filter_choices = (('name', 'Name', False),
                      ('status', 'Status', True))
    needs_preloading = True

    def filter(self, table, items, filter_string):
        """Filter by name substring; other fields are a no-op."""
        filter_field = table.get_filter_field()
        if filter_field != 'name' or not filter_string:
            return items
        return [item for item in items if filter_string in item.name]
class MyUpdateAction(tables.UpdateAction):
    """Inline-edit action that always permits updates (no-op persist)."""
    def allowed(self, *args):
        return True
    def update_cell(self, *args):
        pass
class MyUpdateActionNotAllowed(MyUpdateAction):
    """Inline-edit action that never permits updates."""
    def allowed(self, *args):
        return False
def get_name(obj):
    """Return the object's display name prefixed with 'custom '."""
    return "custom {0}".format(obj.name)
def get_link(obj):
    """Return the login URL for any object (used as a column link callable)."""
    return reverse('login')
class MyTable(tables.DataTable):
    """Reference DataTable exercising most column/action features.

    NOTE: column declaration order is meaningful (it drives the rendered
    column order), so these attribute definitions must not be reordered.
    """
    # Per-status cell attributes, looked up via cell_attributes_getter.
    tooltip_dict = {'up': {'title': 'service is up and running',
                           'style': 'color:green;cursor:pointer'},
                    'down': {'title': 'service is not available',
                             'style': 'color:red;cursor:pointer'}}
    id = tables.Column('id', hidden=True, sortable=False)
    # Column backed by a callable transform plus an inline-edit form field.
    name = tables.Column(get_name,
                         verbose_name="Verbose Name",
                         sortable=True,
                         form_field=forms.CharField(required=True),
                         form_field_attributes={'class': 'test'},
                         update_action=MyUpdateAction)
    value = tables.Column('value',
                          sortable=True,
                          link='http://example.com/',
                          attrs={'class': 'green blue'},
                          summation="average",
                          link_classes=('link-modal',),
                          link_attrs={'data-type': 'modal dialog',
                                      'data-tip': 'click for dialog'})
    # Truncated status column whose link is computed per-row.
    status = tables.Column('status', link=get_link, truncate=35,
                           cell_attributes_getter=tooltip_dict.get)
    optional = tables.Column('optional', empty_value='N/A')
    # Declared but excluded from Meta.columns below.
    excluded = tables.Column('excluded')
    class Meta(object):
        name = "my_table"
        verbose_name = "My Table"
        status_columns = ["status"]
        columns = ('id', 'name', 'value', 'optional', 'status')
        row_class = MyRow
        column_class = MyColumn
        table_actions = (MyFilterAction, MyAction, MyBatchAction,
                         MyBatchActionWithHelpText)
        row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction,
                       MyBatchActionWithHelpText)
class MyServerFilterTable(MyTable):
    """MyTable variant using the server-side filter action."""
    class Meta(object):
        name = "my_table"
        verbose_name = "My Table"
        status_columns = ["status"]
        columns = ('id', 'name', 'value', 'optional', 'status')
        row_class = MyRow
        column_class = MyColumn
        table_actions = (MyServerFilterAction, MyAction, MyBatchAction)
        row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction,
                       MyBatchActionWithHelpText)
class MyTableSelectable(MyTable):
    """MyTable variant with per-row selectability (MyRowSelectable)."""
    class Meta(object):
        name = "my_table"
        columns = ('id', 'name', 'value', 'status')
        row_class = MyRowSelectable
        status_columns = ["status"]
        multi_select = True
class MyTableNotAllowedInlineEdit(MyTable):
    """MyTable variant whose name column forbids inline editing."""
    name = tables.Column(get_name,
                         verbose_name="Verbose Name",
                         sortable=True,
                         form_field=forms.CharField(required=True),
                         form_field_attributes={'class': 'test'},
                         update_action=MyUpdateActionNotAllowed)
    class Meta(object):
        name = "my_table"
        columns = ('id', 'name', 'value', 'optional', 'status')
        row_class = MyRow
class MyTableWrapList(MyTable):
    """MyTable variant exercising the wrap_list column option."""
    name = tables.Column('name',
                         form_field=forms.CharField(required=True),
                         form_field_attributes={'class': 'test'},
                         update_action=MyUpdateActionNotAllowed,
                         wrap_list=True)
    value = tables.Column('value',
                          wrap_list=True)
    optional = tables.Column('optional',
                             wrap_list=False)
class NoActionsTable(tables.DataTable):
    """Table with no table or row actions at all."""
    id = tables.Column('id')
    class Meta(object):
        name = "no_actions_table"
        verbose_name = "No Actions Table"
        table_actions = ()
        row_actions = ()
class DisabledActionsTable(tables.DataTable):
    """Table whose only action is never allowed (renders disabled)."""
    id = tables.Column('id')
    class Meta(object):
        name = "disabled_actions_table"
        verbose_name = "Disabled Actions Table"
        table_actions = (MyDisabledAction,)
        row_actions = ()
        multi_select = True
class DataTableTests(test.TestCase):
    def test_table_instantiation(self):
        """Tests everything that happens when the table is instantiated."""
        self.table = MyTable(self.request, TEST_DATA)
        # Properties defined on the table
        self.assertEqual(TEST_DATA, self.table.data)
        self.assertEqual("my_table", self.table.name)
        # Verify calculated options that weren't specified explicitly
        self.assertTrue(self.table._meta.actions_column)
        self.assertTrue(self.table._meta.multi_select)
        # Test for verbose_name: the table's unicode form is its title.
        self.assertEqual(u"My Table", unicode(self.table))
        # Column ordering and exclusion.
        # This should include auto-columns for multi_select and actions,
        # but should not contain the excluded column.
        # Additionally, auto-generated columns should use the custom
        # column class specified on the table.
        self.assertQuerysetEqual(self.table.columns.values(),
                                 ['<MyColumn: multi_select>',
                                  '<Column: id>',
                                  '<Column: name>',
                                  '<Column: value>',
                                  '<Column: optional>',
                                  '<Column: status>',
                                  '<MyColumn: actions>'])
        # Actions (these also test ordering)
        self.assertQuerysetEqual(self.table.base_actions.values(),
                                 ['<MyBatchAction: batch>',
                                  '<MyBatchActionWithHelpText: batch_help>',
                                  '<MyAction: delete>',
                                  '<MyFilterAction: filter>',
                                  '<MyLinkAction: login>',
                                  '<MyToggleAction: toggle>'])
        self.assertQuerysetEqual(self.table.get_table_actions(),
                                 ['<MyFilterAction: filter>',
                                  '<MyAction: delete>',
                                  '<MyBatchAction: batch>',
                                  '<MyBatchActionWithHelpText: batch_help>'])
        self.assertQuerysetEqual(self.table.get_row_actions(TEST_DATA[0]),
                                 ['<MyAction: delete>',
                                  '<MyLinkAction: login>',
                                  '<MyBatchAction: batch>',
                                  '<MyToggleAction: toggle>',
                                  '<MyBatchActionWithHelpText: batch_help>'])
        # Auto-generated columns
        multi_select = self.table.columns['multi_select']
        self.assertEqual("multi_select", multi_select.auto)
        self.assertEqual("multi_select_column",
                         multi_select.get_final_attrs().get('class', ""))
        actions = self.table.columns['actions']
        self.assertEqual("actions", actions.auto)
        self.assertEqual("actions_column",
                         actions.get_final_attrs().get('class', ""))
        # In-line edit action on column.
        name_column = self.table.columns['name']
        self.assertEqual(MyUpdateAction, name_column.update_action)
        self.assertEqual(forms.CharField, name_column.form_field.__class__)
        self.assertEqual({'class': 'test'}, name_column.form_field_attributes)
def test_table_force_no_multiselect(self):
class TempTable(MyTable):
class Meta(object):
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
multi_select = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_force_no_actions_column(self):
class TempTable(MyTable):
class Meta(object):
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
actions_column = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
    def test_table_natural_no_inline_editing(self):
        """A column defined without update_action gets no inline-edit data."""
        class TempTable(MyTable):
            # Override "name" without any form_field/update_action.
            name = tables.Column(get_name,
                                 verbose_name="Verbose Name",
                                 sortable=True)

            class Meta(object):
                name = "my_table"
                columns = ('id', 'name', 'value', 'optional', 'status')
        self.table = TempTable(self.request, TEST_DATA_2)
        name_column = self.table.columns['name']
        self.assertIsNone(name_column.update_action)
        self.assertIsNone(name_column.form_field)
        self.assertEqual({}, name_column.form_field_attributes)
def test_table_natural_no_actions_column(self):
class TempTable(MyTable):
class Meta(object):
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_multiselect(self):
class TempTable(MyTable):
class Meta(object):
columns = ('id',)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
    def test_table_column_inheritance(self):
        """Columns defined on a subclass are appended after inherited ones."""
        class TempTable(MyTable):
            extra = tables.Column('extra')

            class Meta(object):
                name = "temp_table"
                table_actions = (MyFilterAction, MyAction,)
                row_actions = (MyAction, MyLinkAction,)
        self.table = TempTable(self.request, TEST_DATA)
        self.assertQuerysetEqual(self.table.columns.values(),
                                 ['<Column: multi_select>',
                                  '<Column: id>',
                                  '<Column: name>',
                                  '<Column: value>',
                                  '<Column: status>',
                                  '<Column: optional>',
                                  '<Column: excluded>',
                                  '<Column: extra>',
                                  '<Column: actions>'])
    def test_table_construction(self):
        """Columns, rows and cells are built consistently from the data."""
        self.table = MyTable(self.request, TEST_DATA)
        # Verify we retrieve the right columns for headers
        columns = self.table.get_columns()
        self.assertQuerysetEqual(columns, ['<MyColumn: multi_select>',
                                           '<Column: id>',
                                           '<Column: name>',
                                           '<Column: value>',
                                           '<Column: optional>',
                                           '<Column: status>',
                                           '<MyColumn: actions>'])
        # Verify we retrieve the right rows from our data
        rows = self.table.get_rows()
        self.assertQuerysetEqual(rows, ['<MyRow: my_table__row__1>',
                                        '<MyRow: my_table__row__2>',
                                        '<MyRow: my_table__row__3>'])
        # Verify each row contains the right cells
        self.assertQuerysetEqual(rows[0].get_cells(),
                                 ['<Cell: multi_select, my_table__row__1>',
                                  '<Cell: id, my_table__row__1>',
                                  '<Cell: name, my_table__row__1>',
                                  '<Cell: value, my_table__row__1>',
                                  '<Cell: optional, my_table__row__1>',
                                  '<Cell: status, my_table__row__1>',
                                  '<Cell: actions, my_table__row__1>'])
def test_table_column(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
row3 = self.table.get_rows()[2]
id_col = self.table.columns['id']
name_col = self.table.columns['name']
value_col = self.table.columns['value']
# transform
self.assertEqual('1', row.cells['id'].data) # Standard attr access
self.assertEqual('custom object_1', row.cells['name'].data) # Callable
# name and verbose_name
self.assertEqual("Id", unicode(id_col))
self.assertEqual("Verbose Name", unicode(name_col))
# sortable
self.assertEqual(False, id_col.sortable)
self.assertNotIn("sortable", id_col.get_final_attrs().get('class', ""))
self.assertEqual(True, name_col.sortable)
self.assertIn("sortable", name_col.get_final_attrs().get('class', ""))
# hidden
self.assertEqual(True, id_col.hidden)
self.assertIn("hide", id_col.get_final_attrs().get('class', ""))
self.assertEqual(False, name_col.hidden)
self.assertNotIn("hide", name_col.get_final_attrs().get('class', ""))
# link, link_classes, link_attrs, and get_link_url
self.assertIn('href="http://example.com/"', row.cells['value'].value)
self.assertIn('class="link-modal"', row.cells['value'].value)
self.assertIn('data-type="modal dialog"', row.cells['value'].value)
self.assertIn('data-tip="click for dialog"', row.cells['value'].value)
self.assertIn('href="/auth/login/"', row.cells['status'].value)
# empty_value
self.assertEqual("N/A", row3.cells['optional'].value)
# classes
self.assertEqual("green blue sortable anchor normal_column",
value_col.get_final_attrs().get('class', ""))
# status
cell_status = row.cells['status'].status
self.assertEqual(True, cell_status)
self.assertEqual('status_up',
row.cells['status'].get_status_class(cell_status))
# status_choices
id_col.status = True
id_col.status_choices = (('1', False), ('2', True), ('3', None))
cell_status = row.cells['id'].status
self.assertEqual(False, cell_status)
self.assertEqual('status_down',
row.cells['id'].get_status_class(cell_status))
cell_status = row3.cells['id'].status
self.assertIsNone(cell_status)
self.assertEqual('status_unknown',
row.cells['id'].get_status_class(cell_status))
# Ensure data is not cached on the column across table instances
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
self.assertTrue("down" in row.cells['status'].value)
    def test_table_row(self):
        """Row objects expose their table, datum, id and status."""
        self.table = MyTable(self.request, TEST_DATA)
        row = self.table.get_rows()[0]
        self.assertEqual(self.table, row.table)
        self.assertEqual(TEST_DATA[0], row.datum)
        self.assertEqual('my_table__row__1', row.id)
        # Verify row status works even if status isn't set on the column
        self.assertEqual(True, row.status)
        self.assertEqual('status_up', row.status_class)
        # Check the cells as well
        cell_status = row.cells['status'].status
        self.assertEqual(True, cell_status)
        self.assertEqual('status_up',
                         row.cells['status'].get_status_class(cell_status))
def test_table_column_truncation(self):
self.table = MyTable(self.request, TEST_DATA_5)
row = self.table.get_rows()[0]
self.assertEqual(35, len(row.cells['status'].data))
self.assertEqual(u'A Status that is longer than 35 ...',
row.cells['status'].data)
    def test_table_rendering(self):
        """Rendered markup contains the expected action and row elements."""
        self.table = MyTable(self.request, TEST_DATA)
        # Table actions
        table_actions = self.table.render_table_actions()
        resp = http.HttpResponse(table_actions)
        self.assertContains(resp, "table_search", 1)
        self.assertContains(resp, "my_table__filter__q", 1)
        self.assertContains(resp, "my_table__delete", 1)
        self.assertContains(resp, 'id="my_table__action_delete"', 1)
        # Table BatchActions
        self.assertContains(resp, 'id="my_table__action_batch_help"', 1)
        self.assertContains(resp, 'help_text="this is help."', 1)
        self.assertContains(resp, 'BatchHelp Item', 1)
        # Row actions
        row_actions = self.table.render_row_actions(TEST_DATA[0])
        resp = http.HttpResponse(row_actions)
        self.assertContains(resp, "<li", 4)
        self.assertContains(resp, "my_table__delete__1", 1)
        self.assertContains(resp, "my_table__toggle__1", 1)
        self.assertContains(resp, "/auth/login/", 1)
        self.assertContains(resp, "ajax-modal", 1)
        self.assertContains(resp, 'id="my_table__row_1__action_delete"', 1)
        # Row BatchActions
        row_actions = self.table.render_row_actions(TEST_DATA[0])
        resp = http.HttpResponse(row_actions)
        self.assertContains(resp, 'id="my_table__row_1__action_batch_help"', 1)
        self.assertContains(resp, 'help_text="this is help."', 1)
        self.assertContains(resp, 'value="my_table__batch_help__1"', 1)
        self.assertContains(resp, 'BatchHelp Item', 1)
        # Whole table
        resp = http.HttpResponse(self.table.render())
        self.assertContains(resp, '<table id="my_table"', 1)
        self.assertContains(resp, '<th ', 8)
        self.assertContains(resp, 'id="my_table__row__1"', 1)
        self.assertContains(resp, 'id="my_table__row__2"', 1)
        self.assertContains(resp, 'id="my_table__row__3"', 1)
        update_string = "action=row_update&table=my_table&obj_id="
        self.assertContains(resp, update_string, 3)
        self.assertContains(resp, "data-update-interval", 3)
        # Verify no table heading
        self.assertNotContains(resp, "<h3 class='table_title'")
        # Verify our XSS protection
        self.assertContains(resp, '<a href="http://example.com/" '
                                  'data-tip="click for dialog" '
                                  'data-type="modal dialog" '
                                  'class="link-modal">'
                                  '<strong>evil</strong></a>', 1)
        # Hidden Title = False shows the table title
        self.table._meta.hidden_title = False
        resp = http.HttpResponse(self.table.render())
        self.assertContains(resp, "<h3 class='table_title'", 1)
        # Filter = False hides the search box
        self.table._meta.filter = False
        table_actions = self.table.render_table_actions()
        resp = http.HttpResponse(table_actions)
        self.assertContains(resp, "table_search", 0)
    def test_wrap_list_rendering(self):
        """Cells with wrap_list=True are rendered inside <ul> tags."""
        self.table = MyTableWrapList(self.request, TEST_DATA_7)
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        value_cell = row.cells['value']
        optional_cell = row.cells['optional']
        # Check that each cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        value_cell_rendered = value_cell.render()
        optional_cell_rendered = optional_cell.render()
        resp_name = http.HttpResponse(name_cell_rendered)
        resp_value = http.HttpResponse(value_cell_rendered)
        resp_optional = http.HttpResponse(optional_cell_rendered)
        self.assertContains(resp_name, '<ul>wrapped name</ul>', 1)
        self.assertContains(resp_value, '<ul>wrapped value</ul>', 1)
        self.assertContains(resp_optional, 'not wrapped optional', 1)
        self.assertNotContains(resp_optional, '<ul>')
        self.assertNotContains(resp_optional, '</ul>')
    def test_inline_edit_available_cell_rendering(self):
        """An editable cell (not in edit mode) renders the edit controls."""
        self.table = MyTable(self.request, TEST_DATA_2)
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        # Check if in-line edit is available in the cell,
        # but is not in inline_edit_mod.
        self.assertEqual(True,
                         name_cell.inline_edit_available)
        self.assertEqual(False,
                         name_cell.inline_edit_mod)
        # Check that the cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        resp = http.HttpResponse(name_cell_rendered)
        self.assertContains(resp, '<td', 1)
        self.assertContains(resp, 'inline_edit_available', 1)
        self.assertContains(resp,
                            'data-update-url="?action=cell_update&'
                            'table=my_table&cell_name=name&obj_id=1"',
                            1)
        self.assertContains(resp, 'table_cell_wrapper', 1)
        self.assertContains(resp, 'table_cell_data_wrapper', 1)
        self.assertContains(resp, 'table_cell_action', 1)
        self.assertContains(resp, 'ajax-inline-edit', 1)
    def test_inline_edit_available_not_allowed_cell_rendering(self):
        """A disallowed update action renders no inline-edit controls."""
        self.table = MyTableNotAllowedInlineEdit(self.request, TEST_DATA_2)
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        # Check if in-line edit is available in the cell,
        # but is not in inline_edit_mod.
        self.assertEqual(True,
                         name_cell.inline_edit_available)
        self.assertEqual(False,
                         name_cell.inline_edit_mod)
        # Check that the cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        resp = http.HttpResponse(name_cell_rendered)
        self.assertContains(resp, '<td', 1)
        self.assertContains(resp, 'inline_edit_available', 1)
        self.assertContains(resp,
                            'data-update-url="?action=cell_update&'
                            'table=my_table&cell_name=name&obj_id=1"',
                            1)
        # Edit controls must NOT appear since the action is not allowed.
        self.assertContains(resp, 'table_cell_wrapper', 0)
        self.assertContains(resp, 'table_cell_data_wrapper', 0)
        self.assertContains(resp, 'table_cell_action', 0)
        self.assertContains(resp, 'ajax-inline-edit', 0)
    def test_inline_edit_mod_cell_rendering(self):
        """A cell in inline-edit mode renders the form field and buttons."""
        self.table = MyTable(self.request, TEST_DATA_2)
        name_col = self.table.columns['name']
        name_col.auto = "form_field"
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        name_cell.inline_edit_mod = True
        # Check if in-line edit is available in the cell,
        # and is in inline_edit_mod, also column auto must be
        # set as form_field.
        self.assertEqual(True,
                         name_cell.inline_edit_available)
        self.assertEqual(True,
                         name_cell.inline_edit_mod)
        self.assertEqual('form_field',
                         name_col.auto)
        # Check that the cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        resp = http.HttpResponse(name_cell_rendered)
        self.assertContains(resp,
                            '<input class="test" id="name__1" name="name__1"'
                            ' type="text" value="custom object_1" />',
                            count=1, html=True)
        self.assertContains(resp, '<td', 1)
        self.assertContains(resp, 'inline_edit_available', 1)
        self.assertContains(resp,
                            'data-update-url="?action=cell_update&'
                            'table=my_table&cell_name=name&obj_id=1"',
                            1)
        self.assertContains(resp, 'table_cell_wrapper', 1)
        self.assertContains(resp, 'inline-edit-error', 1)
        self.assertContains(resp, 'inline-edit-form', 1)
        self.assertContains(resp, 'inline-edit-actions', 1)
        self.assertContains(resp, 'inline-edit-submit', 1)
        self.assertContains(resp, 'inline-edit-cancel', 1)
    def test_inline_edit_mod_checkbox_with_label(self):
        """A BooleanField inline edit renders a checkbox with its label."""
        class TempTable(MyTable):
            name = tables.Column(get_name,
                                 verbose_name="Verbose Name",
                                 sortable=True,
                                 form_field=forms.BooleanField(
                                     required=True,
                                     label="Verbose Name"),
                                 form_field_attributes={'class': 'test'},
                                 update_action=MyUpdateAction)

            class Meta(object):
                name = "my_table"
                columns = ('id', 'name', 'value', 'optional', 'status')
        self.table = TempTable(self.request, TEST_DATA_2)
        name_col = self.table.columns['name']
        name_col.auto = "form_field"
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        name_cell.inline_edit_mod = True
        # Check that the cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        resp = http.HttpResponse(name_cell_rendered)
        self.assertContains(resp,
                            '<input checked="checked" class="test" '
                            'id="name__1" name="name__1" type="checkbox" '
                            'value="custom object_1" />',
                            count=1, html=True)
        self.assertContains(resp,
                            '<label class="inline-edit-label" for="name__1">'
                            'Verbose Name</label>',
                            count=1, html=True)
    def test_inline_edit_mod_textarea(self):
        """A Textarea widget inline edit renders a <textarea> element."""
        class TempTable(MyTable):
            name = tables.Column(get_name,
                                 verbose_name="Verbose Name",
                                 sortable=True,
                                 form_field=forms.CharField(
                                     widget=forms.Textarea(),
                                     required=False),
                                 form_field_attributes={'class': 'test'},
                                 update_action=MyUpdateAction)

            class Meta(object):
                name = "my_table"
                columns = ('id', 'name', 'value', 'optional', 'status')
        self.table = TempTable(self.request, TEST_DATA_2)
        name_col = self.table.columns['name']
        name_col.auto = "form_field"
        row = self.table.get_rows()[0]
        name_cell = row.cells['name']
        name_cell.inline_edit_mod = True
        # Check that the cell is rendered correctly.
        name_cell_rendered = name_cell.render()
        resp = http.HttpResponse(name_cell_rendered)
        self.assertContains(resp,
                            '<textarea class="test" cols="40" id="name__1" '
                            'name="name__1" rows="10">\r\ncustom object_1'
                            '</textarea>',
                            count=1, html=True)
    def test_table_actions(self):
        """End-to-end exercise of action parsing, handling and filtering."""
        # Single object action
        action_string = "my_table__delete__1"
        req = self.factory.post('/my_url/', {'action': action_string})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'delete', '1'),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("http://example.com/?ids=1", handled["location"])
        # Batch action (without toggle) conjugation behavior
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, TEST_DATA_3)
        toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[2]
        self.assertEqual("Batch Item", unicode(toggle_action.verbose_name))
        # Batch action with custom help text
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, TEST_DATA_3)
        toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[4]
        self.assertEqual("BatchHelp Item", unicode(toggle_action.verbose_name))
        # Single object toggle action
        # GET page - 'up' to 'down'
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, TEST_DATA_3)
        self.assertEqual(5, len(self.table.get_row_actions(TEST_DATA_3[0])))
        toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[3]
        self.assertEqual("Down Item", unicode(toggle_action.verbose_name))
        # Toggle from status 'up' to 'down'
        # POST page
        action_string = "my_table__toggle__1"
        req = self.factory.post('/my_url/', {'action': action_string})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'toggle', '1'),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("/my_url/", handled["location"])
        self.assertEqual(u"Downed Item: object_1",
                         list(req._messages)[0].message)
        # Toggle from status 'down' to 'up'
        # GET page - 'down' to 'up'
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, TEST_DATA_2)
        self.assertEqual(4, len(self.table.get_row_actions(TEST_DATA_2[0])))
        toggle_action = self.table.get_row_actions(TEST_DATA_2[0])[2]
        self.assertEqual("Up Item", unicode(toggle_action.verbose_name))
        # POST page
        action_string = "my_table__toggle__2"
        req = self.factory.post('/my_url/', {'action': action_string})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'toggle', '2'),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("/my_url/", handled["location"])
        self.assertEqual(u"Upped Item: object_2",
                         list(req._messages)[0].message)
        # Multiple object action
        action_string = "my_table__delete"
        req = self.factory.post('/my_url/', {'action': action_string,
                                             'object_ids': [1, 2]})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'delete', None),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("http://example.com/?ids=1,2", handled["location"])
        # Action with nothing selected
        req = self.factory.post('/my_url/', {'action': action_string})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'delete', None),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertIsNone(handled)
        self.assertEqual("Please select a row before taking that action.",
                         list(req._messages)[0].message)
        # Action with specific id and multiple ids favors single id
        action_string = "my_table__delete__3"
        req = self.factory.post('/my_url/', {'action': action_string,
                                             'object_ids': [1, 2]})
        self.table = MyTable(req, TEST_DATA)
        self.assertEqual(('my_table', 'delete', '3'),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("http://example.com/?ids=3",
                         handled["location"])
        # At least one object in table
        # BatchAction is available
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, TEST_DATA_2)
        self.assertQuerysetEqual(self.table.get_table_actions(),
                                 ['<MyFilterAction: filter>',
                                  '<MyAction: delete>',
                                  '<MyBatchAction: batch>',
                                  '<MyBatchActionWithHelpText: batch_help>'])
        # Zero objects in table
        # BatchAction not available
        req = self.factory.get('/my_url/')
        self.table = MyTable(req, None)
        self.assertQuerysetEqual(self.table.get_table_actions(),
                                 ['<MyFilterAction: filter>',
                                  '<MyAction: delete>'])
        # Filtering
        action_string = "my_table__filter__q"
        req = self.factory.post('/my_url/', {action_string: '2'})
        self.table = MyTable(req, TEST_DATA)
        handled = self.table.maybe_handle()
        self.assertIsNone(handled)
        self.assertQuerysetEqual(self.table.filtered_data,
                                 ['<FakeObject: object_2>'])
        # Ensure filtering respects the request method, e.g. no filter here
        req = self.factory.get('/my_url/', {action_string: '2'})
        self.table = MyTable(req, TEST_DATA)
        handled = self.table.maybe_handle()
        self.assertIsNone(handled)
        self.assertQuerysetEqual(self.table.filtered_data,
                                 ['<FakeObject: object_1>',
                                  '<FakeObject: object_2>',
                                  '<FakeObject: object_3>'])
        # Updating and preemptive actions
        params = {"table": "my_table", "action": "row_update", "obj_id": "1"}
        req = self.factory.get('/my_url/',
                               params,
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.table = MyTable(req)
        resp = self.table.maybe_preempt()
        self.assertEqual(200, resp.status_code)
        # Make sure the data returned differs from the original
        self.assertContains(resp, "my_table__row__1")
        self.assertContains(resp, "status_down")
        # Verify that we don't get a response for a valid action with the
        # wrong method.
        params = {"table": "my_table", "action": "delete", "obj_id": "1"}
        req = self.factory.get('/my_url/', params)
        self.table = MyTable(req)
        resp = self.table.maybe_preempt()
        self.assertIsNone(resp)
        resp = self.table.maybe_handle()
        self.assertIsNone(resp)
        # Verbose names
        table_actions = self.table.get_table_actions()
        self.assertEqual("Filter", unicode(table_actions[0].verbose_name))
        self.assertEqual("Delete Me", unicode(table_actions[1].verbose_name))
        row_actions = self.table.get_row_actions(TEST_DATA[0])
        self.assertEqual("Delete Me", unicode(row_actions[0].verbose_name))
        self.assertEqual("Log In", unicode(row_actions[1].verbose_name))
    def test_server_filtering(self):
        """Server-side filters read their value/field from the session."""
        filter_value_param = "my_table__filter__q"
        filter_field_param = '%s_field' % filter_value_param
        # Server Filtering
        req = self.factory.post('/my_url/')
        req.session[filter_value_param] = '2'
        req.session[filter_field_param] = 'name'
        self.table = MyServerFilterTable(req, TEST_DATA)
        handled = self.table.maybe_handle()
        self.assertIsNone(handled)
        self.assertQuerysetEqual(self.table.filtered_data,
                                 ['<FakeObject: object_2>'])
        # Ensure API filtering does not filter on server, e.g. no filter here
        req = self.factory.post('/my_url/')
        req.session[filter_value_param] = 'up'
        req.session[filter_field_param] = 'status'
        self.table = MyServerFilterTable(req, TEST_DATA)
        handled = self.table.maybe_handle()
        self.assertIsNone(handled)
        self.assertQuerysetEqual(self.table.filtered_data,
                                 ['<FakeObject: object_1>',
                                  '<FakeObject: object_2>',
                                  '<FakeObject: object_3>'])
def test_inline_edit_update_action_get_non_ajax(self):
# Non ajax inline edit request should return None.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {})
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertIsNone(handled)
    def test_inline_edit_update_action_get(self):
        """An AJAX cell-update GET returns the rendered <td> with data."""
        url = ('/my_url/?action=cell_update'
               '&table=my_table&cell_name=name&obj_id=1')
        req = self.factory.get(url, {},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.table = MyTable(req, TEST_DATA_2)
        handled = self.table.maybe_preempt()
        # Checking the response header.
        self.assertEqual(200, handled.status_code)
        # Checking the response content.
        resp = handled
        self.assertContains(resp, '<td', 1)
        self.assertContains(resp, 'inline_edit_available', 1)
        self.assertContains(
            resp,
            'data-update-url="/my_url/?action=cell_update&'
            'table=my_table&cell_name=name&obj_id=1"',
            1)
        self.assertContains(resp, 'table_cell_wrapper', 1)
        self.assertContains(resp, 'table_cell_data_wrapper', 1)
        self.assertContains(resp, 'table_cell_action', 1)
        self.assertContains(resp, 'ajax-inline-edit', 1)
    def test_inline_edit_update_action_get_not_allowed(self):
        """A cell update on a disallowed update action returns 401."""
        # NOTE(review): despite the method name this posts the request;
        # the disallowed action is rejected before the method matters.
        url = ('/my_url/?action=cell_update'
               '&table=my_table&cell_name=name&obj_id=1')
        req = self.factory.post(url, {})
        self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
        handled = self.table.maybe_preempt()
        # Checking the response header.
        self.assertEqual(401, handled.status_code)
    def test_inline_edit_update_action_get_inline_edit_mod(self):
        """An AJAX GET in inline_edit_mod returns the <td> with form field."""
        url = ('/my_url/?inline_edit_mod=true&action=cell_update'
               '&table=my_table&cell_name=name&obj_id=1')
        req = self.factory.get(url, {},
                               HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.table = MyTable(req, TEST_DATA_2)
        handled = self.table.maybe_preempt()
        # Checking the response header.
        self.assertEqual(200, handled.status_code)
        # Checking the response content.
        resp = handled
        self.assertContains(resp,
                            '<input class="test" id="name__1" name="name__1"'
                            ' type="text" value="custom object_1" />',
                            count=1, html=True)
        self.assertContains(resp, '<td', 1)
        self.assertContains(resp, 'inline_edit_available', 1)
        self.assertContains(
            resp,
            'data-update-url="/my_url/?action=cell_update&'
            'table=my_table&cell_name=name&obj_id=1"',
            1)
        self.assertContains(resp, 'table_cell_wrapper', 1)
        self.assertContains(resp, 'inline-edit-error', 1)
        self.assertContains(resp, 'inline-edit-form', 1)
        self.assertContains(resp, 'inline-edit-actions', 1)
        self.assertContains(resp, '<button', 2)
        self.assertContains(resp, 'inline-edit-submit', 1)
        self.assertContains(resp, 'inline-edit-cancel', 1)
def test_inline_edit_update_action_post(self):
# Post request should invoke the cell update table action.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {'name__1': 'test_name'})
self.table = MyTable(req, TEST_DATA_2)
# checking the response header
handled = self.table.maybe_preempt()
self.assertEqual(200, handled.status_code)
    def test_inline_edit_update_action_post_not_allowed(self):
        """A cell-update POST on a disallowed update action returns 401."""
        url = ('/my_url/?action=cell_update'
               '&table=my_table&cell_name=name&obj_id=1')
        req = self.factory.post(url, {'name__1': 'test_name'})
        self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
        # checking the response header
        handled = self.table.maybe_preempt()
        self.assertEqual(401, handled.status_code)
    def test_inline_edit_update_action_post_validation_error(self):
        """A failing form validation yields a 400 JSON error response."""
        # Name column has required validation, sending blank
        # will return error.
        url = ('/my_url/?action=cell_update'
               '&table=my_table&cell_name=name&obj_id=1')
        req = self.factory.post(url, {})
        self.table = MyTable(req, TEST_DATA_2)
        handled = self.table.maybe_preempt()
        # Checking the response header.
        self.assertEqual(400, handled.status_code)
        self.assertEqual(('Content-Type', 'application/json'),
                         handled._headers['content-type'])
        # Checking the response content.
        resp = handled
        self.assertContains(resp,
                            '"message": "This field is required."',
                            count=1, status_code=400)
    def test_column_uniqueness(self):
        """Each table instance gets its own column/table/data-cache objects."""
        table1 = MyTable(self.request)
        table2 = MyTable(self.request)
        # Regression test for launchpad bug 964345.
        self.assertNotEqual(id(table1), id(table2))
        self.assertNotEqual(id(table1.columns), id(table2.columns))
        t1cols = table1.columns.values()
        t2cols = table2.columns.values()
        self.assertEqual(t1cols[0].name, t2cols[0].name)
        self.assertNotEqual(id(t1cols[0]), id(t2cols[0]))
        self.assertNotEqual(id(t1cols[0].table),
                            id(t2cols[0].table))
        self.assertNotEqual(id(t1cols[0].table._data_cache),
                            id(t2cols[0].table._data_cache))
    def test_summation_row(self):
        """Summary row rendering for "average", "sum" and no summation."""
        # Test with the "average" method.
        table = MyTable(self.request, TEST_DATA_4)
        res = http.HttpResponse(table.render())
        self.assertContains(res, '<tr class="summation"', 1)
        self.assertContains(res, '<td>Summary</td>', 1)
        self.assertContains(res, '<td>3.0</td>', 1)
        # Test again with the "sum" method.
        table.columns['value'].summation = "sum"
        res = http.HttpResponse(table.render())
        self.assertContains(res, '<tr class="summation"', 1)
        self.assertContains(res, '<td>Summary</td>', 1)
        self.assertContains(res, '<td>6</td>', 1)
        # One last test with no summation.
        table.columns['value'].summation = None
        table.needs_summary_row = False
        res = http.HttpResponse(table.render())
        self.assertNotContains(res, '<tr class="summation"')
        self.assertNotContains(res, '<td>3.0</td>')
        self.assertNotContains(res, '<td>6</td>')
        # Even if "average" summation method is specified,
        # we have summation fields but no value is provided
        # if the provided data cannot be summed.
        table = MyTable(self.request, TEST_DATA)
        res = http.HttpResponse(table.render())
        self.assertContains(res, '<tr class="summation"')
        self.assertNotContains(res, '<td>3.0</td>')
        self.assertNotContains(res, '<td>6</td>')
    def test_table_action_attributes(self):
        """The <form> wrapper tracks has_actions and needs_form_wrapper."""
        table = MyTable(self.request, TEST_DATA)
        self.assertTrue(table.has_actions)
        self.assertTrue(table.needs_form_wrapper)
        res = http.HttpResponse(table.render())
        self.assertContains(res, "<form")
        # Explicitly disabling the form wrapper suppresses <form>.
        table = MyTable(self.request, TEST_DATA, needs_form_wrapper=False)
        self.assertTrue(table.has_actions)
        self.assertFalse(table.needs_form_wrapper)
        res = http.HttpResponse(table.render())
        self.assertNotContains(res, "<form")
        # A table with no actions never needs the wrapper.
        table = NoActionsTable(self.request, TEST_DATA)
        self.assertFalse(table.has_actions)
        self.assertFalse(table.needs_form_wrapper)
        res = http.HttpResponse(table.render())
        self.assertNotContains(res, "<form")
def test_table_actions_not_allowed_hide_multiselect(self):
table = DisabledActionsTable(self.request, TEST_DATA)
self.assertFalse(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertContains(res, "multi_select_column hidden")
    def test_table_action_object_display_is_none(self):
        """Falls back to "N/A" when get_object_display returns None."""
        action_string = "my_table__toggle__1"
        req = self.factory.post('/my_url/', {'action': action_string})
        self.table = MyTable(req, TEST_DATA)
        # Stub out get_object_display so it reports no display name.
        self.mox.StubOutWithMock(self.table, 'get_object_display')
        self.table.get_object_display(IsA(FakeObject)).AndReturn(None)
        self.mox.ReplayAll()
        self.assertEqual(('my_table', 'toggle', '1'),
                         self.table.parse_action(action_string))
        handled = self.table.maybe_handle()
        self.assertEqual(302, handled.status_code)
        self.assertEqual("/my_url/", handled["location"])
        self.assertEqual(u"Downed Item: N/A",
                         list(req._messages)[0].message)
def test_table_column_can_be_selected(self):
    """End-to-end check of column and cell behavior on a selectable table.

    Covers data transforms, verbose names, hidden columns, link
    rendering, CSS classes, per-row checkbox selectability, status
    handling with status_choices, and that column data is not cached
    across table instances.
    """
    self.table = MyTableSelectable(self.request, TEST_DATA_6)
    # non selectable row
    row = self.table.get_rows()[0]
    # selectable
    row1 = self.table.get_rows()[1]
    row2 = self.table.get_rows()[2]
    id_col = self.table.columns['id']
    name_col = self.table.columns['name']
    value_col = self.table.columns['value']
    # transform
    self.assertEqual('1', row.cells['id'].data)  # Standard attr access
    self.assertEqual('custom object_1', row.cells['name'].data)  # Callable
    # name and verbose_name
    self.assertEqual("Id", unicode(id_col))
    self.assertEqual("Verbose Name", unicode(name_col))
    self.assertIn("sortable", name_col.get_final_attrs().get('class', ""))
    # hidden
    self.assertEqual(True, id_col.hidden)
    self.assertIn("hide", id_col.get_final_attrs().get('class', ""))
    self.assertEqual(False, name_col.hidden)
    self.assertNotIn("hide", name_col.get_final_attrs().get('class', ""))
    # link, link_classes, link_attrs and get_link_url
    self.assertIn('href="http://example.com/"', row.cells['value'].value)
    self.assertIn('class="link-modal"', row.cells['value'].value)
    self.assertIn('data-type="modal dialog"', row.cells['value'].value)
    self.assertIn('data-tip="click for dialog"', row.cells['value'].value)
    self.assertIn('href="/auth/login/"', row.cells['status'].value)
    # classes
    self.assertEqual("green blue sortable anchor normal_column",
                     value_col.get_final_attrs().get('class', ""))
    self.assertQuerysetEqual(row.get_cells(),
                             ['<Cell: multi_select, my_table__row__1>',
                              '<Cell: id, my_table__row__1>',
                              '<Cell: name, my_table__row__1>',
                              '<Cell: value, my_table__row__1>',
                              '<Cell: status, my_table__row__1>',
                              ])
    # can_be_selected = False: the checkbox cell renders empty
    self.assertTrue(row.get_cells()[0].data == "")
    # can_be_selected = True: the checkbox cell renders an input
    self.assertIn('checkbox', row1.get_cells()[0].data)
    # status
    cell_status = row.cells['status'].status
    self.assertEqual('status_down',
                     row.cells['status'].get_status_class(cell_status))
    self.assertEqual(row.cells['status'].data, 'down')
    self.assertEqual(row.cells['status'].attrs,
                     {'title': 'service is not available',
                      'style': 'color:red;cursor:pointer'})
    self.assertEqual(row1.cells['status'].data, 'up')
    self.assertEqual(row1.cells['status'].attrs,
                     {'title': 'service is up and running',
                      'style': 'color:green;cursor:pointer'})
    self.assertEqual(row2.cells['status'].data, 'standby')
    self.assertEqual(row2.cells['status'].attrs, {})
    # Rendered cells carry the title/style attributes exactly once.
    status_rendered = row.cells['status'].render()
    resp = http.HttpResponse(status_rendered)
    self.assertContains(resp, 'style="color:red;cursor:pointer"', 1)
    self.assertContains(resp, 'title="service is not available"', 1)
    status_rendered = row1.cells['status'].render()
    resp = http.HttpResponse(status_rendered)
    self.assertContains(resp, 'style="color:green;cursor:pointer"', 1)
    self.assertContains(resp, 'title="service is up and running"', 1)
    # status_choices
    id_col.status = True
    id_col.status_choices = (('1', False), ('2', True))
    cell_status = row.cells['id'].status
    self.assertEqual(False, cell_status)
    self.assertEqual('status_down',
                     row.cells['id'].get_status_class(cell_status))
    # Ensure data is not cached on the column across table instances
    self.table = MyTable(self.request, TEST_DATA_6)
    row = self.table.get_rows()[0]
    self.assertTrue("down" in row.cells['status'].value)
def test_broken_filter(self):
    """A filter that raises on its input must not break table rendering;
    the assertion shows the unfiltered value ends up in the output."""
    class MyTableBrokenFilter(MyTable):
        # timesince will fail on a non-date value.
        value = tables.Column('value',
                              filters=(defaultfilters.timesince,))

    value = "not_a_date"
    data = TEST_DATA[0]
    data.value = value
    table = MyTableBrokenFilter(self.request, [data])
    resp = http.HttpResponse(table.render())
    # The raw value appears in the rendered output despite the filter.
    self.assertContains(resp, value)
class SingleTableView(table_views.DataTableView):
    """Minimal DataTableView serving MyTable with static test data."""
    table_class = MyTable
    name = "Single Table"
    slug = "single"
    template_name = "horizon/common/_detail_table.html"

    def get_data(self):
        # Static fixture data; no API calls involved.
        return TEST_DATA
class APIFilterTableView(SingleTableView):
    """SingleTableView variant whose table supports server-side filtering."""
    table_class = MyServerFilterTable
class TableWithPermissions(tables.DataTable):
    """Table visible only to users holding the 'horizon.test' permission."""
    id = tables.Column('id')

    class Meta(object):
        name = "table_with_permissions"
        permissions = ('horizon.test',)
class SingleTableViewWithPermissions(SingleTableView):
    """SingleTableView whose table is permission-gated."""
    table_class = TableWithPermissions
class MultiTableView(tables.MultiTableView):
    """View rendering both a permission-gated table and a plain table."""
    table_classes = (TableWithPermissions, MyTable)

    def get_table_with_permissions_data(self):
        return TEST_DATA

    def get_my_table_data(self):
        return TEST_DATA
class DataTableViewTests(test.TestCase):
    """Tests for DataTableView/MultiTableView: context construction,
    permission gating, and server-side filter/session handling."""

    def _prepare_view(self, cls, *args, **kwargs):
        """Instantiates `cls` with a GET request and the test user attached."""
        req = self.factory.get('/my_url/')
        req.user = self.user
        view = cls()
        view.request = req
        view.args = args
        view.kwargs = kwargs
        return view

    def test_data_table_view(self):
        view = self._prepare_view(SingleTableView)
        context = view.get_context_data()
        self.assertEqual(SingleTableView.table_class,
                         context['table'].__class__)

    def test_data_table_view_not_authorized(self):
        view = self._prepare_view(SingleTableViewWithPermissions)
        context = view.get_context_data()
        # Without the required permission the table is left out of context.
        self.assertNotIn('table', context)

    def test_data_table_view_authorized(self):
        view = self._prepare_view(SingleTableViewWithPermissions)
        self.set_permissions(permissions=['test'])
        context = view.get_context_data()
        self.assertIn('table', context)
        self.assertEqual(SingleTableViewWithPermissions.table_class,
                         context['table'].__class__)

    def test_multi_table_view_not_authorized(self):
        view = self._prepare_view(MultiTableView)
        context = view.get_context_data()
        # Only the non-gated table is present.
        self.assertEqual(MyTable, context['my_table_table'].__class__)
        self.assertNotIn('table_with_permissions_table', context)

    def test_multi_table_view_authorized(self):
        view = self._prepare_view(MultiTableView)
        self.set_permissions(permissions=['test'])
        context = view.get_context_data()
        self.assertEqual(MyTable, context['my_table_table'].__class__)
        self.assertEqual(TableWithPermissions,
                         context['table_with_permissions_table'].__class__)

    # POST/session keys used by the server-side filter tests below.
    fil_value_param = "my_table__filter__q"
    fil_field_param = '%s_field' % fil_value_param

    def _test_filter_setup_view(self, request):
        """Builds an APIFilterTableView and runs server-filter handling."""
        view = APIFilterTableView()
        view.request = request
        view.kwargs = {}
        view.handle_server_filter(request)
        return view

    def test_api_filter_table_view(self):
        req = self.factory.post('/my_url/', {self.fil_value_param: 'up',
                                             self.fil_field_param: 'status'})
        req.user = self.user
        view = self._test_filter_setup_view(req)
        data = view.get_data()
        context = view.get_context_data()
        self.assertEqual(context['table'].__class__, MyServerFilterTable)
        self.assertQuerysetEqual(data,
                                 ['<FakeObject: object_1>',
                                  '<FakeObject: object_2>',
                                  '<FakeObject: object_3>'])
        # The submitted filter is persisted into the session.
        self.assertEqual(req.session.get(self.fil_value_param), 'up')
        self.assertEqual(req.session.get(self.fil_field_param), 'status')

    def test_filter_changed_deleted(self):
        # An empty submitted value overwrites the stored filter value.
        req = self.factory.post('/my_url/', {self.fil_value_param: '',
                                             self.fil_field_param: 'status'})
        req.session[self.fil_value_param] = 'up'
        req.session[self.fil_field_param] = 'status'
        req.user = self.user
        view = self._test_filter_setup_view(req)
        context = view.get_context_data()
        self.assertEqual(context['table'].__class__, MyServerFilterTable)
        self.assertEqual(req.session.get(self.fil_value_param), '')
        self.assertEqual(req.session.get(self.fil_field_param), 'status')

    def test_filter_changed_nothing_sent(self):
        # No filter params in the POST: the stored session filter survives.
        req = self.factory.post('/my_url/', {})
        req.session[self.fil_value_param] = 'up'
        req.session[self.fil_field_param] = 'status'
        req.user = self.user
        view = self._test_filter_setup_view(req)
        context = view.get_context_data()
        self.assertEqual(context['table'].__class__, MyServerFilterTable)
        self.assertEqual(req.session.get(self.fil_value_param), 'up')
        self.assertEqual(req.session.get(self.fil_field_param), 'status')

    def test_filter_changed_new_filter_sent(self):
        # A new submitted value replaces the stored session filter.
        req = self.factory.post('/my_url/', {self.fil_value_param: 'down',
                                             self.fil_field_param: 'status'})
        req.session[self.fil_value_param] = 'up'
        req.session[self.fil_field_param] = 'status'
        req.user = self.user
        view = self._test_filter_setup_view(req)
        context = view.get_context_data()
        self.assertEqual(context['table'].__class__, MyServerFilterTable)
        self.assertEqual(req.session.get(self.fil_value_param), 'down')
        self.assertEqual(req.session.get(self.fil_field_param), 'status')
class FormsetTableTests(test.TestCase):
    """Tests for FormsetDataTable, the table/formset hybrid."""

    def test_populate(self):
        """Create a FormsetDataTable and populate it with data."""
        class TableForm(forms.Form):
            name = forms.CharField()
            value = forms.IntegerField()

        TableFormset = forms.formsets.formset_factory(TableForm, extra=0)

        class Table(table_formset.FormsetDataTable):
            formset_class = TableFormset
            name = tables.Column('name')
            value = tables.Column('value')

            class Meta(object):
                name = 'table'

        table = Table(self.request)
        table.data = TEST_DATA_4
        formset = table.get_formset()
        # One form per data row, pre-populated from the row's attributes.
        self.assertEqual(2, len(formset))
        form = formset[0]
        form_data = form.initial
        self.assertEqual('object_1', form_data['name'])
        self.assertEqual(2, form_data['value'])
|
"""This file contains a class to provide a hashing framework to Plaso.
This class contains a base framework class for parsing files.
"""
import abc
class BaseHasher(object):
  """Interface that file-hashing implementations must provide.

  NOTE(review): the abstractmethod decorators have no enforcement effect
  here because the class does not use ABCMeta as its metaclass; the
  methods rely on raising NotImplementedError instead. Kept as-is.
  """

  NAME = u'base_hasher'
  DESCRIPTION = u''

  @abc.abstractmethod
  def Update(self, data):
    """Feeds a block of data into the hasher.

    Calling this repeatedly is equivalent to one call with the
    concatenation of all the blocks.

    Args:
      data: a string of data with which to update the context of the hasher.
    """
    raise NotImplementedError

  @abc.abstractmethod
  def GetBinaryDigest(self):
    """Returns the digest over all Update() data as a binary string."""
    raise NotImplementedError

  @abc.abstractmethod
  def GetStringDigest(self):
    """Returns the digest over all Update() data as a printable
    Unicode string."""
    raise NotImplementedError
|
import json
from tests.api import test_api
class TestAPICompanies(test_api.TestAPI):
    """API tests for the /api/1.0/companies endpoints."""

    def test_get_companies(self):
        """Company listing varies by metric and accepts a name filter."""
        with test_api.make_runtime_storage(
                {'repos': [{'module': 'nova', 'project_type': 'openstack',
                            'organization': 'openstack',
                            'uri': 'git://github.com/openstack/nova.git'},
                           {'module': 'glance', 'project_type': 'openstack',
                            'organization': 'openstack',
                            'uri': 'git://github.com/openstack/glance.git'}],
                 'project_types': [
                     {'id': 'openstack', 'title': 'OpenStack',
                      'modules': ['nova', 'glance']}
                 ],
                 'module_groups': {
                     'openstack': {'module_group_name': 'openstack',
                                   'modules': ['nova', 'glance']},
                     'nova': {'module_group_name': 'nova',
                              'modules': ['nova']},
                     'glance': {'module_group_name': 'glance',
                                'modules': ['glance']},
                 }},
                test_api.make_records(record_type=['commit'],
                                      loc=[10, 20, 30],
                                      module=['glance'],
                                      company_name=['NEC', 'IBM', 'NTT']),
                test_api.make_records(record_type=['review'],
                                      primary_key=['0123456789',
                                                   '9876543210'],
                                      module=['glance'],
                                      company_name=['IBM']),
                test_api.make_records(record_type=['mark'],
                                      review_id=['0123456789', '9876543210'],
                                      module=['glance'],
                                      company_name=['IBM']),
                test_api.make_records(record_type=['mark'],
                                      review_id=['0123456789'],
                                      module=['glance'],
                                      company_name=['NEC'])):

            # commits metric: all three companies with glance commits.
            response = self.app.get('/api/1.0/companies?metric=commits&'
                                    'module=glance')
            companies = json.loads(response.data)['companies']
            self.assertEqual([{'id': 'ibm', 'text': 'IBM'},
                              {'id': 'nec', 'text': 'NEC'},
                              {'id': 'ntt', 'text': 'NTT'}], companies)

            # marks metric: only companies that left review marks.
            response = self.app.get('/api/1.0/companies?metric=marks&'
                                    'module=glance')
            companies = json.loads(response.data)['companies']
            self.assertEqual([{'id': 'ibm', 'text': 'IBM'},
                              {'id': 'nec', 'text': 'NEC'}], companies)

            # company_name=ib narrows the result down to IBM.
            response = self.app.get('/api/1.0/companies?metric=commits&'
                                    'company_name=ib&module=glance')
            companies = json.loads(response.data)['companies']
            self.assertEqual([{'id': 'ibm', 'text': 'IBM'}], companies)

    def test_get_company(self):
        """Single-company lookup returns details or a 404."""
        with test_api.make_runtime_storage(
                {'repos': [{'module': 'nova', 'project_type': 'openstack',
                            'organization': 'openstack',
                            'uri': 'git://github.com/openstack/nova.git'},
                           {'module': 'glance', 'project_type': 'openstack',
                            'organization': 'openstack',
                            'uri': 'git://github.com/openstack/glance.git'}]},
                test_api.make_records(record_type=['commit'],
                                      loc=[10, 20, 30],
                                      module=['glance'],
                                      company_name=['NEC', 'IBM', 'NTT'])):

            # Known company -> its details are returned.
            response = self.app.get('/api/1.0/companies/nec?module=glance')
            company = json.loads(response.data)['company']
            self.assertEqual({'id': 'nec', 'text': 'NEC'}, company)

            # Unknown company -> 404.
            response = self.app.get('/api/1.0/companies/google?module=glance')
            self.assertEqual(404, response.status_code)
|
input = """
num(2).
node(a).
p(N) :- num(N), #count{Y:node(Y)} = N1, <=(N,N1).
"""
output = """
{node(a), num(2)}
"""
|
"""Operations for np_box_mask_list.BoxMaskList.
Example box operations that are supported:
* Areas: compute bounding box areas
* IOU: pairwise intersection-over-union scores
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import range
from object_detection.utils import np_box_list_ops
from object_detection.utils import np_box_mask_list
from object_detection.utils import np_mask_ops
def box_list_to_box_mask_list(boxlist):
  """Converts a BoxList containing 'masks' into a BoxMaskList.

  Args:
    boxlist: An np_box_list.BoxList object.

  Returns:
    An np_box_mask_list.BoxMaskList object.

  Raises:
    ValueError: If boxlist does not contain `masks` as a field.
  """
  if not boxlist.has_field('masks'):
    raise ValueError('boxlist does not contain mask field.')
  converted = np_box_mask_list.BoxMaskList(
      box_data=boxlist.get(),
      mask_data=boxlist.get_field('masks'))
  # Carry over every auxiliary field except 'masks', which was already
  # consumed by the constructor above.
  for field_name in boxlist.get_extra_fields():
    if field_name != 'masks':
      converted.data[field_name] = boxlist.get_field(field_name)
  return converted
def area(box_mask_list):
  """Computes the area of each mask in the list.

  Args:
    box_mask_list: np_box_mask_list.BoxMaskList holding N boxes and masks

  Returns:
    a numpy array of N mask areas, one per mask
  """
  masks = box_mask_list.get_masks()
  return np_mask_ops.area(masks)
def intersection(box_mask_list1, box_mask_list2):
  """Compute pairwise intersection areas between masks.

  Args:
    box_mask_list1: BoxMaskList holding N boxes and masks
    box_mask_list2: BoxMaskList holding M boxes and masks

  Returns:
    a numpy array of pairwise mask intersection areas
  """
  masks1 = box_mask_list1.get_masks()
  masks2 = box_mask_list2.get_masks()
  return np_mask_ops.intersection(masks1, masks2)
def iou(box_mask_list1, box_mask_list2):
  """Computes pairwise intersection-over-union between mask collections.

  Args:
    box_mask_list1: BoxMaskList holding N boxes and masks
    box_mask_list2: BoxMaskList holding M boxes and masks

  Returns:
    a numpy array with shape [N, M] representing pairwise iou scores.
  """
  masks1 = box_mask_list1.get_masks()
  masks2 = box_mask_list2.get_masks()
  return np_mask_ops.iou(masks1, masks2)
def ioa(box_mask_list1, box_mask_list2):
  """Computes pairwise intersection-over-area between mask collections.

  Intersection-over-area (ioa) between two masks mask1 and mask2 is defined
  as their intersection area over mask2's area. Note that ioa is not
  symmetric: IOA(mask1, mask2) != IOA(mask2, mask1).

  Args:
    box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks
    box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks

  Returns:
    a numpy array with shape [N, M] representing pairwise ioa scores.
  """
  masks1 = box_mask_list1.get_masks()
  masks2 = box_mask_list2.get_masks()
  return np_mask_ops.ioa(masks1, masks2)
def gather(box_mask_list, indices, fields=None):
  """Gather boxes from np_box_mask_list.BoxMaskList according to indices.

  By default, gather returns boxes corresponding to the input index list, as
  well as all additional fields stored in the box_mask_list (indexing into
  the first dimension). However one can optionally only gather from a
  subset of fields.

  Args:
    box_mask_list: np_box_mask_list.BoxMaskList holding N boxes
    indices: a 1-d numpy array of type int_
    fields: (optional) list of fields to also gather from. If None (default),
      all fields are gathered from. Pass an empty fields list to only gather
      the box coordinates.

  Returns:
    subbox_mask_list: a np_box_mask_list.BoxMaskList corresponding to the
      subset of the input box_mask_list specified by indices

  Raises:
    ValueError: if specified field is not contained in box_mask_list or if
      the indices are not of type int_
  """
  if fields is not None and 'masks' not in fields:
    # Bug fix: copy before extending so the caller's `fields` list is not
    # mutated as a side effect of this call (the original appended in place).
    fields = list(fields) + ['masks']
  return box_list_to_box_mask_list(
      np_box_list_ops.gather(
          boxlist=box_mask_list, indices=indices, fields=fields))
def sort_by_field(box_mask_list, field,
                  order=np_box_list_ops.SortOrder.DESCEND):
  """Sort boxes and associated fields according to a scalar field.

  A common use case is reordering the boxes according to descending scores.

  Args:
    box_mask_list: BoxMaskList holding N boxes.
    field: A BoxMaskList field for sorting and reordering the BoxMaskList.
    order: (Optional) 'descend' or 'ascend'. Default is descend.

  Returns:
    sorted_box_mask_list: A sorted BoxMaskList with the field in the
      specified order.
  """
  sorted_boxlist = np_box_list_ops.sort_by_field(
      boxlist=box_mask_list, field=field, order=order)
  return box_list_to_box_mask_list(sorted_boxlist)
def non_max_suppression(box_mask_list,
                        max_output_size=10000,
                        iou_threshold=1.0,
                        score_threshold=-10.0):
  """Non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes. In each iteration, the detected bounding box
  with highest score in the available pool is selected.

  Args:
    box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain
      a 'scores' field representing detection scores. All scores belong to
      the same class.
    max_output_size: maximum number of retained boxes
    iou_threshold: intersection over union threshold.
    score_threshold: minimum score threshold. Remove the boxes with scores
      less than this value. Default value is set to -10. A very
      low threshold to pass pretty much all the boxes, unless
      the user sets a different score threshold.

  Returns:
    an np_box_mask_list.BoxMaskList holding M boxes where M <= max_output_size

  Raises:
    ValueError: if 'scores' field does not exist
    ValueError: if threshold is not in [0, 1]
    ValueError: if max_output_size < 0
  """
  if not box_mask_list.has_field('scores'):
    raise ValueError('Field scores does not exist')
  if iou_threshold < 0. or iou_threshold > 1.0:
    raise ValueError('IOU threshold must be in [0, 1]')
  if max_output_size < 0:
    raise ValueError('max_output_size must be bigger than 0.')

  # Drop low-scoring boxes first so the greedy loop below only sees
  # candidates above score_threshold.
  box_mask_list = filter_scores_greater_than(box_mask_list, score_threshold)
  if box_mask_list.num_boxes() == 0:
    return box_mask_list

  # Greedy selection requires boxes in descending score order.
  box_mask_list = sort_by_field(box_mask_list, 'scores')

  # Prevent further computation if NMS is disabled.
  if iou_threshold == 1.0:
    if box_mask_list.num_boxes() > max_output_size:
      selected_indices = np.arange(max_output_size)
      return gather(box_mask_list, selected_indices)
    else:
      return box_mask_list

  masks = box_mask_list.get_masks()
  num_masks = box_mask_list.num_boxes()

  # is_index_valid is True only for all remaining valid boxes,
  is_index_valid = np.full(num_masks, 1, dtype=bool)
  selected_indices = []
  num_output = 0
  for i in range(num_masks):
    if num_output < max_output_size:
      if is_index_valid[i]:
        num_output += 1
        selected_indices.append(i)
        is_index_valid[i] = False
        valid_indices = np.where(is_index_valid)[0]
        if valid_indices.size == 0:
          break
        # Invalidate every remaining mask whose IOU with the newly
        # selected mask exceeds the threshold.
        intersect_over_union = np_mask_ops.iou(
            np.expand_dims(masks[i], axis=0), masks[valid_indices])
        intersect_over_union = np.squeeze(intersect_over_union, axis=0)
        is_index_valid[valid_indices] = np.logical_and(
            is_index_valid[valid_indices],
            intersect_over_union <= iou_threshold)
  return gather(box_mask_list, np.array(selected_indices))
def multi_class_non_max_suppression(box_mask_list, score_thresh, iou_thresh,
                                    max_output_size):
  """Multi-class version of non maximum suppression.

  This op greedily selects a subset of detection bounding boxes, pruning
  away boxes that have high IOU (intersection over union) overlap (> thresh)
  with already selected boxes. It operates independently for each class for
  which scores are provided (via the scores field of the input box_list),
  pruning boxes with score less than a provided threshold prior to
  applying NMS.

  Args:
    box_mask_list: np_box_mask_list.BoxMaskList holding N boxes. Must contain
      a 'scores' field representing detection scores. This scores field is a
      tensor that can be 1 dimensional (in the case of a single class) or
      2-dimensional, in which case we assume that it takes the
      shape [num_boxes, num_classes]. We further assume that this rank is
      known statically and that scores.shape[1] is also known (i.e., the
      number of classes is fixed and known at graph construction time).
    score_thresh: scalar threshold for score (low scoring boxes are removed).
    iou_thresh: scalar threshold for IOU (boxes that that high IOU overlap
      with previously selected boxes are removed).
    max_output_size: maximum number of retained boxes per class.

  Returns:
    a box_mask_list holding M boxes with a rank-1 scores field representing
    corresponding scores for each box with scores sorted in decreasing order
    and a rank-1 classes field representing a class label for each box.

  Raises:
    ValueError: if iou_thresh is not in [0, 1] or if input box_mask_list does
      not have a valid scores field.
  """
  if not 0 <= iou_thresh <= 1.0:
    raise ValueError('thresh must be between 0 and 1')
  if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
    raise ValueError('box_mask_list must be a box_mask_list')
  if not box_mask_list.has_field('scores'):
    raise ValueError('input box_mask_list must have \'scores\' field')

  scores = box_mask_list.get_field('scores')
  if len(scores.shape) == 1:
    # Promote rank-1 scores to a single-class [N, 1] matrix.
    scores = np.reshape(scores, [-1, 1])
  elif len(scores.shape) == 2:
    if scores.shape[1] is None:
      raise ValueError('scores field must have statically defined second '
                       'dimension')
  else:
    raise ValueError('scores field must be of rank 1 or 2')

  num_boxes = box_mask_list.num_boxes()
  num_scores = scores.shape[0]
  num_classes = scores.shape[1]
  if num_boxes != num_scores:
    raise ValueError('Incorrect scores field length: actual vs expected.')

  selected_boxes_list = []
  for class_idx in range(num_classes):
    # Run single-class NMS on a fresh list carrying only this class's scores.
    box_mask_list_and_class_scores = np_box_mask_list.BoxMaskList(
        box_data=box_mask_list.get(),
        mask_data=box_mask_list.get_masks())
    class_scores = np.reshape(scores[0:num_scores, class_idx], [-1])
    box_mask_list_and_class_scores.add_field('scores', class_scores)
    box_mask_list_filt = filter_scores_greater_than(
        box_mask_list_and_class_scores, score_thresh)
    nms_result = non_max_suppression(
        box_mask_list_filt,
        max_output_size=max_output_size,
        iou_threshold=iou_thresh,
        score_threshold=score_thresh)
    # Tag every surviving box with its class label.
    nms_result.add_field(
        'classes',
        np.zeros_like(nms_result.get_field('scores')) + class_idx)
    selected_boxes_list.append(nms_result)
  # Merge per-class survivors and order them by decreasing score.
  selected_boxes = np_box_list_ops.concatenate(selected_boxes_list)
  sorted_boxes = np_box_list_ops.sort_by_field(selected_boxes, 'scores')
  return box_list_to_box_mask_list(boxlist=sorted_boxes)
def prune_non_overlapping_masks(box_mask_list1, box_mask_list2, minoverlap=0.0):
  """Prunes the boxes in list1 that overlap less than thresh with list2.

  For each mask in box_mask_list1, we want its IOA to be more than minoverlap
  with at least one of the masks in box_mask_list2. If it does not, we remove
  it. If the masks are not full size image, we do the pruning based on boxes.

  Args:
    box_mask_list1: np_box_mask_list.BoxMaskList holding N boxes and masks.
    box_mask_list2: np_box_mask_list.BoxMaskList holding M boxes and masks.
    minoverlap: Minimum required overlap between boxes, to count them as
      overlapping.

  Returns:
    A pruned box_mask_list with size [N', 4].
  """
  # Best IOA each mask of list1 achieves against any mask of list2.
  ioa_matrix = ioa(box_mask_list2, box_mask_list1)  # [M, N] tensor
  best_ioa = np.amax(ioa_matrix, axis=0)  # [N] tensor
  keep_inds = np.nonzero(best_ioa >= minoverlap)[0]
  return gather(box_mask_list1, keep_inds)
def concatenate(box_mask_lists, fields=None):
  """Concatenate list of box_mask_lists.

  This op concatenates a list of input box_mask_lists into a larger
  box_mask_list. It also handles concatenation of box_mask_list fields as
  long as the field tensor shapes are equal except for the first dimension.

  Args:
    box_mask_lists: list of np_box_mask_list.BoxMaskList objects
    fields: optional list of fields to also concatenate. By default, all
      fields from the first BoxMaskList in the list are included in the
      concatenation.

  Returns:
    a box_mask_list with number of boxes equal to
      sum([box_mask_list.num_boxes() for box_mask_list in box_mask_list])

  Raises:
    ValueError: if box_mask_lists is invalid (i.e., is not a list, is empty,
      or contains non box_mask_list objects), or if requested fields are not
      contained in all box_mask_lists
  """
  if fields is not None and 'masks' not in fields:
    # Bug fix: copy before extending so the caller's `fields` list is not
    # mutated as a side effect of this call (the original appended in place).
    fields = list(fields) + ['masks']
  return box_list_to_box_mask_list(
      np_box_list_ops.concatenate(boxlists=box_mask_lists, fields=fields))
def filter_scores_greater_than(box_mask_list, thresh):
  """Keeps only the boxes and masks whose score exceeds a given threshold.

  Args:
    box_mask_list: BoxMaskList holding N boxes and masks. Must contain a
      'scores' field representing detection scores.
    thresh: scalar threshold

  Returns:
    a BoxMaskList holding M boxes and masks where M <= N

  Raises:
    ValueError: if box_mask_list not a np_box_mask_list.BoxMaskList object or
      if it does not have a scores field
  """
  if not isinstance(box_mask_list, np_box_mask_list.BoxMaskList):
    raise ValueError('box_mask_list must be a BoxMaskList')
  if not box_mask_list.has_field('scores'):
    raise ValueError('input box_mask_list must have \'scores\' field')
  scores = box_mask_list.get_field('scores')
  rank = len(scores.shape)
  if rank > 2:
    raise ValueError('Scores should have rank 1 or 2')
  if rank == 2 and scores.shape[1] != 1:
    raise ValueError('Scores should have rank 1 or have shape '
                     'consistent with [None, 1]')
  # Flatten the surviving indices to a rank-1 int32 index array.
  keep = np.where(np.greater(scores, thresh))
  high_score_indices = np.reshape(keep, [-1]).astype(np.int32)
  return gather(box_mask_list, high_score_indices)
|
import unittest
import cbs
class AttrSettings():
    """Fixture: settings object exposing values as plain class attributes."""
    PROJECT_NAME = 'fancy_project'
class MethodSettings():
    """Fixture: settings object exposing values as zero-argument methods."""
    def PROJECT_NAME(self):
        return 'fancy_project'
class TestApply(unittest.TestCase):
    """Tests for cbs.apply injecting settings into a target namespace."""

    def test_apply_settings_attr(self):
        # Attribute-style settings land as module-level names.
        cbs.apply(AttrSettings, globals())
        self.assertEqual(PROJECT_NAME, 'fancy_project')

    def test_apply_settings_method(self):
        # Method-style settings are resolved to their return values.
        cbs.apply(MethodSettings, globals())
        self.assertEqual(PROJECT_NAME, 'fancy_project')

    def test_apply_settings_string_reference(self):
        # Settings classes may also be referenced by dotted-path string.
        cbs.apply(__name__ + '.AttrSettings', globals())
        self.assertEqual(PROJECT_NAME, 'fancy_project')

    def test_apply_settings_invalid_string_reference(self):
        self.assertRaises(ValueError, cbs.apply, 'invalid.Class', globals())
|
"""
Given a non-negative integer num, repeatedly add all its digits until the
result has only one digit.
For example:
Given num = 38, the process is like: 3 + 8 = 11, 1 + 1 = 2. Since 2 has only
one digit, return it.
Follow up:
Could you do it without any loop/recursion in O(1) runtime?
"""
class Solution(object):
    def addDigits(self, num):
        """Return the digital root of a non-negative integer.

        Repeatedly sums the digits of ``num`` until a single digit
        remains, e.g. 38 -> 3 + 8 = 11 -> 1 + 1 = 2.

        :type num: int
        :rtype: int
        """
        # Bug fix: the original used `/` and `/=`, which are float
        # division on Python 3, so the loop operated on floats and did
        # not reduce the number correctly. Floor division keeps the
        # arithmetic integral on both Python 2 and 3.
        # (The O(1) closed form is 0 if num == 0 else 1 + (num - 1) % 9.)
        while num // 10:
            digit_sum = 0
            while num > 0:
                digit_sum += num % 10
                num //= 10
            num = digit_sum
        return num
# Quick manual check: 38 -> 11 -> 2.
s = Solution()
print(s.addDigits(38))
|
"""
==========================================================================
Gaussian processes on discrete data structures
==========================================================================
This example illustrates the use of Gaussian processes for regression and
classification tasks on data that are not in fixed-length feature vector form.
This is achieved through the use of kernel functions that operate directly
on discrete structures such as variable-length sequences, trees, and graphs.
Specifically, here the input variables are some gene sequences stored as
variable-length strings consisting of letters 'A', 'T', 'C', and 'G',
while the output variables are floating point numbers and True/False labels
in the regression and classification tasks, respectively.
A kernel between the gene sequences is defined using R-convolution [1]_ by
integrating a binary letter-wise kernel over all pairs of letters among a pair
of strings.
This example will generate three figures.
In the first figure, we visualize the value of the kernel, i.e. the similarity
of the sequences, using a colormap. Brighter color here indicates higher
similarity.
In the second figure, we show some regression result on a dataset of 6
sequences. Here we use the 1st, 2nd, 4th, and 5th sequences as the training set
to make predictions on the 3rd and 6th sequences.
In the third figure, we demonstrate a classification model by training on 6
sequences and make predictions on another 5 sequences. The ground truth here is
simply whether there is at least one 'A' in the sequence. Here the model makes
four correct classifications and fails on one.
.. [1] Haussler, D. (1999). Convolution kernels on discrete structures
(Vol. 646). Technical report, Department of Computer Science, University
of California at Santa Cruz.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process.kernels import Kernel, Hyperparameter
from sklearn.gaussian_process.kernels import GenericKernelMixin
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.base import clone
class SequenceKernel(GenericKernelMixin, Kernel):
    """A minimal (but valid) convolutional kernel for sequences of
    variable lengths."""

    def __init__(self, baseline_similarity=0.5, baseline_similarity_bounds=(1e-5, 1)):
        self.baseline_similarity = baseline_similarity
        self.baseline_similarity_bounds = baseline_similarity_bounds

    @property
    def hyperparameter_baseline_similarity(self):
        return Hyperparameter(
            "baseline_similarity", "numeric", self.baseline_similarity_bounds
        )

    def _f(self, s1, s2):
        """Kernel value between a pair of sequences: sums a letter-wise
        similarity (1 on match, baseline otherwise) over all letter pairs."""
        baseline = self.baseline_similarity
        return sum(
            1.0 if c1 == c2 else baseline for c1 in s1 for c2 in s2
        )

    def _g(self, s1, s2):
        """Kernel derivative between a pair of sequences (counts the
        mismatching letter pairs)."""
        return sum(0.0 if c1 == c2 else 1.0 for c1 in s1 for c2 in s2)

    def __call__(self, X, Y=None, eval_gradient=False):
        if Y is None:
            Y = X
        values = np.array([[self._f(x, y) for y in Y] for x in X])
        if not eval_gradient:
            return values
        gradients = np.array([[[self._g(x, y)] for y in Y] for x in X])
        return (values, gradients)

    def diag(self, X):
        return np.array([self._f(x, x) for x in X])

    def is_stationary(self):
        return False

    def clone_with_theta(self, theta):
        new_kernel = clone(self)
        new_kernel.theta = theta
        return new_kernel
kernel = SequenceKernel()

"""
Sequence similarity matrix under the kernel
===========================================
"""

X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"])
K = kernel(X)
D = kernel.diag(X)

plt.figure(figsize=(8, 5))
# Normalize by each sequence's self-similarity (D^-1/2 K D^-1/2) so the
# plotted values are comparable across sequences of different lengths.
plt.imshow(np.diag(D ** -0.5).dot(K).dot(np.diag(D ** -0.5)))
plt.xticks(np.arange(len(X)), X)
plt.yticks(np.arange(len(X)), X)
plt.title("Sequence similarity under the kernel")

"""
Regression
==========
"""

X = np.array(["AGCT", "AGC", "AACT", "TAA", "AAA", "GAACA"])
Y = np.array([1.0, 1.0, 2.0, 2.0, 3.0, 3.0])

# Train on four of the six sequences, then predict on all of them.
training_idx = [0, 1, 3, 4]
gp = GaussianProcessRegressor(kernel=kernel)
gp.fit(X[training_idx], Y[training_idx])

plt.figure(figsize=(8, 5))
plt.bar(np.arange(len(X)), gp.predict(X), color="b", label="prediction")
plt.bar(training_idx, Y[training_idx], width=0.2, color="r", alpha=1, label="training")
plt.xticks(np.arange(len(X)), X)
plt.title("Regression on sequences")
plt.legend()

"""
Classification
==============
"""

# Ground-truth label: whether the sequence contains at least one 'A'.
X_train = np.array(["AGCT", "CGA", "TAAC", "TCG", "CTTT", "TGCT"])
Y_train = np.array([True, True, True, False, False, False])

gp = GaussianProcessClassifier(kernel)
gp.fit(X_train, Y_train)

X_test = ["AAA", "ATAG", "CTC", "CT", "C"]
Y_test = [True, True, False, False, False]

plt.figure(figsize=(8, 5))
# Training labels.
plt.scatter(
    np.arange(len(X_train)),
    [1.0 if c else -1.0 for c in Y_train],
    s=100,
    marker="o",
    edgecolor="none",
    facecolor=(1, 0.75, 0),
    label="training",
)
# Held-out ground truth.
plt.scatter(
    len(X_train) + np.arange(len(X_test)),
    [1.0 if c else -1.0 for c in Y_test],
    s=100,
    marker="o",
    edgecolor="none",
    facecolor="r",
    label="truth",
)
# Model predictions on the held-out sequences.
plt.scatter(
    len(X_train) + np.arange(len(X_test)),
    [1.0 if c else -1.0 for c in gp.predict(X_test)],
    s=100,
    marker="x",
    edgecolor=(0, 1.0, 0.3),
    linewidth=2,
    label="prediction",
)
plt.xticks(np.arange(len(X_train) + len(X_test)), np.concatenate((X_train, X_test)))
plt.yticks([-1, 1], [False, True])
plt.title("Classification on sequences")
plt.legend()
plt.show()
|
from __future__ import print_function
import os
import sys
import ast
import textwrap
import warnings
import functools
import traceback
import collections
from . import crypt
from .str import format
from .file import mktemp
from . import minisix
from . import internationalization as _
def warn_non_constant_time(f):
    """Decorator that emits a DeprecationWarning every time *f* is called.

    Applied to IterableMap fallbacks that take linear time where a subclass
    could provide a constant-time implementation.
    """
    @functools.wraps(f)
    def newf(*args, **kwargs):
        # This method takes linear time whereas the subclass could probably
        # do it in constant time.
        # Fixed: the message previously read "does provide", inverting its
        # meaning (the warning fires precisely because the subclass does
        # NOT provide an efficient implementation).
        warnings.warn('subclass of IterableMap does not provide an efficient '
                      'implementation of %s' % f.__name__,
                      DeprecationWarning)
        return f(*args, **kwargs)
    return newf
def abbrev(strings, d=None):
    """Returns a dictionary mapping unambiguous abbreviations to full forms."""
    if len(strings) != len(set(strings)):
        raise ValueError(
            'strings given to utils.abbrev have duplicates: %r' % strings)
    if d is None:
        d = {}
    for full in strings:
        # Every non-empty prefix of the string is a candidate abbreviation.
        for end in range(1, len(full) + 1):
            prefix = full[:end]
            if prefix not in d:
                d[prefix] = full
            elif prefix not in strings:
                # Shared by several strings and not itself an entry:
                # mark it ambiguous for removal below.
                d[prefix] = None
    # Drop every prefix that turned out to be ambiguous.
    for ambiguous in [k for (k, v) in d.items() if v is None]:
        del d[ambiguous]
    return d
def timeElapsed(elapsed, short=False, leadingZeroes=False, years=True,
                weeks=True, days=True, hours=True, minutes=True, seconds=True):
    """Given <elapsed> seconds, returns a string with an English description of
    the amount of time passed.  leadingZeroes determines whether 0 days, 0
    hours, etc. will be printed; the others determine what larger time periods
    should be used.

    short=True produces compact units ('2y 3w').  Raises ValueError when the
    elapsed time rounds to nothing under the enabled units.
    """
    ret = []
    before = False
    def Format(s, i):
        # Once any unit has been emitted (ret non-empty), smaller zero-valued
        # units are emitted too, so e.g. '1 hour and 0 minutes' is coherent.
        if i or leadingZeroes or ret:
            if short:
                ret.append('%s%s' % (i, s[0]))
            else:
                # format('%n', ...) is the project-local pluralizing formatter.
                ret.append(format('%n', (i, s)))
    elapsed = int(elapsed)
    # Handle negative times
    if elapsed < 0:
        before = True
        elapsed = -elapsed
    assert years or weeks or days or \
        hours or minutes or seconds, 'One flag must be True'
    if years:
        (yrs, elapsed) = (elapsed // 31536000, elapsed % 31536000)  # 365-day year
        Format(_('year'), yrs)
    if weeks:
        (wks, elapsed) = (elapsed // 604800, elapsed % 604800)
        Format(_('week'), wks)
    if days:
        (ds, elapsed) = (elapsed // 86400, elapsed % 86400)
        Format(_('day'), ds)
    if hours:
        (hrs, elapsed) = (elapsed // 3600, elapsed % 3600)
        Format(_('hour'), hrs)
    if minutes or seconds:
        (mins, secs) = (elapsed // 60, elapsed % 60)
        if leadingZeroes or mins:
            Format(_('minute'), mins)
        if seconds:
            # Force the seconds (possibly zero) to be printed.
            leadingZeroes = True
            Format(_('second'), secs)
    if not ret:
        raise ValueError('Time difference not great enough to be noted.')
    result = ''
    if short:
        result = ' '.join(ret)
    else:
        # format('%L', ...) joins a list in natural English ('a, b and c').
        result = format('%L', ret)
    if before:
        result = _('%s ago') % result
    return result
def findBinaryInPath(s):
    """Return full path of a binary if it's in PATH, otherwise return None.

    Fixed: the PATH separator was hard-coded to ':', which is wrong on
    Windows; os.pathsep is portable.  Also no longer crashes with
    AttributeError when PATH is unset.
    """
    for directory in os.environ.get('PATH', '').split(os.pathsep):
        filename = os.path.join(directory, s)
        if os.path.exists(filename):
            return filename
    return None
def sortBy(f, L):
    """Sort list *L* in place by key function *f*.

    Replaces the manual decorate-sort-undecorate dance: list.sort(key=...)
    is stable (equal keys keep their original order, exactly what the old
    (key, index, elt) tuples guaranteed) and never compares the elements
    themselves, so behavior is identical while avoiding the temporary
    tuple-mutation of L.
    """
    L.sort(key=f)
def saltHash(password, salt=None, hash='sha'):
    """Return 'salt|hexdigest' for *password* using the named hash.

    salt: optional salt; when None an 8-character salt is derived from
          mktemp() (project helper).
    hash: 'sha' or 'md5'.

    Fixed: an unrecognized *hash* previously fell through with ``hasher``
    unbound, raising an opaque UnboundLocalError; now raises ValueError.
    """
    if salt is None:
        salt = mktemp()[:8]
    if hash == 'sha':
        hasher = crypt.sha
    elif hash == 'md5':
        hasher = crypt.md5
    else:
        raise ValueError('Unknown hash algorithm: %r' % hash)
    return '|'.join([salt, hasher((salt + password).encode('utf8')).hexdigest()])
# AST node class for bytes literals: Python 2 represents them as ast.Str,
# Python 3 as a separate ast.Bytes node.
_astStr2 = ast.Str if minisix.PY2 else ast.Bytes
def safeEval(s, namespace={'True': True, 'False': False, 'None': None}):
    """Evaluates s, safely. Useful for turning strings into tuples/lists/etc.
    without unsafely using eval().

    Only numeric/string/bytes literals, lists, tuples, dicts thereof, and
    names present in *namespace* are allowed; anything else raises
    ValueError.
    """
    try:
        node = ast.parse(s)
    except SyntaxError as e:
        raise ValueError('Invalid string: %s.' % e)
    nodes = node.body  # no need to parse a second time
    if not nodes:
        # Empty (or comment-only) input has nothing to evaluate.  The old
        # code returned the nonexistent attribute node.doc, raising an
        # opaque AttributeError.
        raise ValueError(format('Unsafe string: %q', s))
    node = nodes[0]
    def checkNode(node):
        if node.__class__ is ast.Expr:
            node = node.value
        if node.__class__ in (ast.Num,
                              ast.Str,
                              _astStr2):
            return True
        elif node.__class__ in (ast.List,
                                ast.Tuple):
            return all(checkNode(x) for x in node.elts)
        elif node.__class__ is ast.Dict:
            # Fixed: the old code checked node.values twice and never
            # node.keys, so an arbitrary expression used as a dict KEY
            # (e.g. a call) passed the safety check and reached eval().
            return all(checkNode(k) for k in node.keys) and \
                all(checkNode(v) for v in node.values)
        elif node.__class__ is ast.Name:
            return node.id in namespace
        elif sys.version_info[0:2] >= (3, 4) and \
                node.__class__ is ast.NameConstant:
            return True
        else:
            return False
    if checkNode(node):
        # Evaluate against a copy: eval() injects __builtins__ into the
        # globals dict it is given, which would otherwise pollute the
        # shared default namespace across calls.
        context = dict(namespace)
        return eval(s, context, context)
    else:
        raise ValueError(format('Unsafe string: %q', s))
def exnToString(e):
    """Turns a simple exception instance into a string (better than str(e))"""
    message = str(e)
    name = e.__class__.__name__
    return '%s: %s' % (name, message) if message else name
class IterableMap(object):
    """Define .items() in a class and subclass this to get the other iters.
    """
    def items(self):
        # Compatibility shim for old plugins that only define iteritems().
        # Fixed: the old code checked hasattr(self, 'items'), which is
        # always true (this very method exists on every instance), so the
        # PY3 branch called itself and recursed infinitely.
        if minisix.PY3 and hasattr(self, 'iteritems'):
            # For old plugins
            return getattr(self, 'iteritems')() # avoid 2to3
        else:
            raise NotImplementedError()
    __iter__ = items

    def keys(self):
        """Yield the key of every (key, value) pair."""
        for (key, __) in self.items():
            yield key

    def values(self):
        """Yield the value of every (key, value) pair."""
        for (__, value) in self.items():
            yield value

    @warn_non_constant_time
    def __len__(self):
        # Linear-time count; subclasses should override when possible.
        ret = 0
        for __ in self.items():
            ret += 1
        return ret

    @warn_non_constant_time
    def __bool__(self):
        # Truthy iff at least one item exists; stops at the first one.
        for __ in self.items():
            return True
        return False
    __nonzero__ = __bool__
# collections.MutableMapping was removed from the collections top level in
# Python 3.10; import the abc location with a Python 2 fallback.
try:
    import collections.abc as _collections_abc
    _MutableMapping = _collections_abc.MutableMapping
except ImportError:  # Python 2
    _MutableMapping = collections.MutableMapping

class InsensitivePreservingDict(_MutableMapping):
    """Mapping with case-insensitive lookup that preserves the original
    case of keys.  Internally stores {normalized_key: (original_key, value)}.

    Fixes: items() was defined twice (identical duplicate removed); base
    class now resolves on Python 3.10+; fromkeys no longer shadows its
    ``key`` parameter with the loop variable.
    """
    def key(self, s):
        """Override this if you wish."""
        if s is not None:
            s = s.lower()
        return s

    def __init__(self, dict=None, key=None):
        if key is not None:
            self.key = key
        self.data = {}
        if dict is not None:
            self.update(dict)

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, self.data)

    @classmethod
    def fromkeys(cls, keys, s=None, dict=None, key=None):
        """Alternate constructor mapping every key in *keys* to *s*."""
        d = cls(dict=dict, key=key)
        for k in keys:
            d[k] = s
        return d

    def __getitem__(self, k):
        return self.data[self.key(k)][1]

    def __setitem__(self, k, v):
        # Store the caller's original key alongside the value.
        self.data[self.key(k)] = (k, v)

    def __delitem__(self, k):
        del self.data[self.key(k)]

    def __iter__(self):
        # Historical behavior: iterates the *normalized* keys; use .keys()
        # or .items() for the original-case keys.
        return iter(self.data)

    def __len__(self):
        return len(self.data)

    def items(self):
        # (original-case key, value) pairs.
        return self.data.values()

    def keys(self):
        L = []
        for (k, __) in self.items():
            L.append(k)
        return L

    def __reduce__(self):
        return (self.__class__, (dict(self.data.values()),))
class NormalizingSet(set):
    """A set whose elements are passed through normalize() on every
    mutation and on membership tests."""
    def __init__(self, iterable=()):
        normalized = [self.normalize(element) for element in iterable]
        super(NormalizingSet, self).__init__(normalized)

    def normalize(self, x):
        """Identity by default; subclasses override to canonicalize."""
        return x

    def add(self, x):
        return super(NormalizingSet, self).add(self.normalize(x))

    def remove(self, x):
        return super(NormalizingSet, self).remove(self.normalize(x))

    def discard(self, x):
        return super(NormalizingSet, self).discard(self.normalize(x))

    def __contains__(self, x):
        return super(NormalizingSet, self).__contains__(self.normalize(x))
    has_key = __contains__
def stackTrace(frame=None, compact=True):
    """Describe the call stack starting at *frame* (the caller's frame when
    None).  compact=True returns a wrapped one-line '[file|func|lineno]'
    summary; otherwise the traceback module's full formatting.
    """
    if frame is None:
        frame = sys._getframe()
    if not compact:
        return traceback.format_stack(frame)
    parts = []
    current = frame
    while current:
        code = current.f_code
        parts.append('[%s|%s|%s]' % (os.path.basename(code.co_filename),
                                     code.co_name,
                                     current.f_lineno))
        current = current.f_back
    return textwrap.fill(' '.join(parts))
def callTracer(fd=None, basename=True):
    """Return a sys.settrace-style hook that logs every 'call' event to *fd*
    (stdout when None) as 'filename: funcname(lineno)'.
    """
    if fd is None:
        fd = sys.stdout
    def tracer(frame, event, __):
        if event != 'call':
            return
        code = frame.f_code
        location = code.co_filename
        if basename:
            location = os.path.basename(location)
        print('%s: %s(%s)' % (location, code.co_name, frame.f_lineno), file=fd)
    return tracer
|
import numpy as np
import os
def get_b777_engine():
    """Load the B777 engine training data shipped alongside this module.

    Returns (xt, yt, dyt_dxt, xlimits): inputs (Mach, altitude, throttle),
    outputs (thrust, SFC), output derivatives, and the input bounds.
    """
    data_dir = os.path.split(__file__)[0]
    num_points = 12 * 11 * 8  # full Mach x altitude x throttle grid

    def load(name, shape):
        return np.loadtxt(os.path.join(data_dir, name)).reshape(shape)

    xt = load("b777_engine_inputs.dat", (num_points, 3))
    yt = load("b777_engine_outputs.dat", (num_points, 2))
    dyt_dxt = load("b777_engine_derivs.dat", (num_points, 2, 3))

    xlimits = np.array([[0, 0.9], [0, 15], [0, 1.0]])
    return xt, yt, dyt_dxt, xlimits
def plot_b777_engine(xt, yt, limits, interp):
    """Plot B777 engine training data against *interp* predictions.

    Draws a 6x2 grid of 1-D slices: throttle sweeps (rows 0-1), altitude
    sweeps (rows 2-3) and Mach sweeps (rows 4-5), with thrust in the left
    column and SFC in the right, then shows the figure.
    NOTE(review): the ``limits`` argument is unused.
    """
    import numpy as np
    import matplotlib

    matplotlib.use("Agg")
    import matplotlib.pyplot as plt

    # Grid coordinates the training data was sampled on.
    val_M = np.array(
        [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.75, 0.8, 0.85, 0.9]
    )  # 12
    val_h = np.array(
        [0.0, 0.6096, 1.524, 3.048, 4.572, 6.096, 7.62, 9.144, 10.668, 11.8872, 13.1064]
    )  # 11
    val_t = np.array([0.05, 0.2, 0.3, 0.4, 0.6, 0.8, 0.9, 1.0])  # 8

    def get_pts(xt, yt, iy, ind_M=None, ind_h=None, ind_t=None):
        # Select the training points matching the fixed grid indices and
        # return (free coordinate, output iy) scaled for plotting.
        # NOTE(review): if all three indices are given (or none are),
        # ``data`` is never bound and this raises UnboundLocalError --
        # callers below always fix exactly two of the three.
        eps = 1e-5

        if ind_M is not None:
            M = val_M[ind_M]
            keep = abs(xt[:, 0] - M) < eps
            xt = xt[keep, :]
            yt = yt[keep, :]
        if ind_h is not None:
            h = val_h[ind_h]
            keep = abs(xt[:, 1] - h) < eps
            xt = xt[keep, :]
            yt = yt[keep, :]
        if ind_t is not None:
            t = val_t[ind_t]
            keep = abs(xt[:, 2] - t) < eps
            xt = xt[keep, :]
            yt = yt[keep, :]

        if ind_M is None:
            data = xt[:, 0], yt[:, iy]
        elif ind_h is None:
            data = xt[:, 1], yt[:, iy]
        elif ind_t is None:
            data = xt[:, 2], yt[:, iy]

        # Rescale outputs: thrust to 1e6 N, SFC to 1e-4 N/N/s units.
        if iy == 0:
            data = data[0], data[1] / 1e6
        elif iy == 1:
            data = data[0], data[1] / 1e-4

        return data

    num = 100
    x = np.zeros((num, 3))
    lins_M = np.linspace(0.0, 0.9, num)
    lins_h = np.linspace(0.0, 13.1064, num)
    lins_t = np.linspace(0.05, 1.0, num)

    def get_x(ind_M=None, ind_h=None, ind_t=None):
        # Build prediction inputs: each column is a full sweep by default,
        # then clamped to a grid value for whichever indices are given.
        # NOTE(review): truthiness test means index 0 cannot be clamped --
        # callers only pass nonzero/negative indices, so this holds today.
        x = np.zeros((num, 3))
        x[:, 0] = lins_M
        x[:, 1] = lins_h
        x[:, 2] = lins_t
        if ind_M:
            x[:, 0] = val_M[ind_M]
        if ind_h:
            x[:, 1] = val_h[ind_h]
        if ind_t:
            x[:, 2] = val_t[ind_t]
        return x

    nrow = 6
    ncol = 2

    # Fixed slice indices used throughout the grid of subplots.
    ind_M_1 = -2
    ind_M_2 = -5
    ind_t_1 = 1
    ind_t_2 = -1

    plt.close()

    # --------------------

    fig, axs = plt.subplots(6, 2, gridspec_kw={"hspace": 0.5}, figsize=(15, 25))

    axs[0, 0].set_title("M={}".format(val_M[ind_M_1]))
    axs[0, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)")

    axs[0, 1].set_title("M={}".format(val_M[ind_M_1]))
    axs[0, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)")

    axs[1, 0].set_title("M={}".format(val_M[ind_M_2]))
    axs[1, 0].set(xlabel="throttle", ylabel="thrust (x 1e6 N)")

    axs[1, 1].set_title("M={}".format(val_M[ind_M_2]))
    axs[1, 1].set(xlabel="throttle", ylabel="SFC (x 1e-3 N/N/s)")

    # --------------------

    axs[2, 0].set_title("throttle={}".format(val_t[ind_t_1]))
    axs[2, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)")

    axs[2, 1].set_title("throttle={}".format(val_t[ind_t_1]))
    axs[2, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)")

    axs[3, 0].set_title("throttle={}".format(val_t[ind_t_2]))
    axs[3, 0].set(xlabel="altitude (km)", ylabel="thrust (x 1e6 N)")

    axs[3, 1].set_title("throttle={}".format(val_t[ind_t_2]))
    axs[3, 1].set(xlabel="altitude (km)", ylabel="SFC (x 1e-3 N/N/s)")

    # --------------------

    axs[4, 0].set_title("throttle={}".format(val_t[ind_t_1]))
    axs[4, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)")

    axs[4, 1].set_title("throttle={}".format(val_t[ind_t_1]))
    axs[4, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)")

    axs[5, 0].set_title("throttle={}".format(val_t[ind_t_2]))
    axs[5, 0].set(xlabel="Mach number", ylabel="thrust (x 1e6 N)")

    axs[5, 1].set_title("throttle={}".format(val_t[ind_t_2]))
    axs[5, 1].set(xlabel="Mach number", ylabel="SFC (x 1e-3 N/N/s)")

    ind_h_list = [0, 4, 7, 10]
    ind_h_list = [4, 7, 10]  # NOTE(review): immediately overrides the line above

    ind_M_list = [0, 3, 6, 11]
    ind_M_list = [3, 6, 11]  # NOTE(review): immediately overrides the line above

    colors = ["b", "r", "g", "c", "m"]

    # -----------------------------------------------------------------------------

    # Throttle slices
    for k, ind_h in enumerate(ind_h_list):
        ind_M = ind_M_1
        x = get_x(ind_M=ind_M, ind_h=ind_h)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h)
        axs[0, 0].plot(xt_, yt_, "o" + colors[k])
        axs[0, 0].plot(lins_t, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h)
        axs[0, 1].plot(xt_, yt_, "o" + colors[k])
        axs[0, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k])

        ind_M = ind_M_2
        x = get_x(ind_M=ind_M, ind_h=ind_h)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_h=ind_h)
        axs[1, 0].plot(xt_, yt_, "o" + colors[k])
        axs[1, 0].plot(lins_t, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_h=ind_h)
        axs[1, 1].plot(xt_, yt_, "o" + colors[k])
        axs[1, 1].plot(lins_t, y[:, 1] / 1e-4, colors[k])

    # -----------------------------------------------------------------------------

    # Altitude slices
    for k, ind_M in enumerate(ind_M_list):
        ind_t = ind_t_1
        x = get_x(ind_M=ind_M, ind_t=ind_t)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t)
        axs[2, 0].plot(xt_, yt_, "o" + colors[k])
        axs[2, 0].plot(lins_h, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t)
        axs[2, 1].plot(xt_, yt_, "o" + colors[k])
        axs[2, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k])

        ind_t = ind_t_2
        x = get_x(ind_M=ind_M, ind_t=ind_t)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_M=ind_M, ind_t=ind_t)
        axs[3, 0].plot(xt_, yt_, "o" + colors[k])
        axs[3, 0].plot(lins_h, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_M=ind_M, ind_t=ind_t)
        axs[3, 1].plot(xt_, yt_, "o" + colors[k])
        axs[3, 1].plot(lins_h, y[:, 1] / 1e-4, colors[k])

    # -----------------------------------------------------------------------------

    # Mach number slices
    for k, ind_h in enumerate(ind_h_list):
        ind_t = ind_t_1
        x = get_x(ind_t=ind_t, ind_h=ind_h)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t)
        axs[4, 0].plot(xt_, yt_, "o" + colors[k])
        axs[4, 0].plot(lins_M, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t)
        axs[4, 1].plot(xt_, yt_, "o" + colors[k])
        axs[4, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k])

        ind_t = ind_t_2
        x = get_x(ind_t=ind_t, ind_h=ind_h)
        y = interp.predict_values(x)

        xt_, yt_ = get_pts(xt, yt, 0, ind_h=ind_h, ind_t=ind_t)
        axs[5, 0].plot(xt_, yt_, "o" + colors[k])
        axs[5, 0].plot(lins_M, y[:, 0] / 1e6, colors[k])

        xt_, yt_ = get_pts(xt, yt, 1, ind_h=ind_h, ind_t=ind_t)
        axs[5, 1].plot(xt_, yt_, "o" + colors[k])
        axs[5, 1].plot(lins_M, y[:, 1] / 1e-4, colors[k])

    # -----------------------------------------------------------------------------

    # Legends: altitude entries for the throttle/Mach rows, Mach entries for
    # the altitude rows; the extra "" entry labels the prediction line.
    for k in range(2):
        legend_entries = []
        for ind_h in ind_h_list:
            legend_entries.append("h={}".format(val_h[ind_h]))
            legend_entries.append("")

        axs[k, 0].legend(legend_entries)
        axs[k, 1].legend(legend_entries)

        axs[k + 4, 0].legend(legend_entries)
        axs[k + 4, 1].legend(legend_entries)

        legend_entries = []
        for ind_M in ind_M_list:
            legend_entries.append("M={}".format(val_M[ind_M]))
            legend_entries.append("")

        axs[k + 2, 0].legend(legend_entries)
        axs[k + 2, 1].legend(legend_entries)

    plt.show()
|
import os
import click
from .. import multiprocess
def fetch_config(ctx):
    """Return the configuration stored on the click context object.

    Raises click.BadParameter against the parent command's --config option
    when no configuration has been loaded.
    """
    config = ctx.obj and ctx.obj.get('config', None)
    if config:
        return config
    _opts = {o.name: o for o in ctx.parent.command.params}
    raise click.BadParameter('Must specify configuration file',
                             ctx=ctx.parent, param=_opts['config_file'])
def callback_dict(ctx, param, value):
    """Click callback parsing repeated KEY=VALUE arguments into a dict."""
    # TODO: support KEY(operator)VALUE where operator in > >= = <= <
    if not value:
        return {}
    parsed = {}
    for item in value:
        if '=' not in item:
            raise click.BadParameter(
                'Must specify {p} as KEY=VALUE ({v} given)'.format(
                    p=param, v=value))
        key, _, val = item.partition('=')
        parsed[key] = val
    return parsed
def callback_db_table(ctx, param, value):
    """Click callback translating a table name into its table class."""
    from ..db import TABLES
    if value in TABLES:
        return TABLES[value]
    raise click.BadParameter('Unknown table ({}). Available tables are: {}'
                             .format(value, TABLES.keys()), param=param)
def callback_from_stdin(ctx, param, value):
    """Click callback that falls back to stdin when *value* is empty."""
    if value:
        return value
    stdin = click.get_text_stream('stdin')
    if not stdin:
        _type = ('argument' if isinstance(param, click.core.Argument)
                 else 'option')
        raise click.BadParameter(
            'Must specify parameter via stdin or as {}'.format(_type),
            param=param)
    # Lazily strip newlines/spaces from each non-empty stdin line and feed
    # the result back through the parameter's normal processing.
    stripped = (line.strip('\n ') for line in stdin if line)
    return param.process_value(ctx, stripped)
# Reusable click arguments/options shared across the CLI commands.

# Positional: path to an existing, readable configuration file.
arg_config = click.argument(
    'config',
    type=click.Path(readable=True, resolve_path=True, dir_okay=False))

# Positional: zero or more source files/directories.
arg_sources = click.argument(
    'sources',
    nargs=-1,
    type=click.Path(readable=True, resolve_path=True, dir_okay=True))

def arg_db_table(f):
    """Decorator adding a TABLE argument restricted to the known DB tables
    and resolved to a table class via callback_db_table."""
    from ..db import TABLES
    return click.argument(
        'table',
        type=click.Choice(TABLES.keys()),
        callback=callback_db_table
    )(f)

# --config/-C: configuration file; defaults to $TILEZILLA_CONFIG when set.
opt_config_file = click.option(
    '--config', '-C', 'config_file',
    default=lambda: os.environ.get('TILEZILLA_CONFIG', None),
    allow_from_autoenv=True,
    type=click.Path(exists=True, dir_okay=False, resolve_path=True),
    help='Configuration file')

opt_db_filter = click.option(
    '--filter', 'filter_',
    type=str,
    multiple=True,
    help='Filter TABLE by [ATTR][OPERATOR][VALUE...]'
)

opt_db_distinct = click.option(
    '--distinct', type=str, default=None, show_default=True,
    help='Select distinct entries of column specified'
)

opt_db_groupby = click.option(
    '--group_by', type=str, default=None, show_default=True,
    help='Group entries by column specified'
)

opt_db_select = click.option(
    '--select', type=str, default=None, show_default=True, multiple=True,
    help='Print (select) one or more columns'
)

# --co OPTION=VALUE pairs, parsed into a dict by callback_dict.
opt_creation_options = click.option(
    '--co',
    'creation_options',
    metavar='OPTION=VALUE',
    multiple=True,
    default=None,
    show_default=True,
    callback=callback_dict,
    help='Driver creation options')

opt_format = click.option(
    '-of', '--format', 'driver',
    default='GTiff',
    show_default=True,
    help='Output format driver')

opt_nodata = click.option(
    '--ndv',
    type=float,
    default=None,
    show_default=True,
    help='Override source nodata value')

opt_overwrite = click.option(
    '--overwrite',
    is_flag=True,
    help='Overwrite destination file')

def opt_multiprocess_method(f):
    """Decorator adding --parallel-executor/-pe, resolved to an executor.

    NOTE(review): the callback reads ctx.params['njob'], so it relies on
    the --njob option (opt_multiprocess_njob, is_eager=True) having been
    parsed first -- confirm ordering if these options change.
    """
    def _callback(ctx, param, value):
        return multiprocess.get_executor(value, ctx.params['njob'])
    return click.option(
        '--parallel-executor', '-pe',
        'executor',
        type=click.Choice(multiprocess.MULTIPROC_METHODS),
        default='serial',
        callback=_callback,
        help='Method of parallel execution')(f)

# -j/--njob: eager so opt_multiprocess_method's callback can read it.
opt_multiprocess_njob = click.option(
    '-j', '--njob',
    type=int,
    default=1,
    is_eager=True,
    help='Number of jobs for parallel execution'
)
|
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
import six
from bokeh.document import Document
import bokeh.application.handlers.code as bahc
script_adds_two_roots = """
from bokeh.io import curdoc
from bokeh.model import Model
from bokeh.core.properties import Int, Instance
class AnotherModelInTestScript(Model):
bar = Int(1)
class SomeModelInTestScript(Model):
foo = Int(2)
child = Instance(Model)
curdoc().add_root(AnotherModelInTestScript())
curdoc().add_root(SomeModelInTestScript())
"""
class TestCodeHandler(object):
    """Unit tests for bokeh.application.handlers.code.CodeHandler."""

    # Public methods ----------------------------------------------------------

    def test_empty_script(self):
        # A no-op script should leave the document without roots.
        doc = Document()
        handler = bahc.CodeHandler(source="# This script does nothing", filename="path/to/test_filename")
        handler.modify_document(doc)
        if handler.failed:
            raise RuntimeError(handler.error)

        assert not doc.roots

    @pytest.mark.skipif(six.PY3, reason="this test doesn't have a Python 3 equivalent")
    def test_exec_and___future___flags(self):
        # Python 2 only: exec of a print *statement* must work even though
        # the handler compiles with __future__ flags.
        doc = Document()
        handler = bahc.CodeHandler(source="exec(\"print \\\"XXX\\\"\")", filename="path/to/test_filename")
        handler.modify_document(doc)
        if handler.failed:
            raise RuntimeError(handler.error)

        assert not doc.roots

    def test_script_adds_roots(self):
        # The shared fixture source adds exactly two roots via curdoc().
        doc = Document()
        handler = bahc.CodeHandler(source=script_adds_two_roots, filename="path/to/test_filename")
        handler.modify_document(doc)
        if handler.failed:
            raise RuntimeError(handler.error)

        assert len(doc.roots) == 2

    def test_script_bad_syntax(self):
        # Syntax errors are reported on handler.error, not raised.
        doc = Document()
        handler = bahc.CodeHandler(source="This is a syntax error", filename="path/to/test_filename")
        handler.modify_document(doc)
        assert handler.error is not None
        assert 'Invalid syntax' in handler.error

    def test_script_runtime_error(self):
        # Runtime exceptions are likewise captured on handler.error.
        doc = Document()
        handler = bahc.CodeHandler(source="raise RuntimeError('nope')", filename="path/to/test_filename")
        handler.modify_document(doc)
        assert handler.error is not None
        assert 'nope' in handler.error

    def test_script_sys_path(self):
        # The script's directory is prepended to sys.path while it runs.
        doc = Document()
        handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("path: '%s'" % sys.path[0])""", filename="path/to/test_filename")
        handler.modify_document(doc)
        assert handler.error is not None
        assert "path: 'path/to'" in handler.error

    def test_script_argv(self):
        # sys.argv is set to [basename] plus any argv passed to the handler.
        doc = Document()
        handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("argv: %r" % sys.argv)""", filename=str("path/to/test_filename")) # str needed for py2.7
        handler.modify_document(doc)
        assert handler.error is not None
        assert "argv: ['test_filename']" in handler.error

        doc = Document()
        handler = bahc.CodeHandler(source="""import sys; raise RuntimeError("argv: %r" % sys.argv)""",
                                   filename=str("path/to/test_filename"), argv=[10, 20, 30]) # str needed for py2.7
        handler.modify_document(doc)
        assert handler.error is not None
        assert "argv: ['test_filename', 10, 20, 30]" in handler.error

    def test_safe_to_fork(self):
        # Safe to fork only until the code has been executed once.
        doc = Document()
        handler = bahc.CodeHandler(source="# This script does nothing", filename="path/to/test_filename")
        assert handler.safe_to_fork
        handler.modify_document(doc)
        if handler.failed:
            raise RuntimeError(handler.error)
        assert not handler.safe_to_fork
|
"""
A top level harness to run all unit-tests in a specific engine build.
"""
import argparse
import glob
import os
import re
import subprocess
import sys
import time
# Root of the engine checkout: three levels up from this script.
buildroot_dir = os.path.abspath(os.path.join(os.path.realpath(__file__), '..', '..', '..'))
out_dir = os.path.join(buildroot_dir, 'out')
# Golden test resources and font fixtures used by several suites below.
golden_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'resources')
fonts_dir = os.path.join(buildroot_dir, 'flutter', 'third_party', 'txt', 'third_party', 'fonts')
roboto_font_path = os.path.join(fonts_dir, 'Roboto-Regular.ttf')
font_subset_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'font-subset')

# Exclude time-sensitive FML tests, which are unreliable on loaded bots.
fml_unittests_filter = '--gtest_filter=-*TimeSensitiveTest*'
def PrintDivider(char='='):
  """Print a visual divider: a blank gap, four 80-char rows of *char*,
  and another blank gap."""
  print('\n')
  for _ in range(4):
    print(char * 80)
  print('\n')
def RunCmd(cmd, forbidden_output=None, expect_failure=False, env=None, **kwargs):
  """Run *cmd*, raising on unexpected failure or forbidden output.

  Args:
    cmd: Command argv list.
    forbidden_output: Strings that must not appear on stdout/stderr; when
      non-empty, output is captured (PIPE) instead of streamed.
    expect_failure: When True, a non-zero exit code is not an error.
    env: Optional environment for the child process.
    **kwargs: Passed through to subprocess.Popen.

  Raises:
    Exception: On unexpected non-zero exit, or when forbidden output appears.
  """
  # Fixed: the default was a mutable list shared across calls; normalize
  # None to a fresh list instead.
  if forbidden_output is None:
    forbidden_output = []

  command_string = ' '.join(cmd)

  PrintDivider('>')
  print('Running command "%s"' % command_string)

  start_time = time.time()
  # Only capture output when we need to scan it; otherwise stream directly.
  stdout_pipe = sys.stdout if not forbidden_output else subprocess.PIPE
  stderr_pipe = sys.stderr if not forbidden_output else subprocess.PIPE
  process = subprocess.Popen(cmd, stdout=stdout_pipe, stderr=stderr_pipe,
                             env=env, universal_newlines=True, **kwargs)
  stdout, stderr = process.communicate()
  end_time = time.time()

  if process.returncode != 0 and not expect_failure:
    PrintDivider('!')

    print('Failed Command:\n\n%s\n\nExit Code: %d\n' % (command_string, process.returncode))

    if stdout:
      print('STDOUT: \n%s' % stdout)

    if stderr:
      print('STDERR: \n%s' % stderr)

    PrintDivider('!')

    raise Exception('Command "%s" exited with code %d.' % (command_string, process.returncode))

  if stdout or stderr:
    print(stdout)
    print(stderr)

  for forbidden_string in forbidden_output:
    if (stdout and forbidden_string in stdout) or (stderr and forbidden_string in stderr):
      raise Exception('command "%s" contained forbidden string %s' % (command_string, forbidden_string))

  PrintDivider('<')
  print('Command run successfully in %.2f seconds: %s' % (end_time - start_time, command_string))
def IsMac():
  """True when running on macOS."""
  return sys.platform == 'darwin'


def IsLinux():
  """True when running on Linux."""
  return sys.platform.startswith('linux')


def IsWindows():
  """True when running on Windows (native or Cygwin)."""
  return sys.platform.startswith(('cygwin', 'win'))


def ExecutableSuffix():
  """'.exe' on Windows, empty string elsewhere."""
  return '.exe' if IsWindows() else ''
def FindExecutablePath(path):
  """Return *path* if it exists; on Windows also try the .exe then .bat
  variants.  Raises if no candidate exists."""
  if os.path.exists(path):
    return path

  if IsWindows():
    for suffix in ('.exe', '.bat'):
      candidate = path + suffix
      if os.path.exists(candidate):
        return candidate

  raise Exception('Executable %s does not exist!' % path)
def RunEngineExecutable(build_dir, executable_name, filter, flags=[],
    cwd=buildroot_dir, forbidden_output=[], expect_failure=False, coverage=False):
  """Run one engine test executable from *build_dir*.

  filter: optional collection of executable names; anything not in it is
  skipped.  On Linux, unstripped binaries are preferred for better crash
  symbolication; with coverage=True the test is run through the coverage
  wrapper script instead.
  NOTE(review): flags/forbidden_output use mutable defaults -- harmless as
  written (never mutated here) but worth cleaning up.
  """
  if filter is not None and executable_name not in filter:
    print('Skipping %s due to filter.' % executable_name)
    return

  unstripped_exe = os.path.join(build_dir, 'exe.unstripped', executable_name)
  # We cannot run the unstripped binaries directly when coverage is enabled.
  if IsLinux() and os.path.exists(unstripped_exe) and not coverage:
    # Use unstripped executables in order to get better symbolized crash
    # stack traces on Linux.
    executable = unstripped_exe
    # Some tests depend on the EGL/GLES libraries placed in the build directory.
    env = os.environ.copy()
    env['LD_LIBRARY_PATH'] = os.path.join(build_dir, 'lib.unstripped')
  else:
    executable = FindExecutablePath(os.path.join(build_dir, executable_name))
    env = None

  coverage_script = os.path.join(buildroot_dir, 'flutter', 'build', 'generate_coverage.py')

  print('Running %s in %s' % (executable_name, cwd))
  if coverage:
    # The coverage wrapper runs the test itself and writes an HTML report.
    coverage_flags = ['-t', executable, '-o', os.path.join(build_dir, 'coverage', executable_name), '-f', 'html']
    updated_flags = ['--args=%s' % ' '.join(flags)]
    test_command = [ coverage_script ] + coverage_flags + updated_flags
  else:
    test_command = [ executable ] + flags

  if not env:
    env = os.environ.copy()
  env['FLUTTER_BUILD_DIRECTORY'] = build_dir
  try:
    RunCmd(test_command, cwd=cwd, forbidden_output=forbidden_output, expect_failure=expect_failure, env=env)
  except:
    # The LUCI environment may provide a variable containing a directory path
    # for additional output files that will be uploaded to cloud storage.
    # If the command generated a core dump, then run a script to analyze
    # the dump and output a report that will be uploaded.
    luci_test_outputs_path = os.environ.get('FLUTTER_TEST_OUTPUTS_DIR')
    core_path = os.path.join(cwd, 'core')
    if luci_test_outputs_path and os.path.exists(core_path) and os.path.exists(unstripped_exe):
      dump_path = os.path.join(luci_test_outputs_path, '%s_%s.txt' % (executable_name, sys.platform))
      print('Writing core dump analysis to %s' % dump_path)
      subprocess.call([
        os.path.join(buildroot_dir, 'flutter', 'testing', 'analyze_core_dump.sh'),
        buildroot_dir, unstripped_exe, core_path, dump_path,
      ])
      os.unlink(core_path)
    raise
def RunCCTests(build_dir, filter, coverage, capture_core_dump):
  """Run every C++ unit-test suite appropriate for the host platform."""
  print("Running Engine Unit-tests.")

  if capture_core_dump and IsLinux():
    import resource
    # Allow unlimited-size core files so crashes can be analyzed later.
    resource.setrlimit(resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))

  # Repeat and shuffle to flush out order-dependent tests.
  shuffle_flags = [
    "--gtest_repeat=2",
    "--gtest_shuffle",
  ]

  RunEngineExecutable(build_dir, 'client_wrapper_glfw_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'common_cpp_core_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'common_cpp_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'client_wrapper_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'embedder_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'embedder_proctable_unittests', filter, shuffle_flags, coverage=coverage)

  if IsWindows():
    RunEngineExecutable(build_dir, 'flutter_windows_unittests', filter, shuffle_flags, coverage=coverage)

    RunEngineExecutable(build_dir, 'client_wrapper_windows_unittests', filter, shuffle_flags, coverage=coverage)

  # The Gold-comparison test only has the resources it needs on Linux.
  flow_flags = ['--gtest_filter=-PerformanceOverlayLayer.Gold']
  if IsLinux():
    flow_flags = [
      '--golden-dir=%s' % golden_dir,
      '--font-file=%s' % roboto_font_path,
    ]
  RunEngineExecutable(build_dir, 'flow_unittests', filter, flow_flags + shuffle_flags, coverage=coverage)

  # Note: fml_unittests runs without coverage (uses its own time filter).
  RunEngineExecutable(build_dir, 'fml_unittests', filter, [ fml_unittests_filter ] + shuffle_flags)

  RunEngineExecutable(build_dir, 'runtime_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'tonic_unittests', filter, shuffle_flags, coverage=coverage)

  RunEngineExecutable(build_dir, 'no_dart_plugin_registrant_unittests', filter, shuffle_flags, coverage=coverage)
  RunEngineExecutable(build_dir, 'dart_plugin_registrant_unittests', filter, shuffle_flags, coverage=coverage)

  if not IsWindows():
    # https://github.com/flutter/flutter/issues/36295
    RunEngineExecutable(build_dir, 'shell_unittests', filter, shuffle_flags, coverage=coverage)
    # https://github.com/google/googletest/issues/2490
    RunEngineExecutable(build_dir, 'android_external_view_embedder_unittests', filter, shuffle_flags, coverage=coverage)
    RunEngineExecutable(build_dir, 'jni_unittests', filter, shuffle_flags, coverage=coverage)
    RunEngineExecutable(build_dir, 'platform_view_android_delegate_unittests', filter, shuffle_flags, coverage=coverage)

  # The image release unit test can take a while on slow machines.
  RunEngineExecutable(build_dir, 'ui_unittests', filter, shuffle_flags + ['--timeout=90'], coverage=coverage)

  RunEngineExecutable(build_dir, 'testing_unittests', filter, shuffle_flags, coverage=coverage)

  # The accessibility library only supports Mac and Windows.
  if IsMac() or IsWindows():
    RunEngineExecutable(build_dir, 'accessibility_unittests', filter, shuffle_flags, coverage=coverage)

  # These unit-tests are Objective-C and can only run on Darwin.
  if IsMac():
    RunEngineExecutable(build_dir, 'flutter_channels_unittests', filter, shuffle_flags, coverage=coverage)
    RunEngineExecutable(build_dir, 'flutter_desktop_darwin_unittests', filter, shuffle_flags, coverage=coverage)

  # https://github.com/flutter/flutter/issues/36296
  if IsLinux():
    icu_flags = ['--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat')]
    RunEngineExecutable(build_dir, 'txt_unittests', filter, icu_flags + shuffle_flags, coverage=coverage)

  if IsLinux():
    RunEngineExecutable(build_dir, 'flutter_linux_unittests', filter, shuffle_flags, coverage=coverage)

    RunEngineExecutable(build_dir, 'flutter_glfw_unittests', filter, shuffle_flags, coverage=coverage)

  # Impeller tests are only supported on macOS for now.
  if IsMac():
    RunEngineExecutable(build_dir, 'impeller_unittests', filter, shuffle_flags, coverage=coverage)
def RunEngineBenchmarks(build_dir, filter):
  """Run the engine micro-benchmark binaries (no shuffle/repeat flags)."""
  print("Running Engine Benchmarks.")

  icu_flags = ['--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat')]

  RunEngineExecutable(build_dir, 'shell_benchmarks', filter, icu_flags)

  RunEngineExecutable(build_dir, 'fml_benchmarks', filter, icu_flags)

  RunEngineExecutable(build_dir, 'ui_benchmarks', filter, icu_flags)

  if IsLinux():
    RunEngineExecutable(build_dir, 'txt_benchmarks', filter, icu_flags)
def RunDartTest(build_dir, test_packages, dart_file, verbose_dart_snapshot, multithreaded,
                enable_observatory=False, expect_failure=False):
  """Run a precompiled Dart test (<dart_file>.dill) under flutter_tester.

  Asserts that the kernel snapshot exists in *build_dir*.  Extra
  flutter_tester flags may be embedded in the Dart source as
  '// FlutterTesterOptions=...' comments.
  """
  kernel_file_name = os.path.basename(dart_file) + '.dill'
  kernel_file_output = os.path.join(build_dir, 'gen', kernel_file_name)
  error_message = "%s doesn't exist. Please run the build that populates %s" % (
      kernel_file_output, build_dir)
  assert os.path.isfile(kernel_file_output), error_message

  command_args = []
  if not enable_observatory:
    command_args.append('--disable-observatory')

  # Fixed: the file handle was opened and closed manually, leaking it if
  # re.findall/read raised; a with-block guarantees closure.
  with open(dart_file, 'r') as dart_file_contents:
    custom_options = re.findall("// FlutterTesterOptions=(.*)",
                                dart_file_contents.read())
  command_args.extend(custom_options)

  command_args += [
    '--use-test-fonts',
    '--icu-data-file-path=%s' % os.path.join(build_dir, 'icudtl.dat'),
    kernel_file_output,
  ]

  if multithreaded:
    threading = 'multithreaded'
    command_args.insert(0, '--force-multithreading')
  else:
    threading = 'single-threaded'

  print("Running test '%s' using 'flutter_tester' (%s)" % (kernel_file_name, threading))
  # Optimized builds must not log [ERROR lines unless failure is expected.
  forbidden_output = [] if 'unopt' in build_dir or expect_failure else ['[ERROR']
  RunEngineExecutable(build_dir, 'flutter_tester', None, command_args,
                      forbidden_output=forbidden_output, expect_failure=expect_failure)
def EnsureDebugUnoptSkyPackagesAreBuilt():
  """Assert that the host_debug_unopt sky packages exist, with a hint
  listing the commands that build them."""
  variant_out_dir = os.path.join(out_dir, 'host_debug_unopt')

  commands = [
      'gn --runtime-mode debug --unopt --no-lto',
      'ninja -C %s flutter/sky/packages' % variant_out_dir,
  ]
  final_message = '%s doesn\'t exist. Please run the following commands: \n%s' % (
      variant_out_dir, '\n'.join(commands))
  assert os.path.exists(variant_out_dir), final_message
def EnsureIosTestsAreBuilt(ios_out_dir):
  """Builds the engine variant and the test dylib containing the XCTests"""
  tmp_out_dir = os.path.join(out_dir, ios_out_dir)
  ios_test_lib = os.path.join(tmp_out_dir, 'libios_test_flutter.dylib')
  # Commands to print when either expected output is missing.
  commands = [
      'gn --ios --unoptimized --runtime-mode=debug --no-lto --simulator',
      'autoninja -C %s ios_test_flutter' % ios_out_dir,
  ]
  final_message = '%s or %s doesn\'t exist. Please run the following commands: \n%s' % (
      ios_out_dir, ios_test_lib, '\n'.join(commands))
  assert os.path.exists(tmp_out_dir) and os.path.exists(ios_test_lib), final_message
def AssertExpectedXcodeVersion():
  """Checks that the user has a version of Xcode installed.

  Raises an AssertionError when `xcodebuild -version` output does not start
  with an Xcode version line.
  """
  version_output = subprocess.check_output(['xcodebuild', '-version'])
  # FIX: use a raw bytes pattern. "\d" inside a plain (non-raw) bytes
  # literal is an invalid escape sequence, which newer Pythons flag as a
  # Deprecation/SyntaxWarning and will eventually reject.
  match = re.match(br"Xcode (\d+)", version_output)
  message = "Xcode must be installed to run the iOS embedding unit tests"
  assert match, message
def JavaHome():
  """Returns the path to the checked-in OpenJDK home for this platform."""
  script_path = os.path.dirname(os.path.realpath(__file__))
  if IsMac():
    # On macOS the JDK home lives under Contents/Home inside the bundle.
    return os.path.join(script_path, '..', '..', 'third_party', 'java', 'openjdk', 'Contents', 'Home')
  return os.path.join(script_path, '..', '..', 'third_party', 'java', 'openjdk')
def JavaBin():
  """Returns the path to the checked-in java executable."""
  executable = 'java.exe' if IsWindows() else 'java'
  return os.path.join(JavaHome(), 'bin', executable)
def RunJavaTests(filter, android_variant='android_debug_unopt'):
  """Runs the Java JUnit unit tests for the Android embedding.

  Args:
    filter: a single fully-qualified test class name to run, or a falsy
      value to run everything ('*').
    android_variant: engine build variant whose flutter.jar is tested.
  """
  test_runner_dir = os.path.join(buildroot_dir, 'flutter', 'shell', 'platform', 'android', 'test_runner')
  gradle_bin = os.path.join(buildroot_dir, 'gradle', 'bin', 'gradle.bat' if IsWindows() else 'gradle')
  flutter_jar = os.path.join(out_dir, android_variant, 'flutter.jar')
  android_home = os.path.join(buildroot_dir, 'third_party', 'android_tools', 'sdk')
  build_dir = os.path.join(out_dir, android_variant, 'robolectric_tests', 'build')
  gradle_cache_dir = os.path.join(out_dir, android_variant, 'robolectric_tests', '.gradle')
  test_class = filter if filter else '*'
  command = [
    gradle_bin,
    '-Pflutter_jar=%s' % flutter_jar,
    '-Pbuild_dir=%s' % build_dir,
    'testDebugUnitTest',
    '--tests=%s' % test_class,
    # Always re-run tasks and keep Gradle state inside the variant's own
    # cache dirs so runs don't leak state into the user's home directory.
    '--rerun-tasks',
    '--no-daemon',
    '--project-cache-dir=%s' % gradle_cache_dir,
    '--gradle-user-home=%s' % gradle_cache_dir,
  ]
  env = dict(os.environ, ANDROID_HOME=android_home, JAVA_HOME=JavaHome())
  RunCmd(command, cwd=test_runner_dir, env=env)
def RunAndroidTests(android_variant='android_debug_unopt', adb_path=None):
  """Runs the Android native unit tests and the systrace scenario test on a
  connected device via adb.

  Args:
    android_variant: engine build variant whose artifacts are pushed/run.
    adb_path: path to the adb binary; defaults to 'adb' found on $PATH.
  """
  test_runner_name = 'flutter_shell_native_unittests'
  tests_path = os.path.join(out_dir, android_variant, test_runner_name)
  remote_path = '/data/local/tmp'
  remote_tests_path = os.path.join(remote_path, test_runner_name)
  # FIX: compare to None with identity ('== None' uses equality and is
  # non-idiomatic; PEP 8 mandates 'is None').
  if adb_path is None:
    adb_path = 'adb'
  RunCmd([adb_path, 'push', tests_path, remote_path], cwd=buildroot_dir)
  RunCmd([adb_path, 'shell', remote_tests_path])
  systrace_test = os.path.join(buildroot_dir, 'flutter', 'testing',
                               'android_systrace_test.py')
  scenario_apk = os.path.join(out_dir, android_variant, 'firebase_apks',
                              'scenario_app.apk')
  RunCmd([systrace_test, '--adb-path', adb_path, '--apk-path', scenario_apk,
          '--package-name', 'dev.flutter.scenarios',
          '--activity-name', '.TextPlatformViewActivity'])
def RunObjcTests(ios_variant='ios_debug_sim_unopt', test_filter=None):
  """Runs Objective-C XCTest unit tests for the iOS embedding.

  Args:
    ios_variant: engine build variant containing the built test dylib.
    test_filter: optional xcodebuild '-only-testing' specifier.
  """
  AssertExpectedXcodeVersion()
  ios_out_dir = os.path.join(out_dir, ios_variant)
  EnsureIosTestsAreBuilt(ios_out_dir)
  ios_unit_test_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'ios', 'IosUnitTests')
  # Avoid using xcpretty unless the following can be addressed:
  # - Make sure all relevant failure output is printed on a failure.
  # - Make sure that a failing exit code is set for CI.
  # See https://github.com/flutter/flutter/issues/63742
  # The command is a single shell string (adjacent literals concatenate).
  command = [
    'xcodebuild '
    '-sdk iphonesimulator '
    '-scheme IosUnitTests '
    "-destination platform='iOS Simulator,name=iPhone 11' "
    'test '
    'FLUTTER_ENGINE=' + ios_variant
  ]
  # FIX: compare to None with identity ('!= None' uses equality and is
  # non-idiomatic; PEP 8 mandates 'is not None').
  if test_filter is not None:
    command[0] = command[0] + " -only-testing:%s" % test_filter
  RunCmd(command, cwd=ios_unit_test_dir, shell=True)
def RunDartTests(build_dir, filter, verbose_dart_snapshot):
  """Runs the engine's Dart tests, with and without multithreading, and
  (on non-release builds) with and without the observatory enabled.

  Args:
    build_dir: engine build output directory.
    filter: optional list of test file basenames to run; None runs all.
    verbose_dart_snapshot: forwarded to RunDartTest.
  """
  dart_tests_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'dart',)
  # This one is a bit messy. The pubspec.yaml at flutter/testing/dart/pubspec.yaml
  # has dependencies that are hardcoded to point to the sky packages at host_debug_unopt/
  # Before running Dart tests, make sure to run just that target (NOT the whole engine)
  EnsureDebugUnoptSkyPackagesAreBuilt()
  # Now that we have the Sky packages at the hardcoded location, run `dart pub get`.
  RunEngineExecutable(
      build_dir,
      os.path.join('dart-sdk', 'bin', 'dart'),
      None,
      flags=['pub', 'get', '--offline'],
      cwd=dart_tests_dir,
  )
  dart_observatory_tests = glob.glob('%s/observatory/*_test.dart' % dart_tests_dir)
  dart_tests = glob.glob('%s/*_test.dart' % dart_tests_dir)
  test_packages = os.path.join(dart_tests_dir, '.packages')
  # Observatory tests need the observatory, which release builds disable,
  # so they are skipped for release variants.
  if 'release' not in build_dir:
    for dart_test_file in dart_observatory_tests:
      if filter is not None and os.path.basename(dart_test_file) not in filter:
        print("Skipping %s due to filter." % dart_test_file)
      else:
        print("Testing dart file %s with observatory enabled" % dart_test_file)
        # Run each test both multithreaded (True) and single-threaded (False).
        RunDartTest(build_dir, test_packages, dart_test_file, verbose_dart_snapshot, True, True)
        RunDartTest(build_dir, test_packages, dart_test_file, verbose_dart_snapshot, False, True)
  for dart_test_file in dart_tests:
    if filter is not None and os.path.basename(dart_test_file) not in filter:
      print("Skipping %s due to filter." % dart_test_file)
    else:
      print("Testing dart file %s" % dart_test_file)
      RunDartTest(build_dir, test_packages, dart_test_file, verbose_dart_snapshot, True)
      RunDartTest(build_dir, test_packages, dart_test_file, verbose_dart_snapshot, False)
def RunDartSmokeTest(build_dir, verbose_dart_snapshot):
  """Runs a deliberately-failing Dart test (expect_failure=True), once
  multithreaded and once single-threaded."""
  smoke_test = os.path.join(buildroot_dir, "flutter", "testing", "smoke_test_failure", "fail_test.dart")
  test_packages = os.path.join(buildroot_dir, "flutter", "testing", "smoke_test_failure", ".packages")
  for multithreaded in (True, False):
    RunDartTest(build_dir, test_packages, smoke_test, verbose_dart_snapshot,
                multithreaded, expect_failure=True)
def RunFrontEndServerTests(build_dir):
  """Runs every *_test.dart under flutter_frontend_server/test with the
  Dart VM, passing the frontend_server snapshot and patched SDK paths."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'flutter_frontend_server')
  dart = os.path.join('dart-sdk', 'bin', 'dart')
  for dart_test_file in glob.glob('%s/test/*_test.dart' % test_dir):
    opts = [
        '--disable-dart-dev',
        dart_test_file,
        build_dir,
        os.path.join(build_dir, 'gen', 'frontend_server.dart.snapshot'),
        os.path.join(build_dir, 'flutter_patched_sdk'),
    ]
    RunEngineExecutable(build_dir, dart, None, flags=opts, cwd=test_dir)
def RunConstFinderTests(build_dir):
  """Runs the const_finder tool's test with the Dart VM, passing the
  frontend_server snapshot and patched SDK paths."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'const_finder', 'test')
  flags = [
      '--disable-dart-dev',
      os.path.join(test_dir, 'const_finder_test.dart'),
      os.path.join(build_dir, 'gen', 'frontend_server.dart.snapshot'),
      os.path.join(build_dir, 'flutter_patched_sdk'),
  ]
  RunEngineExecutable(build_dir, os.path.join('dart-sdk', 'bin', 'dart'),
                      None, flags=flags, cwd=test_dir)
def RunLitetestTests(build_dir):
  """Runs every *_test.dart under flutter/testing/litetest with the Dart VM."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'litetest')
  dart = os.path.join('dart-sdk', 'bin', 'dart')
  for dart_test_file in glob.glob('%s/test/*_test.dart' % test_dir):
    RunEngineExecutable(
        build_dir,
        dart,
        None,
        flags=['--disable-dart-dev', dart_test_file],
        cwd=test_dir)
def RunBenchmarkTests(build_dir):
  """Runs every *_test.dart under flutter/testing/benchmark with the Dart VM."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'testing', 'benchmark')
  dart = os.path.join('dart-sdk', 'bin', 'dart')
  for dart_test_file in glob.glob('%s/test/*_test.dart' % test_dir):
    RunEngineExecutable(
        build_dir,
        dart,
        None,
        flags=['--disable-dart-dev', dart_test_file],
        cwd=test_dir)
def RunGithooksTests(build_dir):
  """Runs every *_test.dart under flutter/tools/githooks with the Dart VM."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'githooks')
  dart = os.path.join('dart-sdk', 'bin', 'dart')
  for dart_test_file in glob.glob('%s/test/*_test.dart' % test_dir):
    RunEngineExecutable(
        build_dir,
        dart,
        None,
        flags=['--disable-dart-dev', dart_test_file],
        cwd=test_dir)
def RunClangTidyTests(build_dir):
  """Runs every *_test.dart under flutter/tools/clang_tidy with the Dart VM,
  passing the build's compile_commands.json and the flutter source root."""
  test_dir = os.path.join(buildroot_dir, 'flutter', 'tools', 'clang_tidy')
  dart = os.path.join('dart-sdk', 'bin', 'dart')
  extra_args = [
      os.path.join(build_dir, 'compile_commands.json'),
      os.path.join(buildroot_dir, 'flutter'),
  ]
  for dart_test_file in glob.glob('%s/test/*_test.dart' % test_dir):
    RunEngineExecutable(
        build_dir,
        dart,
        None,
        flags=['--disable-dart-dev', dart_test_file] + extra_args,
        cwd=test_dir)
def main():
  """Parses command-line arguments and dispatches the requested test suites."""
  parser = argparse.ArgumentParser()
  all_types = ['engine', 'dart', 'benchmarks', 'java', 'android', 'objc', 'font-subset']
  parser.add_argument('--variant', dest='variant', action='store',
      default='host_debug_unopt', help='The engine build variant to run the tests for.')
  parser.add_argument('--type', type=str, default='all', help='A list of test types, default is "all" (equivalent to "%s")' % (','.join(all_types)))
  parser.add_argument('--engine-filter', type=str, default='',
      help='A list of engine test executables to run.')
  parser.add_argument('--dart-filter', type=str, default='',
      help='A list of Dart test scripts to run.')
  parser.add_argument('--java-filter', type=str, default='',
      help='A single Java test class to run (example: "io.flutter.SmokeTest")')
  parser.add_argument('--android-variant', dest='android_variant', action='store',
      default='android_debug_unopt',
      help='The engine build variant to run java or android tests for')
  parser.add_argument('--ios-variant', dest='ios_variant', action='store',
      default='ios_debug_sim_unopt',
      help='The engine build variant to run objective-c tests for')
  parser.add_argument('--verbose-dart-snapshot', dest='verbose_dart_snapshot', action='store_true',
      default=False, help='Show extra dart snapshot logging.')
  parser.add_argument('--objc-filter', type=str, default=None,
      help='Filter parameter for which objc tests to run (example: "IosUnitTestsTests/SemanticsObjectTest/testShouldTriggerAnnouncement")')
  parser.add_argument('--coverage', action='store_true', default=None,
      help='Generate coverage reports for each unit test framework run.')
  parser.add_argument('--engine-capture-core-dump', dest='engine_capture_core_dump', action='store_true',
      default=False, help='Capture core dumps from crashes of engine tests.')
  parser.add_argument('--use-sanitizer-suppressions', dest='sanitizer_suppressions', action='store_true',
      default=False, help='Provide the sanitizer suppressions lists to the via environment to the tests.')
  parser.add_argument('--adb-path', dest='adb_path', action='store',
      default=None, help='Provide the path of adb used for android tests. By default it looks on $PATH.')
  args = parser.parse_args()
  if args.type == 'all':
    types = all_types
  else:
    types = args.type.split(',')
  build_dir = os.path.join(out_dir, args.variant)
  # NOTE(review): this tests the raw --type string, not the parsed `types`
  # list, so a combined value like 'java,android' still requires build_dir
  # to exist — confirm that is intended.
  if args.type != 'java' and args.type != 'android':
    assert os.path.exists(build_dir), 'Build variant directory %s does not exist!' % build_dir
  if args.sanitizer_suppressions:
    assert IsLinux() or IsMac(), "The sanitizer suppressions flag is only supported on Linux and Mac."
    file_dir = os.path.dirname(os.path.abspath(__file__))
    # Source the suppressions script in a clean shell and import the
    # resulting environment variables into this process.
    command = [
      "env", "-i", "bash",
      "-c", "source {}/sanitizer_suppressions.sh >/dev/null && env".format(file_dir)
    ]
    process = subprocess.Popen(command, stdout=subprocess.PIPE)
    for line in process.stdout:
      key, _, value = line.decode('ascii').strip().partition("=")
      os.environ[key] = value
    process.communicate() # Avoid pipe deadlock while waiting for termination.
  engine_filter = args.engine_filter.split(',') if args.engine_filter else None
  if 'engine' in types:
    RunCCTests(build_dir, engine_filter, args.coverage, args.engine_capture_core_dump)
  if 'dart' in types:
    assert not IsWindows(), "Dart tests can't be run on windows. https://github.com/flutter/flutter/issues/36301."
    dart_filter = args.dart_filter.split(',') if args.dart_filter else None
    RunDartSmokeTest(build_dir, args.verbose_dart_snapshot)
    RunLitetestTests(build_dir)
    RunGithooksTests(build_dir)
    RunClangTidyTests(build_dir)
    RunDartTests(build_dir, dart_filter, args.verbose_dart_snapshot)
    RunConstFinderTests(build_dir)
    RunFrontEndServerTests(build_dir)
  if 'java' in types:
    assert not IsWindows(), "Android engine files can't be compiled on Windows."
    java_filter = args.java_filter
    # RunJavaTests only supports a single exact class name as a filter.
    if ',' in java_filter or '*' in java_filter:
      print('Can only filter JUnit4 tests by single entire class name, eg "io.flutter.SmokeTest". Ignoring filter=' + java_filter)
      java_filter = None
    RunJavaTests(java_filter, args.android_variant)
  if 'android' in types:
    assert not IsWindows(), "Android engine files can't be compiled on Windows."
    RunAndroidTests(args.android_variant, args.adb_path)
  if 'objc' in types:
    assert IsMac(), "iOS embedding tests can only be run on macOS."
    RunObjcTests(args.ios_variant, args.objc_filter)
  # https://github.com/flutter/flutter/issues/36300
  if 'benchmarks' in types and not IsWindows():
    RunBenchmarkTests(build_dir)
    RunEngineBenchmarks(build_dir, engine_filter)
  # font-subset's own test.py is skipped for release/profile host variants.
  variants_to_skip = ['host_release', 'host_profile']
  if ('engine' in types or 'font-subset' in types) and args.variant not in variants_to_skip:
    RunCmd(['python', 'test.py'], cwd=font_subset_dir)

if __name__ == '__main__':
  sys.exit(main())
|
import sys
import exceptions
import vtk
import array
from vtk.test import Testing
class TestDataEncoder(Testing.vtkTest):
    """Round-trips a rendered image through vtkDataEncoder's Base64 PNG
    encoding and compares the decoded result against a baseline image.

    NOTE(review): the file-level `import exceptions` and the use of
    array.array('B', <str>) indicate this test targets Python 2 — confirm
    before porting.
    """
    def testEncodings(self):
        # Render something
        cylinder = vtk.vtkCylinderSource()
        cylinder.SetResolution(8)
        cylinderMapper = vtk.vtkPolyDataMapper()
        cylinderMapper.SetInputConnection(cylinder.GetOutputPort())
        cylinderActor = vtk.vtkActor()
        cylinderActor.SetMapper(cylinderMapper)
        cylinderActor.RotateX(30.0)
        cylinderActor.RotateY(-45.0)
        ren = vtk.vtkRenderer()
        renWin = vtk.vtkRenderWindow()
        renWin.AddRenderer(ren)
        ren.AddActor(cylinderActor)
        renWin.SetSize(200, 200)
        ren.ResetCamera()
        ren.GetActiveCamera().Zoom(1.5)
        renWin.Render()
        # Get a vtkImageData with the rendered output
        w2if = vtk.vtkWindowToImageFilter()
        w2if.SetInput(renWin)
        w2if.SetShouldRerender(1)
        w2if.SetReadFrontBuffer(0)
        w2if.Update()
        imgData = w2if.GetOutput()
        # Use vtkDataEncoder to convert the image to PNG format and Base64 encode it
        encoder = vtk.vtkDataEncoder()
        base64String = encoder.EncodeAsBase64Png(imgData)
        # Now Base64 decode the string back to PNG image data bytes
        # 120000 is assumed to be large enough for the decoded 200x200 PNG
        # — TODO confirm the bound.
        outputBuffer = bytearray(120000)
        inputArray = array.array('B', base64String)
        utils = vtk.vtkIOCore.vtkBase64Utilities()
        actualLength = utils.Decode(inputArray, 120000, outputBuffer)
        # Trim the buffer down to the number of bytes actually decoded.
        outputArray = bytearray(actualLength)
        outputArray[:] = outputBuffer[0:actualLength]
        # And write those bytes to the disk as an actual PNG image file
        with open('TestDataEncoder.png', 'wb') as fd:
            fd.write(outputArray)
        # Create a vtkTesting object and specify a baseline image
        rtTester = vtk.vtkTesting()
        for arg in sys.argv[1:]:
            rtTester.AddArgument(arg)
        rtTester.AddArgument("-V")
        rtTester.AddArgument("TestDataEncoder.png")
        # Perform the image comparison test and print out the result.
        result = rtTester.RegressionTest("TestDataEncoder.png", 0.0)
        if result == 0:
            raise Exception("TestDataEncoder failed.")

if __name__ == "__main__":
    Testing.main([(TestDataEncoder, 'test')])
|
from geode import *
import ast
def test_list():
    """Lists must round-trip through list_convert_test with type and
    contents preserved."""
    original = [1, 2, 3]
    converted = list_convert_test(original)
    assert type(converted) == type(original)
    assert converted == original
def test_set():
    """Sets must keep their type through set_convert_test."""
    original = set([1, 2, 3])
    converted = set_convert_test(original)
    assert type(converted) == type(original)
def test_dict():
    """Dicts must round-trip through dict_convert_test with type and
    contents preserved."""
    original = {1: 'a', 2: 'b', 3: 'c'}
    converted = dict_convert_test(original)
    assert type(converted) == type(original)
    assert converted == original
def test_enum():
    # Enum values must convert back to the *same* singleton object, not an
    # equal copy — hence the identity assertion below.
    a = EnumTestA
    aa = enum_convert_test(a)
    assert a==aa
    assert a is aa
    assert str(a)=='EnumTestA'
def test_str_repr():
    # Checks str_repr_test against Python's own repr for every byte value.
    # NOTE: uses xrange/chr over 0..255, so this file is Python 2 code.
    for i in xrange(256):
        c = chr(i)
        r = str_repr_test(c)
        # The produced repr must evaluate back to the original character.
        assert ast.literal_eval(r)==c
        if c != "'":
            # Apart from the single-quote character (where quoting style may
            # differ), the result should match Python's repr exactly.
            assert repr(c)==r
|
import unittest
import sys
sys.path.insert(0, '..')
sys.path.insert(0, '../.libs')
sys.path.insert(0, '../../../build/bindings/python')
from pywsman import *
class TestAddSelector(unittest.TestCase):
    """Exercises ClientOptions.add_selector with several key/value pairs and
    then performs a dummy invoke against a local wsman endpoint."""
    def test_add_selector(self):
        # Credentials are embedded in the endpoint URL.
        client = Client( "http://wsman:secret@localhost:5985/wsman" )
        assert client is not None
        client.transport().set_auth_method(BASIC_AUTH_STR) # Windows winrm needs this
        options = ClientOptions()
        assert options is not None
        options.set_dump_request()
        uri_query = "k1=v1,k2=v2,k3=v3,k4=v4"
        # Split the query into "k=v" pairs and register each as a selector.
        for selector_str in uri_query.split(","):
            (key_asdf, value_asdf) = selector_str.split("=")
            assert key_asdf is not None
            assert value_asdf is not None
            print( "Calling add_selector(%r, %r)" % (key_asdf, value_asdf) )
            options.add_selector(str(key_asdf), str(value_asdf))
        dummy = client.invoke(options, "http://uri", "method", XmlDoc("dummy doc"))

if __name__ == '__main__':
    unittest.main()
|
from django.test import TestCase
import uuid
from dimagi.utils.parsing import json_format_datetime
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.models import CommCareCase
from casexml.apps.case.util import post_case_blocks
from casexml.apps.case.xml import V2
from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.groups.models import Group
from corehq.apps.users.cases import get_wrapped_owner, get_owning_users, reconcile_ownership, get_owner_id
from corehq.apps.users.models import CommCareUser
class CaseUtilsTestCase(TestCase):
    """Tests the owner-lookup helpers get_wrapped_owner / get_owning_users."""
    def setUp(self):
        self.domain = 'test'

    def test_get_wrapped_user(self):
        # An owner id belonging to a user wraps to a CommCareUser.
        user = CommCareUser.create(self.domain, 'wrapped-user-test', 'password')
        user.save()
        wrapped = get_wrapped_owner(user._id)
        self.assertTrue(isinstance(wrapped, CommCareUser))

    def test_get_wrapped_group(self):
        # An owner id belonging to a group wraps to a Group.
        group = Group(domain=self.domain, name='wrapped-group-test')
        group.save()
        wrapped = get_wrapped_owner(group._id)
        self.assertTrue(isinstance(wrapped, Group))

    def test_owned_by_user(self):
        # A user-owned id resolves to exactly that one user.
        user = CommCareUser.create(self.domain, 'owned-user-test', 'password')
        user.save()
        owners = get_owning_users(user._id)
        self.assertEqual(1, len(owners))
        self.assertEqual(owners[0]._id, user._id)
        self.assertTrue(isinstance(owners[0], CommCareUser))

    def test_owned_by_group(self):
        # A group-owned id resolves to every member of the group.
        ids = []
        for i in range(5):
            user = CommCareUser.create(self.domain, 'owned-group-test-user-%s' % i, 'password')
            user.save()
            ids.append(user._id)
        group = Group(domain=self.domain, name='owned-group-test-group', users=ids)
        group.save()
        owners = get_owning_users(group._id)
        self.assertEqual(5, len(owners))
        ids_back = []
        for o in owners:
            self.assertTrue(isinstance(o, CommCareUser))
            ids_back.append(o._id)
        self.assertEqual(set(ids), set(ids_back))
class CaseReconciliationTestCase(TestCase):
    """Tests each documented behavior of reconcile_ownership (the numbered
    comments in the tests enumerate the cases)."""
    def setUp(self):
        self.domain = "test-domain"
        create_domain(self.domain)
        self.user = CommCareUser.create(self.domain, 'reconciliation-test', 'password')
        self.user.save()
        self.other_user = CommCareUser.create(self.domain, 'reconciliation-test-other', 'password')
        self.other_user.save()

    def tearDown(self):
        self.user.delete()
        self.other_user.delete()

    def _make_case(self, user_id, owner_id, **kwargs):
        # Posts a minimal case block and returns the freshly saved case.
        id = uuid.uuid4().hex
        case_block = CaseBlock(
            create=True,
            case_id=id,
            case_name='Some Name',
            case_type='rectest',
            user_id=user_id,
            owner_id=owner_id,
            version=V2,
            **kwargs
        ).as_xml(format_datetime=json_format_datetime)
        post_case_blocks([case_block], {'domain': self.domain})
        return CommCareCase.get(id)

    def testNoChange(self):
        # 0. If the case is owned by the user, do nothing.
        case = self._make_case(self.user._id, self.user._id)
        self.assertEqual(case.owner_id, self.user._id)
        reconcile_ownership(case, self.user)
        case = CommCareCase.get(case._id)
        self.assertEqual(case.owner_id, self.user._id)

    def testNoOwner(self):
        # 1. If the case has no owner, make the user the owner.
        case = self._make_case('', '')
        self.assertFalse(case.owner_id)
        reconcile_ownership(case, self.user)
        case = CommCareCase.get(case._id)
        self.assertEqual(case.owner_id, self.user._id)

    def testUserToGroup(self):
        # 2. If the case has an owner that is a user create a new case sharing group,
        # add that user and the new user to the case sharing group make the group the owner.
        case = self._make_case(self.other_user._id, self.other_user._id)
        self.assertEqual(self.other_user._id, case.owner_id)
        reconcile_ownership(case, self.user)
        case = CommCareCase.get(case._id)
        self.assertNotEqual(self.other_user._id, case.owner_id)
        owner = get_wrapped_owner(get_owner_id(case))
        self.assertTrue(isinstance(owner, Group))
        self.assertTrue(self.other_user._id in owner.users)
        self.assertTrue(self.user._id in owner.users)
        self.assertTrue(owner.case_sharing)
        self.assertFalse(owner.reporting)

    def testUserAlreadyInGroup(self):
        # 3. If the case has an owner that is a group, and the user is in the group, do nothing.
        group = Group(
            domain=self.domain,
            name='reconciliation test group',
            users=[self.other_user._id, self.user._id],
            case_sharing=True,
        )
        group.save()
        case = self._make_case(self.other_user._id, group._id)
        self.assertEqual(group._id, case.owner_id)
        reconcile_ownership(case, self.user)
        case = CommCareCase.get(case._id)
        self.assertEqual(group._id, case.owner_id)

    def testUserAddedToGroup(self):
        # 4. If the case has an owner that is a group, and the user is not in the group,
        # add the user to the group and the leave the owner untouched.
        group = Group(
            domain=self.domain,
            name='reconciliation test group',
            users=[self.other_user._id],
            case_sharing=True,
        )
        group.save()
        case = self._make_case(self.other_user._id, group._id)
        self.assertEqual(group._id, case.owner_id)
        reconcile_ownership(case, self.user)
        case = CommCareCase.get(case._id)
        self.assertEqual(group._id, case.owner_id)
        group = Group.get(group._id)
        self.assertTrue(self.user._id in group.users)

    def testRecursiveUpdates(self):
        # Recursive reconciliation moves the whole child subtree to the new
        # case-sharing group, but leaves the ancestor (parent_case) untouched
        # (verified by the final assertions).
        parent_case = self._make_case(self.other_user._id, self.other_user._id)
        case = self._make_case(self.other_user._id, self.other_user._id,
                               index={'parent': ('parent-case', parent_case._id)})
        subcase1 = self._make_case(self.other_user._id, self.other_user._id,
                                   index={'parent': ('parent-case', case._id)})
        subcase2 = self._make_case(self.other_user._id, self.other_user._id,
                                   index={'parent': ('parent-case', case._id)})
        subsub1 = self._make_case(self.other_user._id, self.other_user._id,
                                  index={'parent': ('parent-case', subcase1._id)})
        subsub2 = self._make_case(self.other_user._id, self.other_user._id,
                                  index={'parent': ('parent-case', subcase1._id)})
        cases = [case, subcase1, subcase2, subsub1, subsub2]
        for c in cases:
            self.assertEqual(self.other_user._id, c.owner_id)
        reconcile_ownership(case, self.user, recursive=True)
        case = CommCareCase.get(case._id)
        owner = get_wrapped_owner(get_owner_id(case))
        self.assertTrue(isinstance(owner, Group))
        self.assertTrue(self.other_user._id in owner.users)
        self.assertTrue(self.user._id in owner.users)
        self.assertTrue(owner.case_sharing)
        self.assertFalse(owner.reporting)
        for c in cases:
            c = CommCareCase.get(c._id)
            self.assertEqual(owner._id, c.owner_id)
        parent_case = CommCareCase.get(parent_case._id)
        self.assertEqual(self.other_user._id, parent_case.owner_id)
|
import unittest
import thread_cert
# Node ids used as keys in the TOPOLOGY below.
BR = 1
ROUTER = 2
HOST = 3
# A /96 NAT64 route added by the test; per the Case 2 comments it is
# preferred over (and temporarily displaces) the BR's local NAT64 prefix.
SMALL_NAT64_PREFIX = "fd00:00:00:01:00:00::/96"
class Nat64SingleBorderRouter(thread_cert.TestCase):
    """Checks NAT64 prefix publication in Network Data with a single OTBR
    border router, across prefix replacement and BR/ethernet restarts."""
    USE_MESSAGE_FACTORY = False

    # One OTBR leader, one router, and one non-Thread host on the backbone.
    TOPOLOGY = {
        BR: {
            'name': 'BR',
            'allowlist': [ROUTER],
            'is_otbr': True,
            'version': '1.2',
        },
        ROUTER: {
            'name': 'Router',
            'allowlist': [BR],
            'version': '1.2',
        },
        HOST: {
            'name': 'Host',
            'is_host': True
        },
    }

    def test(self):
        br = self.nodes[BR]
        router = self.nodes[ROUTER]
        host = self.nodes[HOST]
        host.start(start_radvd=False)
        self.simulator.go(5)
        br.start()
        self.simulator.go(5)
        self.assertEqual('leader', br.get_state())
        router.start()
        self.simulator.go(5)
        self.assertEqual('router', router.get_state())
        #
        # Case 1. Border router advertises its local NAT64 prefix.
        #
        self.simulator.go(5)
        local_nat64_prefix = br.get_br_nat64_prefix()
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 1)
        nat64_prefix = br.get_netdata_nat64_prefix()[0]
        self.assertEqual(nat64_prefix, local_nat64_prefix)
        #
        # Case 2.
        # User adds a smaller NAT64 prefix and the local prefix is withdrawn.
        # User removes the smaller NAT64 prefix and the local prefix is re-added.
        #
        br.add_route(SMALL_NAT64_PREFIX, stable=False, nat64=True)
        br.register_netdata()
        self.simulator.go(5)
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 1)
        self.assertNotEqual(local_nat64_prefix, br.get_netdata_nat64_prefix()[0])
        br.remove_route(SMALL_NAT64_PREFIX)
        br.register_netdata()
        self.simulator.go(5)
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 1)
        self.assertEqual(local_nat64_prefix, br.get_netdata_nat64_prefix()[0])
        #
        # Case 3. Disable and re-enable border routing on the border router.
        #
        br.disable_br()
        self.simulator.go(5)
        # NAT64 prefix is withdrawn from Network Data.
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 0)
        br.enable_br()
        self.simulator.go(5)
        # Same NAT64 prefix is advertised to Network Data.
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 1)
        self.assertEqual(nat64_prefix, br.get_netdata_nat64_prefix()[0])
        #
        # Case 4. Disable and re-enable ethernet on the border router.
        #
        br.disable_ether()
        self.simulator.go(5)
        # NAT64 prefix is withdrawn from Network Data.
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 0)
        br.enable_ether()
        # Ethernet recovery takes longer than the other transitions.
        self.simulator.go(80)
        # Same NAT64 prefix is advertised to Network Data.
        self.assertEqual(len(br.get_netdata_nat64_prefix()), 1)
        self.assertEqual(nat64_prefix, br.get_netdata_nat64_prefix()[0])

if __name__ == '__main__':
    unittest.main()
|
from logging import Logger
from os.path import join
from django.utils.timezone import now
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.conf import settings
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils.translation import get_language, ugettext_lazy as _
from mptt.models import MPTTModel
from cms import constants
from cms.constants import PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DIRTY, TEMPLATE_INHERITANCE_MAGIC
from cms.exceptions import PublicIsUnmodifiable, LanguageError, PublicVersionNeeded
from cms.models.managers import PageManager, PagePermissionsPermissionManager
from cms.models.metaclasses import PageMetaClass
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.publisher.errors import MpttPublisherCantPublish
from cms.utils import i18n, page as page_utils
from cms.utils.compat import DJANGO_1_5
from cms.utils.compat.dj import force_unicode, python_2_unicode_compatible
from cms.utils.compat.metaclasses import with_metaclass
from cms.utils.conf import get_cms_setting
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.helpers import reversion_register
from menus.menu_pool import menu_pool
@python_2_unicode_compatible
class Page(with_metaclass(PageMetaClass, MPTTModel)):
    """
    A simple hierarchical page model
    """
    LIMIT_VISIBILITY_IN_MENU_CHOICES = (
        (1, _('for logged in users only')),
        (2, _('for anonymous users only')),
    )
    TEMPLATE_DEFAULT = TEMPLATE_INHERITANCE_MAGIC if get_cms_setting('TEMPLATE_INHERITANCE') else get_cms_setting('TEMPLATES')[0][0]
    # Values for the xframe_options field defined below.
    X_FRAME_OPTIONS_INHERIT = 0
    X_FRAME_OPTIONS_DENY = 1
    X_FRAME_OPTIONS_SAMEORIGIN = 2
    X_FRAME_OPTIONS_ALLOW= 3
    template_choices = [(x, _(y)) for x, y in get_cms_setting('TEMPLATES')]
    created_by = models.CharField(_("created by"), max_length=70, editable=False)
    changed_by = models.CharField(_("changed by"), max_length=70, editable=False)
    parent = models.ForeignKey('self', null=True, blank=True, related_name='children', db_index=True)
    creation_date = models.DateTimeField(auto_now_add=True)
    changed_date = models.DateTimeField(auto_now=True)
    publication_date = models.DateTimeField(_("publication date"), null=True, blank=True, help_text=_(
        'When the page should go live. Status must be "Published" for page to go live.'), db_index=True)
    publication_end_date = models.DateTimeField(_("publication end date"), null=True, blank=True,
        help_text=_('When to expire the page. Leave empty to never expire.'),
        db_index=True)
    #
    # Please use toggle_in_navigation() instead of affecting this property
    # directly so that the cms page cache can be invalidated as appropriate.
    #
    in_navigation = models.BooleanField(_("in navigation"), default=True, db_index=True)
    soft_root = models.BooleanField(_("soft root"), db_index=True, default=False,
        help_text=_("All ancestors will not be displayed in the navigation"))
    reverse_id = models.CharField(_("id"), max_length=40, db_index=True, blank=True, null=True, help_text=_(
        "A unique identifier that is used with the page_url templatetag for linking to this page"))
    navigation_extenders = models.CharField(_("attached menu"), max_length=80, db_index=True, blank=True, null=True)
    template = models.CharField(_("template"), max_length=100, choices=template_choices,
        help_text=_('The template used to render the content.'),
        default=TEMPLATE_DEFAULT)
    site = models.ForeignKey(Site, help_text=_('The site the page is accessible at.'), verbose_name=_("site"),
        related_name='djangocms_pages')
    login_required = models.BooleanField(_("login required"), default=False)
    limit_visibility_in_menu = models.SmallIntegerField(_("menu visibility"), default=None, null=True, blank=True,
        choices=LIMIT_VISIBILITY_IN_MENU_CHOICES, db_index=True,
        help_text=_("limit when this page is visible in the menu"))
    is_home = models.BooleanField(editable=False, db_index=True, default=False)
    application_urls = models.CharField(_('application'), max_length=200, blank=True, null=True, db_index=True)
    application_namespace = models.CharField(_('application instance name'), max_length=200, blank=True, null=True)
    # MPTT tree bookkeeping fields (level / left / right / tree id).
    level = models.PositiveIntegerField(db_index=True, editable=False)
    lft = models.PositiveIntegerField(db_index=True, editable=False)
    rght = models.PositiveIntegerField(db_index=True, editable=False)
    tree_id = models.PositiveIntegerField(db_index=True, editable=False)
    # Placeholders (plugins)
    placeholders = models.ManyToManyField(Placeholder, editable=False)
    # Publisher fields
    publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
    # This is misnamed - the one-to-one relation is populated on both ends
    publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
    languages = models.CharField(max_length=255, editable=False, blank=True, null=True)
    # If the draft is loaded from a reversion version save the revision id here.
    revision_id = models.PositiveIntegerField(default=0, editable=False)
    # X Frame Options for clickjacking protection
    xframe_options = models.IntegerField(
        choices=(
            (X_FRAME_OPTIONS_INHERIT, _('Inherit from parent page')),
            (X_FRAME_OPTIONS_DENY, _('Deny')),
            (X_FRAME_OPTIONS_SAMEORIGIN, _('Only this website')),
            (X_FRAME_OPTIONS_ALLOW, _('Allow'))
        ),
        default=getattr(settings, 'CMS_DEFAULT_X_FRAME_OPTIONS', X_FRAME_OPTIONS_INHERIT)
    )
    # Managers
    objects = PageManager()
    permissions = PagePermissionsPermissionManager()

    class Meta:
        permissions = (
            ('view_page', 'Can view page'),
            ('publish_page', 'Can publish page'),
        )
        unique_together = (("publisher_is_draft", "application_namespace"), ("reverse_id", "site", "publisher_is_draft"))
        verbose_name = _('page')
        verbose_name_plural = _('pages')
        ordering = ('tree_id', 'lft')
        app_label = 'cms'

    class PublisherMeta:
        # Fields the publisher must not copy between draft and public pages.
        exclude_fields_append = ['id', 'publisher_is_draft', 'publisher_public',
                                 'publisher_state', 'placeholders', 'lft', 'rght', 'tree_id',
                                 'parent']
def __str__(self):
try:
title = self.get_menu_title(fallback=True)
except LanguageError:
try:
title = self.title_set.all()[0]
except IndexError:
title = None
if title is None:
title = u""
return force_unicode(title)
    def __repr__(self):
        # This is needed to solve the infinite recursion when
        # adding new pages.
        # Deliberately bypasses __str__-based repr; do not "simplify" this.
        return object.__repr__(self)
def is_dirty(self, language):
state = self.get_publisher_state(language)
return state == PUBLISHER_STATE_DIRTY or state == PUBLISHER_STATE_PENDING
def get_absolute_url(self, language=None, fallback=True):
if not language:
language = get_language()
if self.is_home:
return reverse('pages-root')
path = self.get_path(language, fallback) or self.get_slug(language, fallback)
return reverse('pages-details-by-slug', kwargs={"slug": path})
    def move_page(self, target, position='first-child'):
        """
        Called from admin interface when page is moved. Should be used on
        all the places which are changing page position. Used like an interface
        to mptt, but after move is done page_moved signal is fired.

        :param target: Page to move relative to
        :param position: mptt position keyword ('first-child', 'left', 'right', ...)

        Note for issue #1166: url conflicts are handled by updated
        check_title_slugs, overwrite_url on the moved page don't need any check
        as it remains the same regardless of the page position in the tree
        """
        assert self.publisher_is_draft
        # do not mark the page as dirty after page moves
        self._publisher_keep_state = True
        # readability counts :)
        is_inherited_template = self.template == constants.TEMPLATE_INHERITANCE_MAGIC
        # make sure move_page does not break when using INHERIT template
        # and moving to a top level position
        if position in ('left', 'right') and not target.parent and is_inherited_template:
            # freeze the resolved template before losing the ancestor chain
            self.template = self.get_template()
        # moving right of a published target: the public tree normally sits
        # directly right of the draft tree, so move relative to the public copy
        if target.publisher_public_id and position == 'right':
            public = target.publisher_public
            if target.tree_id + 1 == public.tree_id:
                target = target.publisher_public
            else:
                Logger.warn('mptt tree may need rebuilding: run manage.py cms fix-mptt')
        self.move_to(target, position)
        # fire signal
        import cms.signals as cms_signals
        cms_signals.page_moved.send(sender=Page, instance=self)
        self.save()  # always save the page after move, because of publisher
        # check the slugs
        page_utils.check_title_slugs(self)
        # Make sure to update the slug and path of the target page.
        page_utils.check_title_slugs(target)
        if self.publisher_public_id:
            # Ensure we have up to date mptt properties
            public_page = Page.objects.get(pk=self.publisher_public_id)
            # Ensure that the page is in the right position and save it
            public_page = self._publisher_save_public(public_page)
            cms_signals.page_moved.send(sender=Page, instance=public_page)
            public_page.save()
            page_utils.check_title_slugs(public_page)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
    def _copy_titles(self, target, language, published):
        """
        Copy all the titles to a new page (which must have a pk).

        :param target: The page where the new titles should be stored
        :param language: only titles in this language are copied
        :param published: whether the copies are marked published/default
        """
        from .titlemodels import Title
        # language -> pk of titles already on target, so existing copies are
        # overwritten in place instead of duplicated
        old_titles = dict(target.title_set.filter(language=language).values_list('language', 'pk'))
        for title in self.title_set.filter(language=language):
            old_pk = title.pk
            # If an old title exists, overwrite. Otherwise create new
            title.pk = old_titles.pop(title.language, None)
            title.page = target
            title.publisher_is_draft = target.publisher_is_draft
            # the copy points back at its source (draft) title
            title.publisher_public_id = old_pk
            if published:
                title.publisher_state = PUBLISHER_STATE_DEFAULT
            else:
                title.publisher_state = PUBLISHER_STATE_PENDING
            title.published = published
            title._publisher_keep_state = True
            title.save()
            # re-link the source title to the freshly saved copy
            old_title = Title.objects.get(pk=old_pk)
            old_title.publisher_public = title
            old_title.publisher_state = title.publisher_state
            old_title.published = True
            old_title._publisher_keep_state = True
            old_title.save()
            if hasattr(self, 'title_cache'):
                self.title_cache[language] = old_title
        if old_titles:
            # titles left over no longer exist on the source page: drop them
            Title.objects.filter(id__in=old_titles.values()).delete()
    def _copy_contents(self, target, language):
        """
        Copy all the plugins to a new page.

        :param target: The page where the new content should be stored
        :param language: only plugins in this language are replaced
        """
        # TODO: Make this into a "graceful" copy instead of deleting and overwriting
        # copy the placeholders (and plugins on those placeholders!)
        from cms.plugin_pool import plugin_pool
        plugin_pool.set_plugin_meta()
        # wipe target's existing plugins for this language first; '-level'
        # ordering deletes children before their parents
        for plugin in CMSPlugin.objects.filter(placeholder__page=target, language=language).order_by('-level'):
            inst, cls = plugin.get_plugin_instance()
            if inst and getattr(inst, 'cmsplugin_ptr', False):
                inst.cmsplugin_ptr._no_reorder = True
                inst.delete()
            else:
                plugin._no_reorder = True
                plugin.delete()
        for ph in self.placeholders.all():
            plugins = ph.get_plugins_list(language)
            try:
                # reuse target's placeholder with the same slot when present
                ph = target.placeholders.get(slot=ph.slot)
            except Placeholder.DoesNotExist:
                ph.pk = None  # make a new instance
                ph.save()
                target.placeholders.add(ph)
                # update the page copy
            if plugins:
                copy_plugins_to(plugins, ph, no_signals=True)
def _copy_attributes(self, target, clean=False):
"""
Copy all page data to the target. This excludes parent and other values
that are specific to an exact instance.
:param target: The Page to copy the attributes to
"""
if not clean:
target.publication_date = self.publication_date
target.publication_end_date = self.publication_end_date
target.reverse_id = self.reverse_id
target.login_required = self.login_required
target.in_navigation = self.in_navigation
target.soft_root = self.soft_root
target.limit_visibility_in_menu = self.limit_visibility_in_menu
target.navigation_extenders = self.navigation_extenders
target.application_urls = self.application_urls
target.application_namespace = self.application_namespace
target.template = self.template
target.site_id = self.site_id
target.xframe_options = self.xframe_options
    def copy_page(self, target, site, position='first-child',
                  copy_permissions=True):
        """
        Copy a page [ and all its descendants to a new location ]
        Doesn't checks for add page permissions anymore, this is done in PageAdmin.

        Note: public_copy was added in order to enable the creation of a copy
        for creating the public page during the publish operation as it sets the
        publisher_is_draft=False.

        Note for issue #1166: when copying pages there is no need to check for
        conflicting URLs as pages are copied unpublished.

        :param target: page to copy relative to (may be None)
        :param site: Site instance the copies are assigned to
        :param position: mptt insert position relative to ``target``
        :param copy_permissions: also clone PagePermission rows when CMS
            permissions are enabled
        """
        # '-rght' ordering on descendants yields parents before children
        pages = [self] + list(self.get_descendants().order_by('-rght'))
        site_reverse_ids = Page.objects.filter(site=site, reverse_id__isnull=False).values_list('reverse_id', flat=True)
        # ``tree`` holds the chain of candidate parents for pages still to be
        # copied; ``old_pk`` lets children find their copied parent again.
        if target:
            target.old_pk = -1
            if position == "first-child":
                tree = [target]
            elif target.parent_id:
                tree = [target.parent]
            else:
                tree = []
        else:
            tree = []
        if tree:
            tree[0].old_pk = tree[0].pk
        first = True
        # loop over all affected pages (self is included in descendants)
        for page in pages:
            titles = list(page.title_set.all())
            # get all current placeholders (->plugins)
            placeholders = list(page.placeholders.all())
            origin_id = page.id
            # create a copy of this page by setting pk = None (=new instance)
            page.old_pk = page.pk
            page.pk = None
            # reset mptt bookkeeping; recomputed on insert/save
            page.level = None
            page.rght = None
            page.lft = None
            page.tree_id = None
            page.publisher_public_id = None
            page.is_home = False
            # only set reverse_id on standard copy
            if page.reverse_id in site_reverse_ids:
                page.reverse_id = None
            if first:
                first = False
                if tree:
                    page.parent = tree[0]
                else:
                    page.parent = None
                page.insert_at(target, position)
            else:
                count = 1
                found = False
                # locate the copied parent in the candidate chain and trim
                # the chain back to it
                for prnt in tree:
                    if prnt.old_pk == page.parent_id:
                        page.parent = prnt
                        tree = tree[0:count]
                        found = True
                        break
                    count += 1
                if not found:
                    page.parent = None
            tree.append(page)
            page.site = site
            page.save()
            # copy permissions if necessary
            if get_cms_setting('PERMISSION') and copy_permissions:
                from cms.models.permissionmodels import PagePermission
                for permission in PagePermission.objects.filter(page__id=origin_id):
                    permission.pk = None
                    permission.page = page
                    permission.save()
            # copy titles of this page
            draft_titles = {}
            public_titles = []
            for title in titles:
                if title.publisher_is_draft:
                    title.pk = None  # setting pk = None creates a new instance
                    title.page = page
                    if title.publisher_public_id:
                        # remember the draft copy keyed by the pk of its public
                        # counterpart, so the public pass below can re-link it
                        draft_titles[title.publisher_public_id] = title
                        title.publisher_public = None
                    # create slug-copy for standard copy
                    title.published = False
                    title.slug = page_utils.get_available_slug(title)
                    title.save()
                else:
                    public_titles.append(title)
            # second pass: copy public titles and restore draft/public links
            for title in public_titles:
                draft_title = draft_titles[title.pk]
                title.pk = None  # setting pk = None creates a new instance
                title.page = page
                title.slug = page_utils.get_available_slug(title)
                title.publisher_public_id = draft_title.pk
                title.save()
                draft_title.publisher_public = title
                draft_title.save()
            # copy the placeholders (and plugins on those placeholders!)
            for ph in placeholders:
                plugins = ph.get_plugins_list()
                try:
                    # reuse an existing placeholder with the same slot
                    ph = page.placeholders.get(slot=ph.slot)
                except Placeholder.DoesNotExist:
                    ph.pk = None  # make a new instance
                    ph.save()
                    page.placeholders.add(ph)
                if plugins:
                    copy_plugins_to(plugins, ph)
        # invalidate the menu for this site
        menu_pool.clear(site_id=site.pk)
    def save(self, no_signals=False, commit=True, **kwargs):
        """
        Args:
            no_signals: bypass the normal save (and its signals) by calling
                save_base directly — mptt workaround
            commit: True if model should be really saved
        """
        # delete template cache
        if hasattr(self, '_template_cache'):
            delattr(self, '_template_cache')
        created = not bool(self.pk)
        # normalize empty strings to NULL so the unique constraints behave
        if self.reverse_id == "":
            self.reverse_id = None
        if self.application_namespace == "":
            self.application_namespace = None
        from cms.utils.permissions import _thread_locals
        # attribute the change to the current request's user when available
        user = getattr(_thread_locals, "user", None)
        if user:
            try:
                self.changed_by = str(user)
            except AttributeError:
                # AnonymousUser may not have USERNAME_FIELD
                self.changed_by = "anonymous"
        else:
            self.changed_by = "script"
        if created:
            self.created_by = self.changed_by
        if commit:
            if no_signals:  # ugly hack because of mptt
                if DJANGO_1_5:
                    # Django < 1.6 save_base still accepted ``cls``
                    self.save_base(cls=self.__class__, **kwargs)
                else:
                    self.save_base(**kwargs)
            else:
                super(Page, self).save(**kwargs)
    def save_base(self, *args, **kwargs):
        """Overridden save_base. If an instance is draft, and was changed, mark
        it as dirty.

        Dirty flag is used for changed nodes identification when publish method
        takes place. After current changes are published, state is set back to
        PUBLISHER_STATE_DEFAULT (in publish method).
        """
        keep_state = getattr(self, '_publisher_keep_state', None)
        # only drafts track dirtiness; _publisher_keep_state suppresses it for
        # internal saves (moves, publishing) that must not re-dirty titles
        if self.publisher_is_draft and not keep_state and self.is_new_dirty():
            self.title_set.all().update(publisher_state=PUBLISHER_STATE_DIRTY)
        if keep_state:
            # one-shot flag: consume it
            delattr(self, '_publisher_keep_state')
        # ``cls`` is only understood by Django < 1.6 save_base
        if not DJANGO_1_5 and 'cls' in kwargs:
            del kwargs['cls']
        ret = super(Page, self).save_base(*args, **kwargs)
        return ret
def is_new_dirty(self):
if self.pk:
fields = [
'publication_date', 'publication_end_date', 'in_navigation', 'soft_root', 'reverse_id',
'navigation_extenders', 'template', 'login_required', 'limit_visibility_in_menu'
]
try:
old_page = Page.objects.get(pk=self.pk)
except Page.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_page, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
def is_published(self, language, force_reload=False):
return self.get_title_obj(language, False, force_reload=force_reload).published
def toggle_in_navigation(self, set_to=None):
'''
Toggles (or sets) in_navigation and invalidates the cms page cache
'''
old = self.in_navigation
if set_to in [True, False]:
self.in_navigation = set_to
else:
self.in_navigation = not self.in_navigation
self.save()
#
# If there was a change, invalidate the cms page cache
#
if self.in_navigation != old:
from cms.views import invalidate_cms_page_cache
invalidate_cms_page_cache()
return self.in_navigation
def get_publisher_state(self, language, force_reload=False):
try:
return self.get_title_obj(language, False, force_reload=force_reload).publisher_state
except AttributeError:
return None
def set_publisher_state(self, language, state, published=None):
title = self.title_set.get(language=language)
title.publisher_state = state
if not published is None:
title.published = published
title._publisher_keep_state = True
title.save()
if hasattr(self, 'title_cache') and language in self.title_cache:
self.title_cache[language].publisher_state = state
return title
    def publish(self, language):
        """Overrides Publisher method, because there may be some descendants, which
        are waiting for parent to publish, so publish them if possible.

        :param language: language code of the titles/contents to publish
        :returns: True if page was successfully published.
        :raises PublicIsUnmodifiable: when called on a public instance
        """
        # Publish can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be published. Use draft.')
        # publish, but only if all parents are published!!
        published = None
        if not self.pk:
            self.save()
        # be sure we have the newest data including mptt
        p = Page.objects.get(pk=self.pk)
        self.lft = p.lft
        self.rght = p.rght
        self.level = p.level
        self.tree_id = p.tree_id
        if self._publisher_can_publish():
            if self.publisher_public_id:
                # Ensure we have up to date mptt properties
                public_page = Page.objects.get(pk=self.publisher_public_id)
            else:
                public_page = Page(created_by=self.created_by)
            if not self.publication_date:
                self.publication_date = now()
            self._copy_attributes(public_page)
            # we need to set relate this new public copy to its draft page (self)
            public_page.publisher_public = self
            public_page.publisher_is_draft = False
            # Ensure that the page is in the right position and save it
            public_page = self._publisher_save_public(public_page)
            # publication only takes effect when the parent (if any) is itself
            # published in this language
            published = public_page.parent_id is None or public_page.parent.is_published(language)
            if not public_page.pk:
                public_page.save()
            # The target page now has a pk, so can be used as a target
            self._copy_titles(public_page, language, published)
            self._copy_contents(public_page, language)
            # trigger home update
            public_page.save()
            # invalidate the menu for this site
            menu_pool.clear(site_id=self.site_id)
            # taken from Publisher - copy_page needs to call self._publisher_save_public(copy) for mptt insertion
            # insert_at() was maybe calling _create_tree_space() method, in this
            # case may tree_id change, so we must update tree_id from db first
            # before save
            if getattr(self, 'tree_id', None):
                me = self._default_manager.get(pk=self.pk)
                self.tree_id = me.tree_id
            self.publisher_public = public_page
            published = True
        else:
            # Nothing left to do
            pass
        if not published:
            # parent not published yet: this title waits in PENDING state
            self.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=True)
        self._publisher_keep_state = True
        self.save()
        # If we are publishing, this page might have become a "home" which
        # would change the path
        if self.is_home:
            for title in self.title_set.all():
                if title.path != '':
                    title._publisher_keep_state = True
                    title.save()
        if not published:
            # was not published, escape
            return
        # Check if there are some children which are waiting for parents to
        # become published.
        publish_set = self.get_descendants().filter(title_set__published=True,
                                                    title_set__language=language).select_related('publisher_public')
        from cms.models import Title
        for page in publish_set:
            if page.publisher_public_id:
                if not page.publisher_public.parent_id:
                    # re-attach an orphaned public child under our fresh public copy
                    page.publisher_public.parent = page.parent.publisher_public
                    page.publisher_public.save()
                if page.publisher_public.parent.is_published(language):
                    try:
                        public_title = Title.objects.get(page=page.publisher_public, language=language)
                    except Title.DoesNotExist:
                        public_title = None
                    draft_title = Title.objects.get(page=page, language=language)
                    if public_title and not public_title.published:
                        public_title._publisher_keep_state = True
                        public_title.published = True
                        public_title.publisher_state = PUBLISHER_STATE_DEFAULT
                        public_title.save()
                    if draft_title.publisher_state == PUBLISHER_STATE_PENDING:
                        draft_title.publisher_state = PUBLISHER_STATE_DEFAULT
                        draft_title._publisher_keep_state = True
                        draft_title.save()
            elif page.get_publisher_state(language) == PUBLISHER_STATE_PENDING:
                # child never had a public copy: publish it now (recursive)
                page.publish(language)
        # fire signal after publishing is done
        import cms.signals as cms_signals
        cms_signals.post_publish.send(sender=Page, instance=self, language=language)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
        return published
    def unpublish(self, language):
        """
        Removes this page from the public site

        :param language: language whose public content is removed
        :returns: True if this page was successfully unpublished
        :raises PublicIsUnmodifiable: when called on a public instance
        """
        # Publish can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be unpublished. Use draft.')
        # First, make sure we are in the correct state
        title = self.title_set.get(language=language)
        public_title = title.publisher_public
        title.published = False
        title.publisher_state = PUBLISHER_STATE_DIRTY
        title.save()
        if hasattr(self, 'title_cache'):
            self.title_cache[language] = title
        public_title.published = False
        public_title.save()
        public_page = self.publisher_public
        public_placeholders = public_page.placeholders.all()
        # drop the public plugins for this language only; other languages
        # remain published
        for pl in public_placeholders:
            pl.cmsplugin_set.filter(language=language).delete()
        public_page.save()
        # trigger update home
        self.save()
        # published children must now wait for this parent again
        self.mark_descendants_pending(language)
        from cms.views import invalidate_cms_page_cache
        invalidate_cms_page_cache()
        from cms.signals import post_unpublish
        post_unpublish.send(sender=Page, instance=self, language=language)
        return True
    def mark_descendants_pending(self, language):
        """Flag public descendants (and their drafts) as PENDING for ``language``.

        Called after unpublishing: children stay published on the draft side
        but must wait for this page to be published again.
        """
        assert self.publisher_is_draft
        # Go through all children of our public instance
        public_page = self.publisher_public
        from cms.models import Title
        if public_page:
            descendants = public_page.get_descendants().filter(title_set__language=language)
            for child in descendants:
                try:
                    child.set_publisher_state(language, PUBLISHER_STATE_PENDING, published=False)
                except Title.DoesNotExist:
                    continue
                # NOTE(review): on a public child, publisher_public points back
                # at its draft page — this mirrors the pending state there.
                draft = child.publisher_public
                if draft and draft.is_published(language) and draft.get_publisher_state(
                        language) == PUBLISHER_STATE_DEFAULT:
                    draft.set_publisher_state(language, PUBLISHER_STATE_PENDING)
    def revert(self, language):
        """Revert the draft version to the same state as the public version

        :param language: language whose titles/contents are reverted
        :raises PublicIsUnmodifiable: when called on a public instance
        :raises PublicVersionNeeded: when the page was never published
        """
        # Revert can only be called on draft pages
        if not self.publisher_is_draft:
            raise PublicIsUnmodifiable('The public instance cannot be reverted. Use draft.')
        if not self.publisher_public:
            raise PublicVersionNeeded('A public version of this page is needed')
        public = self.publisher_public
        # copy titles, plugins and attributes from public back onto the draft
        public._copy_titles(self, language, public.is_published(language))
        public._copy_contents(self, language)
        public._copy_attributes(self)
        # draft now matches public exactly: clear the dirty flag
        self.title_set.filter(language=language).update(publisher_state=PUBLISHER_STATE_DEFAULT, published=True)
        self.revision_id = 0
        self._publisher_keep_state = True
        self.save()
def get_draft_object(self):
if not self.publisher_is_draft:
return self.publisher_draft
return self
def get_public_object(self):
if not self.publisher_is_draft:
return self
return self.publisher_public
def get_languages(self):
if self.languages:
return sorted(self.languages.split(','))
else:
return []
def get_cached_ancestors(self, ascending=True):
if ascending:
if not hasattr(self, "ancestors_ascending"):
self.ancestors_ascending = list(self.get_ancestors(ascending))
return self.ancestors_ascending
else:
if not hasattr(self, "ancestors_descending"):
self.ancestors_descending = list(self.get_ancestors(ascending))
return self.ancestors_descending
# ## Title object access
def get_title_obj(self, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for accessing wanted / current title.
If wanted title doesn't exists, EmptyTitle instance will be returned.
"""
language = self._get_title_cache(language, fallback, version_id, force_reload)
if language in self.title_cache:
return self.title_cache[language]
from cms.models.titlemodels import EmptyTitle
return EmptyTitle(language)
def get_title_obj_attribute(self, attrname, language=None, fallback=True, version_id=None, force_reload=False):
"""Helper function for getting attribute or None from wanted/current title.
"""
try:
attribute = getattr(self.get_title_obj(
language, fallback, version_id, force_reload), attrname)
return attribute
except AttributeError:
return None
def get_path(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the path of the page depending on the given language
"""
return self.get_title_obj_attribute("path", language, fallback, version_id, force_reload)
def get_slug(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the slug of the page depending on the given language
"""
return self.get_title_obj_attribute("slug", language, fallback, version_id, force_reload)
def get_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the title of the page depending on the given language
"""
return self.get_title_obj_attribute("title", language, fallback, version_id, force_reload)
def get_menu_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the menu title of the page depending on the given language
"""
menu_title = self.get_title_obj_attribute("menu_title", language, fallback, version_id, force_reload)
if not menu_title:
return self.get_title(language, True, version_id, force_reload)
return menu_title
def get_admin_tree_title(self):
language = get_language()
from cms.models.titlemodels import EmptyTitle
def validate_title(title):
if isinstance(title, EmptyTitle):
return False
if not title.title or not title.slug:
return False
return True
if not hasattr(self, 'title_cache'):
self.title_cache = {}
for title in self.title_set.all():
self.title_cache[title.language] = title
if not language in self.title_cache or not validate_title(self.title_cache.get(language, EmptyTitle(language))):
fallback_langs = i18n.get_fallback_languages(language)
found = False
for lang in fallback_langs:
if lang in self.title_cache and validate_title(self.title_cache.get(lang, EmptyTitle(lang))):
found = True
language = lang
if not found:
if self.title_cache.keys():
language = self.title_cache.keys()[0]
else:
language = None
if not language:
return _("Empty")
title = self.title_cache[language]
if title.title:
return title.title
if title.page_title:
return title.page_title
if title.menu_title:
return title.menu_title
return title.slug
def get_changed_date(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get when this page was last updated
"""
return self.changed_date
def get_changed_by(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get user who last changed this page
"""
return self.changed_by
def get_page_title(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get the page title of the page depending on the given language
"""
page_title = self.get_title_obj_attribute("page_title", language, fallback, version_id, force_reload)
if not page_title:
return self.get_title(language, True, version_id, force_reload)
return page_title
def get_meta_description(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get content for the description meta tag for the page depending on the given language
"""
return self.get_title_obj_attribute("meta_description", language, fallback, version_id, force_reload)
def get_application_urls(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get application urls conf for application hook
"""
return self.application_urls
def get_redirect(self, language=None, fallback=True, version_id=None, force_reload=False):
"""
get redirect
"""
return self.get_title_obj_attribute("redirect", language, fallback, version_id, force_reload)
    def _get_title_cache(self, language, fallback, version_id, force_reload):
        """Ensure ``self.title_cache`` can answer a lookup for ``language`` and
        return the language that should actually be used (possibly a
        fallback). Loads titles from the DB, or from a reversion snapshot
        when ``version_id`` is given.
        """
        if not language:
            language = get_language()
        load = False
        if not hasattr(self, "title_cache") or force_reload:
            load = True
            self.title_cache = {}
        elif not language in self.title_cache:
            if fallback:
                fallback_langs = i18n.get_fallback_languages(language)
                for lang in fallback_langs:
                    if lang in self.title_cache:
                        # a cached fallback is good enough — no DB hit
                        return lang
            load = True
        if load:
            from cms.models.titlemodels import Title
            if version_id:
                # restore titles from the reversion snapshot instead of the DB
                from reversion.models import Version
                version = get_object_or_404(Version, pk=version_id)
                revs = [related_version.object_version for related_version in version.revision.version_set.all()]
                for rev in revs:
                    obj = rev.object
                    if obj.__class__ == Title:
                        self.title_cache[obj.language] = obj
            else:
                titles = Title.objects.filter(page=self)
                for title in titles:
                    self.title_cache[title.language] = title
            if language in self.title_cache:
                return language
            else:
                if fallback:
                    # retry the fallback chain against the freshly loaded cache
                    fallback_langs = i18n.get_fallback_languages(language)
                    for lang in fallback_langs:
                        if lang in self.title_cache:
                            return lang
        # no title found at all: hand back the requested language unchanged
        return language
    def get_template(self):
        """
        get the template of this page if defined or if closer parent if
        defined or DEFAULT_PAGE_TEMPLATE otherwise
        """
        # memoized per instance; the cache is cleared in save()
        if hasattr(self, '_template_cache'):
            return self._template_cache
        template = None
        if self.template:
            if self.template != constants.TEMPLATE_INHERITANCE_MAGIC:
                template = self.template
            else:
                # inheritance: the nearest ancestor with a concrete template wins
                try:
                    template = self.get_ancestors(ascending=True).exclude(
                        template=constants.TEMPLATE_INHERITANCE_MAGIC).values_list('template', flat=True)[0]
                except IndexError:
                    pass
        if not template:
            # fall back to the first configured CMS template
            template = get_cms_setting('TEMPLATES')[0][0]
        self._template_cache = template
        return template
def get_template_name(self):
"""
get the textual name (2nd parameter in get_cms_setting('TEMPLATES'))
of the template of this page or of the nearest
ancestor. failing to find that, return the name of the default template.
"""
template = self.get_template()
for t in get_cms_setting('TEMPLATES'):
if t[0] == template:
return t[1]
return _("default")
    def has_view_permission(self, request, user=None):
        """Whether ``user`` (default: request.user) may view this page.

        Combines global view permissions, per-page view restrictions and the
        CMS_PUBLIC_FOR setting; public pages delegate to their draft, where
        the permissions live.
        """
        from cms.models.permissionmodels import PagePermission, GlobalPagePermission
        from cms.utils.plugins import current_site
        if not user:
            user = request.user
        if not self.publisher_is_draft:
            return self.publisher_draft.has_view_permission(request, user)
            # does any restriction exist?
        # inherited and direct
        is_restricted = PagePermission.objects.for_page(page=self).filter(can_view=True).exists()
        if user.is_authenticated():
            # NOTE(review): this check uses request.user rather than the
            # ``user`` argument — confirm that is intended when they differ
            global_view_perms = GlobalPagePermission.objects.user_has_view_permission(
                request.user, current_site(request)).exists()
            # a global permission was given to the request's user
            if global_view_perms:
                return True
            elif not is_restricted:
                # unrestricted page: visibility governed by CMS_PUBLIC_FOR
                if ((get_cms_setting('PUBLIC_FOR') == 'all') or
                    (get_cms_setting('PUBLIC_FOR') == 'staff' and
                     user.is_staff)):
                    return True
            # a restricted page and an authenticated user
            elif is_restricted:
                opts = self._meta
                codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
                user_perm = user.has_perm(codename)
                generic_perm = self.has_generic_permission(request, "view")
                return user_perm or generic_perm
        else:
            if is_restricted or not get_cms_setting('PUBLIC_FOR') == 'all':
                # anyonymous user, page has restriction and global access is permitted
                return False
            else:
                # anonymous user, no restriction saved in database
                return True
        # Authenticated user
        # Django wide auth perms "can_view" or cms auth perms "can_view"
        opts = self._meta
        codename = '%s.view_%s' % (opts.app_label, opts.object_name.lower())
        return (user.has_perm(codename) or
                self.has_generic_permission(request, "view"))
def has_change_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + opts.get_change_permission())
and self.has_generic_permission(request, "change"))
def has_delete_permission(self, request, user=None):
opts = self._meta
if not user:
user = request.user
if user.is_superuser:
return True
return (user.has_perm(opts.app_label + '.' + opts.get_delete_permission())
and self.has_generic_permission(request, "delete"))
def has_publish_permission(self, request, user=None):
if not user:
user = request.user
if user.is_superuser:
return True
opts = self._meta
return (user.has_perm(opts.app_label + '.' + "publish_page")
and self.has_generic_permission(request, "publish"))
has_moderate_permission = has_publish_permission
def has_advanced_settings_permission(self, request, user=None):
return self.has_generic_permission(request, "advanced_settings", user)
def has_change_permissions_permission(self, request, user=None):
"""
Has user ability to change permissions for current page?
"""
return self.has_generic_permission(request, "change_permissions", user)
def has_add_permission(self, request, user=None):
"""
Has user ability to add page under current page?
"""
return self.has_generic_permission(request, "add", user)
def has_move_page_permission(self, request, user=None):
"""Has user ability to move current page?
"""
return self.has_generic_permission(request, "move_page", user)
    def has_generic_permission(self, request, perm_type, user=None):
        """
        Return true if the current user has permission on the page.
        Return the string 'All' if the user has all rights.
        """
        if not user:
            user = request.user
        # per-(user, perm_type) result is cached on the instance to avoid
        # repeated permission lookups during a single request
        att_name = "permission_%s_cache" % perm_type
        if (not hasattr(self, "permission_user_cache")
                or not hasattr(self, att_name)
                or user.pk != self.permission_user_cache.pk):
            from cms.utils.permissions import has_generic_permission
            self.permission_user_cache = user
            setattr(self, att_name, has_generic_permission(
                self.pk, user, perm_type, self.site_id))
            if getattr(self, att_name):
                # NOTE(review): sets the generic edit flag whenever *any*
                # permission check succeeds — confirm against callers
                self.permission_edit_cache = True
        return getattr(self, att_name)
    def get_media_path(self, filename):
        """
        Returns path (relative to MEDIA_ROOT/MEDIA_URL) to directory for storing page-scope files.
        This allows multiple pages to contain files with identical names without namespace issues.
        Plugins such as Picture can use this method to initialise the 'upload_to' parameter for
        File-based fields. For example:
            image = models.ImageField(_("image"), upload_to=CMSPlugin.get_media_path)
        where CMSPlugin.get_media_path calls self.page.get_media_path

        This location can be customised using the CMS_PAGE_MEDIA_PATH setting
        """
        # the pk is used as the namespace directory, so the page must be saved
        return join(get_cms_setting('PAGE_MEDIA_PATH'), "%d" % self.pk, filename)
    def reload(self):
        """
        Reload a page from the database

        Returns a fresh instance; ``self`` itself is left untouched.
        """
        return Page.objects.get(pk=self.pk)
def get_object_queryset(self):
"""Returns smart queryset depending on object type - draft / public
"""
qs = self.__class__.objects
return self.publisher_is_draft and qs.drafts() or qs.public().published()
def _publisher_can_publish(self):
"""Is parent of this object already published?
"""
if self.parent_id:
try:
return bool(self.parent.publisher_public_id)
except AttributeError:
raise MpttPublisherCantPublish
return True
    def get_next_filtered_sibling(self, **filters):
        """Very similar to original mptt method, but adds support for filters.
        Returns this model instance's next sibling in the tree, or
        ``None`` if it doesn't have a next sibling.

        Extra ``filters`` are combined with the implicit draft/public and
        site constraints added below.
        """
        opts = self._mptt_meta
        if self.is_root_node():
            # for roots, the "next sibling" is the next root tree
            filters.update({
                '%s__isnull' % opts.parent_attr: True,
                '%s__gt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
            })
        else:
            # same parent, positioned to our right in the tree
            filters.update({
                opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
                '%s__gt' % opts.left_attr: getattr(self, opts.right_attr),
            })
        # publisher stuff
        filters.update({
            'publisher_is_draft': self.publisher_is_draft
        })
        # multisite
        filters.update({
            'site__id': self.site_id
        })
        sibling = None
        try:
            sibling = self._tree_manager.filter(**filters)[0]
        except IndexError:
            pass
        return sibling
    def get_previous_filtered_sibling(self, **filters):
        """Very similar to original mptt method, but adds support for filters.
        Returns this model instance's previous sibling in the tree, or
        ``None`` if it doesn't have a previous sibling.

        Extra ``filters`` are combined with the implicit draft/public and
        site constraints added below.
        """
        opts = self._mptt_meta
        if self.is_root_node():
            # for roots, the "previous sibling" is the closest preceding root tree
            filters.update({
                '%s__isnull' % opts.parent_attr: True,
                '%s__lt' % opts.tree_id_attr: getattr(self, opts.tree_id_attr),
            })
            order_by = '-%s' % opts.tree_id_attr
        else:
            # same parent, positioned to our left; order picks the closest one
            filters.update({
                opts.parent_attr: getattr(self, '%s_id' % opts.parent_attr),
                '%s__lt' % opts.right_attr: getattr(self, opts.left_attr),
            })
            order_by = '-%s' % opts.right_attr
        # publisher stuff
        filters.update({
            'publisher_is_draft': self.publisher_is_draft
        })
        # multisite
        filters.update({
            'site__id': self.site_id
        })
        sibling = None
        try:
            sibling = self._tree_manager.filter(**filters).order_by(order_by)[0]
        except IndexError:
            pass
        return sibling
    def _publisher_save_public(self, obj):
        """Mptt specific stuff before the object can be saved, overrides original
        publisher method.

        Args:
            obj - public variant of `self` to be saved.

        Returns the (possibly repositioned) public page; caller saves it.
        """
        public_parent = self.parent.publisher_public if self.parent_id else None
        # siblings considered for positioning must themselves have a public
        # copy under the corresponding public parent
        filters = dict(publisher_public__isnull=False)
        if public_parent:
            filters['publisher_public__parent__in'] = [public_parent]
        else:
            filters['publisher_public__parent__isnull'] = True
        prev_sibling = self.get_previous_filtered_sibling(**filters)
        public_prev_sib = prev_sibling.publisher_public if prev_sibling else None
        if not self.publisher_public_id:  # first time published
            # is there anybody on left side?
            if not self.parent_id:
                # root draft: the public copy sits directly right of the draft tree
                obj.insert_at(self, position='right', save=False)
            else:
                if public_prev_sib:
                    obj.insert_at(public_prev_sib, position='right', save=False)
                else:
                    if public_parent:
                        obj.insert_at(public_parent, position='first-child', save=False)
        else:
            # check if object was moved / structural tree change
            prev_public_sibling = obj.get_previous_filtered_sibling()
            if self.level != obj.level or \
                    public_parent != obj.parent or \
                    public_prev_sib != prev_public_sibling:
                if public_prev_sib:
                    obj.move_to(public_prev_sib, position="right")
                elif public_parent:
                    # move as a first child to parent
                    obj.move_to(public_parent, position='first-child')
                else:
                    # it is a move from the right side or just save
                    next_sibling = self.get_next_filtered_sibling(**filters)
                    if next_sibling and next_sibling.publisher_public_id:
                        obj.move_to(next_sibling.publisher_public, position="left")
        return obj
def rescan_placeholders(self):
"""
Rescan and if necessary create placeholders in the current template.
"""
# inline import to prevent circular imports
from cms.utils.plugins import get_placeholders
placeholders = get_placeholders(self.get_template())
found = {}
for placeholder in self.placeholders.all():
if placeholder.slot in placeholders:
found[placeholder.slot] = placeholder
for placeholder_name in placeholders:
if not placeholder_name in found:
placeholder = Placeholder.objects.create(slot=placeholder_name)
self.placeholders.add(placeholder)
found[placeholder_name] = placeholder
return found
def get_xframe_options(self):
""" Finds X_FRAME_OPTION from tree if inherited """
xframe_options = cache.get('cms:xframe_options:%s' % self.pk)
if xframe_options is None:
ancestors = self.get_ancestors(ascending=True, include_self=True)
# Ignore those pages which just inherit their value
ancestors = ancestors.exclude(xframe_options=self.X_FRAME_OPTIONS_INHERIT)
# Now just give me the clickjacking setting (not anything else)
xframe_options = ancestors.values_list('xframe_options', flat=True)
if len(xframe_options) <= 0:
# No ancestors were found
return None
xframe_options = xframe_options[0]
cache.set('cms:xframe_options:%s' % self.pk, xframe_options)
return xframe_options
def _reversion():
    """Register the Page model with django-reversion.

    Follows titles, placeholders and permissions so they are versioned
    together with the page; publisher bookkeeping fields are excluded
    because they describe draft/public state, not content.
    """
    exclude_fields = ['publisher_is_draft', 'publisher_public', 'publisher_state']
    reversion_register(
        Page,
        follow=["title_set", "placeholders", "pagepermission_set"],
        exclude_fields=exclude_fields
    )

_reversion()
|
import qiime2.core.path as qpath
class FormatBase:
    """Base wrapper tying a format object to a filesystem path and a mode."""

    def __init__(self, path=None, mode='w'):
        import qiime2.plugin.model as model

        # Reading requires an existing path; writing forbids one (a temporary
        # output path is generated instead).
        if path is None and mode != 'w':
            raise ValueError("A path must be provided when reading.")
        if path is not None and mode != 'r':
            raise ValueError("A path must be omitted when writing.")

        if mode == 'w':
            self.path = qpath.OutPath(
                # TODO: parents shouldn't know about their children
                dir=isinstance(self, model.DirectoryFormat),
                prefix='q2-%s-' % self.__class__.__name__)
        else:
            self.path = qpath.InPath(path)

        self._mode = mode

    def __str__(self):
        return str(self.path)
|
from flask import Blueprint

# Blueprint collecting the "suggestion" feature's routes.
suggestion = Blueprint('suggestion', __name__)

# Imported for its side effects: the views module attaches its routes to
# `suggestion`.  Kept after the blueprint definition to avoid a circular import.
from . import views  # noqa
|
"""对应于save all"""
from QUANTAXIS.QASU.main import (QA_SU_save_etf_day, QA_SU_save_etf_min,
QA_SU_save_financialfiles,
QA_SU_save_index_day, QA_SU_save_index_min,
QA_SU_save_stock_block, QA_SU_save_stock_day,
QA_SU_save_stock_info,
QA_SU_save_stock_info_tushare,
QA_SU_save_stock_list, QA_SU_save_stock_min,
QA_SU_save_stock_xdxr)
from QUANTAXIS.QASU.save_binance import (QA_SU_save_binance_symbol,
QA_SU_save_binance_1hour,
QA_SU_save_binance_1day,
QA_SU_save_binance_1min,
QA_SU_save_binance)
from QUANTAXIS.QASU.save_bitmex import (QA_SU_save_bitmex_symbol,
QA_SU_save_bitmex_day)
from QUANTAXIS.QASU.save_huobi import (QA_SU_save_huobi_symbol,
QA_SU_save_huobi_1hour,
QA_SU_save_huobi_1day,
QA_SU_save_huobi_1min,
QA_SU_save_huobi)
QA_SU_save_stock_day('tdx')
QA_SU_save_stock_xdxr('tdx')
QA_SU_save_index_day('tdx')
QA_SU_save_stock_list('tdx')
QA_SU_save_stock_block('tdx')
|
"""Base class for a Comm"""
import threading
import uuid
from zmq.eventloop.ioloop import IOLoop
from IPython.config import LoggingConfigurable
from IPython.kernel.zmq.kernelbase import Kernel
from IPython.utils.jsonutil import json_clean
from IPython.utils.traitlets import Instance, Unicode, Bytes, Bool, Dict, Any
class Comm(LoggingConfigurable):
    """Class for communicating between a Frontend and a Kernel"""
    # If this is instantiated by a non-IPython kernel, shell will be None
    shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
                     allow_none=True)
    kernel = Instance('IPython.kernel.zmq.kernelbase.Kernel')
    def _kernel_default(self):
        # Default to the process-wide Kernel singleton, when one has been created.
        if Kernel.initialized():
            return Kernel.instance()
    iopub_socket = Any()
    def _iopub_socket_default(self):
        return self.kernel.iopub_socket
    session = Instance('IPython.kernel.zmq.session.Session')
    def _session_default(self):
        if self.kernel is not None:
            return self.kernel.session
    target_name = Unicode('comm')
    target_module = Unicode(None, allow_none=True, help="""requirejs module from
        which to load comm target.""")
    topic = Bytes()
    def _topic_default(self):
        # IOPub topic used as the zmq ident for every message of this comm.
        return ('comm-%s' % self.comm_id).encode('ascii')
    _open_data = Dict(help="data dict, if any, to be included in comm_open")
    _close_data = Dict(help="data dict, if any, to be included in comm_close")
    _msg_callback = Any()
    _close_callback = Any()
    # Starts True; flipped to False once the comm is successfully opened
    # (or immediately for a secondary comm, which mirrors an already-open peer).
    _closed = Bool(True)
    comm_id = Unicode()
    def _comm_id_default(self):
        return uuid.uuid4().hex
    primary = Bool(True, help="Am I the primary or secondary Comm?")
    def __init__(self, target_name='', data=None, **kwargs):
        """Create a comm; the primary side immediately sends comm_open."""
        if target_name:
            kwargs['target_name'] = target_name
        super(Comm, self).__init__(**kwargs)
        if self.primary:
            # I am primary, open my peer.
            self.open(data)
        else:
            self._closed = False
    def _publish_msg(self, msg_type, data=None, metadata=None, buffers=None, **keys):
        """Helper for sending a comm message on IOPub"""
        if threading.current_thread().name != 'MainThread' and IOLoop.initialized():
            # make sure we never send on a zmq socket outside the main IOLoop thread
            IOLoop.instance().add_callback(lambda : self._publish_msg(msg_type, data, metadata, buffers, **keys))
            return
        data = {} if data is None else data
        metadata = {} if metadata is None else metadata
        content = json_clean(dict(data=data, comm_id=self.comm_id, **keys))
        self.session.send(self.iopub_socket, msg_type,
                          content,
                          metadata=json_clean(metadata),
                          parent=self.kernel._parent_header,
                          ident=self.topic,
                          buffers=buffers,
        )
    def __del__(self):
        """trigger close on gc"""
        # NOTE(review): close() publishes a message; during interpreter
        # shutdown some of these attributes may already be torn down.
        self.close()
    # publishing messages
    def open(self, data=None, metadata=None, buffers=None):
        """Open the frontend-side version of this comm"""
        if data is None:
            data = self._open_data
        comm_manager = getattr(self.kernel, 'comm_manager', None)
        if comm_manager is None:
            raise RuntimeError("Comms cannot be opened without a kernel "
                               "and a comm_manager attached to that kernel.")
        comm_manager.register_comm(self)
        try:
            self._publish_msg('comm_open',
                              data=data, metadata=metadata, buffers=buffers,
                              target_name=self.target_name,
                              target_module=self.target_module,
            )
            self._closed = False
        except:
            # Deliberate bare except: undo the registration on any failure,
            # then re-raise the original exception unchanged.
            comm_manager.unregister_comm(self)
            raise
    def close(self, data=None, metadata=None, buffers=None):
        """Close the frontend-side version of this comm"""
        if self._closed:
            # only close once
            return
        self._closed = True
        if data is None:
            data = self._close_data
        self._publish_msg('comm_close',
                          data=data, metadata=metadata, buffers=buffers,
        )
        self.kernel.comm_manager.unregister_comm(self)
    def send(self, data=None, metadata=None, buffers=None):
        """Send a message to the frontend-side version of this comm"""
        self._publish_msg('comm_msg',
                          data=data, metadata=metadata, buffers=buffers,
        )
    # registering callbacks
    def on_close(self, callback):
        """Register a callback for comm_close
        Will be called with the `data` of the close message.
        Call `on_close(None)` to disable an existing callback.
        """
        self._close_callback = callback
    def on_msg(self, callback):
        """Register a callback for comm_msg
        Will be called with the `data` of any comm_msg messages.
        Call `on_msg(None)` to disable an existing callback.
        """
        self._msg_callback = callback
    # handling of incoming messages
    def handle_close(self, msg):
        """Handle a comm_close message"""
        self.log.debug("handle_close[%s](%s)", self.comm_id, msg)
        if self._close_callback:
            self._close_callback(msg)
    def handle_msg(self, msg):
        """Handle a comm_msg message"""
        self.log.debug("handle_msg[%s](%s)", self.comm_id, msg)
        if self._msg_callback:
            # Bracket the callback with the shell's pre/post execute events so
            # comm-triggered work behaves like user execution (when a shell exists).
            if self.shell:
                self.shell.events.trigger('pre_execute')
            self._msg_callback(msg)
            if self.shell:
                self.shell.events.trigger('post_execute')

__all__ = ['Comm']
|
"""Calliope parsing errors for logging and collecting metrics.
Refer to the calliope.parser_extensions module for a detailed overview.
"""
import argparse
class ArgumentError(argparse.ArgumentError):
    """Base class for argument errors with metrics.

    ArgumentError instances are intercepted by
    parser_extensions.ArgumentParser.error(), which
      1. reports a failed command to metrics
      2. prints a usage diagnostic to the standard error
      3. exits with status 2, bypassing gcloud_main exception handling

    Attributes:
      argument: str, The argument name(s) causing the error.
      error_extra_info: {str: str}, Extra info dict for error_format.
      error_format: str, A .format() string for constructing the error message
        from error_extra_info.
      extra_path_arg: str, Dotted command path to append to the command path.
      parser: ArgumentParser, Used to generate the usage string for the command.
        This could be a different subparser than the command parser.
    """

    def __init__(self, error_format, argument=None, extra_path_arg=None,
                 parser=None, **kwargs):
        self.error_format = error_format
        self.argument = argument
        self.extra_path_arg = extra_path_arg
        self.parser = parser
        self.error_extra_info = kwargs
        # Python 2 `unicode`: the argparse base class stores the rendered
        # message produced by __str__ below.
        super(ArgumentError, self).__init__(None, unicode(self))

    def __str__(self):
        # Render from the format string + extra info, prefixing the offending
        # argument name when one was supplied.
        message = self.error_format.format(**self.error_extra_info)
        if self.argument:
            message = u'argument {argument}: {message}'.format(
                argument=self.argument, message=message)
        return message
class RequiredArgumentError(ArgumentError):
    """Argparse required actions were not all present."""


class RequiredArgumentGroupError(ArgumentError):
    """Command has a group of arguments with none of the options present."""


class TooFewArgumentsError(ArgumentError):
    """Argparse didn't use all the Positional objects."""


class UnknownCommandError(ArgumentError):
    """Unknown command error."""


class UnrecognizedArgumentsError(ArgumentError):
    """User entered arguments that were not recognized by argparse."""


class WrongTrackError(ArgumentError):
    """For parsed commands in a different track."""


class OtherParsingError(ArgumentError):
    """Some other parsing error that is not any of the above."""


class ArgumentException(Exception):
    """ArgumentException is for problems with the declared arguments."""


class UnknownDestinationException(Exception):
    """Fatal error for an internal dest that has no associated arg."""
|
def match(command, settings):
    """Match a `go run` invocation that is missing the ``.go`` extension."""
    script = command.script
    return script.startswith('go run ') and not script.endswith('.go')
def get_new_command(command, settings):
    """Re-run the same command with the ``.go`` extension appended."""
    return '{}.go'.format(command.script)
|
import numpy as np
class Skeleton:
    """Kinematic tree described by one parent index per joint.

    The left/right joint lists identify symmetric joints (used e.g. for
    horizontal-flip augmentation) and must have equal length.
    """

    def __init__(self, parents, joints_left, joints_right):
        assert len(joints_left) == len(joints_right)
        self._parents = np.array(parents)
        self._joints_left = joints_left
        self._joints_right = joints_right
        self._compute_metadata()

    def num_joints(self):
        """Total number of joints."""
        return len(self._parents)

    def parents(self):
        """Array mapping each joint to its parent index (-1 for the root)."""
        return self._parents

    def has_children(self):
        """Boolean array: True where the joint has at least one child."""
        return self._has_children

    def children(self):
        """Per-joint list of child joint indices."""
        return self._children

    def remove_joints(self, joints_to_remove):
        """
        Remove the joints specified in 'joints_to_remove'.

        Surviving joints are re-indexed compactly; each joint whose ancestor
        chain passes through a removed joint is re-parented to its nearest
        surviving ancestor.  Returns the list of original indices kept.
        """
        kept = [j for j in range(len(self._parents)) if j not in joints_to_remove]

        # Re-route parents through removed joints to the nearest kept ancestor.
        for i in range(len(self._parents)):
            while self._parents[i] in joints_to_remove:
                self._parents[i] = self._parents[self._parents[i]]

        # Each removed joint shifts the index of every joint after it down by one.
        shifts = np.zeros(len(self._parents), dtype=int)
        compacted = []
        for i, parent in enumerate(self._parents):
            if i in joints_to_remove:
                shifts[i:] += 1
            else:
                compacted.append(parent - shifts[parent])
        self._parents = np.array(compacted)

        if self._joints_left is not None:
            self._joints_left = [j - shifts[j]
                                 for j in self._joints_left if j in kept]
        if self._joints_right is not None:
            self._joints_right = [j - shifts[j]
                                  for j in self._joints_right if j in kept]

        self._compute_metadata()
        return kept

    def joints_left(self):
        """Indices of left-side joints (post-removal indices, if any removal ran)."""
        return self._joints_left

    def joints_right(self):
        """Indices of right-side joints (post-removal indices, if any removal ran)."""
        return self._joints_right

    def _compute_metadata(self):
        # Derived structure: children lists and a has-children mask.
        n = len(self._parents)
        self._has_children = np.zeros(n).astype(bool)
        for child, parent in enumerate(self._parents):
            if parent != -1:
                self._has_children[parent] = True
        self._children = [[] for _ in range(n)]
        for child, parent in enumerate(self._parents):
            if parent != -1:
                self._children[parent].append(child)
|
from . import base
__all__ = ['TestExecutor']
class TestExecutor(object):
    """
    An executor that just stores the request and passes a pre-set response to
    the parser.
    """

    def set_response(self, content, code, headers):
        # Stash the canned response handed to the parser on the next call.
        self.content, self.code, self.headers = content, code, headers

    def __call__(self, request, parser):
        # Record the request for later inspection, then feed the parser the
        # pre-set content/code/headers as if a real response arrived.
        self.request = request
        return parser(self.content, self.code, self.headers)
def use():
    """Install a fresh TestExecutor as the active executor and return it."""
    instance = TestExecutor()
    base.use_executor(instance)
    return instance
|
"""
Dummy easyblock for software that uses the GNU installation procedure,
i.e. configure/make/make install.
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
class ConfigureMake(EasyBlock):
    """Dummy support for building and installing applications with configure/make/make install."""

    @staticmethod
    def extra_options(extra_vars=None):
        """Extra easyconfig parameters specific to ConfigureMake."""
        custom = {
            'test_bool': [False, "Just a test", CUSTOM],
            'test_none': [None, "Another test", CUSTOM],
            'test_123': ['', "Test 1, 2, 3", CUSTOM],
        }
        # Start from the generic EasyBlock parameters, then layer ours on top.
        params = EasyBlock.extra_options(extra=extra_vars)
        params.update(custom)
        return params
|
"""Utility methods to help find, authenticate or register a remote user."""
from flask import current_app
from flask_login import logout_user
from invenio.base.globals import cfg
from invenio.ext.login import UserInfo, authenticate
from invenio.ext.sqlalchemy import db
from invenio_accounts.models import User, UserEXT
from .models import RemoteAccount, RemoteToken
def _get_external_id(account_info):
"""Get external id from account info."""
if all(k in account_info for k in ("external_id", "external_method")):
return dict(id=account_info['external_id'],
method=account_info['external_method'])
return None
def oauth_get_user(client_id, account_info=None, access_token=None):
    """Retrieve user object for the given request.

    Uses either the access token or extracted account information to retrieve
    the user object.
    """
    # 1) Resolve via a previously stored remote token, when one matches.
    if access_token:
        token = RemoteToken.get_by_token(client_id, access_token)
        if token:
            return UserInfo(token.remote_account.user_id)
    if account_info:
        # 2) Resolve via the external-id linkage table.
        external_id = _get_external_id(account_info)
        if external_id:
            link = UserEXT.query.filter_by(id=external_id['id'],
                                           method=external_id['method']
                                           ).first()
            if link:
                return UserInfo(link.id_user)
        # 3) Fall back to matching the account by e-mail address.
        email = account_info.get('email')
        if email:
            user = User.query.filter_by(email=email).first()
            if user:
                return UserInfo(user.id)
    return None
def oauth_authenticate(client_id, userinfo, require_existing_link=False,
                       remember=False):
    """Authenticate an oauth authorized callback."""
    # Authenticate via the access token (access token used to get user_id).
    if not userinfo or not authenticate(userinfo['email'], remember=remember):
        return False
    if not require_existing_link:
        return True
    # Caller insists on a pre-existing link between the local account and the
    # remote OAuth account; undo the login when no such link exists.
    if RemoteAccount.get(userinfo.get_id(), client_id) is None:
        logout_user()
        return False
    return True
def oauth_register(account_info, form_data=None):
    """Register user if possible.

    The e-mail supplied via the registration form (if any) overrides the one
    extracted from the OAuth provider.  Returns a UserInfo for the new user,
    or None when no e-mail is available, the e-mail is already taken, or the
    database insert fails.
    """
    from invenio_accounts.models import User
    email = account_info.get("email")
    if form_data and form_data.get("email"):
        email = form_data.get("email")
    if email:
        # Account note: '2' means "send e-mail verification", '1' means none.
        note = '1'
        if cfg['CFG_ACCESS_CONTROL_NOTIFY_USER_ABOUT_NEW_ACCOUNT']:
            note = '2'
        if not User.query.filter_by(email=email).first():
            # Email does not already exists. so we can proceed to register
            # user.
            u = User(
                nickname=account_info.get('nickname', ''),
                email=email,
                password=None,
                note=note
            )
            try:
                db.session.add(u)
                db.session.commit()
            except Exception:
                # Best-effort: log and signal failure instead of propagating.
                current_app.logger.exception("Cannot create user")
                return None
            # verify the email
            if note == '2':
                u.verify_email()
            return UserInfo(u.id)
    return None
def oauth_link_external_id(user, external_id=None):
    """Link a user to an external id."""
    # Remove any stale link for this external id before creating the new one.
    oauth_unlink_external_id(external_id)
    link = UserEXT(id=external_id['id'],
                   method=external_id['method'],
                   id_user=user.id)
    db.session.add(link)
def oauth_unlink_external_id(external_id):
    """Unlink a user from an external id."""
    matching = UserEXT.query.filter_by(id=external_id['id'],
                                       method=external_id['method'])
    matching.delete()
    db.session.commit()
|
"""
WebJournal templates - Defines the look of various parts of the
WebJournal modules. Most customizations will however be done through
BibFormat format templates files.
"""
import os
from six import iteritems
from invenio.config import \
CFG_SITE_SUPPORT_EMAIL, \
CFG_ETCDIR, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_RECORD
from invenio.base.i18n import gettext_set_language
from invenio.legacy.webpage import page
from invenio.legacy.webjournal.utils import \
get_number_of_articles_for_issue, \
get_release_datetime, \
get_announcement_datetime, \
get_issue_number_display
class Template:
"""Templating class, refer to bibformat.py for examples of call"""
def tmpl_webjournal_missing_info_box(self, req, ln, title, msg_title, msg):
"""
returns a box indicating that the given journal was not found on the
server, leaving the opportunity to select an existing journal from a list.
"""
_ = gettext_set_language(ln)
box_title = msg_title
box_text = msg
box_list_title = _("Available Journals")
# todo: move to DB call
find_journals = lambda path: [entry for entry in os.listdir(str(path)) \
if os.path.isdir(str(path)+str(entry))]
try:
all_journals = find_journals('%s/webjournal/' % CFG_ETCDIR)
except:
all_journals = []
mail_msg = _("Contact %(x_url_open)sthe administrator%(x_url_close)s") % \
{'x_url_open' :
'<a href="mailto:%s">' % CFG_SITE_SUPPORT_EMAIL,
'x_url_close' : '</a>'}
box = '''
<div style="text-align: center;">
<fieldset style="width:400px; margin-left: auto; margin-right:auto">
<legend style="color:#a70509;background-color:#fff;">
<i>%s</i>
</legend>
<p style="text-align:center;">%s</p>
<h2 style="color:#0D2B88;">%s</h2>
<ul class="webjournalBoxList">
%s
</ul>
<br/>
<div style="text-align:right;">
%s
</div>
</fieldset>
</div>
''' % (box_title,
box_text,
box_list_title,
"".join(['<li><a href="%s/journal/?name=%s">%s</a></li>'
% (CFG_SITE_URL,
journal,
journal) for journal in all_journals]),
mail_msg)
return page(req=req, title=title, body=box)
    def tmpl_webjournal_error_box(self, req, ln, title, title_msg, msg):
        """
        returns an error box for webjournal errors.

        `title`, `title_msg` and `msg` are passed through gettext so that
        pre-translated message ids can be supplied by the caller.
        """
        _ = gettext_set_language(ln)
        title = _(title)
        title_msg = _(title_msg)
        msg = _(msg)
        mail_msg = _("Contact %(x_url_open)sthe administrator%(x_url_close)s") % \
                   {'x_url_open' :
                    '<a href="mailto:%s">' % CFG_SITE_SUPPORT_EMAIL,
                    'x_url_close' : '</a>'}
        box = '''
        <div style="text-align: center;">
            <fieldset style="width:400px; margin-left: auto; margin-right: auto;">
                <legend style="color:#a70509;background-color:#fff;">
                    <i>%s</i>
                </legend>
                <p style="text-align:center;">%s</p>
                <br/>
                <div style="text-align:right;">
                    %s
                </div>
            </fieldset>
        </div>
        ''' % (title_msg, msg, mail_msg)
        return page(req=req, title=title, body=box)
    def tmpl_admin_regenerate_confirm(self,
                                      ln,
                                      journal_name,
                                      issue,
                                      issue_released_p):
        """
        Ask user confirmation about regenerating the issue, as well as if
        we should move all the drafts to the public collection.

        Parameters:
              journal_name   -  the journal for which the cache should
                                be delete
                     issue   -  the issue for which the cache should be
                                deleted
                        ln   -  language
          issue_released_p   -  is issue already released?
        """
        out = '''
        <form action="/admin/webjournal/webjournaladmin.py/regenerate" name="regenerate" method="post">
        You are going to refresh the cache for issue %(issue)s. Do you want to continue? <br/>
        <input type="hidden" name="confirmed_p" value="confirmed"/>
        <input type="hidden" name="journal_name" value="%(journal_name)s">
        <input type="hidden" name="issue" value="%(issue)s">
        <input type="hidden" name="ln" value="%(ln)s">
        <input type="checkbox" name="publish_draft_articles_p" value="move" id="publish_draft_articles_p" %(disabled)s/><label for="publish_draft_articles_p">Also switch all "<em>Offline</em>" articles to "<em>Online</em>"</label>[<a target="_blank" href="/help/admin/webjournal-editor-guide#cache-online">?</a>]<br/></br>
        <input class="adminbutton" type="submit" value="Regenerate"/>
        </form>
        ''' % {'issue': issue,
               'journal_name': journal_name,
               'ln': ln,
               # The "publish drafts" checkbox only makes sense for an issue
               # that has already been released; disable it otherwise.
               'disabled': not issue_released_p and 'disabled="disabled"' or ""}
        return out
    def tmpl_admin_regenerate_success(self, ln, journal_name, issue):
        """
        Success message if a user applied the "regenerate" link. Links back to
        the regenerated journal.

        `issue` is formatted "number/year".
        """
        # NOTE(review): `_` is bound but the text below is not localized.
        _ = gettext_set_language(ln)
        out = '''
        The issue number %(issue)s for the %(journal_name)s journal has been successfully
        regenerated. <br/>
        Look at your changes: » <a href="%(CFG_SITE_URL)s/journal/%(journal_name)s/%(issue_year)s/%(issue_number)s"> %(journal_name)s </a> <br/> or go back to this journal <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>.
        ''' % {'issue': issue,
               'journal_name': journal_name,
               'CFG_SITE_URL': CFG_SITE_URL,
               'issue_year': issue.split('/')[1],
               'issue_number': issue.split('/')[0]}
        return out
def tmpl_admin_regenerate_error(self, ln, journal_name, issue):
"""
Failure message for a regeneration try.
"""
_ = gettext_set_language(ln)
return page(
title=_("Regeneration Error"),
body = _("The issue could not be correctly regenerated. "
"Please contact your administrator."))
def tmpl_admin_feature_record(self, journal_name,
featured_records=[],
ln=CFG_SITE_LANG,
msg=None):
"""
Display an interface form to feature a specific record from Invenio.
"""
_ = gettext_set_language(ln)
out = ''
out += '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="administrate?journal_name=%(journal_name)s">Administrate</a></small> </td>
<td>1. <small>Feature a Record</small> </td>
<td>2. <small><a href="configure?action=edit&journal_name=%(journal_name)s">Edit Configuration</a></small> </td>
<td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>''' % {'journal_name': journal_name,
'menu': _("Menu"),
'CFG_SITE_URL': CFG_SITE_URL}
if msg is not None:
out += msg
out += '<br/><br/>'
out += '''<table class="admin_wvar" cellspacing="0" width="400px">
<tr>
<th colspan="3" class="adminheader">Featured records</th>
</tr>'''
color = "fff"
for (recid, img_url) in featured_records:
out += '''<tr style="background-color:#%(color)s">
<td class="admintd"><img src="%(img_url)s" alt="" height="40px"/></td>
<td class="admintdleft"><a href="%(CFG_SITE_URL)s/%(CFG_SITE_RECORD)s/%(recid)s">Record %(recid)s</a></td>
<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/feature_record?journal_name=%(journal_name)s&action=askremove&recid=%(recid)s">remove</a></td>
</tr>''' % {'color': color,
'journal_name': journal_name,
'recid': recid,
'img_url': img_url,
'CFG_SITE_URL': CFG_SITE_URL,
'CFG_SITE_RECORD': CFG_SITE_RECORD}
if color == 'fff':
color = 'EBF7FF'
else:
color = 'fff'
if len(featured_records) == 0:
out += '<tr><td colspan="3" class="admintd"><em>No record featured for the moment. Add one using the form below.</em></td></tr>'
out += '</table>'
out += '''
<br/><br/><br/>
<form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/feature_record" method="post">
<input type="hidden" name="action" value="add" />
<input type="hidden" name="journal_name" value="%(journal_name)s"/>
<table class="admin_wvar" cellspacing="0">
<tr>
<th colspan="2" class="adminheaderleft">Add a new featured record:</th>
</tr>
<tr>
<td class="admintdright"><label for="recordid"><span style="white-space: nowrap;">Featured Record ID</span></label>: </td>
<td><input tabindex="1" type="text" name="recid" value="" id="recordid"/></td>
</tr>
<tr>
<td class="admintdright"><label for="image_url"><span style="white-space: nowrap;">Icon URL</span></label>: </td>
<td><input tabindex="2" type="text" name="img_url" value="" id="image_url" size="60"/><em><br/><small>Image displayed along the featured record</small></em></td>
</tr>
<tr>
<td colspan="2" align="right"><input tabindex="3" class="adminbutton" type="submit" value="Add"/></td>
</tr>
</table>
</form>
''' % {'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name}
return out
    def tmpl_admin_alert_plain_text(self, journal_name, ln, issue):
        """
        Default plain text message for email alert of journal updates.
        This will be used to pre-fill the content of the mail alert, that
        can be modified by the admin.
        Customize this function to return different default texts
        based on journal name and language,

        Returns a bilingual (English + French) unicode body; the French part
        is a translation of the English one, not localized via gettext.
        """
        current_publication = get_issue_number_display(issue, journal_name, ln)
        plain_text = u'''Dear Subscriber,
The latest issue of %(journal_name)s, no. %(current_publication)s, has been released.
You can access it at the following URL:
%(CFG_SITE_URL)s/journal/%(journal_name)s/
Best Wishes,
%(journal_name)s team
----
Cher Abonné,
Le nouveau numéro de %(journal_name)s, no. %(current_publication)s, vient de paraître.
Vous pouvez y accéder à cette adresse :
%(CFG_SITE_URL)s/journal/%(journal_name)s/?ln=fr
Bonne lecture,
L'équipe de %(journal_name)s
''' % {'journal_name': journal_name,
       'current_publication': current_publication,
       'CFG_SITE_URL': CFG_SITE_URL}
        return plain_text
    # ' (closes the apostrophe above for editors' syntax highlighting)
    def tmpl_admin_alert_header_html(self, journal_name, ln, issue):
        """
        Returns HTML header to be inserted into the HTML alert

        @param journal_name: the journal name
        @param ln: the current language
        @param issue: the issue for which the alert is sent
        """
        _ = gettext_set_language(ln)
        # `issue` is formatted "number/year"; the URL wants .../<year>/<number>.
        journal_url = '%(CFG_SITE_URL)s/journal/%(journal_name)s/%(year)s/%(number)s' % \
                      {'CFG_SITE_URL': CFG_SITE_URL,
                       'journal_name': journal_name,
                       'year': issue.split('/')[1],
                       'number': issue.split('/')[0]}
        journal_link = '<a href="%(journal_url)s">%(journal_url)s</a>' % \
                       {'journal_url': journal_url}
        return '<p class="htmlalertheader">' + \
               _('If you cannot read this email please go to %(x_journal_link)s') % {'x_journal_link': journal_link} + \
               '</p>'
def tmpl_admin_alert_subject(self, journal_name, ln, issue):
"""
Default subject for email alert of journal updates.
Customize this function to return different default texts
based on journal name and language,
"""
return "%s %s released" % (journal_name, \
get_issue_number_display(issue,
journal_name,
ln))
    def tmpl_admin_alert_interface(self, ln, journal_name, default_subject,
                                   default_msg, default_recipients, alert_ln):
        """
        Alert email interface.

        Pre-fills recipients, subject and plain-text body with the given
        defaults, and previews the journal front page (in `alert_ln`) as the
        HTML newsletter.
        """
        _ = gettext_set_language(ln)
        # NOTE(review): default values are interpolated unescaped into HTML
        # attributes; assumed safe because this is an admin-only form.
        interface = '''
        <table>
        <tr>
        <td valign="top">
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/alert" name="alert" method="post">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        <p>Recipients:</p>
        <input type="text" name="recipients" value="%(default_recipients)s" size="60" />
        <p>Subject:</p>
        <input type="text" name="subject" value="%(subject)s" size="60" />
        <p>Plain Text Message:</p>
        <textarea name="plainText" wrap="soft" rows="25" cols="80">%(plain_text)s</textarea>
        <p> <input type="checkbox" name="htmlMail" id="htmlMail" value="html" checked="checked" />
        <label for="htmlMail">Send journal front-page <small>(<em>HTML newsletter</em>)</small></label>
        </p>
        <br/>
        <input class="formbutton" type="submit" value="Send Alert" name="sent"/>
        </form>
        </td><td valign="top">
        <p>HTML newsletter preview:</p>
        <iframe id="htmlMailPreview" src="%(CFG_SITE_URL)s/journal/%(journal_name)s?ln=%(alert_ln)s" height="600" width="600"></iframe>
        </tr>
        </table>
        ''' % {'CFG_SITE_URL': CFG_SITE_URL,
               'journal_name': journal_name,
               'subject': default_subject,
               'plain_text': default_msg,
               'default_recipients': default_recipients,
               'alert_ln': alert_ln}
        return interface
    def tmpl_admin_alert_was_already_sent(self, ln, journal_name,
                                          subject, plain_text, recipients,
                                          html_mail, issue):
        """
        Warn that the alert for `issue` has already been sent and ask for an
        explicit confirmation; re-submits the original alert parameters as
        hidden fields with force=True.
        """
        _ = gettext_set_language(ln)
        # NOTE(review): subject/plain_text are interpolated unescaped into
        # HTML attribute values; assumed safe for this admin-only page.
        out = '''
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/alert" name="alert" method="post">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        <input type="hidden" name="recipients" value="%(recipients)s" />
        <input type="hidden" name="subject" value="%(subject)s" />
        <input type="hidden" name="plainText" value="%(plain_text)s" />
        <input type="hidden" name="htmlMail" value="%(html_mail)s" />
        <input type="hidden" name="force" value="True" />
        <p><em>WARNING! </em>The email alert for the issue %(issue)s has already been
        sent. Are you absolutely sure you want to send it again?</p>
        <p>Maybe you forgot to release an update issue? If so, please do this
        first <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control?journal_name=%(journal_name)s&issue=%(issue)s">here</a>.</p>
        <p>Proceed with caution, or your subscribers will receive the alert a second time.</p>
        <br/>
        <input class="formbutton" type="submit" value="I really want this!" name="sent"/>
        </form>
        ''' % {'CFG_SITE_URL': CFG_SITE_URL,
               'journal_name': journal_name,
               'recipients': recipients,
               'subject': subject,
               'plain_text': plain_text,
               'html_mail': html_mail,
               'issue': issue}
        return out
def tmpl_admin_alert_unreleased_issue(self, ln, journal_name):
"""
Tried to announce an unreleased issue
"""
_ = gettext_set_language(ln)
out = '''<p style="color:#f00">An alert cannot be send for this issue!</p>
You tried to send an alert for an issue that has not yet been released.
Release it first and retry.<br/>
Go back to the <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>.
''' % {'CFG_SITE_URL': CFG_SITE_URL,
'journal_name': journal_name}
return out
    def tmpl_admin_alert_success_msg(self, ln, journal_name):
        """
        Success message for the alert system.
        """
        _ = gettext_set_language(ln)
        out = '''<p style="color:#0f0">Alert sent successfully!</p>
        Return to your journal here: &raquo; \
        <a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">%(journal_name)s</a> <br/>
        or go back to the <a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">administration interface</a>''' % {'CFG_SITE_URL': CFG_SITE_URL,
                                                                                                                                                                        'journal_name': journal_name}
        return out
    def tmpl_admin_control_issue(self, ln, journal_name,
                                 active_issues):
        """
        Display the interface allowing to set the current issue.

        `active_issues` is the list of issue numbers ("ww/YYYY") that are
        pre-checked in the form.
        """
        _ = gettext_set_language(ln)
        out = '''
        <p>This interface gives you the possibility to create your
        current webjournal publication. Every checked issue number
        will be in the current publication. Once you have made your
        selection you can publish the new issue by clicking the %(publish)s
        button at the end.
        </p>
        <form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control" name="publish">
        <input type="hidden" name="journal_name" value="%(journal_name)s"/>
        Issue Numbers to publish:
        <ul>
        %(issues_list)s
        </ul>
        <br/>
        <p>Add a higher issue number by clicking "%(add)s"</p>
        <input class="formbutton" type="submit" value="%(add)s" name="action"/>
        <p>.. or add a custom issue number by typing it here and pressing "%(refresh)s"</p>
        <input type="text" value="ww/YYYY" name="issue"/>
        <input class="formbutton" type="submit" value="%(refresh)s" name="action"/>
        <br/>
        <br/>
        <p>If all issues you want to publish are correctly checked, proceed \
        by clicking "%(publish)s".</p>
        <input class="formbutton" type="submit" value="%(publish)s" name="action"/>
        </form>
        ''' % {'CFG_SITE_URL': CFG_SITE_URL,
               'journal_name': journal_name,
               'issues_list': "".join(['<li><input type="checkbox" name="issue" value="%s" CHECKED> %s</input></li>'
                                       % (issue, issue) for issue in active_issues]),
               'add' : _("Add"),
               'publish' : _("Publish"),
               'refresh' : _("Refresh")
               }
        return out
def tmpl_admin_control_issue_success_msg(self, ln,
                                         active_issues, journal_name):
    """
    An issue was successfully published.

    :param ln: interface language code, passed to gettext_set_language
    :param active_issues: list of the issue numbers that were just published
    :param journal_name: short journal name used in the links
    :return: HTML string (title heading followed by a list of follow-up links)
    """
    _ = gettext_set_language(ln)  # prepared, but the message itself is not localized
    issue_string = "".join([" - %s" % issue for issue in active_issues])
    title = '<h2>Issue(s) %s created successfully!</h2>' % issue_string
    body = '''<p>Now you can:</p>
<p>Return to your journal here: »
<a href="%s/journal/%s"> %s </a>
</p>
<p>Make additional publications here: »
<a href="%s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%s">Publishing Interface</a>
</p>
<p>Send an alert email here: »
<a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s"> Send an alert</a>
</p>''' % (CFG_SITE_URL, journal_name,
           journal_name, CFG_SITE_URL,
           journal_name, CFG_SITE_URL, journal_name)
    return title + body
def tmpl_admin_update_issue(self, ln, journal_name, next_issue,
                            current_issue):
    """
    A form that lets a user make an update to an issue number.

    :param ln: interface language code, passed to gettext_set_language
    :param journal_name: short journal name, embedded as a hidden form field
    :param next_issue: issue number of the pending update to release
    :param current_issue: currently released issue number
    :return: HTML form (string) posting to the issue_control handler
    """
    _ = gettext_set_language(ln)
    # Both helpers return a mapping (category -> article count); the
    # iteritems() calls below render it as "category : count" pairs.
    current_articles = get_number_of_articles_for_issue(current_issue,
                                                        journal_name,
                                                        ln)
    next_articles = get_number_of_articles_for_issue(next_issue,
                                                     journal_name,
                                                     ln)
    html = '''
<p>The Issue that was released on week %(current_issue)s has pending updates scheduled. The
next update for this issue is %(next_issue)s.</p>
<p><em>Note: If you want to make a new release, please click through all the
pending updates first.</em></p>
<p>Do you want to release the update from issue: <br/>
<em>%(current_issue)s</em> (%(current_articles)s) <br/>
to issue: <br/>
<em>%(next_issue)s</em> (%(next_articles)s) <br/>
now?</p>
<form action="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/issue_control" name="publish">
<input type="hidden" name="journal_name" value="%(journal_name)s"/>
<input type="hidden" name="issue" value="%(next_issue)s"/>
<input class="formbutton" type="submit" value="%(update)s" name="action"/>
</form>
''' % {'current_issue': current_issue,
       'next_issue' : next_issue,
       'current_articles': ",".join(["%s : %s" % (item[0], item[1]) \
                                     for item in iteritems(current_articles)]),
       'next_articles': ",".join(["%s : %s" % (item[0], item[1]) \
                                  for item in iteritems(next_articles)]),
       'CFG_SITE_URL' : CFG_SITE_URL,
       'journal_name': journal_name,
       'update': _("Update")}
    return html
def tmpl_admin_updated_issue_msg(self, ln, update_issue, journal_name):
    """
    Prints a success message for the Update release of a journal.

    :param ln: interface language code, passed to gettext_set_language
    :param update_issue: issue number of the update that was released
    :param journal_name: short journal name used in the links
    :return: HTML string (title heading followed by follow-up links)
    """
    _ = gettext_set_language(ln)  # prepared, but the message itself is not localized
    title = '<h2>Journal update %s published successfully!</h2>' % update_issue
    # NOTE(review): the alert link here points to /journal/alert?name=...,
    # while the sibling success message uses
    # /admin/webjournal/webjournaladmin.py/alert?journal_name=... --
    # confirm which endpoint is correct.
    body = '''<p>Now you can:</p>
<p>Return to your journal here: »
<a href="%s/journal/%s"> %s </a>
</p>
<p>Go back to the publishing interface: »
<a href="%s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%s">Issue Interface</a>
</p>
<p>Send an alert email here: »
<a href="%s/journal/alert?name=%s"> Send an alert</a>
</p>''' % (CFG_SITE_URL, journal_name, journal_name,
           CFG_SITE_URL, journal_name, CFG_SITE_URL, journal_name)
    return title + body
def tmpl_admin_administrate(self, journal_name, current_issue,
                            current_publication, issue_list,
                            next_issue_number, ln=CFG_SITE_LANG,
                            as_editor=True):
    """
    Returns an administration interface that shows the current publication and
    supports links to all important actions.

    :param journal_name: short journal name used in links and forms
    :param current_issue: currently released issue number (highlighted green)
    :param current_publication: label of the current publication
    :param issue_list: list of existing issue numbers to display
    :param next_issue_number: the upcoming issue number (shown last, with
        "?" as its publication)
    :param ln: interface language code
    :param as_editor: True if can make changes to the configuration. Else read-only mode.
    :return: HTML string
    """
    _ = gettext_set_language(ln)
    out = ''
    if as_editor:
        admin_menu = '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small>Administrate</small> </td>
<td>1. <small><a href="feature_record?journal_name=%(journal_name)s">Feature a Record</a></small> </td>
<td>2. <small><a href="configure?action=edit&journal_name=%(journal_name)s">Edit Configuration</a></small> </td>
<td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>'''
    else:
        admin_menu = '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small>Administrate</small> </td>
<td>1. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>'''
    out += admin_menu % {'journal_name': journal_name,
                         'menu': _("Menu"),
                         'CFG_SITE_URL': CFG_SITE_URL}
    # format the issues
    issue_boxes = []
    # Iterate over a fresh list instead of appending next_issue_number to
    # issue_list in place: the original mutated the caller's list argument.
    for issue in issue_list + [next_issue_number]:
        articles = get_number_of_articles_for_issue(issue,
                                                    journal_name,
                                                    ln)
        released_on = get_release_datetime(issue, journal_name, ln)
        announced_on = get_announcement_datetime(issue, journal_name, ln)
        issue_box = '''
<tr style="%s">
<td class="admintdright" style="vertical-align: middle;"></td>
<td class="admintdleft" style="white-space: nowrap; vertical-align: middle;">
<p>Issue: %s</p>
<p>Publication: %s</p>
</td>
<td class="admintdright" style="vertical-align: middle;">
%s
</td>
<td class="admintdright" style="vertical-align: middle;">
<p>%s</p>
<p>%s</p>
</td>
<td class="admintdright" style="vertical-align: middle;">
<p><a href="%s/admin/webjournal/webjournaladmin.py/regenerate?journal_name=%s&issue=%s&ln=%s">>regenerate</a></p>
</td>
<tr>
''' % ((issue==current_issue) and "background:#00FF00;" or "background:#F1F1F1;",
       issue, (issue==next_issue_number) and "?" or current_publication,
       # One "category : count >view" line per article category; issue is
       # stored as "ww/YYYY" and the URL wants ".../YYYY/ww/...".
       "\n".join(['<p>%s : %s <a href="%s/journal/%s/%s/%s/%s">>view</a></p>' %
                  (item[0], item[1],
                   CFG_SITE_URL, journal_name,
                   issue.split('/')[1], issue.split('/')[0], item[0]) \
                  for item in iteritems(articles)]),
       # Release status cell: a "release now" link is only offered to editors.
       (not released_on) and
       ('<em>not released</em>' + (as_editor and '<br/><a href="%s/admin/webjournal/webjournaladmin.py/issue_control?journal_name=%s">>release now</a>' % (CFG_SITE_URL, journal_name) or '')) or
       'released on: %s' % released_on.strftime("%d.%m.%Y"),
       # Announcement status cell, same editor-only rule.
       (not announced_on)
       and ('<em>not announced</em>' + (as_editor and '<br/><a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s&issue=%s">>announce now</a>' % (CFG_SITE_URL, journal_name, issue) or '')) or
       'announced on: %s <br/><a href="%s/admin/webjournal/webjournaladmin.py/alert?journal_name=%s&issue=%s">>re-announce</a>' % (announced_on.strftime("%d.%m.%Y"), CFG_SITE_URL, journal_name, issue),
       CFG_SITE_URL, journal_name, issue, ln
       )
        issue_boxes.append(issue_box)
    out += '''
<table class="admin_wvar" width="80%%" cellspacing="0">
<tbody>
<tr>
<th class="adminheaderleft"></th>
<th class="adminheaderleft">Issue / Publication</th>
<th class="adminheader">Articles</th>
<th class="adminheaderleft">Release / Announcement</th>
<th class="adminheaderleft">Cache Status</th>
<tr>
%s
</tbody>
</table>
''' % ("\n".join(issue_boxes))
    return out
def tmpl_admin_index(self, ln, journals, msg=None):
    """
    Build the admin index page content.

    Lists the journals with edit links (and a delete link for editors),
    followed by a link to add a new journal.

    :param ln: interface language code (currently unused here)
    :param journals: list of (journal_info dict, as_editor bool) tuples
    :param msg: optional message displayed at the top of the page
    :return: HTML string
    """
    chunks = []
    if msg is not None:
        chunks.append(msg)
    chunks.append('''
<p>Choose the journal you want to administrate.</p>
<table class="admin_wvar" cellspacing="0">
<tr>
<th class="adminheader">Journals</th>
<th colspan="2" class="adminheader"> </th>
</tr>
''')
    row_color = "fff"
    for journal_info, as_editor in journals:
        cells = '''<tr style="background-color:#%(color)s">
<td class="admintdleft"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">%(journal_name)s</a></td>
<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/administrate?journal_name=%(journal_name)s">edit</a></td>'''
        if as_editor:
            # Only editors are offered the delete action.
            cells += '<td class="admintdright"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/index?journal_name=%(journal_name)s&action=askDelete">delete</a></td>'
        cells += '</tr>'
        chunks.append(cells % {'color': row_color,
                               'journal_name': journal_info['journal_name'],
                               'journal_id': journal_info['journal_id'],
                               'CFG_SITE_URL': CFG_SITE_URL})
        # Alternate the row background colour on every row.
        row_color = 'EBF7FF' if row_color == 'fff' else 'fff'
    chunks.append('''<tr style="background-color:#%(color)s">
<td class="admintdleft" colspan="3" style="padding: 5px 10px;"><a href="%(CFG_SITE_URL)s/admin/webjournal/webjournaladmin.py/configure?action=add">Add new journal</a></td>
</tr>''' % {'color': row_color,
            'CFG_SITE_URL': CFG_SITE_URL})
    chunks.append('</table>')
    return "".join(chunks)
def tmpl_admin_configure_journal(self, ln, journal_name='', xml_config=None,
                                 action='edit', msg=None):
    """
    Display a page to change the settings of a journal. Also used to
    add a new journal.

    :param ln: interface language code
    :param journal_name: short journal name ('' when adding a new journal)
    :param xml_config: XML configuration prefilled into the textarea, or
        None for an empty textarea
    :param action: one of 'add', 'edit', 'editDone' -- selects the
        navigation menu and whether the name field is editable
    :param msg: optional message displayed above the form
    :return: HTML string
    """
    out = ''
    _ = gettext_set_language(ln)
    journal_name_readonly = 'readonly="readonly" disabled="disabled"'
    journal_name_note = ''
    submit_button_label = _('Apply')
    if action == 'add':
        # A new journal's name is chosen now and can never change later.
        journal_name = ''
        journal_name_readonly = ''
        journal_name_note = 'Used in URLs. Choose it short and meaningful. This cannot be changed later'
        submit_button_label = _('Add')
    elif action in ['edit', 'editDone']:
        # Display navigation menu
        out += '''<table class="admin_wvar">
<tr><th colspan="5" class="adminheaderleft" cellspacing="0">%(menu)s</th></tr>
<tr>
<td>0. <small><a href="administrate?journal_name=%(journal_name)s">Administrate</a></small> </td>
<td>1. <small><a href="feature_record?journal_name=%(journal_name)s">Feature a Record</a></small> </td>
<td>2. <small>Edit Configuration</small> </td>
<td>3. <small><a href="%(CFG_SITE_URL)s/journal/%(journal_name)s">Go to the Journal</a></small> </td>
</tr>
</table><br/>''' % {'journal_name': journal_name,
                    'menu': _("Menu"),
                    'CFG_SITE_URL': CFG_SITE_URL}
    if msg is not None:
        out += msg
    out += '<br/><br/>'
    # NOTE(review): the hidden "action" is always "addDone", even when
    # editing -- presumably the handler treats add/edit alike; confirm.
    out += '''
<form action="configure" method="post">
<input type="hidden" name="ln" value="%(ln)s" />
<input type="hidden" name="action" value="addDone" />
<table class="admin_wvar" cellspacing="0" style="width:90%%">
<tr>
<th colspan="2" class="adminheaderleft">
Journal settings</th>
</tr>
<tr>
<td class="admintdright" width="100px"><label for="journal_name">Name</label>:&nbsp;</td>
<td><input tabindex="0" name="journal_name" type="text" id="journal_name" maxlength="50" size="15" value="%(journal_name)s" %(readonly)s %(journal_name_readonly)s /><small>%(journal_name_note)s</small></td>
</tr>
<tr>
<td class="admintdright"><label for="xml_config">Config</label>:&nbsp;</td>
<td><textarea wrap="soft" rows="25" style="width:100%%" tabindex="3" name="xml_config" id="xml_config" size="25" %(readonly)s>%(xml_config)s</textarea></td>
</tr>
<td colspan="2" align="right"><input type="submit" class="adminbutton" value="%(submit_button_label)s"></td>
</tr>
</table>
</form>
''' % {'journal_name': journal_name,
       'ln': ln,
       'readonly': '',
       'disabled': '',
       # Guard against the default xml_config=None, which previously
       # raised AttributeError on .encode('utf-8').
       'xml_config': xml_config.encode('utf-8') if xml_config is not None else '',
       'journal_name_note': journal_name_note,
       'submit_button_label': submit_button_label,
       'journal_name_readonly': journal_name_readonly}
    return out
|
from __future__ import absolute_import
from __future__ import print_function
from twisted.internet import defer
from twisted.internet import task
from twisted.trial import unittest
from buildbot import util
from buildbot.util import misc
class deferredLocked(unittest.TestCase):

    """Tests for the ``util.deferredLocked`` decorator."""

    def test_name(self):
        """The decorator is re-exported unchanged from buildbot.util.misc."""
        self.assertEqual(util.deferredLocked, misc.deferredLocked)

    def test_fn(self):
        """The lock is held while the decorated function runs and released after."""
        lock = defer.DeferredLock()

        @util.deferredLocked(lock)
        def check_locked(arg1, arg2):
            self.assertEqual([lock.locked, arg1, arg2], [True, 1, 2])
            return defer.succeed(None)
        d = check_locked(1, 2)

        @d.addCallback
        def check_unlocked(_):
            self.assertFalse(lock.locked)
        return d

    def test_fn_fails(self):
        """The lock is released when the decorated function returns a failure."""
        lock = defer.DeferredLock()

        @util.deferredLocked(lock)
        def do_fail():
            return defer.fail(RuntimeError("oh noes"))
        d = do_fail()
        # The errback path must run (a plain callback would be a test
        # failure) and must observe the lock already released.
        # (An unused nested check_unlocked helper was removed here.)
        d.addCallbacks(lambda _: self.fail("didn't errback"),
                       lambda _: self.assertFalse(lock.locked))
        return d

    def test_fn_exception(self):
        """The lock is released when the decorated function raises synchronously."""
        lock = defer.DeferredLock()

        @util.deferredLocked(lock)
        def do_fail():
            raise RuntimeError("oh noes")
        # using decorators confuses pylint and gives a false positive below
        d = do_fail()  # pylint: disable=assignment-from-no-return
        # (An unused nested check_unlocked helper was removed here.)
        d.addCallbacks(lambda _: self.fail("didn't errback"),
                       lambda _: self.assertFalse(lock.locked))
        return d

    def test_method(self):
        """When given a string, the decorator looks the lock up on self."""
        testcase = self

        class C:
            @util.deferredLocked('aLock')
            def check_locked(self, arg1, arg2):
                testcase.assertEqual(
                    [self.aLock.locked, arg1, arg2], [True, 1, 2])
                return defer.succeed(None)
        obj = C()
        obj.aLock = defer.DeferredLock()
        d = obj.check_locked(1, 2)

        @d.addCallback
        def check_unlocked(_):
            self.assertFalse(obj.aLock.locked)
        return d
class TestCancelAfter(unittest.TestCase):

    """Tests for ``misc.cancelAfter``: a deferred that is cancelled after a timeout."""

    def setUp(self):
        # The underlying deferred wrapped by cancelAfter in each test.
        self.d = defer.Deferred()

    def test_succeeds(self):
        """Without a timeout firing, the wrapped deferred passes results through."""
        d = misc.cancelAfter(10, self.d)
        # cancelAfter returns the very same deferred it was given.
        self.assertIdentical(d, self.d)

        @d.addCallback
        def check(r):
            self.assertEqual(r, "result")
        self.assertFalse(d.called)
        self.d.callback("result")
        self.assertTrue(d.called)

    def test_fails(self):
        """Failures also pass straight through before the timeout."""
        d = misc.cancelAfter(10, self.d)
        self.assertFalse(d.called)
        self.d.errback(RuntimeError("oh noes"))
        self.assertTrue(d.called)
        self.assertFailure(d, RuntimeError)

    def test_timeout_succeeds(self):
        """A late callback after the timeout leaves the CancelledError in place."""
        c = task.Clock()
        d = misc.cancelAfter(10, self.d, _reactor=c)
        self.assertFalse(d.called)
        c.advance(11)  # fire the timeout -> deferred is cancelled
        d.callback("result")  # ignored
        self.assertTrue(d.called)
        self.assertFailure(d, defer.CancelledError)

    def test_timeout_fails(self):
        """A late errback after the timeout is likewise ignored."""
        c = task.Clock()
        d = misc.cancelAfter(10, self.d, _reactor=c)
        self.assertFalse(d.called)
        c.advance(11)  # fire the timeout -> deferred is cancelled
        self.d.errback(RuntimeError("oh noes"))  # ignored
        self.assertTrue(d.called)
        self.assertFailure(d, defer.CancelledError)
|
# ---------------------------------------------------------------------------
# Machine-generated constants (h2py translation of C system headers).
# Mirrors <features.h>, <sys/cdefs.h> and the Linux socket ioctl/option
# headers. Duplicate definitions below come from h2py translating every
# #ifdef branch of the original headers; the last definition wins.
# ---------------------------------------------------------------------------

# Feature-test macros (<features.h>).
_FEATURES_H = 1
_GNU_SOURCE = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__GNU_LIBRARY__ = 1

# <sys/cdefs.h> helpers; the C macros __P() and __STRING() have no useful
# Python equivalent -- these stubs only exist because h2py emits them.
_SYS_CDEFS_H = 1
def __P(args): return args
def __P(args): return args
def __P(args): return ()
def __STRING(x): return #x
def __STRING(x): return "x"

# Socket/file ioctl request numbers.
FIOSETOWN = 0x8901
SIOCSPGRP = 0x8902
FIOGETOWN = 0x8903
SIOCGPGRP = 0x8904
SIOCATMARK = 0x8905
SIOCGSTAMP = 0x8906

# setsockopt()/getsockopt() level and option names.
SOL_SOCKET = 1
SO_DEBUG = 1
SO_REUSEADDR = 2
SO_TYPE = 3
SO_ERROR = 4
SO_DONTROUTE = 5
SO_BROADCAST = 6
SO_SNDBUF = 7
SO_RCVBUF = 8
SO_KEEPALIVE = 9
SO_OOBINLINE = 10
SO_NO_CHECK = 11
SO_PRIORITY = 12
SO_LINGER = 13

# Routing and network-interface ioctls.
SIOCADDRT = 0x890B
SIOCDELRT = 0x890C
SIOCGIFNAME = 0x8910
SIOCSIFLINK = 0x8911
SIOCGIFCONF = 0x8912
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFDSTADDR = 0x8917
SIOCSIFDSTADDR = 0x8918
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFMETRIC = 0x891d
SIOCSIFMETRIC = 0x891e
SIOCGIFMEM = 0x891f
SIOCSIFMEM = 0x8920
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCSIFHWADDR = 0x8924
SIOCGIFENCAP = 0x8925
SIOCSIFENCAP = 0x8926
SIOCGIFHWADDR = 0x8927
SIOCGIFSLAVE = 0x8929
SIOCSIFSLAVE = 0x8930
SIOCADDMULTI = 0x8931
SIOCDELMULTI = 0x8932

# ARP/RARP table ioctls (OLD_* are the pre-2.x kernel forms).
OLD_SIOCDARP = 0x8950
OLD_SIOCGARP = 0x8951
OLD_SIOCSARP = 0x8952
SIOCDARP = 0x8953
SIOCGARP = 0x8954
SIOCSARP = 0x8955
SIOCDRARP = 0x8960
SIOCGRARP = 0x8961
SIOCSRARP = 0x8962
SIOCGIFMAP = 0x8970
SIOCSIFMAP = 0x8971

# Device-private / protocol-private ioctl ranges.
SIOCDEVPRIVATE = 0x89F0
SIOCPROTOPRIVATE = 0x89E0
MAX_IOVEC = 8

# Socket types.
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_PACKET = 10

# Address families and the matching protocol families.
AF_UNSPEC = 0
AF_UNIX = 1
AF_INET = 2
AF_AX25 = 3
AF_IPX = 4
AF_APPLETALK = 5
AF_NETROM = 6
AF_BRIDGE = 7
AF_AAL5 = 8
AF_X25 = 9
AF_INET6 = 10
AF_MAX = 12
PF_UNSPEC = AF_UNSPEC
PF_UNIX = AF_UNIX
PF_INET = AF_INET
PF_AX25 = AF_AX25
PF_IPX = AF_IPX
PF_APPLETALK = AF_APPLETALK
PF_NETROM = AF_NETROM
PF_BRIDGE = AF_BRIDGE
PF_AAL5 = AF_AAL5
PF_X25 = AF_X25
PF_INET6 = AF_INET6
PF_MAX = AF_MAX

# listen() backlog limit and send/recv flags.
SOMAXCONN = 128
MSG_OOB = 1
MSG_PEEK = 2
MSG_DONTROUTE = 4

# Per-protocol option levels.
SOL_IP = 0
SOL_IPX = 256
SOL_AX25 = 257
SOL_ATALK = 258
SOL_NETROM = 259
SOL_TCP = 6
SOL_UDP = 17

# IP-level socket options and type-of-service bits.
IP_TOS = 1
IPTOS_LOWDELAY = 0x10
IPTOS_THROUGHPUT = 0x08
IPTOS_RELIABILITY = 0x04
IP_TTL = 2
IP_HDRINCL = 3
IP_OPTIONS = 4
IP_MULTICAST_IF = 32
IP_MULTICAST_TTL = 33
IP_MULTICAST_LOOP = 34
IP_ADD_MEMBERSHIP = 35
IP_DROP_MEMBERSHIP = 36
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20

# IPX and TCP options.
IPX_TYPE = 1
TCP_NODELAY = 1
TCP_MAXSEG = 2

# Socket priorities and select() fd-set size.
SOPRI_INTERACTIVE = 0
SOPRI_NORMAL = 1
SOPRI_BACKGROUND = 2
__FD_SETSIZE = 256
def __FD_ZERO(fdsetp): return \
|
"""
Integrity test of a big guest vmcore, using the dump-guest-memory QMP
command and the "crash" utility.
:copyright: 2013 Red Hat, Inc.
:author: Laszlo Ersek <lersek@redhat.com>
Related RHBZ: https://bugzilla.redhat.com/show_bug.cgi?id=990118
"""
import logging
from virttest.aexpect import ShellCmdError
from autotest.client.shared import error
import string
import os
import gzip
import threading
REQ_GUEST_MEM = 4096          # exact size of guest RAM required (MB)
REQ_GUEST_ARCH = "x86_64"     # the only supported guest arch
REQ_GUEST_DF = 6144           # minimum guest disk space required (MB)
LONG_TIMEOUT = 10 * 60        # timeout for long operations (seconds)
VMCORE_BASE = "vmcore"        # basename of the host-side file the vmcore is dumped to
VMCORE_FD_NAME = "vmcore_fd"  # fd identifier used in the monitor
CRASH_SCRIPT = "crash.cmd"    # guest-side filename of the minimal "crash" command script
def run(test, params, env):
    """
    Verify the vmcore written by dump-guest-memory by a big guest.

    Note: this module uses Python 2 syntax ("except X, e") throughout.

    :param test: QEMU test object.
    :param params: Dictionary with the test parameters.
    :param env: Dictionary with test environment.
    """
    def check_requirements(vm, session):
        """
        Check guest RAM size and guest architecture.

        :param vm: virtual machine.
        :param session: login shell session.
        :raise: error.TestError if the test is misconfigured.
        """
        mem_size = vm.get_memory_size()
        if (mem_size != REQ_GUEST_MEM):
            raise error.TestError("the guest must have %d MB RAM exactly "
                                  "(current: %d MB)" % (REQ_GUEST_MEM,
                                                        mem_size))
        arch = session.cmd("uname -m").rstrip()
        if (arch != REQ_GUEST_ARCH):
            raise error.TestError("this test only supports %s guests "
                                  "(current: %s)" % (REQ_GUEST_ARCH, arch))

    def install_kernel_debuginfo(vm, session, login_timeout):
        """
        In the guest, install a kernel debuginfo package that matches
        the running kernel.

        Debuginfo packages are available for the most recent kernels
        only, so this step may need a kernel upgrade and a corresponding
        VM reboot. Also, the "debuginfo-install" yum utility is not good
        enough for this, because its exit status doesn't seem to reflect
        any failure to find a matching debuginfo package. Only "yum
        install" seems to do that, and only if an individual package is
        requested.

        :param vm: virtual machine. Can be None if the caller demands a
                debuginfo package for the running kernel.
        :param session: login shell session.
        :param login_timeout: passed to vm.reboot() as timeout. Can be
                None if vm is None.
        :return: If the debuginfo package has been successfully
                installed, None is returned. If no debuginfo package
                matching the running guest kernel is available:
                if vm is None, an exception is raised; otherwise, the
                guest kernel is upgraded, and a new session is returned
                for the rebooted guest. In this case the next call to
                this function should succeed, using the new session and
                with vm=None.
        :raise: error.TestError (guest uname command failed),
                ShellCmdError (unexpected guest yum command failure),
                exceptions from vm.reboot().
        """
        def install_matching_debuginfo(session):
            # Ask yum for the debuginfo package matching the *running*
            # kernel exactly; "yum install" of a single named package is
            # the only reliable way to detect "no such package".
            try:
                guest_kernel = session.cmd("uname -r").rstrip()
            except ShellCmdError, details:
                raise error.TestError("guest uname command failed: %s" %
                                      details)
            return session.cmd("yum -y install --enablerepo='*debuginfo' "
                               "kernel-debuginfo-%s" % guest_kernel,
                               timeout=LONG_TIMEOUT)

        try:
            output = install_matching_debuginfo(session)
            logging.debug("%s", output)
            new_sess = None
        except ShellCmdError, details:
            # No matching debuginfo: upgrade the kernel and reboot so the
            # next call (with vm=None) can succeed -- unless the caller
            # forbade that by passing vm=None.
            if (vm is None):
                raise
            logging.info("failed to install matching debuginfo, "
                         "upgrading kernel")
            logging.debug("shell error was: %s", details)
            output = session.cmd("yum -y upgrade kernel",
                                 timeout=LONG_TIMEOUT)
            logging.debug("%s", output)
            new_sess = vm.reboot(session, timeout=login_timeout)
        return new_sess

    def install_crash(session):
        """
        Install the "crash" utility in the guest.

        :param session: login shell session.
        :raise: exceptions from session.cmd().
        """
        output = session.cmd("yum -y install crash")
        logging.debug("%s", output)

    def check_disk_space(session):
        """
        Check free disk space in the guest before uploading,
        uncompressing and analyzing the vmcore.

        :param session: login shell session.
        :raise: exceptions from session.cmd(); error.TestError if free
                space is insufficient.
        """
        # Remove leftovers from a previous run and drop yum caches first.
        output = session.cmd("rm -f -v %s %s.gz" % (VMCORE_BASE, VMCORE_BASE))
        logging.debug("%s", output)
        output = session.cmd("yum clean all")
        logging.debug("%s", output)
        output = session.cmd("LC_ALL=C df --portability --block-size=1M .")
        logging.debug("%s", output)
        # Token 10 of the whitespace-split df output is the "Available"
        # column of the data row (tokens 0-6 are the header).
        df_megs = int(string.split(output)[10])
        if (df_megs < REQ_GUEST_DF):
            raise error.TestError("insufficient free disk space: %d < %d" %
                                  (df_megs, REQ_GUEST_DF))

    def dump_and_compress(qmp_monitor, vmcore_host):
        """
        Dump the guest vmcore on the host side and compress it.

        Use the "dump-guest-memory" QMP command with paging=false. Start
        a new Python thread that compresses data from a file descriptor
        to a host file. Create a pipe and pass its writeable end to qemu
        for vmcore dumping. Pass the pipe's readable end (with full
        ownership) to the compressor thread. Track references to the
        file descriptions underlying the pipe end fds carefully.

        Compressing the vmcore on the fly, then copying it to the guest,
        then decompressing it inside the guest should be much faster
        than dumping and copying a huge plaintext vmcore, especially on
        rotational media.

        :param qmp_monitor: QMP monitor for the guest.
        :param vmcore_host: absolute pathname of gzipped destination
                file.
        :raise: all sorts of exceptions. No resources should be leaked.
        """
        def compress_from_fd(input_fd, gzfile):
            # Run in a separate thread, take ownership of input_fd.
            try:
                buf = os.read(input_fd, 4096)
                while (buf):
                    gzfile.write(buf)
                    buf = os.read(input_fd, 4096)
            finally:
                # If we've run into a problem, this causes an EPIPE in
                # the qemu process, preventing it from blocking in
                # write() forever.
                os.close(input_fd)

        def dump_vmcore(qmp_monitor, vmcore_fd):
            # Temporarily create another reference to vmcore_fd, in the
            # qemu process. We own the duplicate.
            qmp_monitor.cmd(cmd="getfd",
                            args={"fdname": "%s" % VMCORE_FD_NAME},
                            fd=vmcore_fd)
            try:
                # Includes ownership transfer on success, no need to
                # call the "closefd" command then.
                qmp_monitor.cmd(cmd="dump-guest-memory",
                                args={"paging": False,
                                      "protocol": "fd:%s" % VMCORE_FD_NAME},
                                timeout=LONG_TIMEOUT)
            except:
                qmp_monitor.cmd(cmd="closefd",
                                args={"fdname": "%s" % VMCORE_FD_NAME})
                raise

        gzfile = gzip.open(vmcore_host, "wb", 1)
        try:
            try:
                (read_by_gzip, written_by_qemu) = os.pipe()
                try:
                    compressor = threading.Thread(target=compress_from_fd,
                                                  name="compressor",
                                                  args=(read_by_gzip, gzfile))
                    compressor.start()
                    # Compressor running, ownership of readable end has
                    # been transferred.
                    read_by_gzip = -1
                    try:
                        dump_vmcore(qmp_monitor, written_by_qemu)
                    finally:
                        # Close Python's own reference to the writeable
                        # end as well, so that the compressor can
                        # experience EOF before we try to join it.
                        os.close(written_by_qemu)
                        written_by_qemu = -1
                    compressor.join()
                finally:
                    # -1 marks an fd whose ownership has been handed off.
                    if (read_by_gzip != -1):
                        os.close(read_by_gzip)
                    if (written_by_qemu != -1):
                        os.close(written_by_qemu)
            finally:
                # Close the gzipped file first, *then* delete it if
                # there was an error.
                gzfile.close()
        except:
            os.unlink(vmcore_host)
            raise

    def verify_vmcore(vm, session, host_compr, guest_compr, guest_plain):
        """
        Verify the vmcore with the "crash" utility in the guest.

        Standard output needs to be searched for "crash:" and "WARNING:"
        strings; the test is successful iff there are no matches and
        "crash" exits successfully.

        :param vm: virtual machine.
        :param session: login shell session.
        :param host_compr: absolute pathname of gzipped vmcore on host,
                source file.
        :param guest_compr: single-component filename of gzipped vmcore
                on guest, destination file.
        :param guest_plain: single-component filename of gunzipped
                vmcore on guest that guest-side gunzip is expected to
                create.
        :raise: vm.copy_files_to() and session.cmd() exceptions;
                error.TestFail if "crash" meets trouble in the vmcore.
        """
        vm.copy_files_to(host_compr, guest_compr)
        output = session.cmd("gzip -d -v %s" % guest_compr,
                             timeout=LONG_TIMEOUT)
        logging.debug("%s", output)
        # Minimal crash script: print a backtrace, then quit.
        session.cmd("{ echo bt; echo quit; } > %s" % CRASH_SCRIPT)
        output = session.cmd("crash -i %s "
                             "/usr/lib/debug/lib/modules/$(uname -r)/vmlinux "
                             "%s" % (CRASH_SCRIPT, guest_plain))
        logging.debug("%s", output)
        if (string.find(output, "crash:") >= 0 or
                string.find(output, "WARNING:") >= 0):
            raise error.TestFail("vmcore corrupt")

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    qmp_monitor = vm.get_monitors_by_type("qmp")
    if qmp_monitor:
        qmp_monitor = qmp_monitor[0]
    else:
        raise error.TestError('Could not find a QMP monitor, aborting test')

    login_timeout = int(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=login_timeout)
    try:
        check_requirements(vm, session)

        # First attempt may upgrade the kernel and return a fresh session;
        # the second call (vm=None) must then succeed or raise.
        new_sess = install_kernel_debuginfo(vm, session, login_timeout)
        if (new_sess is not None):
            session = new_sess
            install_kernel_debuginfo(None, session, None)

        install_crash(session)
        check_disk_space(session)

        vmcore_compr = "%s.gz" % VMCORE_BASE
        vmcore_host = os.path.join(test.tmpdir, vmcore_compr)
        dump_and_compress(qmp_monitor, vmcore_host)
        try:
            verify_vmcore(vm, session, vmcore_host, vmcore_compr, VMCORE_BASE)
        finally:
            os.unlink(vmcore_host)
    finally:
        session.close()
|
"""
"""
from pygame.locals import *
from const import *
import widget, surface
import basic
class _button(widget.Widget):
    """Abstract base for clickable widgets (Button, Switch, Checkbox, Radio, Tool, Icon).

    Tracks a pressed state in self.state (0 = up, 1 = down), converts raw
    input events into a CLICK event, and maintains self.pcls ("", "hover"
    or "down") for style selection. Subclasses override click().
    """
    def __init__(self,**params):
        widget.Widget.__init__(self,**params)
        self.state = 0  # 0 = released, 1 = pressed
    def event(self,e):
        # Focus/hover transitions only need a repaint (pcls is recomputed below).
        if e.type == ENTER: self.repaint()
        elif e.type == EXIT: self.repaint()
        elif e.type == FOCUS: self.repaint()
        elif e.type == BLUR: self.repaint()
        elif e.type == KEYDOWN:
            if e.key == K_SPACE:
                # Space behaves like pressing the mouse button.
                self.state = 1
                self.repaint()
            elif e.key == K_TAB:
                # Move keyboard focus to the next widget.
                self.next()
        elif e.type == MOUSEBUTTONDOWN:
            self.state = 1
            self.repaint()
        elif e.type == KEYUP:
            if self.state == 1:
                # Key released while pressed: synthesize a CLICK event.
                # NOTE(review): `pygame` is referenced but only pygame.locals
                # is imported at the top of this module -- presumably `pygame`
                # is re-exported by `const`; confirm.
                sub = pygame.event.Event(CLICK,{})
                self.send(sub.type,sub)
            self.state = 0
            self.repaint()
        elif e.type == MOUSEBUTTONUP:
            self.state = 0
            self.repaint()
        elif e.type == CLICK:
            self.click()
        # Recompute the style class after every event; hover is only
        # shown while the pointer is over this widget.
        self.pcls = ""
        if self.state == 0 and self.container.myhover is self:
            self.pcls = "hover"
        if self.state == 1 and self.container.myhover is self:
            self.pcls = "down"
    def click(self):
        # Hook for subclasses; the base widget does nothing on click.
        pass
class Button(_button):
    """A button, buttons can be clicked, they are usually used to set up callbacks.

<pre>Button(value=None)</pre>

<dl>
<dt>value<dd>either a widget or a string
</dl>

<strong>Example</strong>
<code>
w = gui.Button("Click Me")
w.connect(gui.CLICK,fnc,value)
</code>
    """
    def __init__(self,value=None,**params):
        params.setdefault('cls','button')
        _button.__init__(self,**params)
        self.value = value
    def __setattr__(self,k,v):
        # Intercept attribute writes: a plain string value is wrapped in a
        # Label styled after this button, and a real value change fires
        # CHANGE and re-flows the layout.
        if k == 'value' and type(v) == str: v = basic.Label(v,cls=self.cls+".label")
        _v = self.__dict__.get(k,NOATTR)
        self.__dict__[k]=v
        if k == 'value' and v != None:
            pass  # NOTE(review): dead branch (no-op) -- left as-is
        if k == 'value' and _v != NOATTR and _v != None and _v != v:
            self.send(CHANGE)
            self.chsize()
    def resize(self,width=None,height=None):
        # The button simply adopts the size of its contained value widget.
        self.value.rect.x,self.value.rect.y = 0,0
        self.value.rect.w,self.value.rect.h = self.value.resize(width,height)
        return self.value.rect.w,self.value.rect.h
    def paint(self,s):
        # Delegate painting to the contained widget, clipped to its rect.
        self.value.paint(surface.subsurface(s,self.value.rect))
class Switch(_button):
    """A switch can have two states, True or False.

<pre>Switch(value=False)</pre>

<dl>
<dt>value<dd>initial value, (True, False)
</dl>

<strong>Example</strong>
<code>
w = gui.Switch(True)
w.connect(gui.CHANGE,fnc,value)
</code>
    """
    def __init__(self,value=False,**params):
        params.setdefault('cls','switch')
        _button.__init__(self,**params)
        self.value = value
        # Size the widget from the "off" image; "on" is assumed to match.
        img = self.style.off
        self.style.width = img.get_width()
        self.style.height = img.get_height()
    def paint(self,s):
        #self.pcls = ""
        #if self.container.myhover is self: self.pcls = "hover"
        # Draw the on/off image matching the current value.
        if self.value: img = self.style.on
        else: img = self.style.off
        s.blit(img,(0,0))
    def __setattr__(self,k,v):
        # Intercept attribute writes so a real change of .value fires
        # CHANGE and repaints (the initial assignment does not, because
        # the previous value is NOATTR then).
        _v = self.__dict__.get(k,NOATTR)
        self.__dict__[k]=v
        if k == 'value' and _v != NOATTR and _v != v:
            self.send(CHANGE)
            self.repaint()
    def click(self):
        # Toggle; __setattr__ takes care of CHANGE + repaint.
        self.value = not self.value
class Checkbox(_button):
    """Within a Group of Checkbox widgets several may be selected at a time.

<pre>Checkbox(group,value=None)</pre>

<dl>
<dt>group<dd>a gui.Group for the Checkbox to belong to
<dt>value<dd>the value
</dl>

<strong>Example</strong>
<code>
g = gui.Group(name='colors',value=['r','b'])

t = gui.Table()

t.tr()
t.td(gui.Label('Red'))
t.td(gui.Checkbox(g,'r'))

t.tr()
t.td(gui.Label('Green'))
t.td(gui.Checkbox(g,'g'))

t.tr()
t.td(gui.Label('Blue'))
t.td(gui.Checkbox(g,'b'))
</code>
    """
    def __init__(self,group,value=None,**params):
        params.setdefault('cls','checkbox')
        _button.__init__(self,**params)
        self.group = group
        self.group.add(self)
        # Lazily initialise the group's selection list; identity check
        # ("is None" instead of the previous "== None") so that an
        # existing-but-empty selection list is left alone.
        if self.group.value is None:
            self.group.value = []
        self.value = value
        # Size the widget from the "off" image; "on" is assumed to match.
        img = self.style.off
        self.style.width = img.get_width()
        self.style.height = img.get_height()
    def paint(self,s):
        #self.pcls = ""
        #if self.container.myhover is self: self.pcls = "hover"
        # Checked iff this checkbox's value is in the group's selection.
        if self.value in self.group.value: img = self.style.on
        else: img = self.style.off
        s.blit(img,(0,0))
    def click(self):
        # Toggle membership of self.value in the group selection and
        # notify the group's listeners.
        if self.value in self.group.value:
            self.group.value.remove(self.value)
        else:
            self.group.value.append(self.value)
        self.group._change()
class Radio(_button):
    """Within a Group of Radio widgets only one may be selected at a time.

<pre>Radio(group,value=None)</pre>

<dl>
<dt>group<dd>a gui.Group for the Radio to belong to
<dt>value<dd>the value
</dl>

<strong>Example</strong>
<code>
g = gui.Group(name='colors',value='g')

t = gui.Table()

t.tr()
t.td(gui.Label('Red'))
t.td(gui.Radio(g,'r'))

t.tr()
t.td(gui.Label('Green'))
t.td(gui.Radio(g,'g'))

t.tr()
t.td(gui.Label('Blue'))
t.td(gui.Radio(g,'b'))
</code>
    """
    def __init__(self,group=None,value=None,**params):
        # NOTE(review): group defaults to None but is dereferenced
        # unconditionally below (self.group.add) -- passing no group
        # raises AttributeError; confirm whether a guard is wanted.
        params.setdefault('cls','radio')
        _button.__init__(self,**params)
        self.group = group
        self.group.add(self)
        self.value = value
        # Size the widget from the "off" image; "on" is assumed to match.
        img = self.style.off
        self.style.width = img.get_width()
        self.style.height = img.get_height()
    def paint(self,s):
        #self.pcls = ""
        #if self.container.myhover is self: self.pcls = "hover"
        # Selected iff the group's single value equals this radio's value.
        if self.group.value == self.value: img = self.style.on
        else: img = self.style.off
        s.blit(img,(0,0))
    def click(self):
        # Select this radio; the group holds exactly one value.
        self.group.value = self.value
class Tool(_button):
    """Within a Group of Tool widgets only one may be selected at a time.

<pre>Tool(group,widget=None,value=None)</pre>

<dl>
<dt>group<dd>a gui.Group for the Tool to belong to
<dt>widget<dd>a widget to appear on the Tool (similar to a Button)
<dt>value<dd>the value
</dl>

<strong>Example</strong>
<code>
g = gui.Group(name='colors',value='g')

t = gui.Table()

t.tr()
t.td(gui.Tool(g,'Red','r'))

t.tr()
t.td(gui.Tool(g,'Green','g'))

t.tr()
t.td(gui.Tool(g,'Blue','b'))
</code>
    """
    def __init__(self,group,widget=None,value=None,**params): #TODO widget= could conflict with module widget
        params.setdefault('cls','tool')
        _button.__init__(self,**params)
        self.group = group
        self.group.add(self)
        self.value = value
        if widget:
            self.setwidget(widget)
        # Start in the "down" style when this tool is the group's selection.
        if self.group.value == self.value: self.pcls = "down"
    def setwidget(self,w):
        # Attach the widget displayed on the tool's face.
        self.widget = w
    def resize(self,width=None,height=None):
        # The tool adopts the natural size of its face widget.
        self.widget.rect.w,self.widget.rect.h = self.widget.resize()
        #self.widget._resize()
        #self.rect.w,self.rect.h = self.widget.rect_margin.w,self.widget.rect_margin.h
        return self.widget.rect.w,self.widget.rect.h
    def event(self,e):
        _button.event(self,e)
        # Keep the "down" style while selected, overriding what the base
        # event handler computed in self.pcls.
        if self.group.value == self.value: self.pcls = "down"
    def paint(self,s):
        if self.group.value == self.value: self.pcls = "down"
        self.widget.paint(surface.subsurface(s,self.widget.rect))
    def click(self):
        # Select this tool and clear the "down" style of its siblings.
        self.group.value = self.value
        for w in self.group.widgets:
            if w != self: w.pcls = ""
class Icon(_button):
    """A clickable image-only widget sized from its style image.

    TODO - might be deprecated
    """
    def __init__(self,cls,**params):
        _button.__init__(self,**params)
        self.cls = cls  # style class name; the image comes from the style
        self.pcls = ""
        # Size the widget from the style image.
        s = self.style.image
        self.style.width = s.get_width()
        self.style.height = s.get_height()
        self.state = 0  # 0 = released, 1 = pressed (see _button)
    def paint(self,s):
        #self.pcls = ""
        #if self.state == 0 and hasattr(self.container,'myhover') and self.container.myhover is self: self.pcls = "hover"
        #if self.state == 1 and hasattr(self.container,'myhover') and self.container.myhover is self: self.pcls = "down"
        s.blit(self.style.image,(0,0))
|
# Packaging script: project metadata is sourced from hooker_common.release so
# that version/description/etc. are maintained in a single place.
from setuptools import setup, find_packages
from hooker_common import release

# Runtime dependencies; the elasticsearch client is pinned to the 1.x line.
install_requires = [
    'colorama',
    'elasticsearch>=1.0.0,<2.0.0'
]

setup(
    name = release.name,
    packages = find_packages(),
    version = release.version,
    license = release.licenseName,
    description = release.description,
    author = release.author,
    author_email = release.author_email,
    url = release.url,
    download_url = release.download_url,
    keywords = release.keywords,
    install_requires=install_requires
)
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata: community-supported, still in preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
      - Brief description of the record. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (rax_argument_spec,
rax_find_loadbalancer,
rax_find_server,
rax_required_together,
rax_to_dict,
setup_rax_module,
)
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
                       name=None, server=None, state='present', ttl=7200):
    """Create or delete a PTR (reverse DNS) record for a cloud server or
    load balancer, then exit the module with the result.

    Exactly one of *loadbalancer* or *server* must be supplied; the PTR
    record is attached to that device. This function always terminates via
    module.exit_json()/fail_json() and never returns normally.
    """
    changed = False
    results = []

    dns = pyrax.cloud_dns

    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if loadbalancer:
        item = rax_find_loadbalancer(module, pyrax, loadbalancer)
    elif server:
        item = rax_find_server(module, pyrax, server)
    else:
        # Defensive guard: main() enforces this before calling, but a direct
        # call with neither device would otherwise hit an unbound 'item'.
        module.fail_json(msg='one of the following is required: '
                             'server,loadbalancer')

    if state == 'present':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                if record.ttl != ttl or record.name != name:
                    try:
                        dns.update_ptr_record(item, record, name, data, ttl)
                        changed = True
                    except Exception as e:
                        # '%s' % e instead of e.message: Exception.message
                        # does not exist on Python 3.
                        module.fail_json(msg='%s' % e)
                    record.ttl = ttl
                    record.name = name
                    results.append(rax_to_dict(record))
                    break
                else:
                    results.append(rax_to_dict(record))
                    break

        if not results:
            record = dict(name=name, type='PTR', data=data, ttl=ttl,
                          comment=comment)
            try:
                results = dns.add_ptr_records(item, [record])
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, records=results)

    elif state == 'absent':
        current = dns.list_ptr_records(item)
        for record in current:
            if record.data == data:
                results.append(rax_to_dict(record))
                break

        if results:
            try:
                dns.delete_ptr_records(item, data)
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
                   overwrite=True, priority=None, record_type='A',
                   state='present', ttl=7200):
    """Create, update or delete DNS record types other than PTR, then exit
    the module with the result.

    With overwrite=True an existing record with a matching name is updated
    in place; otherwise a new record is added when data differs. Always
    terminates via module.exit_json()/fail_json().
    """
    changed = False

    dns = pyrax.cloud_dns
    if not dns:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if state == 'present':
        # priority 0 is a legal value for MX/SRV (documented range 0-65535),
        # so test for None rather than falsiness.
        if priority is None and record_type in ['MX', 'SRV']:
            module.fail_json(msg='A "priority" attribute is required for '
                                 'creating a MX or SRV record')

        try:
            domain = dns.find(name=domain)
        except Exception as e:
            # '%s' % e instead of e.message: Exception.message does not
            # exist on Python 3.
            module.fail_json(msg='%s' % e)

        try:
            if overwrite:
                record = domain.find_record(record_type, name=name)
            else:
                record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='overwrite=true and there are multiple matching records')
        except pyrax.exceptions.DomainRecordNotFound as e:
            # No matching record yet: create one.
            try:
                record_data = {
                    'type': record_type,
                    'name': name,
                    'data': data,
                    'ttl': ttl
                }
                if comment:
                    record_data.update(dict(comment=comment))
                if priority is not None and record_type.upper() in ['MX', 'SRV']:
                    record_data.update(dict(priority=priority))

                record = domain.add_records([record_data])[0]
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

        # Bring the (found or freshly created) record in line with the
        # requested attributes.
        update = {}
        if comment != getattr(record, 'comment', None):
            update['comment'] = comment
        if ttl != getattr(record, 'ttl', None):
            update['ttl'] = ttl
        if priority != getattr(record, 'priority', None):
            update['priority'] = priority
        if data != getattr(record, 'data', None):
            update['data'] = data

        if update:
            try:
                record.update(**update)
                changed = True
                record.get()
            except Exception as e:
                module.fail_json(msg='%s' % e)

    elif state == 'absent':
        try:
            domain = dns.find(name=domain)
        except Exception as e:
            module.fail_json(msg='%s' % e)

        try:
            record = domain.find_record(record_type, name=name, data=data)
        except pyrax.exceptions.DomainRecordNotFound as e:
            record = {}
        except pyrax.exceptions.DomainRecordNotUnique as e:
            module.fail_json(msg='%s' % e)

        if record:
            try:
                record.delete()
                changed = True
            except Exception as e:
                module.fail_json(msg='%s' % e)

    module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
    """Module entry point: parse arguments and dispatch to the PTR or
    standard record handler."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            comment=dict(),
            data=dict(required=True),
            domain=dict(),
            loadbalancer=dict(),
            name=dict(required=True),
            overwrite=dict(type='bool', default=True),
            priority=dict(type='int'),
            server=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            ttl=dict(type='int', default=3600),
            type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
                                              'SRV', 'TXT', 'PTR'])
        )
    )

    # domain applies to regular records, server/loadbalancer only to PTR —
    # hence mutually exclusive but exactly one required.
    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
        mutually_exclusive=[
            ['server', 'loadbalancer', 'domain'],
        ],
        required_one_of=[
            ['server', 'loadbalancer', 'domain'],
        ],
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    comment = module.params.get('comment')
    data = module.params.get('data')
    domain = module.params.get('domain')
    loadbalancer = module.params.get('loadbalancer')
    name = module.params.get('name')
    overwrite = module.params.get('overwrite')
    priority = module.params.get('priority')
    server = module.params.get('server')
    state = module.params.get('state')
    ttl = module.params.get('ttl')
    record_type = module.params.get('type')

    setup_rax_module(module, pyrax, False)

    # PTR records attach to a device rather than a domain and use a
    # separate API; everything else goes through rax_dns_record().
    if record_type.upper() == 'PTR':
        if not server and not loadbalancer:
            module.fail_json(msg='one of the following is required: '
                                 'server,loadbalancer')
        rax_dns_record_ptr(module, data=data, comment=comment,
                           loadbalancer=loadbalancer, name=name, server=server,
                           state=state, ttl=ttl)
    else:
        rax_dns_record(module, comment=comment, data=data, domain=domain,
                       name=name, overwrite=overwrite, priority=priority,
                       record_type=record_type, state=state, ttl=ttl)


if __name__ == '__main__':
    main()
|
# Smoke test for the SWIG "using/extend" wrapper: each overload of blah()
# must dispatch on argument type/arity and return the expected value.
from using_extend import *

f = FooBar()
if f.blah(3) != 3:
    # raise E("msg") instead of the Python-2-only `raise E, "msg"` form,
    # which is a SyntaxError on Python 3; this form works on both.
    raise RuntimeError("blah(int)")

if f.blah(3.5) != 3.5:
    raise RuntimeError("blah(double)")

if f.blah("hello") != "hello":
    raise RuntimeError("blah(char *)")

if f.blah(3, 4) != 7:
    raise RuntimeError("blah(int,int)")

if f.blah(3.5, 7.5) != (3.5 + 7.5):
    raise RuntimeError("blah(double,double)")

if f.duh(3) != 3:
    raise RuntimeError("duh(int)")
|
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
from distutils.version import StrictVersion
from bases.FrameworkServices.SimpleService import SimpleService
# Chart presentation order on the netdata dashboard.
ORDER = [
    'running_containers',
    'healthy_containers',
    'unhealthy_containers'
]

# netdata chart definitions: options are
# [name, title, units, family, context, chart_type].
CHARTS = {
    'running_containers': {
        'options': [None, 'Number of running containers', 'containers', 'running containers',
                    'docker.running_containers', 'line'],
        'lines': [
            ['running_containers', 'running']
        ]
    },
    'healthy_containers': {
        'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
                    'docker.healthy_containers', 'line'],
        'lines': [
            ['healthy_containers', 'healthy']
        ]
    },
    'unhealthy_containers': {
        'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
                    'docker.unhealthy_containers', 'line'],
        'lines': [
            ['unhealthy_containers', 'unhealthy']
        ]
    }
}

# Minimum version of the docker SDK for Python that this collector supports.
MIN_REQUIRED_VERSION = '3.2.0'
class Service(SimpleService):
    """netdata collector reporting running/healthy/unhealthy container
    counts from a local docker daemon."""

    def __init__(self, configuration=None, name=None):
        SimpleService.__init__(self, configuration=configuration, name=name)
        self.order = ORDER
        self.definitions = CHARTS
        self.client = None

    def check(self):
        """Return True when the docker SDK is present, new enough, and the
        daemon answers a ping; otherwise log an error and return False."""
        if not HAS_DOCKER:
            self.error("'docker' package is needed to use dockerd module")
            return False

        if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
            self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
                docker.__version__,
                MIN_REQUIRED_VERSION,
            ))
            return False

        base_url = self.configuration.get('url', 'unix://var/run/docker.sock')
        self.client = docker.DockerClient(base_url=base_url)

        try:
            self.client.ping()
        except docker.errors.APIError as error:
            self.error(error)
            return False

        return True

    def get_data(self):
        """Collect the three container counts as a metric-name -> value dict."""
        containers = self.client.containers
        data = {
            'running_containers': len(containers.list(sparse=True)),
            'healthy_containers': len(containers.list(filters={'health': 'healthy'}, sparse=True)),
            'unhealthy_containers': len(containers.list(filters={'health': 'unhealthy'}, sparse=True)),
        }
        return data or None
|
import zipfile
import xml.sax
from xml.sax import handler
from xml.sax.xmlreader import InputSource
from xml.sax.saxutils import escape, quoteattr
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from namespaces import ANIMNS, CHARTNS, CONFIGNS, DCNS, DR3DNS, DRAWNS, FONS, \
FORMNS, MATHNS, METANS, NUMBERNS, OFFICENS, PRESENTATIONNS, SCRIPTNS, \
SMILNS, STYLENS, SVGNS, TABLENS, TEXTNS, XLINKNS
class StyleToCSS:
    """ The purpose of the StyleToCSS class is to contain the rules to convert
        ODF styles to CSS2. Since it needs the generic fonts, it would probably
        make sense to also contain the Styles in a dict as well..
    """
    def __init__(self):
        # Font declarations
        self.fontdict = {}
        # Fill-images from presentations for backgrounds
        self.fillimages = {}
        # Maps an ODF (namespace, attribute) pair to the converter method
        # that writes the equivalent CSS2 property.
        self.ruleconversions = {
            (DRAWNS,u'fill-image-name'): self.c_drawfillimage,
            (FONS,u"background-color"): self.c_fo,
            (FONS,u"border"): self.c_fo,
            (FONS,u"border-bottom"): self.c_fo,
            (FONS,u"border-left"): self.c_fo,
            (FONS,u"border-right"): self.c_fo,
            (FONS,u"border-top"): self.c_fo,
            (FONS,u"color"): self.c_fo,
            (FONS,u"font-family"): self.c_fo,
            (FONS,u"font-size"): self.c_fo,
            (FONS,u"font-style"): self.c_fo,
            (FONS,u"font-variant"): self.c_fo,
            (FONS,u"font-weight"): self.c_fo,
            (FONS,u"line-height"): self.c_fo,
            (FONS,u"margin"): self.c_fo,
            (FONS,u"margin-bottom"): self.c_fo,
            (FONS,u"margin-left"): self.c_fo,
            (FONS,u"margin-right"): self.c_fo,
            (FONS,u"margin-top"): self.c_fo,
            (FONS,u"min-height"): self.c_fo,
            (FONS,u"padding"): self.c_fo,
            (FONS,u"padding-bottom"): self.c_fo,
            (FONS,u"padding-left"): self.c_fo,
            (FONS,u"padding-right"): self.c_fo,
            (FONS,u"padding-top"): self.c_fo,
            (FONS,u"page-width"): self.c_page_width,
            (FONS,u"page-height"): self.c_page_height,
            (FONS,u"text-align"): self.c_text_align,
            (FONS,u"text-indent") :self.c_fo,
            (TABLENS,u'border-model') :self.c_border_model,
            (STYLENS,u'width') : self.c_width,
            (STYLENS,u'column-width') : self.c_width,
            (STYLENS,u"font-name"): self.c_fn,
            (STYLENS,u'text-position'): self.c_text_position,
            (STYLENS,u'horizontal-pos'): self.c_hp,
            # FIXME Should do style:vertical-pos here
        }

    def save_font(self, name, family, generic):
        """ It is possible that the HTML browser doesn't know how to
            show a particular font. Fortunately ODF provides generic fallbacks.
            Unfortunately they are not the same as CSS2.
            CSS2: serif, sans-serif, cursive, fantasy, monospace
            ODF: roman, swiss, modern, decorative, script, system
            This method put the font and fallback into a dictionary
        """
        htmlgeneric = "sans-serif"
        if generic == "roman": htmlgeneric = "serif"
        elif generic == "swiss": htmlgeneric = "sans-serif"
        elif generic == "modern": htmlgeneric = "monospace"
        elif generic == "decorative": htmlgeneric = "sans-serif"
        elif generic == "script": htmlgeneric = "monospace"
        elif generic == "system": htmlgeneric = "serif"
        self.fontdict[name] = (family, htmlgeneric)

    def c_drawfillimage(self, ruleset, sdict, rule, val):
        """ Fill a figure with an image. Since CSS doesn't let you resize images
            this should really be implemented as an absolutely position <img>
            with a width and a height
        """
        # NOTE(review): raises KeyError if the fill-image name was never
        # registered via the presentation's <draw:fill-image> elements.
        sdict['background-image'] = "url('%s')" % self.fillimages[val]

    def c_fo(self, ruleset, sdict, rule, val):
        """ XSL formatting attributes """
        # The fo: attribute name is already a valid CSS property name.
        selector = rule[1]
        sdict[selector] = val

    def c_border_model(self, ruleset, sdict, rule, val):
        """ Convert to CSS2 border model """
        if val == 'collapsing':
            sdict['border-collapse'] ='collapse'
        else:
            sdict['border-collapse'] ='separate'

    def c_width(self, ruleset, sdict, rule, val):
        """ Set width of box """
        sdict['width'] = val

    def c_text_align(self, ruleset, sdict, rule, align):
        """ Text align """
        # ODF uses writing-direction-relative start/end; CSS2 wants left/right.
        if align == "start": align = "left"
        if align == "end": align = "right"
        sdict['text-align'] = align

    def c_fn(self, ruleset, sdict, rule, fontstyle):
        """ Generate the CSS font family
            A generic font can be found in two ways. In a <style:font-face>
            element or as a font-family-generic attribute in text-properties.
        """
        generic = ruleset.get((STYLENS,'font-family-generic') )
        if generic is not None:
            self.save_font(fontstyle, fontstyle, generic)
        family, htmlgeneric = self.fontdict.get(fontstyle, (fontstyle, 'serif'))
        sdict['font-family'] = '%s, %s' % (family, htmlgeneric)

    def c_text_position(self, ruleset, sdict, rule, tp):
        """ Text position. This is used e.g. to make superscript and subscript
        """
        # Value is "<vertical-align> [<font-size>]".
        textpos = tp.split(' ')
        if len(textpos) == 2 and textpos[0] != "0%":
            # Bug in OpenOffice. If vertical-align is 0% - ignore the text size.
            sdict['font-size'] = textpos[1]
        sdict['vertical-align'] = textpos[0]

    def c_hp(self, ruleset, sdict, rule, hpos):
        """ Convert style:horizontal-pos to CSS float/position rules,
            taking the style's wrap mode into account.
        """
        #FIXME: Frames wrap-style defaults to 'parallel', graphics to 'none'.
        # It is properly set in the parent-styles, but the program doesn't
        # collect the information.
        wrap = ruleset.get((STYLENS,'wrap'),'parallel')
        # Can have: from-left, left, center, right, from-inside, inside, outside
        if hpos == "center":
            sdict['margin-left'] = "auto"
            sdict['margin-right'] = "auto"
        else:
            # force it to be *something* then delete it
            sdict['margin-left'] = sdict['margin-right'] = ''
            del sdict['margin-left'], sdict['margin-right']

        if hpos in ("right","outside"):
            if wrap in ( "left", "parallel"):
                sdict['float'] = "right"
            elif wrap == "run-through":
                sdict['position'] = "absolute" # Simulate run-through
                sdict['top'] = "0"
                sdict['right'] = "0"
            else: # No wrapping
                sdict['position'] = "relative" # No wrapping
                sdict['top'] = "0"
                sdict['right'] = "0"
        elif hpos in ("left", "inside"):
            if wrap in ( "right", "parallel"):
                sdict['float'] = "left"
            elif wrap == "run-through":
                sdict['position'] = "absolute" # Simulate run-through
                sdict['top'] = "0"
                sdict['left'] = "0"
            else: # No wrapping
                sdict['position'] = "relative" # No wrapping
                sdict['top'] = "0"
                sdict['left'] = "0"
        elif hpos in ("from-left", "from-inside"):
            if wrap in ( "right", "parallel"):
                sdict['float'] = "left"
            else:
                sdict['position'] = "relative" # No wrapping
                if ruleset.has_key( (SVGNS,'x') ):
                    sdict['left'] = ruleset[(SVGNS,'x')]

    def c_page_width(self, ruleset, sdict, rule, val):
        """ Set width of box
            HTML doesn't really have a page-width. It is always 100% of the browser width
        """
        sdict['width'] = val

    def c_page_height(self, ruleset, sdict, rule, val):
        """ Set height of box """
        sdict['height'] = val

    def convert_styles(self, ruleset):
        """ Rule is a tuple of (namespace, name). If the namespace is '' then
            it is already CSS2
        """
        sdict = {}
        for rule,val in ruleset.items():
            if rule[0] == '':
                sdict[rule[1]] = val
                continue
            method = self.ruleconversions.get(rule, None )
            if method:
                method(ruleset, sdict, rule, val)
        return sdict
class TagStack:
    """Stack of (tag, attrs) pairs tracking the currently open XML elements."""

    def __init__(self):
        self.stack = []

    def push(self, tag, attrs):
        """Record that *tag* was opened with attribute dict *attrs*."""
        self.stack.append( (tag, attrs) )

    def pop(self):
        """Remove and return the most recently opened (tag, attrs) pair."""
        return self.stack.pop()

    def stackparent(self):
        """Return the attrs of the innermost currently open element."""
        return self.stack[-1][1]

    def rfindattr(self, attr):
        """ Find a tag with the given attribute.

        Searches from the outermost open element inward and returns the
        first value found, or None when no open element carries *attr*.
        """
        for tag, attrs in self.stack:
            # 'in' instead of dict.has_key(): has_key() was removed in
            # Python 3 and 'in' works identically on Python 2.
            if attr in attrs:
                return attrs[attr]
        return None

    def count_tags(self, tag):
        """Return how many times *tag* is currently open."""
        c = 0
        for ttag, tattrs in self.stack:
            if ttag == tag: c = c + 1
        return c
# Maps "<family prefix>-<ODF style name>" to the semantic (X)HTML tag used
# instead of a generically styled element. Prefix "S-" marks a text (span)
# style, "P-" a paragraph style; "_20_" is ODF's escape for a space.
special_styles = {
   'S-Emphasis':'em',
   'S-Citation':'cite',
   'S-Strong_20_Emphasis':'strong',
   'S-Variable':'var',
   'S-Definition':'dfn',
   'S-Teletype':'tt',
   'P-Heading_20_1':'h1',
   'P-Heading_20_2':'h2',
   'P-Heading_20_3':'h3',
   'P-Heading_20_4':'h4',
   'P-Heading_20_5':'h5',
   'P-Heading_20_6':'h6',
   'P-Caption':'caption',
   'P-Addressee':'address',
   'P-Preformatted_20_Text':'pre',
   'P-Text_20_body':'p'
}
class ODF2XHTML(handler.ContentHandler):
""" The ODF2XHTML parses an ODF file and produces XHTML"""
    def wlines(self,s):
        """Default output function: collect non-empty strings in self.lines."""
        if s != '': self.lines.append(s)

    def __init__(self):
        """Set up parser state and the ODF-element -> handler dispatch table."""
        self.xmlfile = ''
        self.title = ''
        self.lines = []
        self.wfunc = self.wlines
        self.data = []
        self.tagstack = TagStack()
        self.pstack = []
        self.processelem = True
        self.processcont = True
        self.listtypes = {}
        self.headinglevels = [0, 0,0,0,0,0, 0,0,0,0,0] # level 0 to 10
        self.cs = StyleToCSS()

        # Style declarations
        self.stylestack = []
        self.styledict = {}
        self.currentstyle = None

        # Footnotes and endnotes
        self.notedict = {}
        self.currentnote = 0
        self.notebody = ''

        # Tags from meta.xml
        self.metatags = []

        # Tags
        # Each entry maps an ODF (namespace, localname) to a
        # (start handler, end handler) pair; None means "no handler".
        self.elements = {
         (DCNS, 'title'): (self.s_processcont, self.e_dc_title),
         (DCNS, 'language'): (self.s_processcont, self.e_dc_contentlanguage),
         (DCNS, 'creator'): (self.s_processcont, self.e_dc_metatag),
         (DCNS, 'description'): (self.s_processcont, self.e_dc_metatag),
         (DCNS, 'date'): (self.s_processcont, self.e_dc_metatag),
         (DRAWNS, 'frame'): (self.s_draw_frame, self.e_draw_frame),
         (DRAWNS, 'image'): (self.s_draw_image, None),
         (DRAWNS, 'fill-image'): (self.s_draw_fill_image, None),
         (DRAWNS, "layer-set"):(self.s_ignorexml, None),
         (DRAWNS, 'page'): (self.s_draw_page, self.e_draw_page),
         (METANS, 'keyword'): (self.s_processcont, self.e_dc_metatag),
         (METANS, 'generator'):(self.s_processcont, self.e_dc_metatag),
         (NUMBERNS, "boolean-style"):(self.s_ignorexml, None),
         (NUMBERNS, "currency-style"):(self.s_ignorexml, None),
         (NUMBERNS, "date-style"):(self.s_ignorexml, None),
         (NUMBERNS, "number-style"):(self.s_ignorexml, None),
         (NUMBERNS, "text-style"):(self.s_ignorexml, None),
         (OFFICENS, "automatic-styles"):(self.s_office_automatic_styles, None),
         (OFFICENS, "document-content"):(self.s_office_document_content, self.e_office_document_content),
         (OFFICENS, "forms"):(self.s_ignorexml, None),
         (OFFICENS, "meta"):(self.s_ignorecont, None),
         (OFFICENS, "presentation"):(self.s_office_presentation, self.e_office_presentation),
         (OFFICENS, "spreadsheet"):(self.s_office_spreadsheet, self.e_office_spreadsheet),
         (OFFICENS, "styles"):(self.s_office_styles, None),
         (OFFICENS, "text"):(self.s_office_text, self.e_office_text),
         (OFFICENS, "scripts"):(self.s_ignorexml, None),
         (PRESENTATIONNS, "notes"):(self.s_ignorexml, None),
         (STYLENS, "default-style"):(self.s_style_default_style, self.e_style_default_style),
         (STYLENS, "drawing-page-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "font-face"):(self.s_style_font_face, None),
         (STYLENS, "graphic-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "handout-master"):(self.s_ignorexml, None),
         (STYLENS, "master-page"):(self.s_style_master_page, None),
         (STYLENS, "page-layout-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "page-layout"):(self.s_style_page_layout, self.e_style_page_layout),
         (STYLENS, "paragraph-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "style"):(self.s_style_style, self.e_style_style),
         (STYLENS, "table-cell-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "table-column-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "table-properties"):(self.s_style_handle_properties, None),
         (STYLENS, "text-properties"):(self.s_style_handle_properties, None),
         (TABLENS, 'covered-table-cell'): (self.s_ignorexml, None),
         (TABLENS, 'table-cell'): (self.s_table_table_cell, self.e_table_table_cell),
         (TABLENS, 'table-column'): (self.s_table_table_column, None),
         (TABLENS, 'table-row'): (self.s_table_table_row, self.e_table_table_row),
         (TABLENS, 'table'): (self.s_table_table, self.e_table_table),
         (TEXTNS, 'a'): (self.s_text_a, self.e_text_a),
         (TEXTNS, "alphabetical-index-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, "bibliography-configuration"):(self.s_ignorexml, None),
         (TEXTNS, "bibliography-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, 'h'): (self.s_text_h, self.e_text_h),
         (TEXTNS, "illustration-index-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, 'line-break'):(self.s_text_line_break, None),
         (TEXTNS, "linenumbering-configuration"):(self.s_ignorexml, None),
         (TEXTNS, "list"):(self.s_text_list, self.e_text_list),
         (TEXTNS, "list-item"):(self.s_text_list_item, self.e_text_list_item),
         (TEXTNS, "list-level-style-bullet"):(self.s_text_list_level_style_bullet, self.e_text_list_level_style_bullet),
         (TEXTNS, "list-level-style-number"):(self.s_text_list_level_style_number, self.e_text_list_level_style_number),
         (TEXTNS, "list-style"):(None, None),
         (TEXTNS, "note"):(self.s_text_note, None),
         (TEXTNS, "note-body"):(self.s_text_note_body, self.e_text_note_body),
         (TEXTNS, "note-citation"):(None, self.e_text_note_citation),
         (TEXTNS, "notes-configuration"):(self.s_ignorexml, None),
         (TEXTNS, "object-index-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, 'p'): (self.s_text_p, self.e_text_p),
         (TEXTNS, 's'): (self.s_text_s, None),
         (TEXTNS, 'span'): (self.s_text_span, self.e_text_span),
         (TEXTNS, 'tab'): (self.s_text_tab, None),
         (TEXTNS, "table-index-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, "table-of-content-source"):(self.s_text_x_source, self.e_text_x_source),
         (TEXTNS, "user-index-source"):(self.s_text_x_source, self.e_text_x_source),
        }
    def writeout(self, s):
        """Send a non-empty string to the configured output function."""
        if s != '':
            self.wfunc(s)

    def writedata(self):
        """Flush accumulated character data as escaped text."""
        d = ''.join(self.data)
        if d != '':
            self.writeout(escape(d))

    def opentag(self, tag, attrs={}):
        """ Create an open HTML tag """
        # NOTE(review): the shared mutable default is safe only because
        # attrs is never mutated here.
        a = []
        for key,val in attrs.items():
            a.append('''%s=%s''' % (key, quoteattr(val)))
        if len(a) == 0:
            self.writeout("<%s>" % tag)
        else:
            self.writeout("<%s %s>" % (tag, " ".join(a)))

    def closetag(self, tag):
        """Write a closing HTML tag followed by a newline."""
        self.writeout("</%s>\n" % tag)

    def emptytag(self, tag, attrs={}):
        """Write a self-closing HTML tag with the given attributes."""
        a = []
        for key,val in attrs.items():
            a.append('''%s=%s''' % (key, quoteattr(val)))
        self.writeout("<%s %s/>\n" % (tag, " ".join(a)))
    def characters(self, data):
        # SAX callback: buffer text only while neither the element nor its
        # content is being ignored.
        if self.processelem and self.processcont:
            self.data.append(data)

    def handle_starttag(self, tag, method, attrs):
        # Indirection point so subclasses can intercept handler dispatch.
        method(tag,attrs)

    def handle_endtag(self, tag, attrs, method):
        method(tag, attrs)

    def startElementNS(self, tag, qname, attrs):
        """SAX callback: dispatch to the registered start handler and push
        the element onto the tag stack."""
        # Save the ignore flags so endElementNS can restore them.
        self.pstack.append( (self.processelem, self.processcont) )
        if self.processelem:
            method = self.elements.get(tag, (None, None) )[0]
            if method:
                self.handle_starttag(tag, method, attrs)
            else:
                self.unknown_starttag(tag,attrs)
        self.tagstack.push( tag, attrs )

    def endElementNS(self, tag, qname):
        """SAX callback: dispatch to the registered end handler and restore
        the ignore flags saved at element start."""
        stag, attrs = self.tagstack.pop()
        if self.processelem:
            method = self.elements.get(tag, (None, None) )[1]
            if method:
                self.handle_endtag(tag, attrs, method)
            else:
                self.unknown_endtag(tag, attrs)
        self.processelem, self.processcont = self.pstack.pop()

    def unknown_starttag(self, tag, attrs):
        # Default: silently skip elements with no registered handler.
        pass

    def unknown_endtag(self, tag, attrs):
        pass

    def s_ignorexml(self, tag, attrs):
        """ Ignore this xml element and all children of it
            It will automatically stop ignoring
        """
        self.processelem = False

    def s_ignorecont(self, tag, attrs):
        # Ignore character data (but still process child elements).
        self.processcont = False

    def s_processcont(self, tag, attrs):
        # Re-enable character data collection for this subtree.
        self.processcont = True
    def classname(self, attrs):
        """ Generate a class name from a style name """
        c = attrs[(TEXTNS,'style-name')]
        # Dots are not valid in CSS class names.
        c = c.replace(".","_")
        return c

    def purgedata(self):
        # Drop any buffered character data.
        self.data = []

    def e_dc_title(self, tag, attrs):
        """ Get the title from the meta data and create a HTML <title>
        """
        self.metatags.append('<title>%s</title>\n' % escape(''.join(self.data)))
        self.title = ''.join(self.data)
        self.data = []

    def e_dc_metatag(self, tag, attrs):
        """ Any other meta data is added as a <meta> element
        """
        self.metatags.append('<meta name="%s" content=%s/>\n' % (tag[1], quoteattr(''.join(self.data))))
        self.data = []

    def e_dc_contentlanguage(self, tag, attrs):
        """ Set the content language. Identifies the targeted audience
        """
        self.metatags.append('<meta http-equiv="content-language" content="%s"/>\n' % ''.join(self.data))
        self.data = []
    def s_draw_frame(self, tag, attrs):
        """ A <draw:frame> is made into a <div> in HTML which is then styled
        """
        # Prefer the graphic style name; fall back to the presentation one.
        name = "G-" + attrs.get( (DRAWNS,'style-name'), "")
        if name == 'G-':
            name = "PR-" + attrs.get( (PRESENTATIONNS,'style-name'), "")
        name = name.replace(".","_")
        # Translate the frame geometry into inline absolute positioning.
        style = "position: absolute;"
        if attrs.has_key( (SVGNS,"width") ):
            style = style + "width:" + attrs[(SVGNS,"width")] + ";"
        if attrs.has_key( (SVGNS,"height") ):
            style = style + "height:" + attrs[(SVGNS,"height")] + ";"
        if attrs.has_key( (SVGNS,"x") ):
            style = style + "left:" + attrs[(SVGNS,"x")] + ";"
        if attrs.has_key( (SVGNS,"y") ):
            style = style + "top:" + attrs[(SVGNS,"y")] + ";"
        self.opentag('div', {'class': name, 'style': style})

    def e_draw_frame(self, tag, attrs):
        """ End the <draw:frame>
        """
        self.closetag('div')

    def s_draw_fill_image(self, tag, attrs):
        # Register a named fill image so StyleToCSS can emit it as a
        # CSS background-image later.
        name = attrs.get( (DRAWNS,'name'), "NoName")
        imghref = attrs[(XLINKNS,"href")]
        imghref = self.rewritelink(imghref)
        self.cs.fillimages[name] = imghref

    def rewritelink(self, imghref):
        """ Intended to be overloaded if you don't store your pictures
            in a Pictures subfolder
        """
        return imghref

    def s_draw_image(self, tag, attrs):
        """ A <draw:image> becomes an <img/> element
        """
        parent = self.tagstack.stackparent()
        anchor_type = parent.get((TEXTNS,'anchor-type'))
        imghref = attrs[(XLINKNS,"href")]
        imghref = self.rewritelink(imghref)
        htmlattrs = {'alt':"", 'src':imghref }
        # Only character-anchored images flow inline; everything else is a block.
        if anchor_type != "character":
            htmlattrs['style'] = "display: block;"
        self.emptytag('img', htmlattrs)
    def s_draw_page(self, tag, attrs):
        """ A <draw:page> is a slide in a presentation. We use a <fieldset> element in HTML.
            Therefore if you convert a ODP file, you get a series of <fieldset>s.
            Override this for your own purpose.
        """
        name = attrs.get( (DRAWNS,'name'), "NoName")
        stylename = attrs.get( (DRAWNS,'style-name'), "")
        stylename = stylename.replace(".","_")
        masterpage = attrs.get( (DRAWNS,'master-page-name'),"")
        masterpage = masterpage.replace(".","_")
        # Both the drawing-page style and the master-page style apply.
        self.opentag('fieldset', {'class':"DP-%s MP-%s" % (stylename, masterpage) })
        self.opentag('legend')
        self.writeout(escape(name))
        self.closetag('legend')

    def e_draw_page(self, tag, attrs):
        self.closetag('fieldset')

    def html_body(self, tag, attrs):
        """Emit the embedded stylesheet, close <head> and open <body>."""
        self.writedata()
        self.opentag('style', {'type':"text/css"})
        self.writeout('\nimg { width: 100%; height: 100%; }\n')
        self.writeout('* { padding: 0; margin: 0; }\n')
        self.generate_stylesheet()
        self.closetag('style')
        self.purgedata()
        self.closetag('head')
        self.opentag('body')
    def generate_stylesheet(self):
        """Flatten style inheritance in styledict, then write every style in
        stylestack as a CSS rule."""
        for name in self.stylestack:
            styles = self.styledict.get(name)
            # Preload with the family's default style
            if styles.has_key('__style-family') and self.styledict.has_key(styles['__style-family']):
                #if styles['__style-family'] == 'p': pdb.set_trace()
                familystyle = self.styledict[styles['__style-family']].copy()
                del styles['__style-family']
                for style, val in styles.items():
                    familystyle[style] = val
                styles = familystyle
            # Resolve the remaining parent styles
            while styles.has_key('__parent-style-name') and self.styledict.has_key(styles['__parent-style-name']):
                parentstyle = self.styledict[styles['__parent-style-name']].copy()
                del styles['__parent-style-name']
                for style, val in styles.items():
                    parentstyle[style] = val
                styles = parentstyle
            self.styledict[name] = styles
        # Write the styles to HTML
        for name in self.stylestack:
            styles = self.styledict.get(name)
            css2 = self.cs.convert_styles(styles)
            self.writeout("%s {\n" % name)
            for style, val in css2.items():
                self.writeout("\t%s: %s;\n" % (style, val) )
            self.writeout("}\n")

    def generate_footnotes(self):
        """Write collected foot/endnotes as anchored <div> blocks."""
        for key,note in self.notedict.items():
            self.opentag('div')
            # Anchor target for the in-text citation link.
            self.opentag('a', { 'name':"footnote-%d" % key })
            self.closetag('a')
            self.opentag('sup')
            self.writeout(escape(note['citation']))
            self.closetag('sup')
            self.writeout(escape(note['body']))
            self.closetag('div')
def s_office_automatic_styles(self, tag, attrs):
if self.xmlfile == 'styles.xml':
self.autoprefix = "A"
else:
self.autoprefix = ""
def s_office_document_content(self, tag, attrs):
""" First tag in the content.xml file"""
self.writeout('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" ')
self.writeout('"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
self.opentag('html', {'xmlns':"http://www.w3.org/1999/xhtml"})
self.opentag('head')
self.emptytag('meta', { 'http-equiv':"Content-Type", 'content':"text/html;charset=UTF-8"})
for metaline in self.metatags:
self.writeout(metaline)
def e_office_document_content(self, tag, attrs):
""" Last tag """
self.closetag('html')
def s_office_presentation(self, tag, attrs):
""" For some odd reason, OpenOffice Impress doesn't define a default-style
for the 'paragraph'. We therefore force a standard when we see
it is a presentation
"""
self.styledict['p'] = {(FONS,u'font-size'): u"24pt" }
self.styledict['presentation'] = {(FONS,u'font-size'): u"24pt" }
self.html_body(tag, attrs)
def e_office_presentation(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_spreadsheet(self, tag, attrs):
self.html_body(tag, attrs)
def e_office_spreadsheet(self, tag, attrs):
self.generate_footnotes()
self.closetag('body')
def s_office_styles(self, tag, attrs):
self.autoprefix = ""
    def s_office_text(self, tag, attrs):
        """ OpenDocument text """
        # Default frames to parallel text wrap, then open the HTML body.
        self.styledict['frame'] = { (STYLENS,'wrap'): u'parallel'}
        self.html_body(tag, attrs)
    def e_office_text(self, tag, attrs):
        """ End of the text document: flush footnotes and close <body>. """
        self.generate_footnotes()
        self.closetag('body')
    def s_style_handle_properties(self, tag, attrs):
        """ Copy all attributes to a struct.
            We will later convert them to CSS2
        """
        # Keys are (namespace, localname) tuples, stored verbatim under
        # the style currently being collected.
        for key,attr in attrs.items():
            self.styledict[self.currentstyle][key] = attr
familymap = {'frame':'frame', 'paragraph':'p', 'presentation':'presentation',
'text':'span','section':'div',
'table':'table','table-cell':'td','table-column':'col',
'table-row':'tr','graphic':'graphic' }
    def s_style_default_style(self, tag, attrs):
        """ A default style is like a style on an HTML tag
        """
        family = attrs[(STYLENS,'family')]
        htmlfamily = self.familymap.get(family,'unknown')
        self.currentstyle = htmlfamily
        # Start with an empty property set; s_style_handle_properties fills it.
        self.styledict[self.currentstyle] = {}
    def e_style_default_style(self, tag, attrs):
        # No style is being collected any more.
        self.currentstyle = None
    def s_style_font_face(self, tag, attrs):
        """ It is possible that the HTML browser doesn't know how to
            show a particular font. Luckily ODF provides generic fallbacks
            Unluckily they are not the same as CSS2.
            CSS2: serif, sans-serif, cursive, fantasy, monospace
            ODF: roman, swiss, modern, decorative, script, system
        """
        name = attrs[(STYLENS,"name")]
        family = attrs[(SVGNS,"font-family")]
        generic = attrs.get( (STYLENS,'font-family-generic'),"" )
        # The CSS generator is responsible for mapping the ODF generic
        # fallback name to a CSS2 one.
        self.cs.save_font(name, family, generic)
    def s_style_page_layout(self, tag, attrs):
        """ Collect the formatting for the page layout style.
        """
        name = attrs[(STYLENS,'name')]
        # Dots are not legal in CSS class names.
        name = name.replace(".","_")
        self.currentstyle = ".PL-" + name
        self.stylestack.append(self.currentstyle)
        self.styledict[self.currentstyle] = {}
    def e_style_page_layout(self, tag, attrs):
        """ End this style: stop collecting properties.
        """
        self.currentstyle = None
def s_style_master_page(self, tag, attrs):
""" Collect the formatting for the page layout style.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
self.currentstyle = ".MP-" + name
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {('','position'):'relative'}
# Then load the pagelayout style if we find it
pagelayout = attrs.get( (STYLENS,'page-layout-name'), None)
if pagelayout:
pagelayout = ".PL-" + pagelayout
if self.styledict.has_key( pagelayout ):
styles = self.styledict[pagelayout]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = pagelayout
self.s_ignorexml(tag, attrs)
# Short prefixes for class selectors
familyshort = {'drawing-page':'DP', 'paragraph':'P', 'presentation':'PR',
'text':'S', 'section':'D',
'table':'T', 'table-cell':'TD', 'table-column':'TC',
'table-row':'TR', 'graphic':'G' }
def s_style_style(self, tag, attrs):
""" Collect the formatting for the style.
Styles have scope. The same name can be used for both paragraph and
character styles Since CSS has no scope we use a prefix. (Not elegant)
In ODF a style can have a parent, these parents can be chained.
We may not have encountered the parent yet, but if we have, we resolve it.
"""
name = attrs[(STYLENS,'name')]
name = name.replace(".","_")
family = attrs[(STYLENS,'family')]
htmlfamily = self.familymap.get(family,'unknown')
sfamily = self.familyshort.get(family,'X')
name = "%s%s-%s" % (self.autoprefix, sfamily, name)
parent = attrs.get( (STYLENS,'parent-style-name') )
self.currentstyle = special_styles.get(name,"."+name)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
self.styledict[self.currentstyle]['__style-family'] = htmlfamily
# Then load the parent style if we find it
if parent:
parent = "%s-%s" % (sfamily, parent)
parent = special_styles.get(parent, "."+parent)
if self.styledict.has_key( parent ):
styles = self.styledict[parent]
for style, val in styles.items():
self.styledict[self.currentstyle][style] = val
else:
self.styledict[self.currentstyle]['__parent-style-name'] = parent
    def e_style_style(self, tag, attrs):
        """ End this style: stop collecting properties.
        """
        self.currentstyle = None
    def s_table_table(self, tag, attrs):
        """ Start a table
        """
        c = attrs.get( (TABLENS,'style-name'), None)
        if c:
            c = c.replace(".","_")
            # "T-" matches the table prefix in familyshort.
            self.opentag('table',{ 'class': "T-%s" % c })
        else:
            self.opentag('table')
        self.purgedata()
    def e_table_table(self, tag, attrs):
        """ End a table
        """
        self.writedata()
        self.closetag('table')
        self.purgedata()
    def s_table_table_cell(self, tag, attrs):
        """ Open a table cell, translating the ODF span attributes into
            HTML rowspan/colspan.
        """
        #FIXME: number-columns-repeated § 8.1.3
        #repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
        htmlattrs = {}
        rowspan = attrs.get( (TABLENS,'number-rows-spanned') )
        if rowspan:
            htmlattrs['rowspan'] = rowspan
        colspan = attrs.get( (TABLENS,'number-columns-spanned') )
        if colspan:
            htmlattrs['colspan'] = colspan
        c = attrs.get( (TABLENS,'style-name') )
        if c:
            htmlattrs['class'] = 'TD-%s' % c.replace(".","_")
        self.opentag('td', htmlattrs)
        self.purgedata()
    def e_table_table_cell(self, tag, attrs):
        # Flush buffered character data into the cell, then close it.
        self.writedata()
        self.closetag('td')
        self.purgedata()
def s_table_table_column(self, tag, attrs):
c = attrs.get( (TABLENS,'style-name'), None)
repeated = int(attrs.get( (TABLENS,'number-columns-repeated'), 1))
htmlattrs = {}
if c:
htmlattrs['class'] = "TC-%s" % c.replace(".","_")
for x in xrange(repeated):
self.emptytag('col', htmlattrs)
self.purgedata()
    def s_table_table_row(self, tag, attrs):
        #FIXME: table:number-rows-repeated
        # Open a table row, carrying the "TR-" style class if present.
        c = attrs.get( (TABLENS,'style-name'), None)
        htmlattrs = {}
        if c:
            htmlattrs['class'] = "TR-%s" % c.replace(".","_")
        self.opentag('tr', htmlattrs)
        self.purgedata()
    def e_table_table_row(self, tag, attrs):
        # Flush buffered character data into the row, then close it.
        self.writedata()
        self.closetag('tr')
        self.purgedata()
    def s_text_a(self, tag, attrs):
        """ Anchors start """
        self.writedata()
        # The href may carry a '|'-separated suffix; only the part before
        # it is used as the link target.
        href = attrs[(XLINKNS,"href")].split("|")[0]
        self.opentag('a', {'href':href})
        self.purgedata()
    def e_text_a(self, tag, attrs):
        """ Anchors end """
        self.writedata()
        self.closetag('a')
        self.purgedata()
    def s_text_h(self, tag, attrs):
        """ Headings start """
        level = int(attrs[(TEXTNS,'outline-level')])
        if level > 6: level = 6 # Heading levels go only to 6 in XHTML
        if level < 1: level = 1
        # Count headings per level; used for outline anchors in e_text_h.
        self.headinglevels[level] = self.headinglevels[level] + 1
        name = self.classname(attrs)
        # Deeper levels restart their numbering.
        for x in range(level + 1,10):
            self.headinglevels[x] = 0
        special = special_styles.get("P-"+name)
        if special:
            self.opentag('h%s' % level)
        else:
            self.opentag('h%s' % level, {'class':"P-%s" % name })
        self.purgedata()
    def e_text_h(self, tag, attrs):
        """ Headings end """
        self.writedata()
        level = int(attrs[(TEXTNS,'outline-level')])
        if level > 6: level = 6 # Heading levels go only to 6 in XHTML
        if level < 1: level = 1
        # Build a dotted outline number (e.g. "2.1.3") as an anchor name.
        lev = self.headinglevels[1:level+1]
        outline = '.'.join(map(str,lev) )
        self.opentag('a', {'name':"%s.%s" % ( outline, ''.join(self.data))} )
        self.closetag('a')
        self.closetag('h%s' % level)
        self.purgedata()
    def s_text_line_break(self, tag, attrs):
        # <text:line-break> maps directly to <br>.
        self.writedata()
        self.emptytag('br')
        self.purgedata()
    def s_text_list(self, tag, attrs):
        """ To know which level we're at, we have to count the number
            of <text:list> elements on the tagstack.
        """
        name = attrs.get( (TEXTNS,'style-name') )
        if name:
            name = name.replace(".","_")
            level = 1
        else:
            # FIXME: If a list is contained in a table cell or text box,
            # the list level must return to 1, even though the table or
            # textbox itself may be nested within another list.
            level = self.tagstack.count_tags(tag) + 1
            # Inherit the style name from the nearest enclosing list.
            name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
        # listtypes decides <ul> vs <ol> (set by the list-level-style handlers).
        self.opentag('%s' % self.listtypes.get(name), {'class':"%s_%d" % (name, level) })
        self.purgedata()
    def e_text_list(self, tag, attrs):
        """ End a list: close the matching <ul>/<ol>. """
        self.writedata()
        name = attrs.get( (TEXTNS,'style-name') )
        if name:
            name = name.replace(".","_")
            level = 1
        else:
            # FIXME: If a list is contained in a table cell or text box,
            # the list level must return to 1, even though the table or
            # textbox itself may be nested within another list.
            # NOTE(review): 'level' mirrors s_text_list but is unused here.
            level = self.tagstack.count_tags(tag) + 1
            name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
        self.closetag(self.listtypes.get(name))
        self.purgedata()
    def s_text_list_item(self, tag, attrs):
        """ Start a list item """
        self.opentag('li')
        self.purgedata()
    def e_text_list_item(self, tag, attrs):
        """ End a list item """
        self.writedata()
        self.closetag('li')
        self.purgedata()
def s_text_list_level_style_bullet(self, tag, attrs):
name = self.tagstack.rfindattr( (STYLENS,'name') )
self.listtypes[name] = 'ul'
level = attrs[(TEXTNS,'level')]
self.prevstyle = self.currentstyle
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
level = int(level)
if level % 3 == 1: listtype = "disc"
if level % 3 == 2: listtype = "circle"
if level % 3 == 0: listtype = "square"
self.styledict[self.currentstyle][('','list-style-type')] = listtype
    def e_text_list_level_style_bullet(self, tag, attrs):
        # Restore the style that was current before this list-level style.
        self.currentstyle = self.prevstyle
        del self.prevstyle
def s_text_list_level_style_number(self, tag, attrs):
name = self.tagstack.stackparent()[(STYLENS,'name')]
self.listtypes[name] = 'ol'
level = attrs[(TEXTNS,'level')]
num_format = attrs.get( (STYLENS,'name'),"1")
self.prevstyle = self.currentstyle
self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
self.stylestack.append(self.currentstyle)
self.styledict[self.currentstyle] = {}
if num_format == "1": listtype = "decimal"
elif num_format == "I": listtype = "upper-roman"
elif num_format == "i": listtype = "lower-roman"
elif num_format == "A": listtype = "upper-alpha"
elif num_format == "a": listtype = "lower-alpha"
else: listtype = "decimal"
self.styledict[self.currentstyle][('','list-style-type')] = listtype
    def e_text_list_level_style_number(self, tag, attrs):
        # Restore the style that was current before this list-level style.
        self.currentstyle = self.prevstyle
        del self.prevstyle
    def s_text_note(self, tag, attrs):
        # Allocate the next footnote number and start collecting its parts.
        self.currentnote = self.currentnote + 1
        self.notedict[self.currentnote] = {}
        self.notebody = []
    def e_text_note(self, tag, attrs):
        # Nothing to do; citation and body have their own end handlers.
        pass
    def collectnote(self,s):
        # Temporary write function: buffer footnote text instead of
        # emitting it to the document.
        if s != '':
            self.notebody.append(s)
    def s_text_note_body(self, tag, attrs):
        # Redirect all output into the note buffer until the body ends.
        self.orgwfunc = self.wfunc
        self.wfunc = self.collectnote
    def e_text_note_body(self, tag, attrs):
        # Restore the normal write function and store the collected text.
        self.wfunc = self.orgwfunc
        self.notedict[self.currentnote]['body'] = ''.join(self.notebody)
        # NOTE(review): resets to '' while s_text_note uses a list; harmless
        # because s_text_note reassigns it, but [] would be more consistent.
        self.notebody = ''
        del self.orgwfunc
    def e_text_note_citation(self, tag, attrs):
        # The buffered character data is the citation mark (e.g. "1").
        mark = ''.join(self.data)
        self.notedict[self.currentnote]['citation'] = mark
        # Link to the anchor that generate_footnotes will emit later.
        self.opentag('a',{ 'href': "#footnote-%s" % self.currentnote })
        self.opentag('sup')
        self.writeout( escape(mark) )
        self.closetag('sup')
        self.closetag('a')
    def s_text_p(self, tag, attrs):
        """ Paragraph
        """
        c = attrs.get( (TEXTNS,'style-name'), None)
        htmlattrs = {}
        if c:
            c = c.replace(".","_")
            special = special_styles.get("P-"+c)
            # Special styles map to dedicated markup, so no class is needed.
            if special is None:
                htmlattrs['class'] = "P-%s" % c
        self.opentag('p', htmlattrs)
        self.purgedata()
    def e_text_p(self, tag, attrs):
        """ End Paragraph
        """
        self.writedata()
        self.closetag('p')
        self.purgedata()
def s_text_s(self, tag, attrs):
""" Generate a number of spaces. ODF has an element; HTML uses
We use   so we can send the output through an XML parser if we desire to
"""
c = attrs.get( (TEXTNS,'c'),"1")
for x in xrange(int(c)):
self.writeout(' ')
    def s_text_span(self, tag, attrs):
        """ The <text:span> element matches the <span> element in HTML. It is
            typically used to apply properties to the text.
        """
        self.writedata()
        c = attrs.get( (TEXTNS,'style-name'), None)
        htmlattrs = {}
        if c:
            c = c.replace(".","_")
            special = special_styles.get("S-"+c)
            # Special styles map to dedicated markup, so no class is needed.
            if special is None:
                htmlattrs['class'] = "S-%s" % c
        self.opentag('span', htmlattrs)
        self.purgedata()
    def e_text_span(self, tag, attrs):
        """ End the <text:span> """
        self.writedata()
        self.closetag('span')
        self.purgedata()
    def s_text_tab(self, tag, attrs):
        """ Move to the next tabstop. We ignore this in HTML
        """
        self.writedata()
        # Emit a single space as a crude tab substitute.
        self.writeout(' ')
        self.purgedata()
    def s_text_x_source(self, tag, attrs):
        """ Various indexes and tables of contents. We ignore those.
        """
        self.writedata()
        self.purgedata()
        # Skip the whole subtree.
        self.s_ignorexml(tag, attrs)
    def e_text_x_source(self, tag, attrs):
        """ Various indexes and tables of contents. We ignore those.
        """
        self.writedata()
        self.purgedata()
    def odf2xhtml(self, odtfile):
        """ Convert an ODF package to XHTML.

            Parses meta.xml, styles.xml and content.xml (in that order)
            from the zip container through this SAX handler and returns
            the accumulated XHTML as one string.
        """
        # Extract the interesting files
        z = zipfile.ZipFile(odtfile)
        parser = xml.sax.make_parser()
        parser.setFeature(handler.feature_namespaces, 1)
        parser.setContentHandler(self)
        parser.setErrorHandler(handler.ErrorHandler())
        inpsrc = InputSource()
        # self.xmlfile lets handlers (e.g. s_office_automatic_styles) know
        # which part of the package they are processing.
        for xmlfile in ('meta.xml', 'styles.xml', 'content.xml'):
            self.xmlfile = xmlfile
            content = z.read(xmlfile)
            inpsrc.setByteStream(StringIO(content))
            parser.parse(inpsrc)
        z.close()
        return ''.join(self.lines)
# Command-line usage: odf2xhtml <file.odt>  -- XHTML is written to stdout.
if __name__ == "__main__":
    import sys
    odhandler = ODF2XHTML()
    result = odhandler.odf2xhtml(sys.argv[1])
    sys.stdout.write(result.encode('utf-8'))
|
import os
import subprocess
import integration_tests
class NilPluginTestCase(integration_tests.TestCase):
    """Integration tests for snapping parts that use the nil plugin."""

    def test_snap_nil_plugin(self):
        """Snapping a nil-plugin part produces only the 'meta' directory."""
        project_dir = 'simple-nil'
        self.run_snapcraft('snap', project_dir)

        dirs = os.listdir(os.path.join(project_dir, 'prime'))
        self.assertEqual(['meta'], dirs)

    def test_nil_no_additional_properties(self):
        """The nil plugin schema rejects unknown part properties."""
        project_dir = 'nil-with-additional-properties'
        exception = self.assertRaises(
            subprocess.CalledProcessError, self.run_snapcraft, 'snap',
            project_dir)
        # assertIn reports the actual output on failure, unlike
        # assertTrue(... in ...).
        self.assertIn(
            "Additional properties are not allowed ('extra-property' was "
            "unexpected)", exception.output.replace('\n', ' ').strip())
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata block (consumed by ansible-doc).
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_vrf
short_description: Manage VRF (private networks aka. contexts) on Cisco ACI fabrics (fv:Ctx)
description:
- Manage VRF (private networks aka. contexts) on Cisco ACI fabrics.
- Each context is a private network associated to a tenant, i.e. VRF.
- More information from the internal APIC class
I(fv:Ctx) at U(https://developer.cisco.com/media/mim-ref/MO-fvCtx.html).
author:
- Swetha Chunduri (@schunduri)
- Dag Wieers (@dagwieers)
- Jacob McGill (@jmcgill298)
version_added: '2.4'
requirements:
- Tested with ACI Fabric 1.0(3f)+
notes:
- The C(tenant) used must exist before using this module in your playbook.
The M(aci_tenant) module can be used for this.
options:
tenant:
description:
- The name of the Tenant the VRF should belong to.
aliases: [ tenant_name ]
vrf:
description:
- The name of the VRF.
aliases: [ context, name, vrf_name ]
policy_control_direction:
description:
- Determines if the policy should be enforced by the fabric on ingress or egress.
choices: [ egress, ingress ]
policy_control_preference:
description:
- Determines if the Fabric should enforce Contract Policies.
choices: [ enforced, unenforced ]
description:
description:
- The description for the VRF.
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new VRF to a tenant
aci_vrf:
hostname: apic
username: admin
password: SomeSecretPassword
vrf: vrf_lab
tenant: lab_tenant
descr: Lab VRF
policy_control_preference: enforced
policy_control_direction: ingress
state: present
- name: Remove a VRF for a tenant
aci_vrf:
hostname: apic
username: admin
password: SomeSecretPassword
vrf: vrf_lab
tenant: lab_tenant
state: absent
- name: Query a VRF of a tenant
aci_vrf:
hostname: apic
username: admin
password: SomeSecretPassword
vrf: vrf_lab
tenant: lab_tenant
state: query
- name: Query all VRFs
aci_vrf:
hostname: apic
username: admin
password: SomeSecretPassword
state: query
'''
RETURN = r''' # '''
from ansible.module_utils.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
    """Manage a VRF (fv:Ctx) object on a Cisco ACI fabric."""
    # NOTE(review): aci_argument_spec is a shared module-level dict and
    # update() mutates it in place -- confirm this is safe across imports.
    argument_spec = aci_argument_spec
    argument_spec.update(
        description=dict(type='str', aliases=['descr']),
        method=dict(type='str', choices=['delete', 'get', 'post'], aliases=['action'], removed_in_version='2.6'),  # Deprecated starting from v2.6
        policy_control_direction=dict(choices=['ingress', 'egress'], type='str'),
        policy_control_preference=dict(choices=['enforced', 'unenforced'], type='str'),
        state=dict(choices=['absent', 'present', 'query'], type='str', default='present'),
        tenant=dict(type='str', required=False, aliases=['tenant_name']),  # Not required for querying all objects
        vrf=dict(type='str', required=False, aliases=['context', 'name', 'vrf_name']),  # Not required for querying all objects
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )

    description = module.params['description']
    policy_control_direction = module.params['policy_control_direction']
    policy_control_preference = module.params['policy_control_preference']
    state = module.params['state']
    tenant = module.params['tenant']
    vrf = module.params['vrf']

    aci = ACIModule(module)

    # Choose the request path from the tenant/vrf/state combination.
    if tenant is not None and vrf is not None:
        # One specific VRF under one tenant.
        path = 'api/mo/uni/tn-%(tenant)s/ctx-%(vrf)s.json' % module.params
    elif tenant is not None and state == 'query':
        # All VRFs of one tenant.  (BUGFIX: this tenant-scoped query was
        # previously reached only when tenant was None, interpolating the
        # invalid 'tn-None' into the URL.)
        path = 'api/mo/uni/tn-%(tenant)s.json?rsp-subtree=children&rsp-subtree-class=fvCtx&rsp-subtree-include=no-scoped' % module.params
    elif state == 'query':
        # All VRFs in the fabric.
        path = 'api/class/fvCtx.json'
    elif vrf is None:
        module.fail_json(msg="Parameter 'vrf' is required for state 'absent' or 'present'")
    else:
        module.fail_json(msg="Parameter 'tenant' is required for state 'absent' or 'present'")

    aci.result['url'] = '%(protocol)s://%(hostname)s/' % aci.params + path

    aci.get_existing()

    if state == 'present':
        # Filter out module params with null values
        aci.payload(aci_class='fvCtx', class_config=dict(descr=description, pcEnfDir=policy_control_direction, pcEnfPref=policy_control_preference, name=vrf))

        # Generate config diff which will be used as POST request body
        aci.get_diff(aci_class='fvCtx')

        # Submit changes if module not in check_mode and the proposed is different than existing
        aci.post_config()

    elif state == 'absent':
        aci.delete_config()

    module.exit_json(**aci.result)


if __name__ == "__main__":
    main()
|
"""
Acceptance tests for Video.
"""
import json
from unittest import skipIf, skip
import requests
from box.test.flaky import flaky
from ..helpers import UniqueCourseTest, is_youtube_available
from ...pages.lms.video.video import VideoPage
from ...pages.lms.tab_nav import TabNavPage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.auto_auth import AutoAuthPage
from ...pages.lms.course_info import CourseInfoPage
from ...fixtures.course import CourseFixture, XBlockFixtureDesc
from ..helpers import skip_if_browser
# Port of the stub HTTP server that serves the HTML5 sample videos.
VIDEO_SOURCE_PORT = 8777

# Port and base URL of the YouTube stub server used by these tests.
YOUTUBE_STUB_PORT = 9080
YOUTUBE_STUB_URL = 'http://127.0.0.1:{}/'.format(YOUTUBE_STUB_PORT)

HTML5_SOURCES = [
    'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
    'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
    'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]

# An intentionally unplayable source (unknown extension) for error cases.
HTML5_SOURCES_INCORRECT = [
    'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
class YouTubeConfigError(Exception):
    """
    Raised when configuring or resetting the YouTube Stub Server fails.
    """
    pass
@flaky
@skipIf(is_youtube_available() is False, 'YouTube is not available!')
class VideoBaseTest(UniqueCourseTest):
    """
    Base class for tests of the Video Player
    Sets up the course and provides helper functions for the Video tests.
    """

    def setUp(self):
        """
        Initialization of pages and course fixture for video tests
        """
        super(VideoBaseTest, self).setUp()

        self.video = VideoPage(self.browser)
        self.tab_nav = TabNavPage(self.browser)
        self.course_nav = CourseNavPage(self.browser)
        self.course_info_page = CourseInfoPage(self.browser, self.course_id)

        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )

        # Per-test configuration; tests fill these in before calling
        # navigate_to_video().
        self.metadata = None
        self.assets = []
        self.verticals = None
        self.youtube_configuration = {}

        # reset youtube stub server
        self.addCleanup(self._reset_youtube_stub_server)

    def navigate_to_video(self):
        """ Prepare the course and get to the video and render it """
        self._install_course_fixture()
        self._navigate_to_courseware_video_and_render()

    def navigate_to_video_no_render(self):
        """
        Prepare the course and get to the video unit
        however do not wait for it to render, because
        there has been an error.
        """
        self._install_course_fixture()
        self._navigate_to_courseware_video_no_render()

    def _install_course_fixture(self):
        """ Install the course fixture that has been defined """
        if self.assets:
            self.course_fixture.add_asset(self.assets)

        chapter_sequential = XBlockFixtureDesc('sequential', 'Test Section')
        chapter_sequential.add_children(*self._add_course_verticals())
        chapter = XBlockFixtureDesc('chapter', 'Test Chapter').add_children(chapter_sequential)
        self.course_fixture.add_children(chapter)
        self.course_fixture.install()

        # Only configure the stub server when a test asked for it.
        if len(self.youtube_configuration) > 0:
            self._configure_youtube_stub_server(self.youtube_configuration)

    def _add_course_verticals(self):
        """
        Create XBlockFixtureDesc verticals
        :return: a list of XBlockFixtureDesc
        """
        xblock_verticals = []
        _verticals = self.verticals

        # Video tests require at least one vertical with a single video.
        if not _verticals:
            _verticals = [[{'display_name': 'Video', 'metadata': self.metadata}]]

        for vertical_index, vertical in enumerate(_verticals):
            xblock_verticals.append(self._create_single_vertical(vertical, vertical_index))

        return xblock_verticals

    def _create_single_vertical(self, vertical, vertical_index):
        """
        Create a single course vertical of type XBlockFixtureDesc with category `vertical`.
        A single course vertical can contain single or multiple video modules.
        :param vertical: vertical data list
        :param vertical_index: index for the vertical display name
        :return: XBlockFixtureDesc
        """
        xblock_course_vertical = XBlockFixtureDesc('vertical', 'Test Vertical-{0}'.format(vertical_index))

        for video in vertical:
            xblock_course_vertical.add_children(
                XBlockFixtureDesc('video', video['display_name'], metadata=video.get('metadata')))

        return xblock_course_vertical

    def _navigate_to_courseware_video(self):
        """ Register for the course and navigate to the video unit """
        AutoAuthPage(self.browser, course_id=self.course_id).visit()

        self.course_info_page.visit()
        self.tab_nav.go_to_tab('Courseware')

    def _navigate_to_courseware_video_and_render(self):
        """ Wait for the video player to render """
        self._navigate_to_courseware_video()
        self.video.wait_for_video_player_render()

    def _navigate_to_courseware_video_no_render(self):
        """ Wait for the video Xmodule but not for rendering """
        self._navigate_to_courseware_video()
        self.video.wait_for_video_class()

    def _configure_youtube_stub_server(self, config):
        """
        Allow callers to configure the stub server using the /set_config URL.
        :param config: Configuration dictionary.
        The request should have PUT data, such that:
            Each PUT parameter is the configuration key.
            Each PUT value is a JSON-encoded string value for the configuration.
        :raise YouTubeConfigError:
        """
        youtube_stub_config_url = YOUTUBE_STUB_URL + 'set_config'

        config_data = {param: json.dumps(value) for param, value in config.items()}
        response = requests.put(youtube_stub_config_url, data=config_data)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL {0}, Configuration Data: {1}, Status was {2}'.format(
                    youtube_stub_config_url, config, response.status_code))

    def _reset_youtube_stub_server(self):
        """
        Reset YouTube Stub Server Configurations using the /del_config URL.
        :raise YouTubeConfigError:
        """
        youtube_stub_config_url = YOUTUBE_STUB_URL + 'del_config'
        response = requests.delete(youtube_stub_config_url)

        if not response.ok:
            raise YouTubeConfigError(
                'YouTube Server Configuration Failed. URL: {0} Status was {1}'.format(
                    youtube_stub_config_url, response.status_code))

    def metadata_for_mode(self, player_mode, additional_data=None):
        """
        Create a dictionary for video player configuration according to `player_mode`
        :param player_mode (str): Video player mode
        :param additional_data (dict): Optional additional metadata.
        :return: dict
        """
        metadata = {}

        # Blanking all youtube_id_* fields forces pure-HTML5 playback.
        if player_mode == 'html5':
            metadata.update({
                'youtube_id_1_0': '',
                'youtube_id_0_75': '',
                'youtube_id_1_25': '',
                'youtube_id_1_5': '',
                'html5_sources': HTML5_SOURCES
            })

        if player_mode == 'youtube_html5':
            metadata.update({
                'html5_sources': HTML5_SOURCES,
            })

        if player_mode == 'youtube_html5_unsupported_video':
            metadata.update({
                'html5_sources': HTML5_SOURCES_INCORRECT
            })

        if player_mode == 'html5_unsupported_video':
            metadata.update({
                'youtube_id_1_0': '',
                'youtube_id_0_75': '',
                'youtube_id_1_25': '',
                'youtube_id_1_5': '',
                'html5_sources': HTML5_SOURCES_INCORRECT
            })

        if additional_data:
            metadata.update(additional_data)

        return metadata

    def go_to_sequential_position(self, position):
        """
        Navigate to the sequential at the given `position` and wait for
        the video player to render.
        """
        self.course_nav.go_to_sequential_position(position)
        self.video.wait_for_video_player_render()
class YouTubeVideoTest(VideoBaseTest):
""" Test YouTube Video Player """
    def setUp(self):
        """ Set up the YouTube-mode video test (delegates to VideoBaseTest). """
        super(YouTubeVideoTest, self).setUp()
    def test_youtube_video_rendering_wo_html5_sources(self):
        """
        Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
        Given the course has a Video component in "Youtube" mode
        Then the video has rendered in "Youtube" mode
        """
        # Default metadata (none) yields a pure YouTube player.
        self.navigate_to_video()

        # Verify that video has rendered in "Youtube" mode
        self.assertTrue(self.video.is_video_rendered('youtube'))
    def test_cc_button_wo_english_transcript(self):
        """
        Scenario: CC button works correctly w/o english transcript in Youtube mode
        Given the course has a Video component in "Youtube" mode
        And I have defined a non-english transcript for the video
        And I have uploaded a non-english transcript file to assets
        Then I see the correct text in the captions
        """
        data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
        self.metadata = self.metadata_for_mode('youtube', data)
        self.assets.append('chinese_transcripts.srt')
        self.navigate_to_video()
        self.video.show_captions()

        # Verify that we see "好 各位同学" text in the captions
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertIn(unicode_text, self.video.captions_text())
    def test_cc_button_transcripts_and_sub_fields_empty(self):
        """
        Scenario: CC button works correctly if transcripts and sub fields are empty,
            but transcript file exists in assets (Youtube mode of Video component)
        Given the course has a Video component in "Youtube" mode
        And I have uploaded a .srt.sjson file to assets
        Then I see the correct english text in the captions
        """
        self._install_course_fixture()
        self.course_fixture.add_asset(['subs_OEoXaMPEzfM.srt.sjson'])
        # NOTE(review): reaches into a private fixture method; confirm no
        # public upload API exists.
        self.course_fixture._upload_assets()
        self._navigate_to_courseware_video_and_render()
        self.video.show_captions()

        # Verify that we see "Hi, welcome to Edx." text in the captions
        self.assertIn('Hi, welcome to Edx.', self.video.captions_text())
    def test_cc_button_hidden_no_translations(self):
        """
        Scenario: CC button is hidden if no translations
        Given the course has a Video component in "Youtube" mode
        Then the "CC" button is hidden
        """
        # No transcripts are configured, so no CC button should appear.
        self.navigate_to_video()
        self.assertFalse(self.video.is_button_shown('CC'))
    def test_fullscreen_video_alignment_with_transcript_hidden(self):
        """
        Scenario: Video is aligned with transcript hidden in fullscreen mode
        Given the course has a Video component in "Youtube" mode
        When I view the video at fullscreen
        Then the video with the transcript hidden is aligned correctly
        """
        self.navigate_to_video()

        # click video button "fullscreen"
        self.video.click_player_button('fullscreen')

        # check if video aligned correctly without enabled transcript
        self.assertTrue(self.video.is_aligned(False))
    def test_download_button_wo_english_transcript(self):
        """
        Scenario: Download button works correctly w/o english transcript in YouTube mode
        Given the course has a Video component in "Youtube" mode
        And I have defined a downloadable non-english transcript for the video
        And I have uploaded a non-english transcript file to assets
        Then I can download the transcript in "srt" format
        """
        data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)
        self.assets.append('chinese_transcripts.srt')

        # go to video
        self.navigate_to_video()

        # check if we can download transcript in "srt" format that has text "好 各位同学"
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
def test_download_button_two_transcript_languages(self):
"""
Scenario: Download button works correctly for multiple transcript languages
Given the course has a Video component in "Youtube" mode
And I have defined a downloadable non-english transcript for the video
And I have defined english subtitles for the video
Then I see the correct english text in the captions
And the english transcript downloads correctly
And I see the correct non-english text in the captions
And the non-english transcript downloads correctly
"""
self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': 'OEoXaMPEzfM'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
# check if "Hi, welcome to Edx." text in the captions
self.assertIn('Hi, welcome to Edx.', self.video.captions_text())
# check if we can download transcript in "srt" format that has text "Hi, welcome to Edx."
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Hi, welcome to Edx.'))
# select language with code "zh"
self.assertTrue(self.video.select_language('zh'))
# check if we see "好 各位同学" text in the captions
unicode_text = "好 各位同学".decode('utf-8')
self.assertIn(unicode_text, self.video.captions_text())
# check if we can download transcript in "srt" format that has text "好 各位同学"
unicode_text = "好 各位同学".decode('utf-8')
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
    def test_fullscreen_video_alignment_on_transcript_toggle(self):
        """
        Scenario: Video is aligned correctly on transcript toggle in fullscreen mode
        Given the course has a Video component in "Youtube" mode
        And I have uploaded a .srt.sjson file to assets
        And I have defined subtitles for the video
        When I view the video at fullscreen
        Then the video with the transcript enabled is aligned correctly
        And the video with the transcript hidden is aligned correctly
        """
        self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
        data = {'sub': 'OEoXaMPEzfM'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)

        # go to video
        self.navigate_to_video()

        # make sure captions are opened
        self.video.show_captions()

        # click video button "fullscreen"
        self.video.click_player_button('fullscreen')

        # check if video aligned correctly with enabled transcript
        self.assertTrue(self.video.is_aligned(True))

        # click video button "CC" to hide the transcript again
        self.video.click_player_button('CC')

        # check if video aligned correctly without enabled transcript
        self.assertTrue(self.video.is_aligned(False))
    def test_video_rendering_with_default_response_time(self):
        """
        Scenario: Video is rendered in Youtube mode when the YouTube Server responds quickly
        Given the YouTube server response time less than 1.5 seconds
        And the course has a Video component in "Youtube_HTML5" mode
        Then the video has rendered in "Youtube" mode
        """
        # configure youtube server to respond faster than the player's
        # YouTube availability timeout
        self.youtube_configuration['time_to_response'] = 0.4
        self.metadata = self.metadata_for_mode('youtube_html5')

        self.navigate_to_video()

        self.assertTrue(self.video.is_video_rendered('youtube'))
    def test_video_rendering_wo_default_response_time(self):
        """
        Scenario: Video is rendered in HTML5 when the YouTube Server responds slowly
        Given the YouTube server response time is greater than 1.5 seconds
        And the course has a Video component in "Youtube_HTML5" mode
        Then the video has rendered in "HTML5" mode
        """
        # configure youtube server to respond slowly so the player falls
        # back to the HTML5 sources
        self.youtube_configuration['time_to_response'] = 2.0
        self.metadata = self.metadata_for_mode('youtube_html5')

        self.navigate_to_video()

        self.assertTrue(self.video.is_video_rendered('html5'))
    def test_video_with_youtube_blocked(self):
        """
        Scenario: Video is rendered in HTML5 mode when the YouTube API is blocked
        Given the YouTube server response time is greater than 1.5 seconds
        And the YouTube API is blocked
        And the course has a Video component in "Youtube_HTML5" mode
        Then the video has rendered in "HTML5" mode
        """
        # configure the youtube stub: slow responses AND a blocked API should
        # both push the player into HTML5 fallback mode
        self.youtube_configuration.update({
            'time_to_response': 2.0,
            'youtube_api_blocked': True,
        })
        self.metadata = self.metadata_for_mode('youtube_html5')
        self.navigate_to_video()
        self.assertTrue(self.video.is_video_rendered('html5'))
def test_download_transcript_button_works_correctly(self):
"""
Scenario: Download Transcript button works correctly
Given the course has Video components A and B in "Youtube" mode
And Video component C in "HTML5" mode
And I have defined downloadable transcripts for the videos
Then I can download a transcript for Video A in "srt" format
And I can download a transcript for Video A in "txt" format
And I can download a transcript for Video B in "txt" format
And the Download Transcript menu does not exist for Video C
"""
data_a = {'sub': 'OEoXaMPEzfM', 'download_track': True}
youtube_a_metadata = self.metadata_for_mode('youtube', additional_data=data_a)
self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
data_b = {'youtube_id_1_0': 'b7xgknqkQk8', 'sub': 'b7xgknqkQk8', 'download_track': True}
youtube_b_metadata = self.metadata_for_mode('youtube', additional_data=data_b)
self.assets.append('subs_b7xgknqkQk8.srt.sjson')
data_c = {'track': 'http://example.org/', 'download_track': True}
html5_c_metadata = self.metadata_for_mode('html5', additional_data=data_c)
self.verticals = [
[{'display_name': 'A', 'metadata': youtube_a_metadata}],
[{'display_name': 'B', 'metadata': youtube_b_metadata}],
[{'display_name': 'C', 'metadata': html5_c_metadata}]
]
# open the section with videos (open video "A")
self.navigate_to_video()
# check if we can download transcript in "srt" format that has text "00:00:00,270"
self.assertTrue(self.video.downloaded_transcript_contains_text('srt', '00:00:00,270'))
# select the transcript format "txt"
self.assertTrue(self.video.select_transcript_format('txt'))
# check if we can download transcript in "txt" format that has text "Hi, welcome to Edx."
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Hi, welcome to Edx.'))
# open video "B"
self.course_nav.go_to_sequential('B')
# check if we can download transcript in "txt" format that has text "Equal transcripts"
self.assertTrue(self.video.downloaded_transcript_contains_text('txt', 'Equal transcripts'))
# open video "C"
self.course_nav.go_to_sequential('C')
# menu "download_transcript" doesn't exist
self.assertFalse(self.video.is_menu_exist('download_transcript'))
    def _verify_caption_text(self, text):
        """Wait (up to 5 seconds) until `text` appears in the video captions."""
        self.video._wait_for(
            lambda: (text in self.video.captions_text()),
            u'Captions contain "{}" text'.format(text),
            timeout=5
        )
    def test_video_language_menu_working(self):
        """
        Scenario: Language menu works correctly in Video component
        Given the course has a Video component in "Youtube" mode
        And I have defined multiple language transcripts for the videos
        And I make sure captions are closed
        And I see video menu "language" with correct items
        And I select language with code "zh"
        Then I see "好 各位同学" text in the captions
        And I select language with code "en"
        Then I see "Hi, welcome to Edx." text in the captions
        """
        self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
        data = {'transcripts': {"zh": "chinese_transcripts.srt"}, 'sub': 'OEoXaMPEzfM'}
        self.metadata = self.metadata_for_mode('youtube', additional_data=data)
        # go to video
        self.navigate_to_video()
        # close captions before exercising the language menu
        self.video.hide_captions()
        correct_languages = {'en': 'English', 'zh': 'Chinese'}
        self.assertEqual(self.video.caption_languages(), correct_languages)
        self.video.select_language('zh')
        # Python 2: decode the utf-8 source literal into a unicode string
        unicode_text = "好 各位同学".decode('utf-8')
        self._verify_caption_text(unicode_text)
        self.video.select_language('en')
        self._verify_caption_text('Hi, welcome to Edx.')
def test_multiple_videos_in_sequentials_load_and_work(self):
"""
Scenario: Multiple videos in sequentials all load and work, switching between sequentials
Given it has videos "A,B" in "Youtube" mode in position "1" of sequential
And videos "E,F" in "Youtube" mode in position "2" of sequential
"""
self.verticals = [
[{'display_name': 'A'}, {'display_name': 'B'}], [{'display_name': 'C'}, {'display_name': 'D'}]
]
tab1_video_names = ['A', 'B']
tab2_video_names = ['C', 'D']
def execute_video_steps(video_names):
"""
Execute video steps
"""
for video_name in video_names:
self.video.click_player_button('play', video_name)
self.assertIn(self.video.state(video_name), ['playing', 'buffering'])
self.video.click_player_button('pause', video_name)
# go to video
self.navigate_to_video()
execute_video_steps(tab1_video_names)
# go to second sequential position
self.go_to_sequential_position(2)
execute_video_steps(tab2_video_names)
# go back to first sequential position
# we are again playing tab 1 videos to ensure that switching didn't broke some video functionality.
self.go_to_sequential_position(1)
execute_video_steps(tab1_video_names)
    def test_video_component_stores_speed_correctly_for_multiple_videos(self):
        """
        Scenario: Video component stores speed correctly when each video is in separate sequential
        Given I have a video "A" in "Youtube" mode in position "1" of sequential
        And a video "B" in "Youtube" mode in position "2" of sequential
        And a video "C" in "HTML5" mode in position "3" of sequential
        """
        self.verticals = [
            [{'display_name': 'A'}], [{'display_name': 'B'}],
            [{'display_name': 'C', 'metadata': self.metadata_for_mode('html5')}]
        ]
        self.navigate_to_video()
        # select the "2.0" speed on video "A"
        self.course_nav.go_to_sequential('A')
        self.video.set_speed('2.0')
        # select the "0.50" speed on video "B"
        self.course_nav.go_to_sequential('B')
        self.video.set_speed('0.50')
        # open video "C"
        self.course_nav.go_to_sequential('C')
        # check if video "C" should start playing at speed "0.75"
        # NOTE(review): presumably "0.75" is the player's default speed when
        # nothing has been stored for this video -- confirm against the player
        self.assertEqual(self.video.get_speed(), '0.75x')
        # open video "A"
        self.course_nav.go_to_sequential('A')
        # check if video "A" should start playing at speed "2.0"
        self.assertEqual(self.video.get_speed(), '2.0x')
        # reload the page; stored speeds must survive a reload
        self.video.reload_page()
        # open video "A"
        self.course_nav.go_to_sequential('A')
        # check if video "A" should start playing at speed "2.0"
        self.assertEqual(self.video.get_speed(), '2.0x')
        # select the "1.0" speed on video "A"
        self.video.set_speed('1.0')
        # open video "B"
        self.course_nav.go_to_sequential('B')
        # check if video "B" should start playing at speed "0.50"
        self.assertEqual(self.video.get_speed(), '0.50x')
        # open video "C"
        self.course_nav.go_to_sequential('C')
        # check if video "C" should start playing at speed "1.0"
        self.assertEqual(self.video.get_speed(), '1.0x')
def test_video_has_correct_transcript(self):
"""
Scenario: Youtube video has correct transcript if fields for other speeds are filled
Given it has a video in "Youtube" mode
And I have uploaded multiple transcripts
And I make sure captions are opened
Then I see "Hi, welcome to Edx." text in the captions
And I select the "1.50" speed
And I reload the page with video
Then I see "Hi, welcome to Edx." text in the captions
And I see duration "1:56"
"""
self.assets.extend(['subs_OEoXaMPEzfM.srt.sjson', 'subs_b7xgknqkQk8.srt.sjson'])
data = {'sub': 'OEoXaMPEzfM', 'youtube_id_1_5': 'b7xgknqkQk8'}
self.metadata = self.metadata_for_mode('youtube', additional_data=data)
# go to video
self.navigate_to_video()
self.video.show_captions()
self.assertIn('Hi, welcome to Edx.', self.video.captions_text())
self.video.set_speed('1.50')
self.video.reload_page()
self.assertIn('Hi, welcome to Edx.', self.video.captions_text())
self.assertTrue(self.video.duration(), '1.56')
    def test_video_position_stored_correctly_wo_seek(self):
        """
        Scenario: Video component stores position correctly when page is reloaded
        Given the course has a Video component in "Youtube" mode
        Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I wait until video reaches at position "0.05"
        And I click video button "pause"
        And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then video slider should be Equal or Greater than "0:05"
        """
        self.navigate_to_video()
        self.video.click_player_button('play')
        # let playback run until the 0:05 mark
        self.video.wait_for_position('0:05')
        self.video.click_player_button('pause')
        # the playback position must survive a page reload
        self.video.reload_page()
        self.video.click_player_button('play')
        self.video.click_player_button('pause')
        self.assertGreaterEqual(self.video.seconds(), 5)
    @skip("Intermittently fails 03 June 2014")
    def test_video_position_stored_correctly_with_seek(self):
        """
        Scenario: Video component stores position correctly when page is reloaded
        Given the course has a Video component in "Youtube" mode
        Then the video has rendered in "Youtube" mode
        And I click video button "play"
        Then I seek video to "0:10" position
        And I click video button "pause"
        And I reload the page with video
        And I click video button "play"
        And I click video button "pause"
        Then video slider should be Equal or Greater than "0:10"
        """
        self.navigate_to_video()
        self.video.click_player_button('play')
        # jump ahead while playing, then pause at the seeked position
        self.video.seek('0:10')
        self.video.click_player_button('pause')
        # the seeked position must survive a page reload
        self.video.reload_page()
        self.video.click_player_button('play')
        self.video.click_player_button('pause')
        self.assertGreaterEqual(self.video.seconds(), 10)
class YouTubeHtml5VideoTest(VideoBaseTest):
    """ Test YouTube HTML5 Video Player """
    def setUp(self):
        super(YouTubeHtml5VideoTest, self).setUp()
    def test_youtube_video_rendering_with_unsupported_sources(self):
        """
        Scenario: Video component is rendered in the LMS in Youtube mode
        with HTML5 sources that are not supported by the browser
        Given the course has a Video component in "Youtube_HTML5_Unsupported_Video" mode
        Then the video has rendered in "Youtube" mode
        """
        self.metadata = self.metadata_for_mode('youtube_html5_unsupported_video')
        self.navigate_to_video()
        # Verify that the video has rendered in "Youtube" mode
        self.assertTrue(self.video.is_video_rendered('youtube'))
class Html5VideoTest(VideoBaseTest):
    """ Test HTML5 Video Player """
    def setUp(self):
        super(Html5VideoTest, self).setUp()
    def test_autoplay_disabled_for_video_component(self):
        """
        Scenario: Autoplay is disabled by default for a Video component
        Given the course has a Video component in "HTML5" mode
        When I view the Video component
        Then it does not have autoplay enabled
        """
        self.metadata = self.metadata_for_mode('html5')
        self.navigate_to_video()
        # Verify that the video has autoplay mode disabled
        self.assertFalse(self.video.is_autoplay_enabled())
    def test_html5_video_rendering_with_unsupported_sources(self):
        """
        Scenario: LMS displays an error message for HTML5 sources that are not supported by browser
        Given the course has a Video component in "HTML5_Unsupported_Video" mode
        When I view the Video component
        Then an error message is shown
        And the error message has the correct text
        """
        self.metadata = self.metadata_for_mode('html5_unsupported_video')
        # the player is not expected to render, so skip the usual render wait
        self.navigate_to_video_no_render()
        # Verify that error message is shown
        self.assertTrue(self.video.is_error_message_shown())
        # Verify that error message has correct text
        correct_error_message_text = 'No playable video sources found.'
        self.assertIn(correct_error_message_text, self.video.error_message_text())
        # Verify that spinner is not shown
        self.assertFalse(self.video.is_spinner_shown())
    def test_download_button_wo_english_transcript(self):
        """
        Scenario: Download button works correctly w/o english transcript in HTML5 mode
        Given the course has a Video component in "HTML5" mode
        And I have defined a downloadable non-english transcript for the video
        And I have uploaded a non-english transcript file to assets
        Then I see the correct non-english text in the captions
        And the non-english transcript downloads correctly
        """
        data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}}
        self.metadata = self.metadata_for_mode('html5', additional_data=data)
        self.assets.append('chinese_transcripts.srt')
        # go to video
        self.navigate_to_video()
        # check if we see "好 各位同学" text in the captions
        # (Python 2: decode the utf-8 source literal into a unicode string)
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertIn(unicode_text, self.video.captions_text())
        # check if we can download transcript in "srt" format that has text "好 各位同学"
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
    def test_download_button_two_transcript_languages(self):
        """
        Scenario: Download button works correctly for multiple transcript languages in HTML5 mode
        Given the course has a Video component in "HTML5" mode
        And I have defined a downloadable non-english transcript for the video
        And I have defined english subtitles for the video
        Then I see the correct english text in the captions
        And the english transcript downloads correctly
        And I see the correct non-english text in the captions
        And the non-english transcript downloads correctly
        """
        self.assets.extend(['chinese_transcripts.srt', 'subs_OEoXaMPEzfM.srt.sjson'])
        data = {'download_track': True, 'transcripts': {'zh': 'chinese_transcripts.srt'}, 'sub': 'OEoXaMPEzfM'}
        self.metadata = self.metadata_for_mode('html5', additional_data=data)
        # go to video
        self.navigate_to_video()
        # check if "Hi, welcome to Edx." text in the captions
        self.assertIn('Hi, welcome to Edx.', self.video.captions_text())
        # check if we can download transcript in "srt" format that has text "Hi, welcome to Edx."
        self.assertTrue(self.video.downloaded_transcript_contains_text('srt', 'Hi, welcome to Edx.'))
        # select language with code "zh"
        self.assertTrue(self.video.select_language('zh'))
        # check if we see "好 各位同学" text in the captions
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertIn(unicode_text, self.video.captions_text())
        # then I can download transcript in "srt" format that has text "好 各位同学"
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertTrue(self.video.downloaded_transcript_contains_text('srt', unicode_text))
    def test_full_screen_video_alignment_with_transcript_visible(self):
        """
        Scenario: Video is aligned correctly with transcript enabled in fullscreen mode
        Given the course has a Video component in "HTML5" mode
        And I have uploaded a .srt.sjson file to assets
        And I have defined subtitles for the video
        When I show the captions
        And I view the video at fullscreen
        Then the video with the transcript enabled is aligned correctly
        """
        self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
        data = {'sub': 'OEoXaMPEzfM'}
        self.metadata = self.metadata_for_mode('html5', additional_data=data)
        # go to video
        self.navigate_to_video()
        # make sure captions are opened
        self.video.show_captions()
        # click video button "fullscreen"
        self.video.click_player_button('fullscreen')
        # check if video aligned correctly with enabled transcript
        self.assertTrue(self.video.is_aligned(True))
    def test_cc_button_with_english_transcript(self):
        """
        Scenario: CC button works correctly with only english transcript in HTML5 mode
        Given the course has a Video component in "HTML5" mode
        And I have defined english subtitles for the video
        And I have uploaded an english transcript file to assets
        Then I see the correct text in the captions
        """
        self.assets.append('subs_OEoXaMPEzfM.srt.sjson')
        data = {'sub': 'OEoXaMPEzfM'}
        self.metadata = self.metadata_for_mode('html5', additional_data=data)
        # go to video
        self.navigate_to_video()
        # make sure captions are opened
        self.video.show_captions()
        # check if we see "Hi, welcome to Edx." text in the captions
        self.assertIn("Hi, welcome to Edx.", self.video.captions_text())
    def test_cc_button_wo_english_transcript(self):
        """
        Scenario: CC button works correctly w/o english transcript in HTML5 mode
        Given the course has a Video component in "HTML5" mode
        And I have defined a non-english transcript for the video
        And I have uploaded a non-english transcript file to assets
        Then I see the correct text in the captions
        """
        self.assets.append('chinese_transcripts.srt')
        data = {'transcripts': {'zh': 'chinese_transcripts.srt'}}
        self.metadata = self.metadata_for_mode('html5', additional_data=data)
        # go to video
        self.navigate_to_video()
        # make sure captions are opened
        self.video.show_captions()
        # check if we see "好 各位同学" text in the captions
        unicode_text = "好 各位同学".decode('utf-8')
        self.assertIn(unicode_text, self.video.captions_text())
    def test_video_rendering(self):
        """
        Scenario: Video component is fully rendered in the LMS in HTML5 mode
        Given the course has a Video component in "HTML5" mode
        Then the video has rendered in "HTML5" mode
        And video sources are correct
        """
        self.metadata = self.metadata_for_mode('html5')
        self.navigate_to_video()
        self.assertTrue(self.video.is_video_rendered('html5'))
        # every source the player exposes must be one of the known test sources
        self.assertTrue(all([source in HTML5_SOURCES for source in self.video.sources()]))
class YouTubeQualityTest(VideoBaseTest):
    """ Test YouTube Video Quality Button """
    def setUp(self):
        super(YouTubeQualityTest, self).setUp()
    # NOTE: both checks are skipped on Firefox (see @skip_if_browser below).
    @skip_if_browser('firefox')
    def test_quality_button_visibility(self):
        """
        Scenario: Quality button appears on play.
        Given the course has a Video component in "Youtube" mode
        Then I see video button "quality" is hidden
        And I click video button "play"
        Then I see video button "quality" is visible
        """
        self.navigate_to_video()
        # hidden before playback starts
        self.assertFalse(self.video.is_quality_button_visible())
        self.video.click_player_button('play')
        # visible once the video is playing
        self.assertTrue(self.video.is_quality_button_visible())
    @skip_if_browser('firefox')
    def test_quality_button_works_correctly(self):
        """
        Scenario: Quality button works correctly.
        Given the course has a Video component in "Youtube" mode
        And I click video button "play"
        And I see video button "quality" is inactive
        And I click video button "quality"
        Then I see video button "quality" is active
        """
        self.navigate_to_video()
        self.video.click_player_button('play')
        # inactive by default, active after clicking it
        self.assertFalse(self.video.is_quality_button_active())
        self.video.click_player_button('quality')
        self.assertTrue(self.video.is_quality_button_active())
|
"""
Tools for creating discussion content fixture data.
"""
from datetime import datetime
import json
import factory
import requests
from . import COMMENTS_STUB_URL
class ContentFactory(factory.Factory):
    """Base factory for discussion content dicts (threads and comments)."""
    FACTORY_FOR = dict
    id = None
    user_id = "dummy-user-id"
    username = "dummy-username"
    course_id = "dummy-course-id"
    commentable_id = "dummy-commentable-id"
    anonymous = False
    anonymous_to_peers = False
    # NOTE(review): these list/dict defaults are class attributes and may be
    # shared between generated dicts unless factory_boy copies them -- verify
    # that built fixtures never mutate them in place.
    at_position_list = []
    abuse_flaggers = []
    # evaluated once at import time, so all fixtures share the same timestamps
    created_at = datetime.utcnow().isoformat()
    updated_at = datetime.utcnow().isoformat()
    endorsed = False
    closed = False
    votes = {"up_count": 0}
    @classmethod
    def _adjust_kwargs(cls, **kwargs):
        # The discussion code assumes that user_id is a string. This ensures that it always will be.
        if 'user_id' in kwargs:
            kwargs['user_id'] = str(kwargs['user_id'])
        return kwargs
class Thread(ContentFactory):
    """Factory for a discussion-thread dict."""
    thread_type = "discussion"
    anonymous = False
    anonymous_to_peers = False
    comments_count = 0
    unread_comments_count = 0
    title = "dummy thread title"
    body = "dummy thread body"
    type = "thread"
    group_id = None
    pinned = False
    read = False
class Comment(ContentFactory):
    """Factory for a comment dict attached to a thread."""
    thread_id = "dummy thread"
    depth = 0
    type = "comment"
    body = "dummy comment body"
class Response(Comment):
    """Factory for a top-level response (a depth-1 comment)."""
    depth = 1
    body = "dummy response body"
class SearchResult(factory.Factory):
    """Factory for a discussion search-result payload dict."""
    FACTORY_FOR = dict
    # NOTE(review): list/dict defaults are class attributes shared between
    # generated dicts unless factory_boy copies them -- verify no in-place
    # mutation by consumers.
    discussion_data = []
    annotated_content_info = {}
    num_pages = 1
    page = 1
    corrected_text = None
class DiscussionContentFixture(object):
    """Base class for fixtures that push discussion data to the comments stub."""

    def push(self):
        """
        Push the data to the stub comments service.
        """
        config_url = '{}/set_config'.format(COMMENTS_STUB_URL)
        requests.put(config_url, data=self.get_config_data())

    def get_config_data(self):
        """
        Return a dictionary with the fixture's data serialized for PUTting
        to the stub server's config endpoint. Subclasses must implement this.
        """
        raise NotImplementedError()
class SingleThreadViewFixture(DiscussionContentFixture):
    """Fixture for a single discussion thread with optional responses/comments."""

    def __init__(self, thread):
        self.thread = thread

    def addResponse(self, response, comments=None):
        """
        Attach `response` (with child `comments`) to the thread and update
        the thread's comment count.
        """
        # BUG FIX: the default was the mutable `comments=[]`, shared across
        # calls; use None as the sentinel and create a fresh list per call.
        if comments is None:
            comments = []
        response['children'] = comments
        # "question"-type threads split responses by endorsement; plain
        # discussions keep them all under 'children'.
        if self.thread["thread_type"] == "discussion":
            responseListAttr = "children"
        elif response["endorsed"]:
            responseListAttr = "endorsed_responses"
        else:
            responseListAttr = "non_endorsed_responses"
        self.thread.setdefault(responseListAttr, []).append(response)
        # the response itself counts as one comment
        self.thread['comments_count'] += len(comments) + 1

    def _get_comment_map(self):
        """
        Generate a dict mapping each response/comment in the thread
        by its `id`.
        """
        def _visit(obj):
            res = []
            for child in obj.get('children', []):
                res.append((child['id'], child))
                if 'children' in child:
                    res += _visit(child)
            return res
        return dict(_visit(self.thread))

    def get_config_data(self):
        """Serialize the thread and its comment map for the stub server."""
        return {
            "threads": json.dumps({self.thread['id']: self.thread}),
            "comments": json.dumps(self._get_comment_map())
        }
class MultipleThreadFixture(DiscussionContentFixture):
    """Fixture holding several discussion threads (no comments)."""

    def __init__(self, threads):
        self.threads = threads

    def get_config_data(self):
        """Serialize the threads, keyed by thread id, for the stub server."""
        threads_by_id = {}
        for thread in self.threads:
            threads_by_id[thread['id']] = thread
        return {"threads": json.dumps(threads_by_id), "comments": '{}'}
class UserProfileViewFixture(DiscussionContentFixture):
    """Fixture for the user-profile view's list of active threads."""

    def __init__(self, threads):
        self.threads = threads

    def get_config_data(self):
        """Serialize the active threads for the stub server."""
        serialized = json.dumps(self.threads)
        return {"active_threads": serialized}
class SearchResultFixture(DiscussionContentFixture):
    """Fixture for a canned discussion search result."""

    def __init__(self, result):
        self.result = result

    def get_config_data(self):
        """Serialize the search result for the stub server."""
        serialized = json.dumps(self.result)
        return {"search_result": serialized}
|
from spack import *
class Cppzmq(CMakePackage):
    """C++ binding for 0MQ"""
    homepage = "https://www.zeromq.org"
    url = "https://github.com/zeromq/cppzmq/archive/v4.2.2.tar.gz"
    git = "https://github.com/zeromq/cppzmq.git"
    version('master', branch='master')
    version('4.7.1', sha256='9853e0437d834cbed5d3c223bf1d755cadee70e7c964c6e42c4c6783dee5d02c')
    version('4.6.0', sha256='e9203391a0b913576153a2ad22a2dc1479b1ec325beb6c46a3237c669aef5a52')
    version('4.5.0', sha256='64eb4e58eaf0c77505391c6c9a606cffcb57c6086f3431567a1ef4a25b01fa36')
    version('4.4.1', sha256='117fc1ca24d98dbe1a60c072cde13be863d429134907797f8e03f654ce679385')
    version('4.4.0', sha256='118b9ff117f07d1aabadfb905d7227362049d7940d16b7863b3dd3cebd28be85')
    version('4.3.0', sha256='27d1f56406ba94ee779e639203218820975cf68174f92fbeae0f645df0fcada4')
    version('4.2.3', sha256='3e6b57bf49115f4ae893b1ff7848ead7267013087dc7be1ab27636a97144d373')
    version('4.2.2', sha256='3ef50070ac5877c06c6bb25091028465020e181bbfd08f110294ed6bc419737d')
    # Draft classes/methods require draft support in libzmq too (see below).
    variant("drafts", default=False,
            description="Build and install draft classes and methods")
    depends_on('cmake@3.0.0:', type='build')
    depends_on('libzmq')
    # pin libzmq to 4.2.2 for the matching cppzmq 4.2.x releases
    depends_on('libzmq@4.2.2', when='@4.2.2:4.2.3')
    depends_on('libzmq+drafts', when='+drafts')
    def cmake_args(self):
        # Translate the package's variants into CMake cache arguments.
        args = []
        args.append(self.define_from_variant("ENABLE_DRAFTS", "drafts"))
        # Disable the test build; see the upstream discussions:
        # https://github.com/zeromq/cppzmq/issues/422
        # https://github.com/zeromq/cppzmq/pull/288
        args.append('-DCPPZMQ_BUILD_TESTS=OFF')
        return args
|
"""Get useful information from live Python objects.
This module encapsulates the interface provided by the internal special
attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
It also provides some help for examining source code and class layout.
Here are some of the useful functions provided by this module:
ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
isroutine() - check object types
getmembers() - get members of an object that satisfy a given condition
getfile(), getsourcefile(), getsource() - find an object's source code
getdoc(), getcomments() - get documentation on an object
getmodule() - determine the module that an object came from
getclasstree() - arrange classes so as to represent their hierarchy
getargspec(), getargvalues(), getcallargs() - get info about function arguments
getfullargspec() - same, with support for Python-3000 features
formatargspec(), formatargvalues() - format an argument spec
getouterframes(), getinnerframes() - get info about frames
currentframe() - get the current stack frame
stack(), trace() - get info about frames on the stack or in a traceback
signature() - get a Signature object for the callable
"""
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
'Yury Selivanov <yselivanov@sprymix.com>')
import imp
import importlib.machinery
import itertools
import linecache
import os
import re
import sys
import tokenize
import types
import warnings
import functools
import builtins
from operator import attrgetter
from collections import namedtuple, OrderedDict
try:
    # Derive the CO_* code-flag constants from dis so they always match the
    # running interpreter's compiler flags.
    from dis import COMPILER_FLAG_NAMES as _flag_names
except ImportError:
    # Fallback: hard-code the well-known flag values.
    CO_OPTIMIZED, CO_NEWLOCALS = 0x1, 0x2
    CO_VARARGS, CO_VARKEYWORDS = 0x4, 0x8
    CO_NESTED, CO_GENERATOR, CO_NOFREE = 0x10, 0x20, 0x40
else:
    # Inject a CO_<NAME> module-level constant for every known flag.
    mod_dict = globals()
    for k, v in _flag_names.items():
        mod_dict["CO_" + v] = k
# Bit set in a type's tp_flags when the class is an abstract base class.
TPFLAGS_IS_ABSTRACT = 1 << 20
def ismodule(object):
    """Return True when *object* is a module (an instance of types.ModuleType).

    Modules expose attributes such as __doc__ (documentation string),
    __file__ (source filename, absent for built-in modules) and
    __cached__ (pathname of the byte-compiled file).
    """
    return isinstance(object, types.ModuleType)
def isclass(object):
    """Return True when *object* is a class (an instance of ``type``).

    Classes expose __doc__ (documentation string) and __module__ (the name
    of the module where the class was defined).
    """
    return isinstance(object, type)
def ismethod(object):
    """Return True when *object* is a bound instance method.

    Bound methods expose __func__ (the underlying function implementing the
    method), __self__ (the instance the method is bound to), __name__ and
    __doc__.
    """
    return isinstance(object, types.MethodType)
def ismethoddescriptor(object):
    """Return True when *object* is a method descriptor.

    But not if ismethod(), isclass() or isfunction() are true.

    A method descriptor has a __get__ attribute but no __set__ attribute;
    ``int.__add__`` is a typical example. Objects that already satisfy one
    of the richer tests are excluded, because those tests promise more
    (e.g. a __func__ attribute on anything passing ismethod()). Beyond
    __get__, the attribute set varies; __name__ and __doc__ are usually
    present.
    """
    if isclass(object) or ismethod(object) or isfunction(object):
        # mutually exclusive with the richer classifications
        return False
    klass = type(object)
    return hasattr(klass, "__get__") and not hasattr(klass, "__set__")
def isdatadescriptor(object):
    """Return True when *object* is a data descriptor.

    Data descriptors have both a __get__ and a __set__ attribute; examples
    are properties (defined in Python) and getsets/members (defined in C).
    They typically also carry __name__ and __doc__, but that is not
    guaranteed. Objects passing isclass(), ismethod() or isfunction() are
    excluded, mirroring ismethoddescriptor().
    """
    if isclass(object) or ismethod(object) or isfunction(object):
        # mutually exclusive with the richer classifications
        return False
    klass = type(object)
    return hasattr(klass, "__set__") and hasattr(klass, "__get__")
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent
    def ismemberdescriptor(object):
        """Return True when *object* is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules (and created for __slots__ entries)."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Other implementations
    def ismemberdescriptor(object):
        """Return True when *object* is a member descriptor.

        Always False here: this implementation exposes no
        types.MemberDescriptorType."""
        return False
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent
    def isgetsetdescriptor(object):
        """Return True when *object* is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Other implementations
    def isgetsetdescriptor(object):
        """Return True when *object* is a getset descriptor.

        Always False here: this implementation exposes no
        types.GetSetDescriptorType."""
        return False
def isfunction(object):
    """Return True when *object* is a user-defined (Python-level) function.

    Such objects are instances of types.FunctionType and carry attributes
    like __code__ (compiled bytecode), __defaults__, __globals__,
    __annotations__ and __kwdefaults__; built-in (C-level) functions do not
    qualify.
    """
    return isinstance(object, types.FunctionType)
def isgeneratorfunction(object):
    """Return True when *object* is a user-defined generator function.

    A generator function is a plain function or method whose compiled code
    carries the CO_GENERATOR flag (i.e. its body contains ``yield``). Such
    objects provide the same attributes as ordinary functions; see
    help(isfunction).
    """
    if not (isfunction(object) or ismethod(object)):
        return False
    return bool(object.__code__.co_flags & CO_GENERATOR)
def isgenerator(object):
    """Return True when *object* is a generator (an instance of
    types.GeneratorType).

    Generators support iteration (__iter__/next) and provide gi_code,
    gi_frame (or None once exhausted) and gi_running, plus send()/throw()/
    close() for driving and terminating the generator.
    """
    return isinstance(object, types.GeneratorType)
def istraceback(object):
    """Return True when *object* is a traceback (types.TracebackType).

    Tracebacks carry tb_frame (frame at this level), tb_lasti (last
    attempted bytecode index), tb_lineno (current source line) and tb_next
    (the next inner traceback, if any).
    """
    return isinstance(object, types.TracebackType)
def isframe(object):
    """Return True when *object* is a frame object (types.FrameType).

    Frames expose f_back (caller's frame), f_code, f_globals, f_locals,
    f_builtins, f_lasti (last attempted bytecode index), f_lineno (current
    source line) and f_trace (per-frame tracing function or None).
    """
    return isinstance(object, types.FrameType)
def iscode(object):
    """Return True when *object* is a code object (types.CodeType).

    Code objects expose, among others: co_argcount, co_code (raw bytecode),
    co_consts, co_filename, co_firstlineno, co_flags (1=optimized,
    2=newlocals, 4=*args, 8=**kwargs), co_lnotab, co_name, co_names,
    co_nlocals, co_stacksize and co_varnames.
    """
    return isinstance(object, types.CodeType)
def isbuiltin(object):
    """Return True when *object* is a built-in function or method
    (types.BuiltinFunctionType).

    Built-ins expose __doc__, __name__ and __self__ (the bound instance, or
    None for plain functions).
    """
    return isinstance(object, types.BuiltinFunctionType)
def isroutine(object):
    """Return True when *object* is any kind of function or method.

    Covers built-in functions/methods, user-defined functions, bound
    methods and method descriptors.
    """
    checks = (isbuiltin, isfunction, ismethod, ismethoddescriptor)
    return any(check(object) for check in checks)
def isabstract(object):
    """Return True when *object* is a class flagged as an abstract base class.

    This checks the TPFLAGS_IS_ABSTRACT bit in the class's __flags__, which
    abc.ABCMeta sets on classes with unimplemented abstract methods.
    """
    return bool(isinstance(object, type) and object.__flags__ & TPFLAGS_IS_ABSTRACT)
def getmembers(object, predicate=None):
    """Return all (name, value) members of an object, sorted by name.

    If *predicate* is given, only members for which predicate(value)
    is true are included."""
    if isclass(object):
        search_order = (object,) + getmro(object)
    else:
        search_order = ()
    members = []
    for key in dir(object):
        # Prefer the raw __dict__ entry over getattr(): some descriptors
        # raise when their __get__ is invoked (see bug #1785).
        for base in search_order:
            if key in base.__dict__:
                value = base.__dict__[key]
                break
        else:
            try:
                value = getattr(object, key)
            except AttributeError:
                continue
        if not predicate or predicate(value):
            members.append((key, value))
    members.sort()
    return members
# (name, kind, defining_class, object) record produced by
# classify_class_attrs() for each attribute found on a class.
Attribute = namedtuple('Attribute', 'name kind defining_class object')
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

        0. The name (a string).
        1. The kind of attribute this is, one of these strings:
               'class method'    created via classmethod()
               'static method'   created via staticmethod()
               'property'        created via property()
               'method'          any other flavor of method
               'data'            not a method
        2. The class which defined this attribute (a class).
        3. The object as obtained directly from the defining class's
           __dict__, not via getattr.  This is especially important for
           data attributes: C.data is just a data object, but
           C.__dict__['data'] may be a data descriptor with additional
           info, like a __doc__ string.
    """
    mro = getmro(cls)
    names = dir(cls)
    result = []
    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr. Static and class methods are dramatic examples.
        # Furthermore, some objects may raise an Exception when fetched with
        # getattr(). This is the case with some descriptors (bug #1785).
        # Thus, we only use getattr() as a last resort.
        homecls = None
        for base in (cls,) + mro:
            if name in base.__dict__:
                obj = base.__dict__[name]
                homecls = base
                break
        else:
            # Name not found in any class __dict__ (e.g. supplied by the
            # metaclass); fall back to getattr and let __objclass__ tell
            # us the defining class when it is available.
            obj = getattr(cls, name)
            homecls = getattr(obj, "__objclass__", homecls)
        # Classify the object.
        if isinstance(obj, staticmethod):
            kind = "static method"
        elif isinstance(obj, classmethod):
            kind = "class method"
        elif isinstance(obj, property):
            kind = "property"
        elif ismethoddescriptor(obj):
            kind = "method"
        elif isdatadescriptor(obj):
            kind = "data"
        else:
            # Not a recognized descriptor: classify via the getattr view,
            # which binds plain functions into methods.
            obj_via_getattr = getattr(cls, name)
            if (isfunction(obj_via_getattr) or
                ismethoddescriptor(obj_via_getattr)):
                kind = "method"
            else:
                kind = "data"
            obj = obj_via_getattr
        result.append(Attribute(name, kind, homecls, obj))
    return result
def getmro(cls):
    """Return the tuple of base classes (including *cls*) in method
    resolution order."""
    return cls.__mro__
def indentsize(line):
    """Return the indent size, in spaces, at the start of a line of text
    (tabs are expanded first)."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
def getdoc(object):
    """Get the documentation string for an object, cleaned with cleandoc().

    Returns None when the object has no __doc__ or when it is not a
    plain string."""
    doc = getattr(object, '__doc__', None)
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
def cleandoc(doc):
    """Clean up indentation from docstrings.

    Whitespace that can be uniformly removed from the second line onwards
    is stripped; leading/trailing blank lines are dropped. Returns None
    if the string cannot be tab-expanded."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None
    # Smallest indentation over all non-blank lines after the first.
    margin = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            margin = min(margin, len(line) - len(stripped))
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [line[margin:] for line in lines[1:]]
    # Remove any trailing or leading blank lines.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
def getfile(object):
    """Work out which source or compiled file an object was defined in."""
    if ismodule(object):
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        object = sys.modules.get(object.__module__)
        if hasattr(object, '__file__'):
            return object.__file__
        raise TypeError('{!r} is a built-in class'.format(object))
    # Peel wrappers down to the underlying code object, one layer at a time:
    # method -> function -> code, traceback -> frame -> code.
    unwrappers = ((ismethod, lambda o: o.__func__),
                  (isfunction, lambda o: o.__code__),
                  (istraceback, lambda o: o.tb_frame),
                  (isframe, lambda o: o.f_code))
    for test, unwrap in unwrappers:
        if test(object):
            object = unwrap(object)
    if iscode(object):
        return object.co_filename
    raise TypeError('{!r} is not a module, class, method, '
                    'function, traceback, frame, or code object'.format(object))
# (name, suffix, mode, module_type) record returned by getmoduleinfo().
ModuleInfo = namedtuple('ModuleInfo', 'name suffix mode module_type')
def getmoduleinfo(path):
    """Get the module name, suffix, mode, and module type for a given file.

    Returns a ModuleInfo tuple, or None if no registered suffix matches.
    Deprecated: relies on the legacy 'imp' module."""
    warnings.warn('inspect.getmoduleinfo() is deprecated', DeprecationWarning, 2)
    filename = os.path.basename(path)
    # Longest suffixes sort first (most-negative length), so overlapping
    # suffixes resolve in favour of the longer match.
    ranked = sorted((-len(suffix), suffix, mode, mtype)
                    for suffix, mode, mtype in imp.get_suffixes())
    for neglen, suffix, mode, mtype in ranked:
        if filename[neglen:] == suffix:
            return ModuleInfo(filename[:neglen], suffix, mode, mtype)
def getmodulename(path):
    """Return the module name for a given file, or None."""
    fname = os.path.basename(path)
    # Try longest suffixes first, in case suffixes overlap.
    ranked = sorted((-len(suffix), suffix)
                    for suffix in importlib.machinery.all_suffixes())
    for neglen, suffix in ranked:
        if fname.endswith(suffix):
            return fname[:neglen]
    return None
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
    if any(filename.endswith(s) for s in all_bytecode_suffixes):
        # Bytecode file: map it back to the corresponding source file name.
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif any(filename.endswith(s) for s in
                 importlib.machinery.EXTENSION_SUFFIXES):
        # Extension modules have no Python source at all.
        return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    if hasattr(getmodule(object, filename), '__loader__'):
        return filename
    # or it is in the linecache
    if filename in linecache.cache:
        return filename
def getabsfile(object, _filename=None):
    """Return an absolute path to the source or compiled file for an object.

    The idea is for each object to have a unique origin, so the result is
    normalized (normcase + abspath) as much as possible."""
    filename = _filename
    if filename is None:
        filename = getsourcefile(object) or getfile(object)
    return os.path.normcase(os.path.abspath(filename))
# Caches used by getmodule(): map (absolute and real) file names to module
# names, and module names back to the __file__ they were last mapped from.
modulesbyfile = {}
_filesbymodname = {}
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    _filename, when given, is a hint with the file the object is believed
    to come from (used internally to avoid recomputing it)."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except TypeError:
        # Object has no associated file at all.
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in list(sys.modules.items()):
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An IOError
    is raised if the source code cannot be retrieved."""
    file = getfile(object)
    sourcefile = getsourcefile(object)
    # Pseudo-files like '<stdin>' may still have their lines in linecache
    # even though no real source file exists on disk.
    if not sourcefile and file[0] + file[-1] != '<>':
        raise IOError('source code not available')
    file = sourcefile if sourcefile else file
    module = getmodule(object, file)
    # Passing the module globals lets linecache consult the module's
    # PEP 302 __loader__ when the file is not present on disk.
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise IOError('could not get source code')
    if ismodule(object):
        return lines, 0
    if isclass(object):
        name = object.__name__
        pat = re.compile(r'^(\s*)class\s*' + name + r'\b')
        # make some effort to find the best matching class definition:
        # use the one with the least indentation, which is the one
        # that's most probably not inside a function definition.
        candidates = []
        for i in range(len(lines)):
            match = pat.match(lines[i])
            if match:
                # if it's at toplevel, it's already the best one
                if lines[i][0] == 'c':
                    return lines, i
                # else add whitespace to candidate list
                candidates.append((match.group(1), i))
        if candidates:
            # this will sort by whitespace, and by line number,
            # less whitespace first
            candidates.sort()
            return lines, candidates[0][1]
        else:
            raise IOError('could not find class definition')
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise IOError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan backwards for the 'def', 'lambda' or decorator line that
        # introduces this code object.
        pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            if pat.match(lines[lnum]): break
            lnum = lnum - 1
        return lines, lnum
    raise IOError('could not find code object')
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (IOError, TypeError):
        return None
    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip a shebang line and any leading blank or bare-'#' lines.
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)
    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                # Walk upwards, prepending comment lines as long as they
                # stay at the same indentation as the definition.
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim bare '#' separator lines from both ends of the block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
class EndOfBlock(Exception):
    """Signal raised by BlockFinder.tokeneater when the block has ended."""
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0        # current indentation depth inside the block
        self.islambda = False  # block is a lambda: ends at the first NEWLINE
        self.started = False   # seen the introducing def/class/lambda yet?
        self.passline = False  # currently skipping to the end of the line
        self.last = 1          # last line (1-based) known to be in the block
    def tokeneater(self, type, token, srowcol, erowcol, line):
        # Fed each token from tokenize.generate_tokens(); raises EndOfBlock
        # once the block that starts at line 1 is complete.
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda: # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            # not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # EndOfBlock marks a normal stop; IndentationError can occur when
        # the lines start mid-indentation.
        pass
    return lines[:finder.last]
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The returned line number says where in the original
    source file the first line of code was found.  An IOError is raised if
    the source code cannot be retrieved."""
    lines, lnum = findsource(object)
    if ismodule(object):
        return lines, 0
    return getblock(lines[lnum:]), lnum + 1
def getsource(object):
    """Return the text of the source code for an object as a single string.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  An IOError is raised if the source code cannot be
    retrieved."""
    source_lines, _ = getsourcelines(object)
    return ''.join(source_lines)
def walktree(classes, children, parent):
    """Recursive helper for getclasstree(): build the nested list of
    (class, bases) entries, sorted by module then class name."""
    classes.sort(key=attrgetter('__module__', '__name__'))
    tree = []
    for klass in classes:
        tree.append((klass, klass.__bases__))
        if klass in children:
            tree.append(walktree(children[klass], children, klass))
    return tree
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple of a
    class and a tuple of its base classes.  If 'unique' is true, exactly one
    entry appears per input class; otherwise classes using multiple
    inheritance and their descendants appear multiple times."""
    children = {}
    roots = []
    for klass in classes:
        if not klass.__bases__:
            # Base-less classes are roots themselves.
            if klass not in roots:
                roots.append(klass)
            continue
        for parent in klass.__bases__:
            children.setdefault(parent, []).append(klass)
            if unique and parent in classes:
                break
    # Parents that are not in the input list also become roots.
    for parent in children:
        if parent not in classes:
            roots.append(parent)
    return walktree(roots, children, None)
# (args, varargs, varkw) record returned by getargs().
Arguments = namedtuple('Arguments', 'args, varargs, varkw')
def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where 'args' is the
    list of argument names with keyword-only names appended, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None."""
    positional, varargs, kwonly, varkw = _getfullargs(co)
    return Arguments(positional + kwonly, varargs, varkw)
def _getfullargs(co):
    """Get information about the arguments accepted by a code object.

    Four things are returned: (args, varargs, kwonlyargs, varkw), where
    'args' and 'kwonlyargs' are lists of argument names, and 'varargs'
    and 'varkw' are the names of the * and ** arguments or None.

    Raises TypeError if 'co' is not a code object.
    """
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))
    nargs = co.co_argcount
    names = co.co_varnames
    nkwargs = co.co_kwonlyargcount
    # co_varnames starts with the positional argument names, followed by
    # the keyword-only names, then the */** names if present.
    args = list(names[:nargs])
    kwonlyargs = list(names[nargs:nargs+nkwargs])
    # (removed an unused 'step' local that was never read)
    nargs += nkwargs
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[nargs]
        nargs = nargs + 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[nargs]
    return args, varargs, kwonlyargs, varkw
# (args, varargs, keywords, defaults) record returned by getargspec().
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
def getargspec(func):
    """Get the names and default values of a function's arguments.

    Returns an ArgSpec(args, varargs, varkw, defaults) tuple: 'args' lists
    the argument names, 'varargs'/'varkw' name the * and ** arguments or
    are None, and 'defaults' holds the default values of the last n
    arguments.  Raises ValueError for functions with keyword-only
    arguments or annotations; use getfullargspec() for those.
    """
    spec = getfullargspec(func)
    if spec.kwonlyargs or spec.annotations:
        raise ValueError("Function has keyword-only arguments or annotations"
                         ", use getfullargspec() API which can support them")
    return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults)
# Extended form of ArgSpec returned by getfullargspec(): adds keyword-only
# argument names/defaults and the annotations dict.
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')
def getfullargspec(func):
    """Get the names and default values of a function's arguments.

    Returns a FullArgSpec(args, varargs, varkw, defaults, kwonlyargs,
    kwonlydefaults, annotations) named tuple.  The first four items
    correspond to getargspec(); the rest cover keyword-only arguments,
    their defaults, and the annotations dict."""
    if ismethod(func):
        # Inspect the function object underlying a bound method.
        func = func.__func__
    if not isfunction(func):
        raise TypeError('{!r} is not a Python function'.format(func))
    args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
    return FullArgSpec(args, varargs, varkw, func.__defaults__,
                       kwonlyargs, func.__kwdefaults__, func.__annotations__)
# (args, varargs, keywords, locals) record returned by getargvalues().
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    Returns an ArgInfo(args, varargs, keywords, locals) tuple: the argument
    names, the * and ** argument names (or None), and the frame's locals
    dictionary holding the bound values."""
    arg_names, star_args, star_kw = getargs(frame.f_code)
    return ArgInfo(arg_names, star_args, star_kw, frame.f_locals)
def formatannotation(annotation, base_module=None):
    """Format an annotation for display: types render as their (possibly
    module-qualified) name, everything else falls back to repr()."""
    if not isinstance(annotation, type):
        return repr(annotation)
    # Builtins and classes from base_module need no qualification.
    if annotation.__module__ in ('builtins', base_module):
        return annotation.__name__
    return '{}.{}'.format(annotation.__module__, annotation.__name__)
def formatannotationrelativeto(object):
module = getattr(object, '__module__', None)
def _formatannotation(annotation):
return formatannotation(annotation, module)
return _formatannotation
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getargspec
    or getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations); the remaining ones are
    optional formatting callbacks used to turn names and values into
    strings."""
    def annotated(name):
        text = formatarg(name)
        if name in annotations:
            text += ': ' + formatannotation(annotations[name])
        return text

    specs = []
    firstdefault = len(args) - len(defaults) if defaults else None
    for i, arg in enumerate(args):
        spec = annotated(arg)
        if firstdefault is not None and i >= firstdefault:
            spec += formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(annotated(varargs)))
    elif kwonlyargs:
        # A bare '*' separates keyword-only args when there is no *args.
        specs.append('*')
    for kwonlyarg in kwonlyargs:
        spec = annotated(kwonlyarg)
        if kwonlydefaults and kwonlyarg in kwonlydefaults:
            spec += formatvalue(kwonlydefaults[kwonlyarg])
        specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(annotated(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals); the
    remaining ones are optional formatting callbacks that turn names and
    values into strings."""
    specs = [formatarg(name) + formatvalue(locals[name]) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
def _missing_arguments(f_name, argnames, pos, values):
    """Raise a TypeError naming the required arguments that are missing.

    f_name: function name for the message; argnames: required argument
    names; pos: True for positional, False for keyword-only; values: the
    mapping of already-bound arguments.
    """
    names = [repr(name) for name in argnames if name not in values]
    missing = len(names)
    if missing == 1:
        s = names[0]
    elif missing == 2:
        s = "{} and {}".format(*names)
    else:
        # BUG FIX: the last two names must be unpacked into separate
        # format arguments; passing the list itself raised IndexError
        # whenever three or more arguments were missing.
        tail = ", {} and {}".format(*names[-2:])
        del names[-2:]
        s = ", ".join(names) + tail
    raise TypeError("%s() missing %i required %s argument%s: %s" %
                    (f_name, missing,
                     "positional" if pos else "keyword-only",
                     "" if missing == 1 else "s", s))
def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
    """Raise a TypeError reporting that f_name() received too many
    positional arguments, mirroring the interpreter's own message."""
    # Minimum number of positionals: those without default values.
    atleast = len(args) - defcount
    kwonly_given = len([arg for arg in kwonly if arg in values])
    if varargs:
        plural = atleast != 1
        sig = "at least %d" % (atleast,)
    elif defcount:
        plural = True
        sig = "from %d to %d" % (atleast, len(args))
    else:
        plural = len(args) != 1
        sig = str(len(args))
    kwonly_sig = ""
    if kwonly_given:
        # Mention bound keyword-only arguments so the count adds up for
        # the reader.
        msg = " positional argument%s (and %d keyword-only argument%s)"
        kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
                             "s" if kwonly_given != 1 else ""))
    raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
            (f_name, sig, "s" if plural else "", given, kwonly_sig,
             "was" if given == 1 and not kwonly_given else "were"))
def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'.

    Raises TypeError when the arguments do not bind, mirroring the error a
    real call to 'func' would produce.
    """
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}
    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    # Bind positionals to the named parameters first; overflow goes to *args.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                  num_pos, arg2value)
    if num_pos < num_args:
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            # BUG FIX: kwonlydefaults is None when the function declares no
            # keyword-only defaults; guard before the membership test to
            # avoid "argument of type 'NoneType' is not iterable".
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
# (nonlocals, globals, builtins, unbound) record returned by getclosurevars().
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError("'{!r}' is not a Python function".format(func))
    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved by
    # positional lookup in __closure__.
    closure = func.__closure__ or ()
    nonlocal_vars = {name: cell.cell_contents
                     for name, cell in zip(code.co_freevars, closure)}
    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__.
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        if name in ("None", "True", "False"):
            # These used to be builtins rather than keywords and may still
            # show up as name references; ignore them.
            continue
        if name in global_ns:
            global_vars[name] = global_ns[name]
        elif name in builtin_ns:
            builtin_vars[name] = builtin_ns[name]
        else:
            unbound_names.add(name)
    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
# Record returned by getframeinfo(): filename, current line number, function
# name, list of context source lines, and index of the current line therein.
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # For tracebacks use tb_lineno: the frame's f_lineno reflects where
        # execution last was, not where the exception was raised.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))
    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            lines = index = None
        else:
            start = max(start, 1)
            # Clamp the window so it stays within the file.
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None
    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is a descriptor that computes the value on demand
    # from the code object's line-number table.
    return frame.f_lineno
def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    while frame:
        records.append((frame,) + getframeinfo(frame, context))
        frame = frame.f_back
    return records
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    records = []
    while tb:
        records.append((tb.tb_frame,) + getframeinfo(tb, context))
        tb = tb.tb_next
    return records
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is CPython-specific; other implementations may lack it.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    tb = sys.exc_info()[2]
    return getinnerframes(tb, context)
# Unique marker used by the getattr_static() helpers to mean "not found",
# since None could be a legitimate attribute value.
_sentinel = object()
def _static_getmro(klass):
    # Read __mro__ via the raw descriptor on 'type' so that a metaclass
    # overriding attribute access cannot interfere.
    mro_descriptor = type.__dict__['__mro__']
    return mro_descriptor.__get__(klass)
def _check_instance(obj, attr):
    # Fetch the instance __dict__ without triggering __getattr__, then look
    # the attribute up with the unbound dict.get to bypass any overrides.
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        instance_dict = {}
    return dict.get(instance_dict, attr, _sentinel)
def _check_class(klass, attr):
    # Walk the static MRO, only trusting classes whose __dict__ descriptor
    # has not been shadowed by something bogus.
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            continue
        try:
            return entry.__dict__[attr]
        except KeyError:
            continue
    return _sentinel
def _is_type(obj):
    # _static_getmro raises TypeError for anything that is not a type.
    try:
        _static_getmro(obj)
        return True
    except TypeError:
        return False
def _shadowed_dict(klass):
    # Look for a '__dict__' entry in the static MRO that is NOT the genuine
    # getset descriptor; return it (the shadowing object) if found,
    # otherwise the sentinel meaning "not shadowed".
    dict_getter = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_getter.__get__(entry)["__dict__"]
        except KeyError:
            continue
        is_genuine = (type(class_dict) is types.GetSetDescriptorType and
                      class_dict.__name__ == "__dict__" and
                      class_dict.__objclass__ is entry)
        if not is_genuine:
            return class_dict
    return _sentinel
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
    descriptor protocol, __getattr__ or __getattribute__.

    Note: this function may not be able to retrieve all attributes
    that getattr can fetch (like dynamically created attributes)
    and may find attributes that getattr can't (like descriptors
    that raise AttributeError). It can also return descriptor objects
    instead of instance members in some cases. See the
    documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        dict_attr = _shadowed_dict(klass)
        # Only trust the instance __dict__ when the class's __dict__
        # descriptor has not been shadowed by something bogus.
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj
    klass_result = _check_class(klass, attr)
    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (defines both __get__ and __set__) on the class
        # takes precedence over the instance attribute, mirroring normal
        # attribute lookup.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result
    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result
    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
# Generator states returned by getgeneratorstate().
GEN_CREATED = 'GEN_CREATED'      # waiting to start execution
GEN_RUNNING = 'GEN_RUNNING'      # currently being executed
GEN_SUSPENDED = 'GEN_SUSPENDED'  # suspended at a yield expression
GEN_CLOSED = 'GEN_CLOSED'        # execution has completed
def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is released once the generator finishes or is closed.
        return GEN_CLOSED
    if frame.f_lasti == -1:
        # No bytecode executed yet.
        return GEN_CREATED
    return GEN_SUSPENDED
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values the
    bound values."""
    if not isgenerator(generator):
        raise TypeError("'{!r}' is not a Python generator".format(generator))
    frame = getattr(generator, "gi_frame", None)
    if frame is None:
        # Generator already finished: no frame, hence no locals.
        return {}
    return generator.gi_frame.f_locals
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def _get_user_defined_method(cls, method_name):
try:
meth = getattr(cls, method_name)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
    '''Get a signature object for the passed callable.

    Dispatches, in order: bound methods (drop the first parameter),
    an explicit ``__signature__`` attribute, decorator-wrapped callables
    (``__wrapped__``), plain Python functions, ``functools.partial``
    objects, classes, and finally arbitrary objects with ``__call__``.

    Raises TypeError for non-callables and ValueError for callables
    whose signature cannot be determined (e.g. most builtins).
    '''

    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = signature(obj.__func__)
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            return sig

    try:
        # Was this function wrapped by a decorator?
        wrapped = obj.__wrapped__
    except AttributeError:
        pass
    else:
        return signature(wrapped)

    if isinstance(obj, types.FunctionType):
        return Signature.from_function(obj)

    if isinstance(obj, functools.partial):
        # Start from the wrapped callable's signature, then remove or
        # adjust the parameters consumed by the partial's bound arguments.
        sig = signature(obj.func)

        new_params = OrderedDict(sig.parameters.items())

        partial_args = obj.args or ()
        partial_keywords = obj.keywords or {}
        try:
            ba = sig.bind_partial(*partial_args, **partial_keywords)
        except TypeError as ex:
            msg = 'partial object {!r} has incorrect arguments'.format(obj)
            raise ValueError(msg) from ex

        for arg_name, arg_value in ba.arguments.items():
            param = new_params[arg_name]
            if arg_name in partial_keywords:
                # We set a new default value, because the following code
                # is correct:
                #
                #   >>> def foo(a): print(a)
                #   >>> print(partial(partial(foo, a=10), a=20)())
                #   20
                #   >>> print(partial(partial(foo, a=10), a=20)(a=30))
                #   30
                #
                # So, with 'partial' objects, passing a keyword argument is
                # like setting a new default value for the corresponding
                # parameter
                #
                # We also mark this parameter with '_partial_kwarg'
                # flag.  Later, in '_bind', the 'default' value of this
                # parameter will be added to 'kwargs', to simulate
                # the 'functools.partial' real call.
                new_params[arg_name] = param.replace(default=arg_value,
                                                     _partial_kwarg=True)
            elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
                  not param._partial_kwarg):
                # Positionally bound parameter: it no longer appears in
                # the partial's effective signature.
                new_params.pop(arg_name)

        return sig.replace(parameters=new_params.values())

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)
        else:
            # Now we check if the 'obj' class has a '__new__' method
            new = _get_user_defined_method(obj, '__new__')
            if new is not None:
                sig = signature(new)
            else:
                # Finally, we should have at least __init__ implemented
                init = _get_user_defined_method(obj, '__init__')
                if init is not None:
                    sig = signature(init)
    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = signature(call)

    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        return sig.replace(parameters=tuple(sig.parameters.values())[1:])

    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)

    raise ValueError('callable {!r} is not supported by signature'.format(obj))
class _void:
    '''A private marker - used in Parameter & Signature'''


class _empty:
    # Marker meaning "no value specified" (default/annotation absent).
    # Compared with 'is', never instantiated.
    pass
class _ParameterKind(int):
def __new__(self, *args, name):
obj = int.__new__(self, *args)
obj._name = name
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {!r}>'.format(self._name)
# The five parameter kinds, numbered in the order they may legally
# appear within a signature (validated by Signature.__init__).
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter:
    '''Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified.  If the
        parameter has no default value, this attribute is not set.
    * annotation
        The annotation for the parameter if specified.  If the
        parameter has no annotation, this attribute is not set.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
    '''

    __slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')

    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    empty = _empty

    def __init__(self, name, kind, *, default=_empty, annotation=_empty,
                 _partial_kwarg=False):
        # _partial_kwarg marks parameters whose default was injected by a
        # functools.partial keyword (see signature() and Signature._bind).
        if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
                        _VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
            raise ValueError("invalid value for 'Parameter.kind' attribute")
        self._kind = kind

        if default is not _empty:
            if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                msg = '{} parameters cannot have default values'.format(kind)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is None:
            # Only positional-only parameters may be anonymous.
            if kind != _POSITIONAL_ONLY:
                raise ValueError("None is not a valid name for a "
                                 "non-positional-only parameter")
            self._name = name
        else:
            name = str(name)
            if kind != _POSITIONAL_ONLY and not name.isidentifier():
                msg = '{!r} is not a valid parameter name'.format(name)
                raise ValueError(msg)
            self._name = name

        self._partial_kwarg = _partial_kwarg

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, *, name=_void, kind=_void, annotation=_void,
                default=_void, _partial_kwarg=_void):
        '''Creates a customized copy of the Parameter.'''

        if name is _void:
            name = self._name

        if kind is _void:
            kind = self._kind

        if annotation is _void:
            annotation = self._annotation

        if default is _void:
            default = self._default

        if _partial_kwarg is _void:
            _partial_kwarg = self._partial_kwarg

        return type(self)(name, kind, default=default, annotation=annotation,
                          _partial_kwarg=_partial_kwarg)

    def __str__(self):
        kind = self.kind

        formatted = self._name
        if kind == _POSITIONAL_ONLY:
            # Anonymous positional-only parameters render as '<>'.
            if formatted is None:
                formatted = ''
            formatted = '<{}>'.format(formatted)

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}:{}'.format(formatted,
                                       formatannotation(self._annotation))

        if self._default is not _empty:
            formatted = '{}={}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{} at {:#x} {!r}>'.format(self.__class__.__name__,
                                           id(self), self.name)

    def __eq__(self, other):
        # NOTE: _partial_kwarg is deliberately ignored in comparisons.
        return (issubclass(other.__class__, Parameter) and
                self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)

    def __ne__(self, other):
        return not self.__eq__(other)
class BoundArguments:
    '''Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : OrderedDict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    '''

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        # Walk parameters in declaration order, collecting values until the
        # first parameter that must be passed by keyword (or has no bound
        # value); everything from there on belongs in 'kwargs'.
        args = []
        for param_name, param in self._signature.parameters.items():
            if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                param._partial_kwarg):
                # Keyword arguments mapped by 'functools.partial'
                # (Parameter._partial_kwarg is True) are mapped
                # in 'BoundArguments.kwargs', along with VAR_KEYWORD &
                # KEYWORD_ONLY
                break

            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)

        return tuple(args)

    @property
    def kwargs(self):
        kwargs = {}
        # 'kwargs_started' flips once we pass the last parameter that could
        # still be supplied positionally; mirror of the 'args' property.
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
                    param._partial_kwarg):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        kwargs_started = True
                        continue

            if not kwargs_started:
                continue

            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg

        return kwargs

    def __eq__(self, other):
        return (issubclass(other.__class__, BoundArguments) and
                self.signature == other.signature and
                self.arguments == other.arguments)

    def __ne__(self, other):
        return not self.__eq__(other)
class Signature:
    '''A Signature object represents the overall signature of a function.
    It stores a Parameter object for each parameter accepted by the
    function, as well as information specific to the function itself.

    A Signature object has the following public attributes and methods:

    * parameters : OrderedDict
        An ordered mapping of parameters' names to the corresponding
        Parameter objects (keyword-only arguments are in the same order
        as listed in `code.co_varnames`).
    * return_annotation : object
        The annotation for the return type of the function if specified.
        If the function has no annotation for its return type, this
        attribute is not set.
    * bind(*args, **kwargs) -> BoundArguments
        Creates a mapping from positional and keyword arguments to
        parameters.
    * bind_partial(*args, **kwargs) -> BoundArguments
        Creates a partial mapping from positional and keyword arguments
        to parameters (simulating 'functools.partial' behavior.)
    '''

    __slots__ = ('_return_annotation', '_parameters')

    _parameter_cls = Parameter
    _bound_arguments_cls = BoundArguments

    empty = _empty

    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        '''Constructs Signature from the given list of Parameter
        objects and 'return_annotation'. All arguments are optional.
        '''
        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                # Kinds are ints; they must be non-decreasing across the
                # parameter list (e.g. no positional after keyword-only).
                top_kind = _POSITIONAL_ONLY

                for idx, param in enumerate(parameters):
                    kind = param.kind
                    if kind < top_kind:
                        msg = 'wrong parameter order: {} before {}'
                        msg = msg.format(top_kind, param.kind)
                        raise ValueError(msg)
                    else:
                        top_kind = kind

                    name = param.name
                    if name is None:
                        # Anonymous positional-only: synthesize a name
                        # from the index.
                        name = str(idx)
                        param = param.replace(name=name)

                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)
                    params[name] = param
            else:
                params = OrderedDict(((param.name, param)
                                      for param in parameters))

        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation

    @classmethod
    def from_function(cls, func):
        '''Constructs Signature for the given python function'''

        if not isinstance(func, types.FunctionType):
            raise TypeError('{!r} is not a Python function'.format(func))

        Parameter = cls._parameter_cls

        # Parameter information.
        func_code = func.__code__
        pos_count = func_code.co_argcount
        arg_names = func_code.co_varnames
        positional = tuple(arg_names[:pos_count])
        keyword_only_count = func_code.co_kwonlyargcount
        keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
        annotations = func.__annotations__
        defaults = func.__defaults__
        kwdefaults = func.__kwdefaults__

        if defaults:
            pos_default_count = len(defaults)
        else:
            pos_default_count = 0

        parameters = []

        # Non-keyword-only parameters w/o defaults.
        non_default_count = pos_count - pos_default_count
        for name in positional[:non_default_count]:
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD))

        # ... w/ defaults.
        for offset, name in enumerate(positional[non_default_count:]):
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_POSITIONAL_OR_KEYWORD,
                                        default=defaults[offset]))

        # *args  (0x04 is CO_VARARGS)
        if func_code.co_flags & 0x04:
            name = arg_names[pos_count + keyword_only_count]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_POSITIONAL))

        # Keyword-only parameters.
        for name in keyword_only:
            default = _empty
            if kwdefaults is not None:
                default = kwdefaults.get(name, _empty)

            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_KEYWORD_ONLY,
                                        default=default))
        # **kwargs  (0x08 is CO_VARKEYWORDS)
        if func_code.co_flags & 0x08:
            index = pos_count + keyword_only_count
            if func_code.co_flags & 0x04:
                index += 1

            name = arg_names[index]
            annotation = annotations.get(name, _empty)
            parameters.append(Parameter(name, annotation=annotation,
                                        kind=_VAR_KEYWORD))

        return cls(parameters,
                   return_annotation=annotations.get('return', _empty),
                   __validate_parameters__=False)

    @property
    def parameters(self):
        return self._parameters

    @property
    def return_annotation(self):
        return self._return_annotation

    def replace(self, *, parameters=_void, return_annotation=_void):
        '''Creates a customized copy of the Signature.
        Pass 'parameters' and/or 'return_annotation' arguments
        to override them in the new copy.
        '''

        if parameters is _void:
            parameters = self.parameters.values()

        if return_annotation is _void:
            return_annotation = self._return_annotation

        return type(self)(parameters,
                          return_annotation=return_annotation)

    def __eq__(self, other):
        if (not issubclass(type(other), Signature) or
            self.return_annotation != other.return_annotation or
            len(self.parameters) != len(other.parameters)):
            return False

        other_positions = {param: idx
                           for idx, param in enumerate(other.parameters.keys())}

        for idx, (param_name, param) in enumerate(self.parameters.items()):
            if param.kind == _KEYWORD_ONLY:
                # Keyword-only parameters may appear in any order.
                try:
                    other_param = other.parameters[param_name]
                except KeyError:
                    return False
                else:
                    if param != other_param:
                        return False
            else:
                # Positional parameters must match both value and position.
                try:
                    other_idx = other_positions[param_name]
                except KeyError:
                    return False
                else:
                    if (idx != other_idx or
                        param != other.parameters[param_name]):
                        return False

        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _bind(self, args, kwargs, *, partial=False):
        '''Private method. Don't use directly.'''

        arguments = OrderedDict()

        parameters = iter(self.parameters.values())
        parameters_ex = ()
        arg_vals = iter(args)

        if partial:
            # Support for binding arguments to 'functools.partial' objects.
            # See 'functools.partial' case in 'signature()' implementation
            # for details.
            for param_name, param in self.parameters.items():
                if (param._partial_kwarg and param_name not in kwargs):
                    # Simulating 'functools.partial' behavior
                    kwargs[param_name] = param.default

        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = '{arg!r} parameter lacking default value'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError('too many positional arguments')

                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break

                    if param.name in kwargs:
                        raise TypeError('multiple values for argument '
                                        '{arg!r}'.format(arg=param.name))

                    arguments[param.name] = arg_val

        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _POSITIONAL_ONLY:
                # This should never happen in case of a properly built
                # Signature object (but let's have this check here
                # to ensure correct behaviour just in case)
                raise TypeError('{arg!r} parameter is positional only, '
                                'but was passed as a keyword'. \
                                format(arg=param.name))

            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue

            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter.  It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                    param.default is _empty):
                    raise TypeError('{arg!r} parameter lacking default value'. \
                                    format(arg=param_name)) from None
            else:
                arguments[param_name] = arg_val

        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError('too many keyword arguments')

        return self._bound_arguments_cls(self, arguments)

    def bind(self, *args, **kwargs):
        '''Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs)

    def bind_partial(self, *args, **kwargs):
        '''Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        '''
        return self._bind(args, kwargs, partial=True)

    def __str__(self):
        result = []
        render_kw_only_separator = True
        for idx, param in enumerate(self.parameters.values()):
            formatted = str(param)

            kind = param.kind
            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False

            result.append(formatted)

        rendered = '({})'.format(', '.join(result))

        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)

        return rendered
|
from .base import BaseOp
from .array import ArrLoad, ArrStore, ArrLength
from .checkcast import CheckCast, InstanceOf
from .convert import Convert
from .fieldaccess import FieldAccess
from .fmath import FAdd, FDiv, FMul, FRem, FSub, FNeg, FCmp
from .invoke import Invoke, InvokeDynamic
from .imath import IAdd, IDiv, IMul, IRem, ISub, IAnd, IOr, IShl, IShr, IUshr, IXor, ICmp
from .monitor import Monitor
from .new import New, NewArray, MultiNewArray
from .throw import Throw, MagicThrow
from .truncate import Truncate
from .tryreturn import TryReturn
from .phi import Phi, ExceptionPhi
|
import iris.tests as tests
from .gallerytest_util import (
add_gallery_to_path,
fail_any_deprecation_warnings,
show_replaced_by_check_graphic,
)
class TestProjectionsAndAnnotations(tests.GraphicsTest):
    """Test the plot_projections_and_annotations gallery code."""
    # NOTE: the docstring previously claimed this tested the
    # 'atlantic_profiles' example, which was a copy-paste mistake.

    def test_plot_projections_and_annotations(self):
        # Import and run the gallery script with deprecation warnings
        # promoted to failures, then compare the produced graphic
        # against the stored reference image.
        with fail_any_deprecation_warnings():
            with add_gallery_to_path():
                import plot_projections_and_annotations
            with show_replaced_by_check_graphic(self):
                plot_projections_and_annotations.main()
# Allow running this test module directly.
if __name__ == "__main__":
    tests.main()
|
"""
Manages information about the guest.

This class encapsulates a libvirt domain and provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes.
"""
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova import utils
from nova.virt.libvirt import config as vconfig
# Imported lazily in Guest.__init__ so this module can be loaded on
# hosts without the libvirt python bindings installed.
libvirt = None

LOG = logging.getLogger(__name__)
class Guest(object):
    """Thin wrapper around a libvirt domain object."""

    def __init__(self, domain):
        # Lazy import: keeps module importable without libvirt bindings.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')

        self._domain = domain

    def __repr__(self):
        return "<Guest %(id)d %(name)s %(uuid)s>" % {
            'id': self.id,
            'name': self.name,
            'uuid': self.uuid
        }

    @property
    def id(self):
        # Numeric libvirt domain id.
        return self._domain.ID()

    @property
    def uuid(self):
        return self._domain.UUIDString()

    @property
    def name(self):
        return self._domain.name()

    @property
    def _encoded_xml(self):
        # Current domain XML description, decoded to text.
        return encodeutils.safe_decode(self._domain.XMLDesc(0))

    @classmethod
    def create(cls, xml, host):
        """Create a new Guest

        :param xml: XML definition of the domain to create
        :param host: host.Host connection to define the guest on

        :returns guest.Guest: Guest ready to be launched
        """
        try:
            # TODO(sahid): Host.write_instance_config should return
            # an instance of Guest
            domain = host.write_instance_config(xml)
        except Exception:
            # Log the failing XML, then re-raise the original exception.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error defining a domain with XML: %s') %
                          encodeutils.safe_decode(xml))
        return cls(domain)

    def launch(self, pause=False):
        """Starts a created guest.

        :param pause: Indicates whether to start and pause the guest
        """
        flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
        try:
            return self._domain.createWithFlags(flags)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(review): 'errors' is not a Logger.error kwarg; it
                # looks intended for safe_decode instead — confirm.
                LOG.error(_LE('Error launching a defined domain '
                              'with XML: %s') %
                          self._encoded_xml, errors='ignore')

    def poweroff(self):
        """Stops a running guest."""
        self._domain.destroy()

    def inject_nmi(self):
        """Injects an NMI to a guest."""
        self._domain.injectNMI()

    def resume(self):
        """Resumes a suspended guest."""
        self._domain.resume()

    def enable_hairpin(self):
        """Enables hairpin mode for this guest."""
        interfaces = self.get_interfaces()
        try:
            for interface in interfaces:
                # Write '1' into the bridge port's hairpin_mode knob; exit
                # code 1 is tolerated (best effort per interface).
                utils.execute(
                    'tee',
                    '/sys/class/net/%s/brport/hairpin_mode' % interface,
                    process_input='1',
                    run_as_root=True,
                    check_exit_code=[0, 1])
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(review): 'errors' is not a Logger.error kwarg —
                # see launch(); confirm intent.
                LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
                          self._encoded_xml, errors='ignore')

    def get_interfaces(self):
        """Returns a list of all network interfaces for this domain."""
        doc = None

        try:
            doc = etree.fromstring(self._encoded_xml)
        except Exception:
            # Unparseable domain XML: report no interfaces.
            return []

        interfaces = []

        nodes = doc.findall('./devices/interface/target')
        for target in nodes:
            interfaces.append(target.get('dev'))

        return interfaces

    def get_vcpus_info(self):
        """Returns virtual cpus information of guest.

        :returns: generator of guest.VCPUInfo
        """
        vcpus = self._domain.vcpus()
        if vcpus is not None:
            for vcpu in vcpus[0]:
                yield VCPUInfo(
                    id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])

    def delete_configuration(self):
        """Undefines a domain from hypervisor."""
        try:
            self._domain.undefineFlags(
                libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
        except libvirt.libvirtError:
            # NOTE(review): the '%d' placement here renders the domain id
            # mid-sentence ("undefineFlags. <id>Retrying") — looks like a
            # formatting slip; confirm intended message.
            LOG.debug("Error from libvirt during undefineFlags. %d"
                      "Retrying with undefine", self.id)
            self._domain.undefine()
        except AttributeError:
            # Older versions of libvirt don't support undefine flags,
            # trying to remove managed image
            try:
                if self._domain.hasManagedSaveImage(0):
                    self._domain.managedSaveRemove(0)
            except AttributeError:
                pass
            self._domain.undefine()

    def has_persistent_configuration(self):
        """Whether domain config is persistently stored on the host."""
        return self._domain.isPersistent()

    def attach_device(self, conf, persistent=False, live=False):
        """Attaches device to the guest.

        :param conf: A LibvirtConfigObject of the device to attach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        self._domain.attachDeviceFlags(conf.to_xml(), flags=flags)

    def get_disk(self, device):
        """Returns the disk mounted at device

        :returns LivirtConfigGuestDisk: mounted at device or None
        """
        try:
            doc = etree.fromstring(self._domain.XMLDesc(0))
        except Exception:
            return None

        # XPath: the <disk> element whose <target dev=...> matches 'device'.
        node = doc.find("./devices/disk/target[@dev='%s'].." % device)
        if node is not None:
            conf = vconfig.LibvirtConfigGuestDisk()
            conf.parse_dom(node)
            return conf

    def detach_device(self, conf, persistent=False, live=False):
        """Detaches device to the guest.

        :param conf: A LibvirtConfigObject of the device to detach
        :param persistent: A bool to indicate whether the change is
                           persistent or not
        :param live: A bool to indicate whether it affect the guest
                     in running state
        """
        flags = persistent and libvirt.VIR_DOMAIN_AFFECT_CONFIG or 0
        flags |= live and libvirt.VIR_DOMAIN_AFFECT_LIVE or 0
        self._domain.detachDeviceFlags(conf.to_xml(), flags=flags)

    def get_xml_desc(self, dump_inactive=False, dump_sensitive=False,
                     dump_migratable=False):
        """Returns xml description of guest.

        :param dump_inactive: Dump inactive domain information
        :param dump_sensitive: Dump security sensitive information
        :param dump_migratable: Dump XML suitable for migration

        :returns string: XML description of the guest
        """
        flags = dump_inactive and libvirt.VIR_DOMAIN_XML_INACTIVE or 0
        flags |= dump_sensitive and libvirt.VIR_DOMAIN_XML_SECURE or 0
        flags |= dump_migratable and libvirt.VIR_DOMAIN_XML_MIGRATABLE or 0
        return self._domain.XMLDesc(flags=flags)

    def save_memory_state(self):
        """Saves the domain's memory state. Requires running domain.

        raises: raises libvirtError on error
        """
        self._domain.managedSave(0)

    def get_block_device(self, disk):
        """Returns a block device wrapper for disk."""
        return BlockDevice(self, disk)
class BlockDevice(object):
    """Wrapper around block device API"""

    REBASE_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited
    COMMIT_DEFAULT_BANDWIDTH = 0  # in MiB/s - 0 unlimited

    def __init__(self, guest, disk):
        self._guest = guest
        self._disk = disk

    # NOTE: the parameter was renamed from 'async' to 'async_' because
    # 'async' is a reserved keyword in Python 3.7+, which made this
    # module a syntax error and impossible to import.
    def abort_job(self, async_=False, pivot=False):
        """Request to cancel any job currently running on the block.

        :param async_: Request only, do not wait for completion
        :param pivot: Pivot to new file when ending a copy or
                      active commit job
        """
        flags = async_ and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_ASYNC or 0
        flags |= pivot and libvirt.VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT or 0
        self._guest._domain.blockJobAbort(self._disk, flags=flags)

    def get_job_info(self):
        """Returns information about job currently running

        :returns: BlockDeviceJobInfo or None if no job is running
        """
        status = self._guest._domain.blockJobInfo(self._disk, flags=0)
        if status != -1:
            return BlockDeviceJobInfo(
                job=status.get("type", 0),
                bandwidth=status.get("bandwidth", 0),
                cur=status.get("cur", 0),
                end=status.get("end", 0))

    def rebase(self, base, shallow=False, reuse_ext=False,
               copy=False, relative=False):
        """Rebases block to new base

        :param shallow: Limit copy to top of source backing chain
        :param reuse_ext: Reuse existing external file of a copy
        :param copy: Start a copy job
        :param relative: Keep backing chain referenced using relative names
        """
        flags = shallow and libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW or 0
        flags |= reuse_ext and libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT or 0
        flags |= copy and libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY or 0
        flags |= relative and libvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE or 0
        return self._guest._domain.blockRebase(
            self._disk, base, self.REBASE_DEFAULT_BANDWIDTH, flags=flags)

    def commit(self, base, top, relative=False):
        """Commit on block device

        For performance during live snapshot it will reduces the disk chain
        to a single disk.

        :param relative: Keep backing chain referenced using relative names
        """
        flags = relative and libvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE or 0
        return self._guest._domain.blockCommit(
            self._disk, base, top, self.COMMIT_DEFAULT_BANDWIDTH, flags=flags)

    def resize(self, size_kb):
        """Resizes block device to Kib size."""
        self._guest._domain.blockResize(self._disk, size_kb)

    def wait_for_job(self, abort_on_error=False, wait_for_job_clean=False):
        """Wait for libvirt block job to complete.

        Libvirt may return either cur==end or an empty dict when
        the job is complete, depending on whether the job has been
        cleaned up by libvirt yet, or not.

        :param abort_on_error: Whether to stop process and raise NovaException
                               on error (default: False)
        :param wait_for_job_clean: Whether to force wait to ensure job is
                                   finished (see bug: LP#1119173)

        :returns: True if still in progress
                  False if completed
        """
        status = self.get_job_info()
        if not status:
            # BUGFIX: previously a missing job status with
            # abort_on_error=False fell through and crashed on
            # 'status.job'/'status.cur' (AttributeError on None).
            # No status means the job is already gone from libvirt's
            # point of view, i.e. it is no longer in progress.
            if abort_on_error:
                msg = _('libvirt error while requesting blockjob info.')
                raise exception.NovaException(msg)
            return False

        if wait_for_job_clean:
            # A cleaned-up job reports type 0 (placeholder/no job).
            job_ended = status.job == 0
        else:
            job_ended = status.cur == status.end

        return not job_ended
class VCPUInfo(object):
    """Immutable-style record describing a single guest vcpu."""

    def __init__(self, id, cpu, state, time):
        """Structure for information about guest vcpus.

        :param id: The virtual cpu number
        :param cpu: The host cpu currently associated
        :param state: The running state of the vcpu (0 offline, 1 running, 2
                      blocked on resource)
        :param time: The cpu time used in nanoseconds
        """
        self.id, self.cpu = id, cpu
        self.state, self.time = state, time
class BlockDeviceJobInfo(object):
    """Snapshot of a libvirt block job's progress."""

    def __init__(self, job, bandwidth, cur, end):
        """Structure for information about running job.

        :param job: The running job (0 placeholder, 1 pull,
                    2 copy, 3 commit, 4 active commit)
        :param bandwidth: Used in MiB/s
        :param cur: Indicates the position between 0 and 'end'
        :param end: Indicates the position for this operation
        """
        self.job, self.bandwidth = job, bandwidth
        self.cur, self.end = cur, end
|
"""Views for Zinnia comments"""
from django.contrib import comments
from django.template.defaultfilters import slugify
from django.http import HttpResponsePermanentRedirect
from django.core.exceptions import ObjectDoesNotExist
from django.views.generic.base import View
from django.views.generic.base import TemplateResponseMixin
class CommentSuccess(TemplateResponseMixin, View):
    """
    View for handling the publication of a Comment on an Entry.
    Do a redirection if the comment is visible,
    else render a confirmation template.
    """
    template_name = 'comments/zinnia/entry/posted.html'

    def get_context_data(self, **kwargs):
        # Expose the looked-up comment (possibly None) to the template.
        return {'comment': self.comment}

    def get(self, request, *args, **kwargs):
        self.comment = None

        if 'c' in request.GET:
            try:
                self.comment = comments.get_model().objects.get(
                    pk=request.GET['c'])
            except (ObjectDoesNotExist, ValueError):
                # Missing or malformed 'c' pk: fall through and render
                # the confirmation template instead.
                pass
        if self.comment and self.comment.is_public:
            # Public comment: permanently redirect to its anchor on the
            # entry page ('#comment-<id>-by-<slugified-user-name>').
            return HttpResponsePermanentRedirect(
                self.comment.get_absolute_url(
                    '#comment-%(id)s-by-') + slugify(self.comment.user_name))

        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
|
import os
import fixtures
from tempest.cmd import init
from tempest.tests import base
class TestTempestInit(base.TestCase):
    """Unit tests for the ``tempest init`` command's workspace and
    config generation helpers.
    """
    def test_generate_testr_conf(self):
        """A generated .testr.conf matches TESTR_CONF filled with the
        package's top-level and test_discover paths.
        """
        # Create fake conf dir
        conf_dir = self.useFixture(fixtures.TempDir())
        init_cmd = init.TempestInit(None, None)
        init_cmd.generate_testr_conf(conf_dir.path)
        # Generate expected file contents
        top_level_path = os.path.dirname(os.path.dirname(init.__file__))
        discover_path = os.path.join(top_level_path, 'test_discover')
        testr_conf_file = init.TESTR_CONF % (top_level_path, discover_path)
        conf_path = conf_dir.join('.testr.conf')
        with open(conf_path, 'r') as conf_file:
            self.assertEqual(conf_file.read(), testr_conf_file)
    def test_generate_sample_config(self):
        """generate_sample_config writes a non-empty tempest.conf.sample
        into the workspace's etc/ directory.
        """
        local_dir = self.useFixture(fixtures.TempDir())
        etc_dir_path = os.path.join(local_dir.path, 'etc/')
        os.mkdir(etc_dir_path)
        init_cmd = init.TempestInit(None, None)
        local_sample_conf_file = os.path.join(etc_dir_path,
                                              'tempest.conf.sample')
        # Verify no sample config file exist
        self.assertFalse(os.path.isfile(local_sample_conf_file))
        init_cmd.generate_sample_config(local_dir.path)
        # Verify sample config file exist with some content
        self.assertTrue(os.path.isfile(local_sample_conf_file))
        self.assertGreater(os.path.getsize(local_sample_conf_file), 0)
    def test_update_local_conf(self):
        """update_local_conf sets lock_path/log_dir without clobbering
        pre-existing user sections or values.
        """
        local_dir = self.useFixture(fixtures.TempDir())
        etc_dir_path = os.path.join(local_dir.path, 'etc/')
        os.mkdir(etc_dir_path)
        lock_dir = os.path.join(local_dir.path, 'tempest_lock')
        config_path = os.path.join(etc_dir_path, 'tempest.conf')
        log_dir = os.path.join(local_dir.path, 'logs')
        init_cmd = init.TempestInit(None, None)
        # Generate the config file
        init_cmd.generate_sample_config(local_dir.path)
        # Create a conf file with populated values
        config_parser_pre = init_cmd.get_configparser(config_path)
        with open(config_path, 'w+') as conf_file:
            # create the same section init will check for and add values to
            config_parser_pre.add_section('oslo_concurrency')
            config_parser_pre.set('oslo_concurrency', 'TEST', local_dir.path)
            # create a new section
            config_parser_pre.add_section('TEST')
            config_parser_pre.set('TEST', 'foo', "bar")
            config_parser_pre.write(conf_file)
        # Update the config file the same way tempest init does
        init_cmd.update_local_conf(config_path, lock_dir, log_dir)
        # parse the new config file to verify it
        config_parser_post = init_cmd.get_configparser(config_path)
        # check that our value in oslo_concurrency wasn't overwritten
        self.assertTrue(config_parser_post.has_section('oslo_concurrency'))
        self.assertEqual(config_parser_post.get('oslo_concurrency', 'TEST'),
                         local_dir.path)
        # check that the lock directory was set correctly
        self.assertEqual(config_parser_post.get('oslo_concurrency',
                                                'lock_path'), lock_dir)
        # check that our new section still exists and wasn't modified
        self.assertTrue(config_parser_post.has_section('TEST'))
        self.assertEqual(config_parser_post.get('TEST', 'foo'), 'bar')
        # check that the DEFAULT values are correct
        # NOTE(auggy): has_section ignores DEFAULT
        self.assertEqual(config_parser_post.get('DEFAULT', 'log_dir'), log_dir)
    def test_create_working_dir_with_existing_local_dir_non_empty(self):
        """create_working_dir refuses (OSError) a non-empty target dir."""
        fake_local_dir = self.useFixture(fixtures.TempDir())
        fake_local_conf_dir = self.useFixture(fixtures.TempDir())
        open("%s/foo" % fake_local_dir.path, 'w').close()
        _init = init.TempestInit(None, None)
        self.assertRaises(OSError,
                          _init.create_working_dir,
                          fake_local_dir.path,
                          fake_local_conf_dir.path)
    def test_create_working_dir(self):
        """create_working_dir builds the full workspace layout and copies
        config files from the source conf dir into etc/.
        """
        fake_local_dir = self.useFixture(fixtures.TempDir())
        fake_local_conf_dir = self.useFixture(fixtures.TempDir())
        os.rmdir(fake_local_dir.path)
        # Create a fake conf file
        fake_file = fake_local_conf_dir.join('conf_file.conf')
        open(fake_file, 'w').close()
        init_cmd = init.TempestInit(None, None)
        init_cmd.create_working_dir(fake_local_dir.path,
                                    fake_local_conf_dir.path)
        # Assert directories are created
        lock_path = os.path.join(fake_local_dir.path, 'tempest_lock')
        etc_dir = os.path.join(fake_local_dir.path, 'etc')
        log_dir = os.path.join(fake_local_dir.path, 'logs')
        testr_dir = os.path.join(fake_local_dir.path, '.testrepository')
        self.assertTrue(os.path.isdir(lock_path))
        self.assertTrue(os.path.isdir(etc_dir))
        self.assertTrue(os.path.isdir(log_dir))
        self.assertTrue(os.path.isdir(testr_dir))
        # Assert file creation
        fake_file_moved = os.path.join(etc_dir, 'conf_file.conf')
        local_conf_file = os.path.join(etc_dir, 'tempest.conf')
        local_testr_conf = os.path.join(fake_local_dir.path, '.testr.conf')
        self.assertTrue(os.path.isfile(fake_file_moved))
        self.assertTrue(os.path.isfile(local_conf_file))
        self.assertTrue(os.path.isfile(local_testr_conf))
    def test_take_action_fails(self):
        """take_action raises OSError for an uncreatable workspace dir,
        and the retry fails the same way (not 'workspace already exists').
        """
        class ParsedArgs(object):
            # Fake argparse namespace; `dir` is a nested path whose parent
            # does not exist -- presumably what triggers the OSError below.
            workspace_dir = self.useFixture(fixtures.TempDir()).path
            workspace_path = os.path.join(workspace_dir, 'workspace.yaml')
            name = 'test'
            dir_base = self.useFixture(fixtures.TempDir()).path
            dir = os.path.join(dir_base, 'foo', 'bar')
            config_dir = self.useFixture(fixtures.TempDir()).path
            show_global_dir = False
        pa = ParsedArgs()
        init_cmd = init.TempestInit(None, None)
        self.assertRaises(OSError, init_cmd.take_action, pa)
        # one more trying should be a same error not "workspace already exists"
        self.assertRaises(OSError, init_cmd.take_action, pa)
|
"""docstring
"""
__revision__ = '0.1'
global_read_user_interceptor = None
global_access_secret_key = None
global_login_page = None
|
from context import seqfindr
from context import pytest
import numpy as np
def test_strip_uninteresting():
    """
    Test the strip_uninteresting function
    Function signature::
    strip_uninteresting(matrix, query_classes, query_list, cons, invert)
    """
    # BUG FIX: the original asserts compared ``nm.all()`` with
    # ``expected.all()`` -- two scalar booleans -- which passes vacuously
    # for any all-nonzero matrices.  Compare the full arrays instead.
    # No cons
    matrix = np.array([(0.5, 2, 3), (0.5, 5, 6)])
    nm, newqc, newql = seqfindr.strip_uninteresting(matrix, ['a', 'b', 'c'],
                                                    ['a1', 'b1', 'c1'], None,
                                                    False)
    assert newqc == ['b', 'c']
    assert newql == ['b1', 'c1']
    assert np.array_equal(nm, np.array([(2, 3), (5, 6)]))
    # Cons
    matrix = np.array([(1.0, 1.0, 3), (0.5, 1.0, 6)])
    nm, newqc, newql = seqfindr.strip_uninteresting(matrix, ['a', 'b', 'c'],
                                                    ['a1', 'b1', 'c1'], True,
                                                    False)
    assert newqc == ['a', 'c']
    assert newql == ['a1', 'c1']
    assert np.array_equal(nm, np.array([(1.0, 3), (0.5, 6)]))
    # Cons, invert
    matrix = np.array([(-1.0, -1.0, -3), (-0.5, -1.0, -6)])
    nm, newqc, newql = seqfindr.strip_uninteresting(matrix, ['a', 'b', 'c'],
                                                    ['a1', 'b1', 'c1'], True,
                                                    True)
    assert newqc == ['a', 'c']
    assert newql == ['a1', 'c1']
    assert np.array_equal(nm, np.array([(-1.0, -3), (-0.5, -6)]))
def test_check_singularity():
    """
    check_singularity(matrix, cons, invert) must raise ValueError when
    every cell of the matrix holds the same value.
    """
    uniform = np.full((2, 3), 0.5)
    with pytest.raises(ValueError):
        seqfindr.check_singularity(uniform, None, False)
|
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest.test import attr
class TenantsTestJSON(base.BaseIdentityAdminTest):
    """CRUD tests for keystone tenants via the JSON admin interface.

    Each test creates tenants with ``self.client``, registers them in
    ``self.data.tenants`` for cleanup, and deletes them again on success.
    Python 2 module (uses ``xrange`` and list-returning ``map``).
    NOTE(review): responses are checked both as ``resp.status`` and
    ``resp['status']`` below -- confirm the client's response type
    supports both access styles.
    """
    _interface = 'json'
    @attr(type='gate')
    def test_tenant_list_delete(self):
        # Create several tenants and delete them
        tenants = []
        for _ in xrange(3):
            tenant_name = data_utils.rand_name(name='tenant-new')
            resp, tenant = self.client.create_tenant(tenant_name)
            self.assertEqual(200, resp.status)
            self.data.tenants.append(tenant)
            tenants.append(tenant)
        tenant_ids = map(lambda x: x['id'], tenants)
        resp, body = self.client.list_tenants()
        self.assertTrue(resp['status'].startswith('2'))
        found = [tenant for tenant in body if tenant['id'] in tenant_ids]
        self.assertEqual(len(found), len(tenants), 'Tenants not created')
        for tenant in tenants:
            resp, body = self.client.delete_tenant(tenant['id'])
            self.assertTrue(resp['status'].startswith('2'))
            self.data.tenants.remove(tenant)
        resp, body = self.client.list_tenants()
        found = [tenant for tenant in body if tenant['id'] in tenant_ids]
        self.assertFalse(any(found), 'Tenants failed to delete')
    @attr(type='gate')
    def test_tenant_create_with_description(self):
        # Create tenant with a description
        tenant_name = data_utils.rand_name(name='tenant-')
        tenant_desc = data_utils.rand_name(name='desc-')
        resp, body = self.client.create_tenant(tenant_name,
                                               description=tenant_desc)
        tenant = body
        self.data.tenants.append(tenant)
        st1 = resp['status']
        tenant_id = body['id']
        desc1 = body['description']
        self.assertTrue(st1.startswith('2'))
        self.assertEqual(desc1, tenant_desc, 'Description should have '
                         'been sent in response for create')
        resp, body = self.client.get_tenant(tenant_id)
        desc2 = body['description']
        self.assertEqual(desc2, tenant_desc, 'Description does not appear'
                         'to be set')
        self.client.delete_tenant(tenant_id)
        self.data.tenants.remove(tenant)
    @attr(type='gate')
    def test_tenant_create_enabled(self):
        # Create a tenant that is enabled
        tenant_name = data_utils.rand_name(name='tenant-')
        resp, body = self.client.create_tenant(tenant_name, enabled=True)
        tenant = body
        self.data.tenants.append(tenant)
        tenant_id = body['id']
        st1 = resp['status']
        en1 = body['enabled']
        self.assertTrue(st1.startswith('2'))
        self.assertTrue(en1, 'Enable should be True in response')
        resp, body = self.client.get_tenant(tenant_id)
        en2 = body['enabled']
        self.assertTrue(en2, 'Enable should be True in lookup')
        self.client.delete_tenant(tenant_id)
        self.data.tenants.remove(tenant)
    @attr(type='gate')
    def test_tenant_create_not_enabled(self):
        # Create a tenant that is not enabled
        tenant_name = data_utils.rand_name(name='tenant-')
        resp, body = self.client.create_tenant(tenant_name, enabled=False)
        tenant = body
        self.data.tenants.append(tenant)
        tenant_id = body['id']
        st1 = resp['status']
        en1 = body['enabled']
        self.assertTrue(st1.startswith('2'))
        self.assertEqual('false', str(en1).lower(),
                         'Enable should be False in response')
        resp, body = self.client.get_tenant(tenant_id)
        en2 = body['enabled']
        self.assertEqual('false', str(en2).lower(),
                         'Enable should be False in lookup')
        self.client.delete_tenant(tenant_id)
        self.data.tenants.remove(tenant)
    @attr(type='gate')
    def test_tenant_update_name(self):
        # Update name attribute of a tenant
        t_name1 = data_utils.rand_name(name='tenant-')
        resp, body = self.client.create_tenant(t_name1)
        self.assertEqual(200, resp.status)
        tenant = body
        self.data.tenants.append(tenant)
        t_id = body['id']
        resp1_name = body['name']
        t_name2 = data_utils.rand_name(name='tenant2-')
        resp, body = self.client.update_tenant(t_id, name=t_name2)
        st2 = resp['status']
        resp2_name = body['name']
        self.assertTrue(st2.startswith('2'))
        self.assertNotEqual(resp1_name, resp2_name)
        resp, body = self.client.get_tenant(t_id)
        resp3_name = body['name']
        self.assertNotEqual(resp1_name, resp3_name)
        self.assertEqual(t_name1, resp1_name)
        self.assertEqual(resp2_name, resp3_name)
        self.client.delete_tenant(t_id)
        self.data.tenants.remove(tenant)
    @attr(type='gate')
    def test_tenant_update_desc(self):
        # Update description attribute of a tenant
        t_name = data_utils.rand_name(name='tenant-')
        t_desc = data_utils.rand_name(name='desc-')
        resp, body = self.client.create_tenant(t_name, description=t_desc)
        self.assertEqual(200, resp.status)
        tenant = body
        self.data.tenants.append(tenant)
        t_id = body['id']
        resp1_desc = body['description']
        t_desc2 = data_utils.rand_name(name='desc2-')
        resp, body = self.client.update_tenant(t_id, description=t_desc2)
        st2 = resp['status']
        resp2_desc = body['description']
        self.assertTrue(st2.startswith('2'))
        self.assertNotEqual(resp1_desc, resp2_desc)
        resp, body = self.client.get_tenant(t_id)
        resp3_desc = body['description']
        self.assertNotEqual(resp1_desc, resp3_desc)
        self.assertEqual(t_desc, resp1_desc)
        self.assertEqual(resp2_desc, resp3_desc)
        self.client.delete_tenant(t_id)
        self.data.tenants.remove(tenant)
    @attr(type='gate')
    def test_tenant_update_enable(self):
        # Update the enabled attribute of a tenant
        t_name = data_utils.rand_name(name='tenant-')
        t_en = False
        resp, body = self.client.create_tenant(t_name, enabled=t_en)
        self.assertEqual(200, resp.status)
        tenant = body
        self.data.tenants.append(tenant)
        t_id = body['id']
        resp1_en = body['enabled']
        t_en2 = True
        resp, body = self.client.update_tenant(t_id, enabled=t_en2)
        st2 = resp['status']
        resp2_en = body['enabled']
        self.assertTrue(st2.startswith('2'))
        self.assertNotEqual(resp1_en, resp2_en)
        resp, body = self.client.get_tenant(t_id)
        resp3_en = body['enabled']
        self.assertNotEqual(resp1_en, resp3_en)
        self.assertEqual('false', str(resp1_en).lower())
        self.assertEqual(resp2_en, resp3_en)
        self.client.delete_tenant(t_id)
        self.data.tenants.remove(tenant)
class TenantsTestXML(TenantsTestJSON):
    # Re-run the entire JSON suite through the XML interface.
    _interface = 'xml'
|
"""
Management class for host-related functions (start, reboot, etc).
"""
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
class Host(object):
    """
    Implements host related operations.
    """
    def __init__(self, session, virtapi):
        # session: XenAPI session wrapper; virtapi: access to nova DB ops.
        self._session = session
        self._virtapi = virtapi
    def host_power_action(self, _host, action):
        """Reboots or shuts down the host.

        `action` must be 'reboot' or 'shutdown'; any other value raises
        KeyError before the plugin call is made.
        """
        args = {"action": jsonutils.dumps(action)}
        methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
        response = call_xenhost(self._session, methods[action], args)
        return response.get("power_action", response)
    def host_maintenance_mode(self, host, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.

        :param host: name of the compute host being put into maintenance
        :param mode: truthy to enter maintenance (evacuate VMs), falsy
                     to leave it (no-op here)
        :returns: 'on_maintenance' or 'off_maintenance'
        :raises exception.NoValidHost: when not every VM counted could
                be migrated
        """
        if not mode:
            return 'off_maintenance'
        # Candidate destinations: every pool host except this one.
        host_list = [host_ref for host_ref in
                     self._session.call_xenapi('host.get_all')
                     if host_ref != self._session.get_xenapi_host()]
        migrations_counter = vm_counter = 0
        ctxt = context.get_admin_context()
        for vm_ref, vm_rec in vm_utils.list_vms(self._session):
            for host_ref in host_list:
                try:
                    # Ensure only guest instances are migrated
                    uuid = vm_rec['other_config'].get('nova_uuid')
                    if not uuid:
                        name = vm_rec['name_label']
                        uuid = _uuid_find(ctxt, host, name)
                        if not uuid:
                            LOG.info(_('Instance %(name)s running on %(host)s'
                                       ' could not be found in the database:'
                                       ' assuming it is a worker VM and skip'
                                       ' ping migration to a new host'),
                                     {'name': name, 'host': host})
                            continue
                    instance = instance_obj.Instance.get_by_uuid(ctxt, uuid)
                    vm_counter = vm_counter + 1
                    aggregate = self._virtapi.aggregate_get_by_host(
                        ctxt, host, key=pool_states.POOL_FLAG)
                    if not aggregate:
                        msg = _('Aggregate for host %(host)s count not be'
                                ' found.') % dict(host=host)
                        raise exception.NotFound(msg)
                    dest = _host_find(ctxt, self._session, aggregate[0],
                                      host_ref)
                    # Point the instance at its new home before migrating so
                    # the DB reflects the in-flight move.
                    instance.host = dest
                    instance.task_state = task_states.MIGRATING
                    instance.save()
                    self._session.call_xenapi('VM.pool_migrate',
                                              vm_ref, host_ref, {})
                    migrations_counter = migrations_counter + 1
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()
                    break
                except self._session.XenAPI.Failure:
                    LOG.exception(_('Unable to migrate VM %(vm_ref)s '
                                    'from %(host)s'),
                                  {'vm_ref': vm_ref, 'host': host})
                    # NOTE(review): if the failure occurs before `instance`
                    # is assigned in this iteration, this references an
                    # unbound or stale (previous iteration) instance --
                    # confirm and guard if reachable.
                    instance.host = host
                    instance.vm_state = vm_states.ACTIVE
                    instance.save()
        if vm_counter == migrations_counter:
            return 'on_maintenance'
        else:
            raise exception.NoValidHost(reason='Unable to find suitable '
                                        'host for VMs evacuation')
    def set_host_enabled(self, _host, enabled):
        """Sets the specified host's ability to accept new instances."""
        args = {"enabled": jsonutils.dumps(enabled)}
        response = call_xenhost(self._session, "set_host_enabled", args)
        return response.get("status", response)
    def get_host_uptime(self, _host):
        """Returns the result of calling "uptime" on the target host."""
        response = call_xenhost(self._session, "host_uptime", {})
        return response.get("uptime", response)
class HostState(object):
    """Manages information about the XenServer host this compute
    node is running on.
    """
    def __init__(self, session):
        super(HostState, self).__init__()
        self._session = session
        # Cached stats dict, (re)filled by update_status().
        self._stats = {}
        self.update_status()
    def get_host_stats(self, refresh=False):
        """Return the current state of the host. If 'refresh' is
        True, run the update first.
        """
        if refresh:
            self.update_status()
        return self._stats
    def update_status(self):
        """Since under Xenserver, a compute node runs on a given host,
        we can get host status information using xenapi.

        Augments the xenhost plugin's 'host_data' with SR disk usage,
        supported instance types and flattened memory figures, then
        caches the result in self._stats.
        """
        LOG.debug(_("Updating host stats"))
        data = call_xenhost(self._session, "host_data", {})
        if data:
            sr_ref = vm_utils.safe_find_sr(self._session)
            # Rescan the SR so size/utilisation figures are current.
            self._session.call_xenapi("SR.scan", sr_ref)
            sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
            total = int(sr_rec["physical_size"])
            used = int(sr_rec["physical_utilisation"])
            data["disk_total"] = total
            data["disk_used"] = used
            data["disk_available"] = total - used
            data["supported_instances"] = to_supported_instances(
                data.get("host_capabilities")
            )
            # Flatten the nested host_memory dict into top-level keys.
            host_memory = data.get('host_memory', None)
            if host_memory:
                data["host_memory_total"] = host_memory.get('total', 0)
                data["host_memory_overhead"] = host_memory.get('overhead', 0)
                data["host_memory_free"] = host_memory.get('free', 0)
                data["host_memory_free_computed"] = host_memory.get(
                    'free-computed', 0)
                del data['host_memory']
            data['hypervisor_hostname'] = data['host_hostname']
            self._stats = data
def to_supported_instances(host_capabilities):
    """Translate xenhost capability strings into (arch, 'xapi', ostype)
    tuples.

    Capability strings of any form other than '<ostype>-<version>-<arch>'
    are logged and skipped; a falsy input yields an empty list.
    """
    if not host_capabilities:
        return []
    supported = []
    for capability in host_capabilities:
        pieces = capability.split("-")
        if len(pieces) == 3:
            ostype, _version, arch = pieces
            supported.append((arch, 'xapi', ostype))
        else:
            LOG.warning(
                _("Failed to extract instance support from %s"), capability)
    return supported
def call_xenhost(session, method, arg_dict):
    """There will be several methods that will need this general
    handling for interacting with the xenhost plugin, so this abstracts
    out that behavior.

    :returns: the JSON-decoded plugin result; '' for an empty result;
              None when the result is not valid JSON; or the XenAPI
              failure detail string on a plugin error
    """
    # Create a task ID as something that won't match any instance ID
    try:
        result = session.call_plugin('xenhost', method, args=arg_dict)
        if not result:
            return ''
        return jsonutils.loads(result)
    except ValueError:
        # Plugin returned something that is not JSON.
        LOG.exception(_("Unable to get updated status"))
        return None
    except session.XenAPI.Failure as e:
        LOG.error(_("The call to %(method)s returned "
                    "an error: %(e)s."), {'method': method, 'e': e})
        return e.details[1]
def _uuid_find(context, host, name_label):
    """Return instance uuid by name_label.

    Looks through the instances on `host` and returns the uuid of the
    first one whose name matches `name_label`, or None if none does.
    """
    matches = (inst.uuid
               for inst in instance_obj.InstanceList.get_by_host(context, host)
               if inst.name == name_label)
    return next(matches, None)
def _host_find(context, session, src_aggregate, dst):
"""Return the host from the xenapi host reference.
:param src_aggregate: the aggregate that the compute host being put in
maintenance (source of VMs) belongs to
:param dst: the hypervisor host reference (destination of VMs)
:return: the compute host that manages dst
"""
# NOTE: this would be a lot simpler if nova-compute stored
# CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
uuid = session.call_xenapi('host.get_record', dst)['uuid']
for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
'metadata': src_aggregate.metadetails})
|
"""Functional tests for Stack and ParallelStack Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
  """Split `array` into its slices along `axis`, dropping that axis.

  Returns a list of `array.shape[axis]` arrays, each with rank one lower
  than the input's.
  """
  num_pieces = array.shape[axis]
  pieces = np.split(array, num_pieces, axis=axis)
  return [np.squeeze(piece, axis=(axis,)) for piece in pieces]
class StackOpTest(test.TestCase):
  """Functional tests for array_ops.stack / parallel_stack vs. numpy."""
  def randn(self, shape, dtype):
    """Return random data of `shape` cast to `dtype` (bool via sign test)."""
    data = np.random.randn(*shape)
    if dtype == np.bool:
      return data < 0  # Naive casting yields True with P(1)!
    else:
      return data.astype(dtype)
  @test_util.run_deprecated_v1
  def testSimple(self):
    """stack(np_split_squeeze(x, axis), axis) round-trips x."""
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
        rank = len(shape)
        for axis in range(-rank, rank):
          for dtype in [np.bool, np.float32, np.int32, np.int64]:
            data = self.randn(shape, dtype)
            xs = np_split_squeeze(data, axis)
            # Stack back into a single tensorflow tensor
            with self.subTest(shape=shape, axis=axis, dtype=dtype):
              c = array_ops.stack(xs, axis=axis)
              self.assertAllEqual(c, data)
  @test_util.run_deprecated_v1
  def testSimpleParallelCPU(self):
    """parallel_stack of row constants reproduces the source array (CPU)."""
    np.random.seed(7)
    with self.session(use_gpu=False):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
        with self.subTest(shape=shape):
          data = self.randn(shape, np.float32)
          xs = list(map(constant_op.constant, data))
          c = array_ops.parallel_stack(xs)
          self.assertAllEqual(c, data)
  @test_util.run_deprecated_v1
  def testSimpleParallelGPU(self):
    """parallel_stack of row constants reproduces the source array (GPU)."""
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (100, 24, 24, 3):
        with self.subTest(shape=shape):
          data = self.randn(shape, np.float32)
          xs = list(map(constant_op.constant, data))
          c = array_ops.parallel_stack(xs)
          self.assertAllEqual(c, data)
  @test_util.run_deprecated_v1
  def testConst(self):
    """Stacking python/numpy constants folds to a Const op."""
    np.random.seed(7)
    with self.session(use_gpu=True):
      # Verify that shape induction works with shapes produced via const stack
      a = constant_op.constant([1, 2, 3, 4, 5, 6])
      b = array_ops.reshape(a, array_ops.stack([2, 3]))
      self.assertAllEqual(b.get_shape(), [2, 3])
      # Check on a variety of shapes and types
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
        for dtype in [np.bool, np.float32, np.int16, np.int32, np.int64]:
          with self.subTest(shape=shape, dtype=dtype):
            data = self.randn(shape, dtype)
            # Stack back into a single tensorflow tensor directly using np array
            c = array_ops.stack(data)
            # This is implemented via a Const:
            self.assertEqual(c.op.type, "Const")
            self.assertAllEqual(c, data)
            # Python lists also work for 1-D case:
            if len(shape) == 1:
              data_list = list(data)
              cl = array_ops.stack(data_list)
              self.assertEqual(cl.op.type, "Const")
              self.assertAllEqual(cl, data)
  @test_util.run_deprecated_v1
  def testConstParallelCPU(self):
    """parallel_stack accepts np arrays and 1-D python lists (CPU)."""
    np.random.seed(7)
    with self.session(use_gpu=False):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2), (8, 2, 10):
        with self.subTest(shape=shape):
          data = self.randn(shape, np.float32)
          if len(shape) == 1:
            data_list = list(data)
            cl = array_ops.parallel_stack(data_list)
            self.assertAllEqual(cl, data)
          data = self.randn(shape, np.float32)
          c = array_ops.parallel_stack(data)
          self.assertAllEqual(c, data)
  @test_util.run_deprecated_v1
  def testConstParallelGPU(self):
    """parallel_stack accepts np arrays and 1-D python lists (GPU)."""
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        with self.subTest(shape=shape):
          data = self.randn(shape, np.float32)
          if len(shape) == 1:
            data_list = list(data)
            cl = array_ops.parallel_stack(data_list)
            self.assertAllEqual(cl, data)
          data = self.randn(shape, np.float32)
          c = array_ops.parallel_stack(data)
          self.assertAllEqual(c, data)
  @test_util.run_deprecated_v1
  def testGradientsAxis0(self):
    """Numeric gradient check for stack along the default axis 0."""
    np.random.seed(7)
    for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
      data = np.random.randn(*shape)
      shapes = [shape[1:]] * shape[0]
      with self.subTest(shape=shape):
        with self.cached_session(use_gpu=True):
          # TODO(irving): Remove list() once we handle maps correctly
          xs = list(map(constant_op.constant, data))
          c = array_ops.stack(xs)
          err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
          self.assertLess(err, 1e-6)
  @test_util.run_deprecated_v1
  def testGradientsAxis1(self):
    """Numeric gradient check for stack along axis 1."""
    np.random.seed(7)
    for shape in (2, 3), (3, 2), (8, 2, 10):
      data = np.random.randn(*shape)
      shapes = [shape[1:]] * shape[0]
      out_shape = list(shape[1:])
      out_shape.insert(1, shape[0])
      with self.subTest(shape=shape):
        with self.cached_session(use_gpu=True):
          # TODO(irving): Remove list() once we handle maps correctly
          xs = list(map(constant_op.constant, data))
          c = array_ops.stack(xs, axis=1)
          err = gradient_checker.compute_gradient_error(xs, shapes, c,
                                                        out_shape)
          self.assertLess(err, 1e-6)
  @test_util.run_deprecated_v1
  def testZeroSizeCPU(self):
    # Verify that stack doesn't crash for zero size inputs
    with self.session(use_gpu=False):
      for shape in (0,), (3, 0), (0, 3):
        with self.subTest(shape=shape):
          x = np.zeros((2,) + shape).astype(np.int32)
          p = array_ops.stack(list(x)).eval()
          self.assertAllEqual(p, x)
          p = array_ops.parallel_stack(list(x)).eval()
          self.assertAllEqual(p, x)
  @test_util.run_deprecated_v1
  def testZeroSizeGPU(self):
    # Verify that stack doesn't crash for zero size inputs
    with self.session(use_gpu=True):
      for shape in (0,), (3, 0), (0, 3):
        with self.subTest(shape=shape):
          x = np.zeros((2,) + shape).astype(np.int32)
          p = array_ops.stack(list(x)).eval()
          self.assertAllEqual(p, x)
          p = array_ops.parallel_stack(list(x)).eval()
          self.assertAllEqual(p, x)
  @test_util.run_deprecated_v1
  def testAxis0DefaultCPU(self):
    """stack/parallel_stack default to axis 0 (CPU)."""
    with self.session(use_gpu=False):
      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
      stacked = array_ops.stack(t).eval()
      parallel_stacked = array_ops.parallel_stack(t).eval()
      expected = np.array([[1, 2, 3], [4, 5, 6]])
      self.assertAllEqual(stacked, expected)
      self.assertAllEqual(parallel_stacked, expected)
  @test_util.run_deprecated_v1
  def testAxis0DefaultGPU(self):
    """stack/parallel_stack default to axis 0 (GPU)."""
    with self.session(use_gpu=True):
      t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
      stacked = array_ops.stack(t).eval()
      parallel_stacked = array_ops.parallel_stack(t).eval()
      expected = np.array([[1, 2, 3], [4, 5, 6]])
      self.assertAllEqual(stacked, expected)
      self.assertAllEqual(parallel_stacked, expected)
  def testAgainstNumpy(self):
    """Compare stack output shape/values against the numpy source array."""
    # For 1 to 5 dimensions.
    for shape in (3,), (2, 2, 3), (4, 1, 2, 2), (8, 2, 10):
      rank = len(shape)
      expected = self.randn(shape, np.float32)
      # NOTE(review): `dtype` is never applied to `expected` below, so every
      # iteration of this loop tests float32 data -- possibly an oversight.
      for dtype in [np.bool, np.float32, np.int32, np.int64]:
        # For all the possible axis to split it, including negative indices.
        for axis in range(-rank, rank):
          test_arrays = np_split_squeeze(expected, axis)
          with self.cached_session(use_gpu=True):
            with self.subTest(shape=shape, dtype=dtype, axis=axis):
              actual_pack = array_ops.stack(test_arrays, axis=axis)
              self.assertEqual(expected.shape, actual_pack.get_shape())
              actual_pack = self.evaluate(actual_pack)
              actual_stack = array_ops.stack(test_arrays, axis=axis)
              self.assertEqual(expected.shape, actual_stack.get_shape())
              actual_stack = self.evaluate(actual_stack)
              self.assertNDArrayNear(expected, actual_stack, 1e-6)
  def testDimOutOfRange(self):
    """Stacking rank-1 inputs with axis=2 raises a ValueError."""
    t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
    with self.assertRaisesRegex(ValueError, r"axis = 2 not in \[-2, 2\)"):
      array_ops.stack(t, axis=2)
  def testDimOutOfNegativeRange(self):
    """Stacking rank-1 inputs with axis=-3 raises a ValueError."""
    t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
    with self.assertRaisesRegex(ValueError, r"axis = -3 not in \[-2, 2\)"):
      array_ops.stack(t, axis=-3)
  def testComplex(self):
    """stack round-trips complex64/complex128 data."""
    np.random.seed(7)
    with self.session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (8, 2, 10):
        for dtype in [np.complex64, np.complex128]:
          with self.subTest(shape=shape, dtype=dtype):
            data = self.randn(shape, dtype)
            xs = list(map(constant_op.constant, data))
            c = array_ops.stack(xs)
            self.assertAllEqual(self.evaluate(c), data)
class AutomaticStackingTest(test.TestCase):
  """Tests convert_to_tensor's implicit stacking of mixed nested inputs."""
  @test_util.run_deprecated_v1
  def testSimple(self):
    """Lists mixing python numbers and constants stack transparently."""
    with self.session(use_gpu=True):
      self.assertAllEqual(
          [1, 0, 2],
          ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
      self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                          ops.convert_to_tensor(
                              [[0, 0, 0], [0, constant_op.constant(1), 0],
                               [0, 0, 0]]).eval())
      self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                          ops.convert_to_tensor(
                              [[0, 0, 0], constant_op.constant([0, 1, 0]),
                               [0, 0, 0]]).eval())
      self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                          ops.convert_to_tensor([
                              constant_op.constant([0, 0, 0]),
                              constant_op.constant([0, 1, 0]),
                              constant_op.constant([0, 0, 0])
                          ]).eval())
  def testWithNDArray(self):
    """Nested lists mixing constants and np.ndarrays stack correctly."""
    with self.session(use_gpu=True):
      result = ops.convert_to_tensor([[[0., 0.],
                                       constant_op.constant([1., 1.])],
                                      np.array(
                                          [[2., 2.], [3., 3.]],
                                          dtype=np.float32)])
      self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
                          self.evaluate(result))
  @test_util.run_deprecated_v1
  def testVariable(self):
    """A Variable nested in a list is read at evaluation time."""
    with self.session(use_gpu=True):
      v = variables.Variable(17)
      result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
      self.evaluate(v.initializer)
      self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]],
                          self.evaluate(result))
      v.assign(38).op.run()
      self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]],
                          self.evaluate(result))
  def testDtype(self):
    """An explicit dtype wins; conflicting inferred dtypes raise TypeError."""
    t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
    self.assertEqual(dtypes.float32, t_0.dtype)
    t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
        [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
    self.assertEqual(dtypes.float64, t_1.dtype)
    t_2 = ops.convert_to_tensor(
        [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
    self.assertEqual(dtypes.float64, t_2.dtype)
    t_3 = ops.convert_to_tensor(
        [[0., 0., 0.],
         constant_op.constant([0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
        ],
        dtype=dtypes.float32)
    self.assertEqual(dtypes.float32, t_3.dtype)
    t_4 = ops.convert_to_tensor(
        [constant_op.constant([0., 0., 0.], dtype=dtypes.float64)],
        dtype=dtypes.float32)
    self.assertEqual(dtypes.float32, t_4.dtype)
    with self.assertRaises(TypeError):
      ops.convert_to_tensor([
          constant_op.constant(
              [0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
                  [0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
      ])
  def testDtypeConversionWhenTensorDtypeMismatch(self):
    """Mixed-dtype tensor elements convert to the requested dtype."""
    t_0 = ops.convert_to_tensor([0., 0., 0.])
    self.assertEqual(dtypes.float32, t_0.dtype)
    t_1 = ops.convert_to_tensor([0, 0, 0])
    self.assertEqual(dtypes.int32, t_1.dtype)
    t_2 = ops.convert_to_tensor([t_0, t_0, t_1], dtype=dtypes.float64)
    self.assertEqual(dtypes.float64, t_2.dtype)
  @test_util.run_deprecated_v1
  def testPlaceholder(self):
    """Placeholders nested in lists are fed through the stacked tensor."""
    with self.session(use_gpu=True):
      # Test using placeholder with a defined shape.
      ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
      result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
      self.assertAllEqual(
          [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1}))
      self.assertAllEqual(
          [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2}))
      # Test using placeholder with an undefined shape.
      ph_1 = array_ops.placeholder(dtypes.int32)
      result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
      self.assertAllEqual(
          [[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1}))
      self.assertAllEqual(
          [[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2}))
  @test_util.run_deprecated_v1
  def testShapeErrors(self):
    """Shape mismatches fail statically when known, else at run time."""
    # Static shape error.
    ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
    with self.assertRaises(ValueError):
      ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
    # Dynamic shape error.
    ph_1 = array_ops.placeholder(dtypes.int32)
    result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
    with self.session(use_gpu=True):
      with self.assertRaises(errors_impl.InvalidArgumentError):
        result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
|
"""Add cascade to FileCoverage
Revision ID: 21c9439330f
Revises: 1f5caa34d9c2
Create Date: 2014-04-01 15:29:26.765288
"""
# Alembic revision identifiers (migration chain: 1f5caa34d9c2 -> 21c9439330f).
revision = '21c9439330f'
down_revision = '1f5caa34d9c2'
from alembic import op
def upgrade():
    """Re-point the FileCoverage foreign keys at job/project with CASCADE."""
    op.create_index('idx_filecoverage_job_id', 'filecoverage', ['job_id'])
    op.create_index('idx_filecoverage_project_id', 'filecoverage', ['project_id'])
    # NOTE(review): the dropped constraint is named after build_id while the
    # recreated one covers job_id — presumably the column was renamed earlier
    # and the constraint kept its old name. Confirm the live schema still
    # carries 'filecoverage_build_id_fkey' before running this migration.
    op.drop_constraint('filecoverage_build_id_fkey', 'filecoverage')
    # ON DELETE CASCADE: removing a job/project also removes its coverage rows.
    op.create_foreign_key('filecoverage_job_id_fkey', 'filecoverage', 'job', ['job_id'], ['id'], ondelete='CASCADE')
    op.create_foreign_key('filecoverage_project_id_fkey', 'filecoverage', 'project', ['project_id'], ['id'], ondelete='CASCADE')
def downgrade():
    # Intentionally a no-op: this migration is not reversed (restoring the
    # previous constraints would require knowing their prior definitions).
    pass
|
import numpy as np
from scipy import interpolate
from ..utils import check_random_state
class FunctionDistribution(object):
    """Generate random variables distributed according to an arbitrary function

    Parameters
    ----------
    func : function
        func should take an array of x values, and return an array
        proportional to the probability density at each value
    xmin : float
        minimum value of interest
    xmax : float
        maximum value of interest
    Nx : int (optional)
        number of samples to draw.  Default is 1000
    random_state : None, int, or np.random.RandomState instance
        random seed or random number generator
    func_args : dictionary (optional)
        additional keyword arguments to be passed to func
    """
    def __init__(self, func, xmin, xmax, Nx=1000,
                 random_state=None, func_args=None):
        self.random_state = check_random_state(random_state)
        func_args = {} if func_args is None else func_args

        grid = np.linspace(xmin, xmax, Nx)
        density = func(grid, **func_args)

        # Near-zero densities make the spline fit unstable ("if there are
        # too many zeros, interpolation will fail"), so keep only grid
        # points carrying non-negligible probability mass.
        keep = (density > 1E-10 * density.max())
        grid = grid[keep]

        # Build the normalized cumulative distribution on the kept grid and
        # fit a spline to its inverse: CDF value -> x position.
        cdf = density[keep].cumsum()
        cdf /= cdf[-1]
        self._tck = interpolate.splrep(cdf, grid)

    def rvs(self, shape):
        """Draw random variables from the distribution

        Parameters
        ----------
        shape : integer or tuple
            shape of desired array

        Returns
        -------
        rv : ndarray, shape=shape
            random variables
        """
        # Inverse-transform sampling: push uniforms through the inverse CDF.
        uniforms = self.random_state.random_sample(shape)
        return interpolate.splev(uniforms, self._tck)
class EmpiricalDistribution(object):
    """Empirically learn a distribution from one-dimensional data

    Parameters
    ----------
    data : one-dimensional array
        input data

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> x = np.random.normal(size=10000)  # normally-distributed variables
    >>> x.mean(), x.std()
    (-0.018433720158265783, 0.98755656817612003)
    >>> x2 = EmpiricalDistribution(x).rvs(10000)
    >>> x2.mean(), x2.std()
    (-0.020293716681613363, 1.0039249294845276)

    Notes
    -----
    This function works by approximating the inverse of the cumulative
    distribution using an efficient spline fit to the sorted values.
    """
    def __init__(self, data):
        # Work on a private copy: the sort below happens in-place.
        samples = np.array(data, copy=True)
        if samples.ndim != 1:
            raise ValueError("data should be one-dimensional")
        samples.sort()

        # A spline through (empirical CDF value, sample value) pairs
        # approximates the inverse CDF.
        quantiles = np.linspace(0, 1, samples.size)
        self._tck = interpolate.splrep(quantiles, samples)

    def rvs(self, shape):
        """Draw random variables from the distribution

        Parameters
        ----------
        shape : integer or tuple
            shape of desired array

        Returns
        -------
        rv : ndarray, shape=shape
            random variables
        """
        # Inverse-transform sampling via the global numpy RNG (note: unlike
        # FunctionDistribution, this class does not take a random_state).
        uniforms = np.random.random(shape)
        return interpolate.splev(uniforms, self._tck)
|
"""
Test helper functions from numba.numpy_support.
"""
import sys
from itertools import product
import numpy as np
import unittest
from numba.core import types
from numba.tests.support import TestCase
from numba.tests.enum_usecases import Shake, RequestError
from numba.np import numpy_support
class TestFromDtype(TestCase):

    def test_number_types(self):
        """
        Test from_dtype() and as_dtype() with the various scalar number types.
        """
        f = numpy_support.from_dtype
        # (dtype character code, expected Numba type); several codes alias
        # the same width, so each alias is listed explicitly.
        pairs = [
            ('?', types.bool_),
            ('f', types.float32), ('f4', types.float32),
            ('d', types.float64), ('f8', types.float64),
            ('F', types.complex64), ('c8', types.complex64),
            ('D', types.complex128), ('c16', types.complex128),
            ('O', types.pyobject),
            ('b', types.int8), ('i1', types.int8),
            ('B', types.uint8), ('u1', types.uint8),
            ('h', types.int16), ('i2', types.int16),
            ('H', types.uint16), ('u2', types.uint16),
            ('i', types.int32), ('i4', types.int32),
            ('I', types.uint32), ('u4', types.uint32),
            ('q', types.int64), ('Q', types.uint64),
        ]
        for typechar, numba_type in pairs:
            # Only native ordering and alignment is supported
            dtype = np.dtype(typechar)
            self.assertIs(f(dtype), numba_type)
            self.assertIs(f(np.dtype('=' + typechar)), numba_type)
            self.assertEqual(dtype, numpy_support.as_dtype(numba_type))
        for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                     'int64', 'uint64', 'intp', 'uintp'):
            self.assertIs(f(np.dtype(name)), getattr(types, name))
        # Non-native alignments are unsupported (except for 1-byte types)
        foreign_align = '>' if sys.byteorder == 'little' else '<'
        for letter in 'hHiIlLqQfdFD':
            self.assertRaises(NotImplementedError, f,
                              np.dtype(foreign_align + letter))

    def test_string_types(self):
        """
        Test from_dtype() and as_dtype() with the character string types.
        """
        for typestring, numba_type in [('S10', types.CharSeq(10)),
                                       ('a11', types.CharSeq(11)),
                                       ('U12', types.UnicodeCharSeq(12))]:
            # Only native ordering and alignment is supported
            dtype = np.dtype(typestring)
            self.assertEqual(numpy_support.from_dtype(dtype), numba_type)
            self.assertEqual(dtype, numpy_support.as_dtype(numba_type))

    def check_datetime_types(self, letter, nb_class):
        # Unit-less ("generic") type; the code asserts its unit_code is 14.
        dtype = np.dtype(letter)
        converted = numpy_support.from_dtype(dtype)
        self.assertEqual(converted, nb_class(''))
        self.assertEqual(converted.unit_code, 14)
        self.assertEqual(numpy_support.as_dtype(nb_class('')), dtype)
        self.assertEqual(numpy_support.as_dtype(converted), dtype)

    def test_datetime_types(self):
        """
        Test from_dtype() and as_dtype() with the datetime types.
        """
        self.check_datetime_types('M', types.NPDatetime)

    def test_timedelta_types(self):
        """
        Test from_dtype() and as_dtype() with the timedelta types.
        """
        self.check_datetime_types('m', types.NPTimedelta)

    def test_struct_types(self):
        cases = [
            (np.dtype([('a', np.int16), ('b', np.int32)]),
             {'a': (types.int16, 0, None, None),
              'b': (types.int32, 2, None, None)},
             6, False),
            (np.dtype([('a', np.int16), ('b', np.int32)], align=True),
             {'a': (types.int16, 0, None, None),
              'b': (types.int32, 4, None, None)},
             8, True),
            (np.dtype([('m', np.int32), ('n', 'S5')]),
             {'m': (types.int32, 0, None, None),
              'n': (types.CharSeq(5), 4, None, None)},
             9, False),
        ]
        for dtype, fields, size, aligned in cases:
            rec = numpy_support.from_dtype(dtype)
            self.assertIsInstance(rec, types.Record)
            # Only check for dtype equality, as the Numba type may be interned
            self.assertEqual(rec.dtype, dtype)
            self.assertEqual(rec.fields, fields)
            self.assertEqual(rec.size, size)
            self.assertEqual(rec.aligned, aligned)

    def test_enum_type(self):
        def roundtrip(base_inst, enum_def, type_class):
            np_dt = np.dtype(base_inst)
            nb_ty = numpy_support.from_dtype(np_dt)
            recovered = numpy_support.as_dtype(type_class(enum_def, nb_ty))
            self.assertEqual(np_dt, recovered)

        dts = [np.float64, np.int32, np.complex128, np.bool_]
        enums = [Shake, RequestError]
        # Exercise plain enum members first, then int-enum members.
        for type_class in (types.EnumMember, types.IntEnumMember):
            for dt, enum in product(dts, enums):
                roundtrip(dt, enum, type_class)
class ValueTypingTestBase(object):
    """
    Common tests for the typing of values.  Also used by test_special.
    """

    def check_number_values(self, func):
        """
        Test *func*() with scalar numeric values.
        """
        # Standard Python types get inferred by numpy
        for py_int in (1, 2**31 - 1, -2**31):
            self.assertIn(func(py_int), (types.int32, types.int64))
        self.assertIs(func(1.0), types.float64)
        self.assertIs(func(1.0j), types.complex128)
        self.assertIs(func(True), types.bool_)
        self.assertIs(func(False), types.bool_)
        # Numpy scalar types get converted by from_dtype()
        for name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                     'int64', 'uint64', 'intc', 'uintc', 'intp', 'uintp',
                     'float32', 'float64', 'complex64', 'complex128',
                     'bool_'):
            self.assertIs(func(getattr(np, name)()), getattr(types, name))

    def _base_check_datetime_values(self, func, np_type, nb_type):
        for unit in ('', 'Y', 'M', 'D', 'h', 'm', 's',
                     'ms', 'us', 'ns', 'ps', 'fs', 'as'):
            # A "generic" (unit-less) value is constructed differently from
            # a unit-ful one.
            value = np_type(3, unit) if unit else np_type('Nat')
            # This ensures the unit hasn't been lost
            self.assertEqual(func(value), nb_type(unit))

    def check_datetime_values(self, func):
        """
        Test *func*() with np.datetime64 values.
        """
        self._base_check_datetime_values(func, np.datetime64, types.NPDatetime)

    def check_timedelta_values(self, func):
        """
        Test *func*() with np.timedelta64 values.
        """
        self._base_check_datetime_values(func, np.timedelta64,
                                         types.NPTimedelta)
class TestArrayScalars(ValueTypingTestBase, TestCase):

    def test_number_values(self):
        """
        Test map_arrayscalar_type() with scalar number values.
        """
        self.check_number_values(numpy_support.map_arrayscalar_type)

    def test_datetime_values(self):
        """
        Test map_arrayscalar_type() with np.datetime64 values.
        """
        func = numpy_support.map_arrayscalar_type
        self.check_datetime_values(func)
        # datetime64s with a non-one factor shouldn't be supported
        with self.assertRaises(NotImplementedError):
            func(np.datetime64('2014', '10Y'))

    def test_timedelta_values(self):
        """
        Test map_arrayscalar_type() with np.timedelta64 values.
        """
        func = numpy_support.map_arrayscalar_type
        self.check_timedelta_values(func)
        # timedelta64s with a non-one factor shouldn't be supported
        with self.assertRaises(NotImplementedError):
            func(np.timedelta64(10, '10Y'))
class FakeUFunc(object):
    """Minimal stand-in for a Numpy ufunc, exposing only the attributes the
    loop-matching helpers need: ``nin``, ``nout``, ``types`` and ``ntypes``.

    *types* is a list of signature strings such as ``'ii->i'`` (dtype
    character codes for inputs, then outputs).
    """
    __slots__ = ('nin', 'nout', 'types', 'ntypes')

    def __init__(self, types):
        self.types = types
        # The first signature defines the arity: 'ii->i' => nin=2, nout=1.
        in_, out = self.types[0].split('->')
        self.nin = len(in_)
        self.nout = len(out)
        self.ntypes = len(types)
        # Sanity check: every signature must agree on the arity.
        # (Bug fix: this loop previously re-parsed types[0] on every
        # iteration, so inconsistent signatures were never detected.)
        for tp in types:
            in_, out = tp.split('->')
            assert len(in_) == self.nin
            assert len(out) == self.nout
# Signature lists fed to FakeUFunc, mirroring the ``.types`` attribute of the
# corresponding real ufuncs (np.add, np.multiply, np.isnan, np.sqrt):
# '<input dtype codes>-><output dtype codes>'.
_add_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I',
              'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d',
              'gg->g', 'FF->F', 'DD->D', 'GG->G', 'Mm->M', 'mm->m', 'mM->M',
              'OO->O']

_mul_types = ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I',
              'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ee->e', 'ff->f', 'dd->d',
              'gg->g', 'FF->F', 'DD->D', 'GG->G', 'mq->m', 'qm->m', 'md->m',
              'dm->m', 'OO->O']

_isnan_types = ['e->?', 'f->?', 'd->?', 'g->?', 'F->?', 'D->?', 'G->?']

_sqrt_types = ['e->e', 'f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
class TestUFuncs(TestCase):
    """
    Test ufunc helpers.
    """

    def test_ufunc_find_matching_loop(self):
        f = numpy_support.ufunc_find_matching_loop
        np_add = FakeUFunc(_add_types)
        np_mul = FakeUFunc(_mul_types)
        np_isnan = FakeUFunc(_isnan_types)
        np_sqrt = FakeUFunc(_sqrt_types)

        def check(ufunc, input_types, sigs, output_types=()):
            """
            Check that ufunc_find_matching_loop() finds one of the given
            *sigs* for *ufunc*, *input_types* and optional *output_types*.
            Returns the matched loop for further inspection.
            """
            loop = f(ufunc, input_types + output_types)
            self.assertTrue(loop)
            if isinstance(sigs, str):
                sigs = (sigs,)
            self.assertIn(loop.ufunc_sig, sigs,
                          "inputs=%s and outputs=%s should have selected "
                          "one of %s, got %s"
                          % (input_types, output_types, sigs, loop.ufunc_sig))
            self.assertEqual(len(loop.numpy_inputs), len(loop.inputs))
            self.assertEqual(len(loop.numpy_outputs), len(loop.outputs))
            if not output_types:
                # Add explicit outputs and check the result is the same
                loop_explicit = f(ufunc, list(input_types) + loop.outputs)
                self.assertEqual(loop_explicit, loop)
            else:
                self.assertEqual(loop.outputs, list(output_types))
            # Round-tripping inputs and outputs
            loop_rt = f(ufunc, loop.inputs + loop.outputs)
            self.assertEqual(loop_rt, loop)
            return loop

        def check_exact(ufunc, input_types, sigs, output_types=()):
            """
            Like check(), but also ensure no casting of inputs occurred.
            """
            loop = check(ufunc, input_types, sigs, output_types)
            self.assertEqual(loop.inputs, list(input_types))

        def check_no_match(ufunc, input_types):
            loop = f(ufunc, input_types)
            self.assertIs(loop, None)

        # Exact matching for number types
        check_exact(np_add, (types.bool_, types.bool_), '??->?')
        check_exact(np_add, (types.int8, types.int8), 'bb->b')
        check_exact(np_add, (types.uint8, types.uint8), 'BB->B')
        # int64/uint64 may match either the long or long-long loop
        # depending on the platform's C long width.
        check_exact(np_add, (types.int64, types.int64), ('ll->l', 'qq->q'))
        check_exact(np_add, (types.uint64, types.uint64), ('LL->L', 'QQ->Q'))
        check_exact(np_add, (types.float32, types.float32), 'ff->f')
        check_exact(np_add, (types.float64, types.float64), 'dd->d')
        check_exact(np_add, (types.complex64, types.complex64), 'FF->F')
        check_exact(np_add, (types.complex128, types.complex128), 'DD->D')

        # Exact matching for datetime64 and timedelta64 types
        check_exact(np_add, (types.NPTimedelta('s'), types.NPTimedelta('s')),
                    'mm->m', output_types=(types.NPTimedelta('s'),))
        check_exact(np_add, (types.NPTimedelta('ms'), types.NPDatetime('s')),
                    'mM->M', output_types=(types.NPDatetime('ms'),))
        check_exact(np_add, (types.NPDatetime('s'), types.NPTimedelta('s')),
                    'Mm->M', output_types=(types.NPDatetime('s'),))

        check_exact(np_mul, (types.NPTimedelta('s'), types.int64),
                    'mq->m', output_types=(types.NPTimedelta('s'),))
        check_exact(np_mul, (types.float64, types.NPTimedelta('s')),
                    'dm->m', output_types=(types.NPTimedelta('s'),))

        # Mix and match number types, with casting
        check(np_add, (types.bool_, types.int8), 'bb->b')
        check(np_add, (types.uint8, types.bool_), 'BB->B')
        check(np_add, (types.int16, types.uint16), 'ii->i')
        check(np_add, (types.complex64, types.float64), 'DD->D')
        check(np_add, (types.float64, types.complex64), 'DD->D')
        # Integers, when used together with floating-point numbers,
        # should cast to any real or complex (see #2006)
        int_types = [types.int32, types.uint32, types.int64, types.uint64]
        for intty in int_types:
            check(np_add, (types.float32, intty), 'ff->f')
            check(np_add, (types.float64, intty), 'dd->d')
            check(np_add, (types.complex64, intty), 'FF->F')
            check(np_add, (types.complex128, intty), 'DD->D')
        # However, when used alone, they should cast only to
        # floating-point types of sufficient precision
        # (typical use case: np.sqrt(2) should give an accurate enough value)
        for intty in int_types:
            check(np_sqrt, (intty,), 'd->d')
            check(np_isnan, (intty,), 'd->?')

        # With some timedelta64 arguments as well
        check(np_mul, (types.NPTimedelta('s'), types.int32),
              'mq->m', output_types=(types.NPTimedelta('s'),))
        check(np_mul, (types.NPTimedelta('s'), types.uint32),
              'mq->m', output_types=(types.NPTimedelta('s'),))
        check(np_mul, (types.NPTimedelta('s'), types.float32),
              'md->m', output_types=(types.NPTimedelta('s'),))
        check(np_mul, (types.float32, types.NPTimedelta('s')),
              'dm->m', output_types=(types.NPTimedelta('s'),))

        # No match
        check_no_match(np_add, (types.NPDatetime('s'), types.NPDatetime('s')))
        # No implicit casting from int64 to timedelta64 (Numpy would allow
        # this).
        check_no_match(np_add, (types.NPTimedelta('s'), types.int64))

    def test_layout_checker(self):
        def check_arr(arr):
            # Compare numba's contiguity computation against the flags Numpy
            # itself reports for the same dims/strides/itemsize.
            dims = arr.shape
            strides = arr.strides
            itemsize = arr.dtype.itemsize
            is_c = numpy_support.is_contiguous(dims, strides, itemsize)
            is_f = numpy_support.is_fortran(dims, strides, itemsize)
            expect_c = arr.flags['C_CONTIGUOUS']
            expect_f = arr.flags['F_CONTIGUOUS']
            self.assertEqual(is_c, expect_c)
            self.assertEqual(is_f, expect_f)

        arr = np.arange(24)
        # 1D
        check_arr(arr)
        # 2D
        check_arr(arr.reshape((3, 8)))
        check_arr(arr.reshape((3, 8)).T)
        check_arr(arr.reshape((3, 8))[::2])
        # 3D
        check_arr(arr.reshape((2, 3, 4)))
        check_arr(arr.reshape((2, 3, 4)).T)
        # middle axis is shape 1
        check_arr(arr.reshape((2, 3, 4))[:, ::3])
        check_arr(arr.reshape((2, 3, 4)).T[:, ::3])
        # leading axis is shape 1
        check_arr(arr.reshape((2, 3, 4))[::2])
        check_arr(arr.reshape((2, 3, 4)).T[:, :, ::2])
        # 2 leading axis are shape 1
        check_arr(arr.reshape((2, 3, 4))[::2, ::3])
        check_arr(arr.reshape((2, 3, 4)).T[:, ::3, ::2])
        # single item slices for all axis
        check_arr(arr.reshape((2, 3, 4))[::2, ::3, ::4])
        check_arr(arr.reshape((2, 3, 4)).T[::4, ::3, ::2])
        # 4D
        check_arr(arr.reshape((2, 2, 3, 2))[::2, ::2, ::3])
        check_arr(arr.reshape((2, 2, 3, 2)).T[:, ::3, ::2, ::2])
        # outer zero dims
        check_arr(arr.reshape((2, 2, 3, 2))[::5, ::2, ::3])
        check_arr(arr.reshape((2, 2, 3, 2)).T[:, ::3, ::2, ::5])
if __name__ == '__main__':
    unittest.main()  # Allow running this test module directly.
|
import copy
import sys
import gc
import tempfile
import pytest
from os import path
from io import BytesIO
from itertools import chain
import numpy as np
from numpy.testing import (
assert_, assert_equal, IS_PYPY, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises,
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT, IS_PYSTON
)
from numpy.testing._private.utils import _no_tracing, requires_memory
from numpy.compat import asbytes, asunicode, pickle
# Compatibility shim: RecursionError does not exist on old interpreters, so
# fall back to RuntimeError (its pre-3.5 equivalent) when it is undefined.
try:
    RecursionError
except NameError:
    RecursionError = RuntimeError  # python < 3.5
class TestRegression:
    def test_invalid_round(self):
        # Ticket #3
        v = 4.7599999999999998
        assert_array_equal(np.array([v]), np.array(v))

    def test_mem_empty(self):
        # Ticket #7: empty() with a structured dtype must not crash.
        np.empty((1,), dtype=[('x', np.int64)])

    def test_pickle_transposed(self):
        # Ticket #16: transposed (non-contiguous) arrays must round-trip
        # through every supported pickle protocol.
        a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            with BytesIO() as f:
                pickle.dump(a, f, protocol=proto)
                f.seek(0)
                b = pickle.load(f)
            assert_array_equal(a, b)

    def test_dtype_names(self):
        # Ticket #35
        # Should succeed
        np.dtype([(('name', 'label'), np.int32, 3)])

    def test_reduce(self):
        # Ticket #40
        assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)

    def test_zeros_order(self):
        # Ticket #43: order accepted both positionally and by keyword.
        np.zeros([3], int, 'C')
        np.zeros([3], order='C')
        np.zeros([3], int, order='C')

    def test_asarray_with_order(self):
        # Check that nothing is done when order='F' and array C/F-contiguous
        a = np.ones(2)
        assert_(a is np.asarray(a, order='F'))

    def test_ravel_with_order(self):
        # Check that ravel works when order='F' and array C/F-contiguous
        a = np.ones(2)
        assert_(not a.ravel('F').flags.owndata)

    def test_sort_bigendian(self):
        # Ticket #47
        a = np.linspace(0, 10, 11)
        c = a.astype(np.dtype('<f8'))
        c.sort()
        assert_array_almost_equal(c, a)

    def test_negative_nd_indexing(self):
        # Ticket #49: fancy indexing must not modify the index array.
        c = np.arange(125).reshape((5, 5, 5))
        origidx = np.array([-1, 0, 1])
        idx = np.array(origidx)
        c[idx]
        assert_array_equal(idx, origidx)
    def test_char_dump(self):
        # Ticket #50: chararrays must pickle/unpickle cleanly.
        ca = np.char.array(np.arange(1000, 1010), itemsize=4)
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            with BytesIO() as f:
                pickle.dump(ca, f, protocol=proto)
                f.seek(0)
                ca = np.load(f, allow_pickle=True)

    def test_noncontiguous_fill(self):
        # Ticket #58: assigning a new shape to a non-contiguous view must
        # raise instead of corrupting memory.
        a = np.zeros((5, 3))
        b = a[:, :2,]

        def rs():
            b.shape = (10,)
        assert_raises(AttributeError, rs)

    def test_bool(self):
        # Ticket #60
        np.bool_(1)  # Should succeed

    def test_indexing1(self):
        # Ticket #64: deeply nested structured dtypes must be indexable.
        descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
        buffer = ((([6j, 4j],),),)
        h = np.array(buffer, dtype=descr)
        h['x']['y']['z']

    def test_indexing2(self):
        # Ticket #65
        descr = [('x', 'i4', (2,))]
        buffer = ([3, 2],)
        h = np.array(buffer, dtype=descr)
        h['x']

    def test_round(self):
        # Ticket #67
        x = np.array([1+2j])
        assert_almost_equal(x**(-1), [1/(1+2j)])

    def test_scalar_compare(self):
        # Trac Ticket #72
        # https://github.com/numpy/numpy/issues/565
        a = np.array(['test', 'auto'])
        assert_array_equal(a == 'auto', np.array([False, True]))
        assert_(a[1] == 'auto')
        assert_(a[0] != 'auto')
        b = np.linspace(0, 10, 11)
        # This should return true for now, but will eventually raise an error:
        with suppress_warnings() as sup:
            sup.filter(FutureWarning)
            assert_(b != 'auto')
        assert_(b[0] != 'auto')
    def test_unicode_swapping(self):
        # Ticket #79: byteorder operations on astral-plane unicode data.
        ulen = 1
        ucs_value = u'\U0010FFFF'
        ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
        ua.newbyteorder()  # Should succeed.

    def test_object_array_fill(self):
        # Ticket #86: fill() with a mutable object must not crash.
        x = np.zeros(1, 'O')
        x.fill([])

    def test_mem_dtype_align(self):
        # Ticket #93: an invalid format string must raise, not leak.
        assert_raises(TypeError, np.dtype,
                      {'names':['a'], 'formats':['foo']}, align=1)

    def test_endian_bool_indexing(self):
        # Ticket #105: boolean indexing must agree across byte orders.
        a = np.arange(10., dtype='>f8')
        b = np.arange(10., dtype='<f8')
        xa = np.where((a > 2) & (a < 6))
        xb = np.where((b > 2) & (b < 6))
        ya = ((a > 2) & (a < 6))
        yb = ((b > 2) & (b < 6))
        assert_array_almost_equal(xa, ya.nonzero())
        assert_array_almost_equal(xb, yb.nonzero())
        assert_(np.all(a[ya] > 0.5))
        assert_(np.all(b[yb] > 0.5))

    def test_endian_where(self):
        # GitHub issue #369: where() on big-endian input.
        net = np.zeros(3, dtype='>f4')
        net[1] = 0.00458849
        net[2] = 0.605202
        max_net = net.max()
        test = np.where(net <= 0., max_net, net)
        correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
        assert_array_almost_equal(test, correct)

    def test_endian_recarray(self):
        # Ticket #2185: field read/write round-trip on a big-endian recarray.
        dt = np.dtype([
            ('head', '>u4'),
            ('data', '>u4', 2),
        ])
        buf = np.recarray(1, dtype=dt)
        buf[0]['head'] = 1
        buf[0]['data'][:] = [1, 1]

        h = buf[0]['head']
        d = buf[0]['data'][0]
        buf[0]['head'] = h
        buf[0]['data'][0] = d
        assert_(buf[0]['head'] == 1)
    def test_mem_dot(self):
        # Ticket #106: dot() with a zero-sized operand must not write past
        # the output buffer.
        x = np.random.randn(0, 1)
        y = np.random.randn(10, 1)
        # Dummy array to detect bad memory access:
        _z = np.ones(10)
        _dummy = np.empty((0, 10))
        z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)
        np.dot(x, np.transpose(y), out=z)
        assert_equal(_z, np.ones(10))
        # Do the same for the built-in dot:
        np.core.multiarray.dot(x, np.transpose(y), out=z)
        assert_equal(_z, np.ones(10))

    def test_arange_endian(self):
        # Ticket #111
        ref = np.arange(10)
        x = np.arange(10, dtype='<f8')
        assert_array_equal(ref, x)
        x = np.arange(10, dtype='>f8')
        assert_array_equal(ref, x)

    def test_arange_inf_step(self):
        # An infinite step yields the same single element as a finite step
        # larger than the whole range, in every sign combination.
        ref = np.arange(0, 1, 10)
        x = np.arange(0, 1, np.inf)
        assert_array_equal(ref, x)

        ref = np.arange(0, 1, -10)
        x = np.arange(0, 1, -np.inf)
        assert_array_equal(ref, x)

        ref = np.arange(0, -1, -10)
        x = np.arange(0, -1, -np.inf)
        assert_array_equal(ref, x)

        ref = np.arange(0, -1, 10)
        x = np.arange(0, -1, np.inf)
        assert_array_equal(ref, x)

    def test_arange_underflow_stop_and_step(self):
        # Huge steps behave like finite steps larger than the range, in
        # every sign combination of stop and step.
        finfo = np.finfo(np.float64)

        ref = np.arange(0, finfo.eps, 2 * finfo.eps)
        x = np.arange(0, finfo.eps, finfo.max)
        assert_array_equal(ref, x)

        ref = np.arange(0, finfo.eps, -2 * finfo.eps)
        x = np.arange(0, finfo.eps, -finfo.max)
        assert_array_equal(ref, x)

        ref = np.arange(0, -finfo.eps, -2 * finfo.eps)
        x = np.arange(0, -finfo.eps, -finfo.max)
        assert_array_equal(ref, x)

        ref = np.arange(0, -finfo.eps, 2 * finfo.eps)
        x = np.arange(0, -finfo.eps, finfo.max)
        assert_array_equal(ref, x)

    def test_argmax(self):
        # Ticket #119
        a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
        for i in range(a.ndim):
            a.argmax(i)  # Should succeed

    def test_mem_divmod(self):
        # Ticket #126
        for i in range(10):
            divmod(np.array([i])[0], 10)
    def test_hstack_invalid_dims(self):
        # Ticket #128
        x = np.arange(9).reshape((3, 3))
        y = np.array([0, 0, 0])
        assert_raises(ValueError, np.hstack, (x, y))

    def test_squeeze_type(self):
        # Ticket #133: squeeze() must return an ndarray, not a scalar.
        a = np.array([3])
        b = np.array(3)
        assert_(type(a.squeeze()) is np.ndarray)
        assert_(type(b.squeeze()) is np.ndarray)

    def test_add_identity(self):
        # Ticket #143
        assert_equal(0, np.add.identity)

    def test_numpy_float_python_long_addition(self):
        # Check that numpy float and python longs can be added correctly.
        a = np.float_(23.) + 2**135
        assert_equal(a, 23. + 2**135)

    def test_binary_repr_0(self):
        # Ticket #151
        assert_equal('0', np.binary_repr(0))

    def test_rec_iterate(self):
        # Ticket #160: record scalars must be iterable.
        descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
        x = np.rec.array([(1, 1.1, '1.0'),
                          (2, 2.2, '2.0')], dtype=descr)
        x[0].tolist()
        [i for i in x[0]]

    def test_unicode_string_comparison(self):
        # Ticket #190: comparing unicode and byte-string arrays must not
        # raise.
        a = np.array('hello', np.unicode_)
        b = np.array('world')
        a == b

    def test_tobytes_FORTRANORDER_discontiguous(self):
        # Fix in r2836
        # Create non-contiguous Fortran ordered array
        x = np.array(np.random.rand(3, 3), order='F')[:, :2]
        assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))

    def test_flat_assignment(self):
        # Correct behaviour of ticket #194
        x = np.empty((3, 1))
        x.flat = np.arange(3)
        assert_array_almost_equal(x, [[0], [1], [2]])
        x.flat = np.arange(3, dtype=float)
        assert_array_almost_equal(x, [[0], [1], [2]])

    def test_broadcast_flat_assignment(self):
        # Ticket #194: shape-incompatible slice assignment must raise.
        x = np.empty((3, 1))

        def bfa():
            x[:] = np.arange(3)

        def bfb():
            x[:] = np.arange(3, dtype=float)

        assert_raises(ValueError, bfa)
        assert_raises(ValueError, bfb)
    def test_nonarray_assignment(self):
        # See also Issue gh-2870, test for non-array assignment
        # and equivalent unsafe casted array assignment
        a = np.arange(10)
        b = np.ones(10, dtype=bool)
        r = np.arange(10)

        def assign(a, b, c):
            a[b] = c

        # Assigning a Python NaN scalar into an int array raises, while an
        # explicit (unsafe-cast) zero-d array assignment is allowed.
        assert_raises(ValueError, assign, a, b, np.nan)
        a[b] = np.array(np.nan)  # but not this.
        assert_raises(ValueError, assign, a, r, np.nan)
        a[r] = np.array(np.nan)

    def test_unpickle_dtype_with_object(self):
        # Implemented in r2840
        dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            with BytesIO() as f:
                pickle.dump(dt, f, protocol=proto)
                f.seek(0)
                dt_ = pickle.load(f)
            assert_equal(dt, dt_)

    def test_mem_array_creation_invalid_specification(self):
        # Ticket #196
        dt = np.dtype([('x', int), ('y', np.object_)])
        # Wrong way
        assert_raises(ValueError, np.array, [1, 'object'], dt)
        # Correct way
        np.array([(1, 'object')], dt)

    def test_recarray_single_element(self):
        # Ticket #202: building a recarray must not mutate its source.
        a = np.array([1, 2, 3], dtype=np.int32)
        b = a.copy()
        r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])
        assert_array_equal(a, b)
        assert_equal(a, r[0][0])

    def test_zero_sized_array_indexing(self):
        # Ticket #205: indexing into an empty array must raise IndexError.
        tmp = np.array([])

        def index_tmp():
            tmp[np.array(10)]

        assert_raises(IndexError, index_tmp)

    def test_chararray_rstrip(self):
        # Ticket #222
        x = np.chararray((1,), 5)
        x[0] = b'a '
        x = x.rstrip()
        assert_equal(x[0], b'a')

    def test_object_array_shape(self):
        # Ticket #239: shape inference for ragged/object inputs.
        assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
        assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
        assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))
        assert_equal(np.array([], dtype=object).shape, (0,))
        assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
        assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
    def test_mem_around(self):
        # Ticket #243
        x = np.zeros((1,))
        y = [0]
        decimal = 6
        np.around(abs(x-y), decimal) <= 10.0**(-decimal)

    def test_character_array_strip(self):
        # Ticket #246
        x = np.char.array(("x", "x ", "x "))
        for c in x:
            assert_equal(c, "x")

    def test_lexsort(self):
        # Lexsort memory error
        v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        assert_equal(np.lexsort(v), 0)

    def test_lexsort_invalid_sequence(self):
        # Issue gh-4123: an exception from the key sequence must propagate.
        class BuggySequence:
            def __len__(self):
                return 4

            def __getitem__(self, key):
                raise KeyError

        assert_raises(KeyError, np.lexsort, BuggySequence())

    def test_lexsort_zerolen_custom_strides(self):
        # Ticket #14228
        xs = np.array([], dtype='i8')
        assert xs.strides == (8,)
        assert np.lexsort((xs,)).shape[0] == 0  # Works

        xs.strides = (16,)
        assert np.lexsort((xs,)).shape[0] == 0  # Was: MemoryError

    def test_lexsort_zerolen_custom_strides_2d(self):
        xs = np.array([], dtype='i8')

        xs.shape = (0, 2)
        xs.strides = (16, 16)
        assert np.lexsort((xs,), axis=0).shape[0] == 0

        xs.shape = (2, 0)
        xs.strides = (16, 16)
        assert np.lexsort((xs,), axis=0).shape[0] == 2

    def test_lexsort_invalid_axis(self):
        # Out-of-range axis values must raise AxisError.
        assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2)
        assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1)
        assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10)

    def test_lexsort_zerolen_element(self):
        dt = np.dtype([])  # a void dtype with no fields
        xs = np.empty(4, dt)

        assert np.lexsort((xs,)).shape[0] == xs.shape[0]
    def test_pickle_py2_bytes_encoding(self):
        # Check that arrays and scalars pickled on Py2 are
        # unpickleable on Py3 using encoding='bytes'
        test_data = [
            # (original, py2_pickle)
            (np.unicode_('\u6f2c'),
             b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
             b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\n"
             b"I0\ntp6\nbS',o\\x00\\x00'\np7\ntp8\nRp9\n."),

            (np.array([9e123], dtype=np.float64),
             b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\n"
             b"p1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\n"
             b"p7\n(S'f8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'<'\np11\nNNNI-1\nI-1\n"
             b"I0\ntp12\nbI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np13\ntp14\nb."),

            (np.array([(9e123,)], dtype=[('name', float)]),
             b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n"
             b"(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n"
             b"(S'V8'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'name'\np12\ntp13\n"
             b"(dp14\ng12\n(g7\n(S'f8'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'<'\np18\nNNNI-1\n"
             b"I-1\nI0\ntp19\nbI0\ntp20\nsI8\nI1\nI0\ntp21\n"
             b"bI00\nS'O\\x81\\xb7Z\\xaa:\\xabY'\np22\ntp23\nb."),
        ]

        for original, data in test_data:
            result = pickle.loads(data, encoding='bytes')
            assert_equal(result, original)

            # Field names of structured results must come back as str,
            # not bytes.
            if isinstance(result, np.ndarray) and result.dtype.names is not None:
                for name in result.dtype.names:
                    assert_(isinstance(name, str))

    def test_pickle_dtype(self):
        # Ticket #251
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            pickle.dumps(float, protocol=proto)

    def test_swap_real(self):
        # Ticket #265: .real/.imag of byte-swapped complex arrays.
        assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
        assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
        assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
        assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)

    def test_object_array_from_list(self):
        # Ticket #270 (gh-868)
        assert_(np.array([1, None, 'A']).shape == (3,))

    def test_multiple_assign(self):
        # Ticket #273
        a = np.zeros((3, 1), int)
        a[[1, 2]] = 1

    def test_empty_array_type(self):
        assert_equal(np.array([]).dtype, np.zeros(0).dtype)

    def test_void_copyswap(self):
        dt = np.dtype([('one', '<i4'), ('two', '<i4')])
        x = np.array((1, 2), dtype=dt)
        x = x.byteswap()
        assert_(x['one'] > 1 and x['two'] > 2)
    def test_method_args(self):
        # Make sure methods and functions have same default axis
        # keyword and arguments
        funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
                  ('sometrue', 'any'),
                  ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),
                  'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',
                  'round', 'min', 'max', 'argsort', 'sort']
        funcs2 = ['compress', 'take', 'repeat']

        for func in funcs1:
            arr = np.random.rand(8, 7)
            arr2 = arr.copy()
            # Tuples pair an np-level name with the differing method name.
            if isinstance(func, tuple):
                func_meth = func[1]
                func = func[0]
            else:
                func_meth = func
            res1 = getattr(arr, func_meth)()
            res2 = getattr(np, func)(arr2)
            if res1 is None:
                res1 = arr

            if res1.dtype.kind in 'uib':
                assert_((res1 == res2).all(), func)
            else:
                assert_(abs(res1-res2).max() < 1e-8, func)

        for func in funcs2:
            arr1 = np.random.rand(8, 7)
            arr2 = np.random.rand(8, 7)
            res1 = None
            if func == 'compress':
                arr1 = arr1.ravel()
                res1 = getattr(arr2, func)(arr1)
            else:
                arr2 = (15*arr2).astype(int).ravel()
            if res1 is None:
                res1 = getattr(arr1, func)(arr2)
            res2 = getattr(np, func)(arr1, arr2)
            assert_(abs(res1-res2).max() < 1e-8, func)

    def test_mem_lexsort_strings(self):
        # Ticket #298
        lst = ['abc', 'cde', 'fgh']
        np.lexsort((lst,))

    def test_fancy_index(self):
        # Ticket #302
        x = np.array([1, 2])[np.array([0])]
        assert_equal(x.shape, (1,))

    def test_recarray_copy(self):
        # Ticket #312: rec.array must copy its input, not alias it.
        dt = [('x', np.int16), ('y', np.float64)]
        ra = np.array([(1, 2.3)], dtype=dt)
        rb = np.rec.array(ra, dtype=dt)
        rb['x'] = 2.
        assert_(ra['x'] != rb['x'])

    def test_rec_fromarray(self):
        # Ticket #322
        x1 = np.array([[1, 2], [3, 4], [5, 6]])
        x2 = np.array(['a', 'dd', 'xyz'])
        x3 = np.array([1.1, 2, 3])
        np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")

    def test_object_array_assign(self):
        # A tuple assigned through .flat must be stored as a single object.
        x = np.empty((2, 2), object)
        x.flat[2] = (1, 2, 3)
        assert_equal(x.flat[2], (1, 2, 3))
def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
def test_mem_axis_minimization(self):
    """ufunc.outer must not corrupt memory (ticket #327)."""
    # Ticket #327
    data = np.arange(5)
    data = np.add.outer(data, data)
def test_mem_float_imag(self):
    """Accessing .imag of a float scalar must not crash (ticket #330)."""
    # Ticket #330
    np.float64(1.0).imag
def test_dtype_tuple(self):
    """A dtype with an empty subshape tuple equals the plain dtype (ticket #334)."""
    # Ticket #334
    assert_(np.dtype('i4') == np.dtype(('i4', ())))
def test_dtype_posttuple(self):
    """A '()'-prefixed field format string must parse without crashing (ticket #335)."""
    # Ticket #335
    np.dtype([('col1', '()i4')])
def test_numeric_carray_compare(self):
    """Comparison of a 'c' (char) array against bytes must work (ticket #341)."""
    # Ticket #341
    assert_equal(np.array(['X'], 'c'), b'X')
def test_string_array_size(self):
    """Ragged nested string lists must raise, not silently broadcast (ticket #342)."""
    # Ticket #342
    assert_raises(ValueError,
                  np.array, [['X'], ['X', 'X', 'X']], '|S1')
def test_dtype_repr(self):
    """repr of (dtype, n) and (dtype, (n,)) sub-array dtypes must agree (ticket #344)."""
    # Ticket #344
    dt1 = np.dtype(('uint32', 2))
    dt2 = np.dtype(('uint32', (2,)))
    assert_equal(dt1.__repr__(), dt2.__repr__())
def test_reshape_order(self):
    """reshape(order='F') must give Fortran element order, also on strided views."""
    # Make sure reshape order works.
    a = np.arange(6).reshape(2, 3, order='F')
    assert_equal(a, [[0, 2, 4], [1, 3, 5]])
    a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    # Column view is non-contiguous; F-order reshape must still be correct.
    b = a[:, 1]
    assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
def test_reshape_zero_strides(self):
    """Reshaping a zero-strided array must keep the zero stride (issue #380)."""
    # Issue #380, test reshaping of zero strided arrays
    a = np.ones(1)
    a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
    assert_(a.reshape(5, 1).strides[0] == 0)
def test_reshape_zero_size(self):
    """Setting .shape with -1 on a 0-sized array must succeed (gh-2700)."""
    # GitHub Issue #2700, setting shape failed for 0-sized arrays
    a = np.ones((0, 2))
    a.shape = (-1, 2)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
                    reason="Using relaxed stride debug")
def test_reshape_trailing_ones_strides(self):
    """Trailing length-1 axes after reshape must get sensible strides (gh-2949)."""
    # GitHub issue gh-2949, bad strides for trailing ones of new shape
    a = np.zeros(12, dtype=np.int32)[::2]  # not contiguous
    strides_c = (16, 8, 8, 8)
    strides_f = (8, 24, 48, 48)
    assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)
    assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
    assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
def test_repeat_discont(self):
    """repeat must work on a discontiguous (strided column) view (ticket #352)."""
    # Ticket #352
    a = np.arange(12).reshape(4, 3)[:, 2]
    assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
def test_array_index(self):
    """Indexing with np.where results must behave the same for 1-D and 2-D."""
    # Make sure optimization is not called in this case.
    a = np.array([1, 2, 3])
    a2 = np.array([[1, 2, 3]])
    assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
def test_recarray_fields(self):
    """Record-array constructors must infer a native int field dtype (ticket #372)."""
    # Ticket #372
    dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
    dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
    # Either i4 or i8 fields are acceptable depending on platform defaults.
    for a in [np.array([(1, 2), (3, 4)], "i4,i4"),
              np.rec.array([(1, 2), (3, 4)], "i4,i4"),
              np.rec.array([(1, 2), (3, 4)]),
              np.rec.fromarrays([(1, 2), (3, 4)], "i4,i4"),
              np.rec.fromarrays([(1, 2), (3, 4)])]:
        assert_(a.dtype in [dt0, dt1])
def test_random_shuffle(self):
    """shuffle of an (n, 1) array must permute rows, not lose elements (ticket #374)."""
    # Ticket #374
    a = np.arange(5).reshape((5, 1))
    b = a.copy()
    np.random.shuffle(b)
    # Sorting back must recover the original contents exactly.
    assert_equal(np.sort(b, axis=0), a)
def test_refcount_vdot(self):
    """np.vdot must not leak references to its operands (changeset #3443)."""
    # Changeset #3443
    _assert_valid_refcount(np.vdot)
def test_startswith(self):
    """chararray.startswith must vectorize over elements."""
    ca = np.char.array(['Hi', 'There'])
    assert_equal(ca.startswith('H'), [True, False])
def test_noncommutative_reduce_accumulate(self):
    """reduce/accumulate must apply non-commutative ufuncs left-to-right (ticket #413)."""
    # Ticket #413
    tosubtract = np.arange(5)
    todivide = np.array([2.0, 0.5, 0.25])
    assert_equal(np.subtract.reduce(tosubtract), -10)
    assert_equal(np.divide.reduce(todivide), 16.0)
    assert_array_equal(np.subtract.accumulate(tosubtract),
                       np.array([0, -1, -3, -6, -10]))
    assert_array_equal(np.divide.accumulate(todivide),
                       np.array([2., 4., 16.]))
def test_convolve_empty(self):
    """Convolve must raise ValueError for an empty input array, not crash."""
    # Convolve should raise an error for empty input array.
    assert_raises(ValueError, np.convolve, [], [1])
    assert_raises(ValueError, np.convolve, [1], [])
def test_multidim_byteswap(self):
    """byteswap must also swap elements inside sub-array fields (ticket #449)."""
    # Ticket #449
    r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
    assert_array_equal(r.byteswap(),
                       np.array([(256, (0, 256, 512))], r.dtype))
def test_string_NULL(self):
    """String scalars must only strip *trailing* NUL bytes (changeset 3557)."""
    # Changeset 3557
    assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
                 'a\x00\x0b\x0c')
def test_junk_in_string_fields_of_recarray(self):
    """Padding bytes in fixed-width string fields must not leak into values (ticket #483)."""
    # Ticket #483
    r = np.array([[b'abc']], dtype=[('var1', '|S20')])
    assert_(asbytes(r['var1'][0][0]) == b'abc')
def test_take_output(self):
    """np.take must honour its out= parameter."""
    # Ensure that 'take' honours output parameter.
    x = np.arange(12).reshape((3, 4))
    a = np.take(x, [0, 2], axis=1)
    b = np.zeros_like(a)
    np.take(x, [0, 2], axis=1, out=b)
    assert_array_equal(a, b)
def test_take_object_fail(self):
    """A failing take on an object array must not leak references (gh-3001)."""
    # Issue gh-3001
    d = 123.
    a = np.array([d, 1], dtype=object)
    if HAS_REFCOUNT:
        ref_d = sys.getrefcount(d)
    try:
        # Index 100 is out of bounds, so this raises after partial work.
        a.take([0, 100])
    except IndexError:
        pass
    if HAS_REFCOUNT:
        # Refcount of 'd' must be unchanged by the failed operation.
        assert_(ref_d == sys.getrefcount(d))
def test_array_str_64bit(self):
    """Printing an array containing NaN must not trigger FP errors (ticket #501)."""
    # Ticket #501
    s = np.array([1, np.nan], dtype=np.float64)
    with np.errstate(all='raise'):
        np.array_str(s)  # Should succeed
def test_frompyfunc_endian(self):
    """frompyfunc results must agree for big- and little-endian inputs (ticket #503)."""
    # Ticket #503
    from math import radians
    uradians = np.frompyfunc(radians, 1, 1)
    big_endian = np.array([83.4, 83.5], dtype='>f8')
    little_endian = np.array([83.4, 83.5], dtype='<f8')
    assert_almost_equal(uradians(big_endian).astype(float),
                        uradians(little_endian).astype(float))
def test_mem_string_arr(self):
    """hstack of an empty list with a long string must not corrupt memory (ticket #514)."""
    # Ticket #514
    s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
    t = []
    np.hstack((t, s))
def test_arr_transpose(self):
    """transpose with an explicit 16-axis permutation list must succeed (ticket #516)."""
    # Ticket #516
    x = np.random.rand(*(2,)*16)
    x.transpose(list(range(16)))  # Should succeed
def test_string_mergesort(self):
    """Mergesort argsort on equal strings must be stable (ticket #540)."""
    # Ticket #540
    x = np.array(['a']*32)
    assert_array_equal(x.argsort(kind='m'), np.arange(32))
def test_argmax_byteorder(self):
    """argmax must be correct on non-native-endian data (ticket #546)."""
    # Ticket #546
    a = np.arange(3, dtype='>f')
    assert_(a[a.argmax()] == a.max())
def test_rand_seed(self):
    """np.random.seed must accept numpy integer scalars (ticket #555)."""
    # Ticket #555
    for l in np.arange(4):
        np.random.seed(l)
def test_mem_deallocation_leak(self):
    """Deleting an array and a same-dtype copy must not crash (ticket #562)."""
    # Ticket #562
    a = np.zeros(5, dtype=float)
    b = np.array(a, dtype=float)
    del a, b
def test_mem_on_invalid_dtype(self):
    """fromiter with non-convertible items must raise cleanly (ticket #583)."""
    "Ticket #583"
    assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
def test_dot_negative_stride(self):
    """np.dot must give the same result for a reversed view and its copy (ticket #588)."""
    # Ticket #588
    x = np.array([[1, 5, 25, 125., 625]])
    y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
    z = y[::-1].copy()
    y2 = y[::-1]  # negative-stride view, same values as z
    assert_equal(np.dot(x, z), np.dot(x, y2))
def test_object_casting(self):
    """In-place |= on float arrays must raise TypeError, not fall back to object."""
    # This used to trigger the object-type version of
    # the bitwise_or operation, because float64 -> object
    # casting succeeds
    def rs():
        x = np.ones([484, 286])
        y = np.zeros([484, 286])
        x |= y
    assert_raises(TypeError, rs)
def test_unicode_scalar(self):
    """Unicode array scalars must round-trip through pickle (ticket #600)."""
    # Ticket #600
    x = np.array(["DROND", "DROND1"], dtype="U6")
    el = x[1]
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        new = pickle.loads(pickle.dumps(el, protocol=proto))
        assert_equal(new, el)
def test_arange_non_native_dtype(self):
    """arange must keep a requested non-native-endian dtype (ticket #616)."""
    # Ticket #616
    for T in ('>f4', '<f4'):
        dt = np.dtype(T)
        assert_equal(np.arange(0, dtype=dt).dtype, dt)
        assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
        assert_equal(np.arange(5, dtype=dt).dtype, dt)
def test_bool_flat_indexing_invalid_nr_elements(self):
    """Boolean assignment with a wrong-sized value must raise, not crash."""
    s = np.ones(10, dtype=float)
    x = np.array((15,), dtype=float)

    def ia(x, s, v):
        # Boolean-mask assignment; v has the wrong number of elements.
        x[(s > 0)] = v

    assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
    assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
    # Old special case (different code path):
    assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
    assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
def test_mem_scalar_indexing(self):
    """Indexing with a 0-d integer array must not corrupt memory (ticket #603)."""
    # Ticket #603
    x = np.array([0], dtype=float)
    index = np.array(0, dtype=np.int32)
    x[index]
def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
def test_fromstring(self):
    """fromstring with a non-default separator must parse all fields."""
    assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
                 [12, 9, 9])
def test_searchsorted_variable_length(self):
    """searchsorted must handle strings of differing lengths."""
    x = np.array(['a', 'aa', 'b'])
    y = np.array(['d', 'e'])
    assert_equal(x.searchsorted(y), [3, 3])
def test_string_argsort_with_zeros(self):
    """argsort must compare string elements containing NUL bytes correctly."""
    # Check argsort for strings containing zeros.
    x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
    assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
    assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
def test_string_sort_with_zeros(self):
    """sort must order string elements containing NUL bytes correctly."""
    # Check sort for strings containing zeros.
    x = np.frombuffer(b"\x00\x02\x00\x01", dtype="|S2")
    y = np.frombuffer(b"\x00\x01\x00\x02", dtype="|S2")
    assert_array_equal(np.sort(x, kind="q"), y)
def test_copy_detection_zero_dim(self):
    """Reshaping a transposed 0-sized array must not crash (ticket #658)."""
    # Ticket #658
    np.indices((0, 3, 4)).T.reshape(-1, 3)
def test_flat_byteorder(self):
    """.flat iteration must respect byte order (ticket #657)."""
    # Ticket #657
    x = np.arange(10)
    assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
    assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
def test_sign_bit(self):
    """abs must clear the sign bit of negative zero in the printed output."""
    x = np.array([0, -0.0, 0])
    assert_equal(str(np.abs(x)), '[0. 0. 0.]')
def test_flat_index_byteswap(self):
    """Scalars from .flat must keep the array's dtype for both byte orders."""
    for dt in (np.dtype('<i4'), np.dtype('>i4')):
        x = np.array([-1, 0, 1], dtype=dt)
        assert_equal(x.flat[0].dtype, x[0].dtype)
def test_copy_detection_corner_case(self):
    """Reshaping a transposed 0-sized array must not crash (ticket #658)."""
    # Ticket #658
    np.indices((0, 3, 4)).T.reshape(-1, 3)
# Cannot test if NPY_RELAXED_STRIDES_DEBUG changes the strides.
# With NPY_RELAXED_STRIDES_DEBUG the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,
                    reason="Using relaxed stride debug")
def test_copy_detection_corner_case2(self):
    """Strides of a reshaped 0-sized array must be set correctly (ticket #771)."""
    # Ticket #771: strides are not set correctly when reshaping 0-sized
    # arrays
    b = np.indices((0, 3, 4)).T.reshape(-1, 3)
    assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
def test_object_array_refcounting(self):
    """Object-array operations must keep exact refcounts on stored objects (ticket #633).

    Each stage below performs an operation on object arrays and asserts
    the resulting delta of sys.getrefcount over the baseline counts.
    """
    # Ticket #633
    if not hasattr(sys, 'getrefcount'):
        return

    # NB. this is probably CPython-specific
    cnt = sys.getrefcount

    a = object()
    b = object()
    c = object()

    cnt0_a = cnt(a)
    cnt0_b = cnt(b)
    cnt0_c = cnt(c)

    # -- 0d -> 1-d broadcast slice assignment

    arr = np.zeros(5, dtype=np.object_)

    arr[:] = a
    assert_equal(cnt(a), cnt0_a + 5)

    arr[:] = b
    # Overwriting must release the 5 references to 'a'.
    assert_equal(cnt(a), cnt0_a)
    assert_equal(cnt(b), cnt0_b + 5)

    arr[:2] = c
    assert_equal(cnt(b), cnt0_b + 3)
    assert_equal(cnt(c), cnt0_c + 2)

    del arr

    # -- 1-d -> 2-d broadcast slice assignment

    arr = np.zeros((5, 2), dtype=np.object_)
    arr0 = np.zeros(2, dtype=np.object_)

    arr0[0] = a
    assert_(cnt(a) == cnt0_a + 1)
    arr0[1] = b
    assert_(cnt(b) == cnt0_b + 1)

    arr[:, :] = arr0
    # Broadcast assignment adds 5 more references per object.
    assert_(cnt(a) == cnt0_a + 6)
    assert_(cnt(b) == cnt0_b + 6)

    arr[:, 0] = None
    assert_(cnt(a) == cnt0_a + 1)

    del arr, arr0

    # -- 2-d copying + flattening

    arr = np.zeros((5, 2), dtype=np.object_)

    arr[:, 0] = a
    arr[:, 1] = b
    assert_(cnt(a) == cnt0_a + 5)
    assert_(cnt(b) == cnt0_b + 5)

    arr2 = arr.copy()
    assert_(cnt(a) == cnt0_a + 10)
    assert_(cnt(b) == cnt0_b + 10)

    arr2 = arr[:, 0].copy()
    assert_(cnt(a) == cnt0_a + 10)
    assert_(cnt(b) == cnt0_b + 5)

    arr2 = arr.flatten()
    assert_(cnt(a) == cnt0_a + 10)
    assert_(cnt(b) == cnt0_b + 10)

    del arr, arr2

    # -- concatenate, repeat, take, choose

    arr1 = np.zeros((5, 1), dtype=np.object_)
    arr2 = np.zeros((5, 1), dtype=np.object_)

    arr1[...] = a
    arr2[...] = b
    assert_(cnt(a) == cnt0_a + 5)
    assert_(cnt(b) == cnt0_b + 5)

    tmp = np.concatenate((arr1, arr2))
    assert_(cnt(a) == cnt0_a + 5 + 5)
    assert_(cnt(b) == cnt0_b + 5 + 5)

    tmp = arr1.repeat(3, axis=0)
    assert_(cnt(a) == cnt0_a + 5 + 3*5)

    tmp = arr1.take([1, 2, 3], axis=0)
    assert_(cnt(a) == cnt0_a + 5 + 3)

    x = np.array([[0], [1], [0], [1], [1]], int)
    tmp = x.choose(arr1, arr2)
    assert_(cnt(a) == cnt0_a + 5 + 2)
    assert_(cnt(b) == cnt0_b + 5 + 3)

    del tmp  # Avoid pyflakes unused variable warning
def test_mem_custom_float_to_array(self):
    """astype(float) on objects implementing __float__ must succeed (ticket #702)."""
    # Ticket 702
    class MyFloat:
        def __float__(self):
            return 1.0

    tmp = np.atleast_1d([MyFloat()])
    tmp.astype(float)  # Should succeed
def test_object_array_refcount_self_assign(self):
    """Self-assignment of an object array must not drop element refcounts (ticket #711)."""
    # Ticket #711
    class VictimObject:
        deleted = False

        def __del__(self):
            self.deleted = True

    d = VictimObject()
    arr = np.zeros(5, dtype=np.object_)
    arr[:] = d
    del d
    arr[:] = arr  # refcount of 'd' might hit zero here
    assert_(not arr[0].deleted)
    arr[:] = arr  # trying to induce a segfault by doing it again...
    assert_(not arr[0].deleted)
def test_mem_fromiter_invalid_dtype_string(self):
    """fromiter with an unsized 'S' dtype must raise ValueError, not crash."""
    x = [1, 2, 3]
    assert_raises(ValueError,
                  np.fromiter, [xi for xi in x], dtype='S')
def test_reduce_big_object_array(self):
    """Buffered reduction over an object array must be correct (ticket #713)."""
    # Ticket #713
    # Shrink the buffer so the 161-element array needs multiple passes.
    oldsize = np.setbufsize(10*16)
    a = np.array([None]*161, object)
    assert_(not np.any(a))
    np.setbufsize(oldsize)
def test_mem_0d_array_index(self):
    """Indexing with a 0-d array must not corrupt memory (ticket #714)."""
    # Ticket #714
    np.zeros(10)[np.array(0)]
def test_nonnative_endian_fill(self):
    """fill on a non-native-endian array must store correctly byteswapped values."""
    # Non-native endian arrays were incorrectly filled with scalars
    # before r5034.
    if sys.byteorder == 'little':
        dtype = np.dtype('>i4')
    else:
        dtype = np.dtype('<i4')
    x = np.empty([1], dtype=dtype)
    x.fill(1)
    assert_equal(x, np.array([1], dtype=dtype))
def test_dot_alignment_sse2(self):
    """np.dot must not segfault on misaligned arrays (ticket #551, r5140)."""
    # Test for ticket #551, changeset r5140
    x = np.zeros((30, 40))
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        y = pickle.loads(pickle.dumps(x, protocol=proto))
        # y is now typically not aligned on a 8-byte boundary
        z = np.ones((1, y.shape[0]))
        # This shouldn't cause a segmentation fault:
        np.dot(z, y)
def test_astype_copy(self):
    """astype to the same-itemsize dtype must still copy the data (ticket #788, r5155)."""
    # Ticket #788, changeset r5155
    # The test data file was generated by scipy.io.savemat.
    # The dtype is float64, but the isbuiltin attribute is 0.
    data_dir = path.join(path.dirname(__file__), 'data')
    filename = path.join(data_dir, "astype_copy.pkl")
    with open(filename, 'rb') as f:
        xp = pickle.load(f, encoding='latin1')
    xpd = xp.astype(np.float64)
    # The result must not share its buffer with the source.
    assert_((xp.__array_interface__['data'][0] !=
            xpd.__array_interface__['data'][0]))
def test_compress_small_type(self):
    """compress with an unsafely-castable out= must raise TypeError, not segfault."""
    # Ticket #789, changeset 5217.
    # compress with out argument segfaulted if cannot cast safely
    import numpy as np
    a = np.array([[1, 2], [3, 4]])
    b = np.zeros((2, 1), dtype=np.single)
    try:
        a.compress([True, False], axis=1, out=b)
        raise AssertionError("compress with an out which cannot be "
                             "safely casted should not return "
                             "successfully")
    except TypeError:
        pass
def test_attributes(self):
    """ndarray methods must propagate subclass attributes via __array_finalize__ (ticket #791)."""
    # Ticket #791
    class TestArray(np.ndarray):
        def __new__(cls, data, info):
            result = np.array(data)
            result = result.view(cls)
            result.info = info
            return result

        def __array_finalize__(self, obj):
            # Copy 'info' from the source array (empty string if absent).
            self.info = getattr(obj, 'info', '')

    dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
    assert_(dat.info == 'jubba')
    # In-place operations must not clobber the attribute.
    dat.resize((4, 2))
    assert_(dat.info == 'jubba')
    dat.sort()
    assert_(dat.info == 'jubba')
    dat.fill(2)
    assert_(dat.info == 'jubba')
    dat.put([2, 3, 4], [6, 3, 4])
    assert_(dat.info == 'jubba')
    dat.setfield(4, np.int32, 0)
    assert_(dat.info == 'jubba')
    dat.setflags()
    assert_(dat.info == 'jubba')
    # Methods returning new arrays must propagate the attribute.
    assert_(dat.all(1).info == 'jubba')
    assert_(dat.any(1).info == 'jubba')
    assert_(dat.argmax(1).info == 'jubba')
    assert_(dat.argmin(1).info == 'jubba')
    assert_(dat.argsort(1).info == 'jubba')
    assert_(dat.astype(TestArray).info == 'jubba')
    assert_(dat.byteswap().info == 'jubba')
    assert_(dat.clip(2, 7).info == 'jubba')
    assert_(dat.compress([0, 1, 1]).info == 'jubba')
    assert_(dat.conj().info == 'jubba')
    assert_(dat.conjugate().info == 'jubba')
    assert_(dat.copy().info == 'jubba')
    dat2 = TestArray([2, 3, 1, 0], 'jubba')
    choices = [[0, 1, 2, 3], [10, 11, 12, 13],
               [20, 21, 22, 23], [30, 31, 32, 33]]
    assert_(dat2.choose(choices).info == 'jubba')
    assert_(dat.cumprod(1).info == 'jubba')
    assert_(dat.cumsum(1).info == 'jubba')
    assert_(dat.diagonal().info == 'jubba')
    assert_(dat.flatten().info == 'jubba')
    assert_(dat.getfield(np.int32, 0).info == 'jubba')
    assert_(dat.imag.info == 'jubba')
    assert_(dat.max(1).info == 'jubba')
    assert_(dat.mean(1).info == 'jubba')
    assert_(dat.min(1).info == 'jubba')
    assert_(dat.newbyteorder().info == 'jubba')
    assert_(dat.prod(1).info == 'jubba')
    assert_(dat.ptp(1).info == 'jubba')
    assert_(dat.ravel().info == 'jubba')
    assert_(dat.real.info == 'jubba')
    assert_(dat.repeat(2).info == 'jubba')
    assert_(dat.reshape((2, 4)).info == 'jubba')
    assert_(dat.round().info == 'jubba')
    assert_(dat.squeeze().info == 'jubba')
    assert_(dat.std(1).info == 'jubba')
    assert_(dat.sum(1).info == 'jubba')
    assert_(dat.swapaxes(0, 1).info == 'jubba')
    assert_(dat.take([2, 3, 5]).info == 'jubba')
    assert_(dat.transpose().info == 'jubba')
    assert_(dat.T.info == 'jubba')
    assert_(dat.var(1).info == 'jubba')
    assert_(dat.view(TestArray).info == 'jubba')
    # These methods do not preserve subclasses
    assert_(type(dat.nonzero()[0]) is np.ndarray)
    assert_(type(dat.nonzero()[1]) is np.ndarray)
def test_recarray_tolist(self):
    """recarray.tolist must match per-record tolist (ticket #793, r5215)."""
    # Ticket #793, changeset r5215
    # Comparisons fail for NaN, so we can't use random memory
    # for the test.
    buf = np.zeros(40, dtype=np.int8)
    a = np.recarray(2, formats="i4,f8,f8", names="id,x,y", buf=buf)
    b = a.tolist()
    assert_( a[0].tolist() == b[0])
    assert_( a[1].tolist() == b[1])
def test_nonscalar_item_method(self):
    """.item() on a multi-element array must raise ValueError gracefully."""
    # Make sure that .item() fails graciously when it should
    a = np.arange(5)
    assert_raises(ValueError, a.item)
def test_char_array_creation(self):
    """A 'c'-dtype array from a string must split into per-character bytes."""
    a = np.array('123', dtype='c')
    b = np.array([b'1', b'2', b'3'])
    assert_equal(a, b)
def test_unaligned_unicode_access(self):
    """Unicode fields at varying (unaligned) offsets must read correctly (ticket #825)."""
    # Ticket #825
    for i in range(1, 9):
        msg = 'unicode offset: %d chars' % i
        t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])
        x = np.array([(b'a', u'b')], dtype=t)
        assert_equal(str(x), "[(b'a', 'b')]", err_msg=msg)
def test_sign_for_complex_nan(self):
    """np.sign on complex input must map NaN to NaN, not a number (ticket #794)."""
    # Ticket 794.
    with np.errstate(invalid='ignore'):
        C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
        have = np.sign(C)
        want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
        assert_equal(have, want)
def test_for_equal_names(self):
    """Renaming structured-dtype fields via .names must take effect (ticket #674)."""
    # Ticket #674
    dt = np.dtype([('foo', float), ('bar', float)])
    a = np.zeros(10, dt)
    b = list(a.dtype.names)
    b[0] = "notfoo"
    a.dtype.names = b
    assert_(a.dtype.names[0] == "notfoo")
    assert_(a.dtype.names[1] == "bar")
def test_for_object_scalar_creation(self):
    """np.object_ construction must unwrap to plain Python objects (ticket #816)."""
    # Ticket #816
    a = np.object_()
    b = np.object_(3)
    b2 = np.object_(3.0)
    c = np.object_([4, 5])
    d = np.object_([None, {}, []])
    assert_(a is None)
    assert_(type(b) is int)
    assert_(type(b2) is float)
    assert_(type(c) is np.ndarray)
    assert_(c.dtype == object)
    assert_(d.dtype == object)
def test_array_resize_method_system_error(self):
    """ndarray.resize must reject an 'order' keyword with TypeError (ticket #840)."""
    # Ticket #840 - order should be an invalid keyword.
    x = np.array([[0, 1], [2, 3]])
    assert_raises(TypeError, x.resize, (2, 2), order='C')
def test_for_zero_length_in_choose(self):
    """choose with an empty choice list must raise ValueError (ticket #882)."""
    "Ticket #882"
    a = np.array(1)
    assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
    """ndmin above the maximum number of dimensions must raise (ticket #947)."""
    "Ticket #947."
    assert_raises(ValueError, lambda: np.array([1], ndmin=33))
def test_void_scalar_with_titles(self):
    """Void scalars from dtypes with field titles must index correctly."""
    # No ticket
    data = [('john', 4), ('mary', 5)]
    dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
    arr = np.array(data, dtype=dtype1)
    assert_(arr[0][0] == 'john')
    assert_(arr[0][1] == 4)
def test_void_scalar_constructor(self):
    """multiarray.scalar must reconstruct void/record scalars from raw bytes (issue #1550)."""
    # Issue #1550
    # Create test string data, construct void scalar from data and assert
    # that void scalar contains original data.
    test_string = np.array("test")
    test_string_void_scalar = np.core.multiarray.scalar(
        np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes())

    assert_(test_string_void_scalar.view(test_string.dtype) == test_string)

    # Create record scalar, construct from data and assert that
    # reconstructed scalar is correct.
    test_record = np.ones((), "i,i")
    test_record_void_scalar = np.core.multiarray.scalar(
        test_record.dtype, test_record.tobytes())

    assert_(test_record_void_scalar == test_record)

    # Test pickle and unpickle of void and record scalars
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        assert_(pickle.loads(
            pickle.dumps(test_string, protocol=proto)) == test_string)
        assert_(pickle.loads(
            pickle.dumps(test_record, protocol=proto)) == test_record)
@_no_tracing
def test_blasdot_uninitialized_memory(self):
    """np.dot of 0-sized operands must return zeros, not uninitialized memory (ticket #950)."""
    # Ticket #950
    for m in [0, 1, 2]:
        for n in [0, 1, 2]:
            for k in range(3):
                # Try to ensure that x->data contains non-zero floats
                x = np.array([123456789e199], dtype=np.float64)
                if IS_PYPY:
                    x.resize((m, 0), refcheck=False)
                else:
                    x.resize((m, 0))
                y = np.array([123456789e199], dtype=np.float64)
                if IS_PYPY:
                    y.resize((0, n), refcheck=False)
                else:
                    y.resize((0, n))

                # `dot` should just return zero (m, n) matrix
                z = np.dot(x, y)
                assert_(np.all(z == 0))
                assert_(z.shape == (m, n))
def test_zeros(self):
    """np.empty with a dimension exceeding intp must raise, not crash (gh-1061)."""
    # Regression test for #1061.
    # Set a size which cannot fit into a 64 bits signed integer
    sz = 2 ** 64
    with assert_raises_regex(ValueError,
                             'Maximum allowed dimension exceeded'):
        np.empty(sz)
def test_huge_arange(self):
# Regression test for #1062.
# Set a size which cannot fit into a 64 bits signed integer
sz = 2 ** 64
with assert_raises_regex(ValueError,
'Maximum allowed size exceeded'):
np.arange(sz)
assert_(np.size == sz)
def test_fromiter_bytes(self):
    """fromiter must work for both signed ('b') and unsigned ('B') byte dtypes (ticket #1058)."""
    # Ticket #1058
    a = np.fromiter(list(range(10)), dtype='b')
    b = np.fromiter(list(range(10)), dtype='B')
    assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
    assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_array_from_sequence_scalar_array(self):
    """Building an object array from a sequence containing 0-d arrays must not segfault (ticket #1078)."""
    # Ticket #1078: segfaults when creating an array with a sequence of
    # 0d arrays.
    a = np.array((np.ones(2), np.array(2)), dtype=object)
    assert_equal(a.shape, (2,))
    assert_equal(a.dtype, np.dtype(object))
    assert_equal(a[0], np.ones(2))
    assert_equal(a[1], np.array(2))

    a = np.array(((1,), np.array(1)), dtype=object)
    assert_equal(a.shape, (2,))
    assert_equal(a.dtype, np.dtype(object))
    assert_equal(a[0], (1,))
    assert_equal(a[1], np.array(1))
def test_array_from_sequence_scalar_array2(self):
    """Mixed empty-array / 0-d-object input must yield a (2,) object array (ticket #1081)."""
    # Ticket #1081: weird array with strange input...
    t = np.array([np.array([]), np.array(0, object)], dtype=object)
    assert_equal(t.shape, (2,))
    assert_equal(t.dtype, np.dtype(object))
def test_array_too_big(self):
    """Shapes whose total size overflows intp must raise ValueError (ticket #1080)."""
    # Ticket #1080.
    assert_raises(ValueError, np.zeros, [975]*7, np.int8)
    assert_raises(ValueError, np.zeros, [26244]*5, np.int8)
def test_dtype_keyerrors_(self):
    """dtype item access must raise the appropriate error type per key kind (ticket #1106)."""
    # Ticket #1106.
    dt = np.dtype([('f1', np.uint)])
    assert_raises(KeyError, dt.__getitem__, "f2")
    assert_raises(IndexError, dt.__getitem__, 1)
    assert_raises(TypeError, dt.__getitem__, 0.0)
def test_lexsort_buffer_length(self):
    """lexsort with mixed-itemsize keys (one reversed) must not segfault (ticket #1217)."""
    # Ticket #1217, don't segfault.
    a = np.ones(100, dtype=np.int8)
    b = np.ones(100, dtype=np.int32)
    i = np.lexsort((a[::-1], b))
    assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
    """Object→fixed-width string conversion must pad/truncate correctly (ticket #1235)."""
    # Ticket #1235.
    a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)
    b = np.array(a, dtype=(np.str_, 8))
    assert_equal(a, b)
    # Shorter target width truncates the values.
    c = np.array(a, dtype=(np.str_, 5))
    assert_equal(c, np.array(['abcde', 'ijklm']))
    # Longer target width pads and still compares equal.
    d = np.array(a, dtype=(np.str_, 12))
    assert_equal(a, d)
    e = np.empty((2, ), dtype=(np.str_, 8))
    e[:] = a[:]
    assert_equal(a, e)
def test_unicode_to_string_cast(self):
    """Casting non-latin-1 unicode to bytes must raise UnicodeEncodeError (ticket #1240)."""
    # Ticket #1240.
    a = np.array([[u'abc', u'\u03a3'],
                  [u'asdf', u'erw']],
                 dtype='U')
    assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_unicode_to_string_cast_error(self):
    """Unicode→bytes cast errors must also surface for large/strided arrays (gh-15790)."""
    # gh-15790
    a = np.array([u'\x80'] * 129, dtype='U3')
    assert_raises(UnicodeEncodeError, np.array, a, 'S')
    b = a.reshape(3, 43)[:-1, :-1]
    assert_raises(UnicodeEncodeError, np.array, b, 'S')
def test_mixed_string_unicode_array_creation(self):
    """Mixed bytes/unicode input must size the unicode itemsize by the longest element."""
    a = np.array(['1234', u'123'])
    assert_(a.itemsize == 16)
    a = np.array([u'123', '1234'])
    assert_(a.itemsize == 16)
    a = np.array(['1234', u'123', '12345'])
    assert_(a.itemsize == 20)
    a = np.array([u'123', '1234', u'12345'])
    assert_(a.itemsize == 20)
    a = np.array([u'123', '1234', u'1234'])
    assert_(a.itemsize == 16)
def test_misaligned_objects_segfault(self):
    """Operations on misaligned object fields must not segfault (tickets #1198, #1267)."""
    # Ticket #1198 and #1267
    # 'O,c' packs the object pointer at a misaligned offset.
    a1 = np.zeros((10,), dtype='O,c')
    a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')
    a1['f0'] = a2
    repr(a1)
    np.argmax(a1['f0'])
    a1['f0'][1] = "FOO"
    a1['f0'] = "FOO"
    np.array(a1['f0'], dtype='S')
    np.nonzero(a1['f0'])
    a1.sort()
    copy.deepcopy(a1)
def test_misaligned_scalars_segfault(self):
    """Assigning to misaligned object fields of void scalars must not segfault (ticket #1267)."""
    # Ticket #1267
    s1 = np.array(('a', 'Foo'), dtype='c,O')
    s2 = np.array(('b', 'Bar'), dtype='c,O')
    s1['f1'] = s2['f1']
    s1['f1'] = 'Baz'
def test_misaligned_dot_product_objects(self):
    """np.dot on misaligned object fields must work (ticket #1267)."""
    # Ticket #1267
    # This didn't require a fix, but it's worth testing anyway, because
    # it may fail if .dot stops enforcing the arrays to be BEHAVED
    a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')
    b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')
    np.dot(a['f0'], b['f0'])
def test_byteswap_complex_scalar(self):
    """byteswap of complex scalars must swap both components (ticket #1259, gh-441)."""
    # Ticket #1259 and gh-441
    for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:
        z = np.array([2.2-1.1j], dtype)
        x = z[0]  # always native-endian
        y = x.byteswap()
        if x.dtype.byteorder == z.dtype.byteorder:
            # little-endian machine
            assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))
        else:
            # big-endian machine
            assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))
        # double check real and imaginary parts:
        assert_equal(x.real, y.real.byteswap())
        assert_equal(x.imag, y.imag.byteswap())
def test_structured_arrays_with_objects1(self):
    """Fancy assignment into a structured array with object fields must work (ticket #1299)."""
    # Ticket #1299
    stra = 'aaaa'
    strb = 'bbbb'
    x = np.array([[(0, stra), (1, strb)]], 'i8,O')
    x[x.nonzero()] = x.ravel()[:1]
    assert_(x[0, 1] == x[0, 0])
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_structured_arrays_with_objects2(self):
    """Same as above, plus exact refcount bookkeeping (ticket #1299, second test)."""
    # Ticket #1299 second test
    stra = 'aaaa'
    strb = 'bbbb'
    numb = sys.getrefcount(strb)
    numa = sys.getrefcount(stra)
    x = np.array([[(0, stra), (1, strb)]], 'i8,O')
    x[x.nonzero()] = x.ravel()[:1]
    # strb's slot was overwritten; stra gained two references.
    assert_(sys.getrefcount(strb) == numb)
    assert_(sys.getrefcount(stra) == numa + 2)
def test_duplicate_title_and_name(self):
    """A dtype where a field title duplicates a field name must raise (ticket #1254)."""
    # Ticket #1254
    dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
    assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
    """INT_MIN // -1 must not crash with a SIGFPE (ticket #1317)."""
    # Ticket #1317.
    def test_type(t):
        # iinfo(t).min // -1 overflows the signed type.
        min = np.array([np.iinfo(t).min])
        min //= -1

    with np.errstate(divide="ignore"):
        for t in (np.int8, np.int16, np.int32, np.int64, int):
            test_type(t)
def test_buffer_hashlib(self):
    """hashlib must accept an ndarray through the buffer protocol."""
    from hashlib import sha256

    x = np.array([1, 2, 3], dtype=np.dtype('<i4'))
    assert_equal(sha256(x).hexdigest(), '4636993d3e1da4e9d6b8f87b79e8f7c6d018580d52661950eabc3845c5897a4d')
def test_0d_string_scalar(self):
    """asarray of a string with a byteordered 'c' dtype must succeed (bug #1436)."""
    # Bug #1436; the following should succeed
    np.asarray('x', '>c')
def test_log1p_compiler_shenanigans(self):
    """log1p of a tiny value must stay finite (guards 32-bit Intel compiler issues)."""
    # Check if log1p is behaving on 32 bit intel systems.
    assert_(np.isfinite(np.log1p(np.exp2(-53))))
def test_fromiter_comparison(self):
    """fromiter comparison for 'b'/'B' dtypes (NOTE: duplicates test_fromiter_bytes above)."""
    a = np.fromiter(list(range(10)), dtype='b')
    b = np.fromiter(list(range(10)), dtype='B')
    assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
    assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
def test_fromstring_crash(self):
    """fromstring with unparsable text must warn (deprecated), not crash (ticket #1345)."""
    # Ticket #1345: the following should not cause a crash
    with assert_warns(DeprecationWarning):
        np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
    """np.dot must accept every pair of numeric dtypes without TypeError."""
    dtypes = [x for x in np.sctypeDict.values()
              if (issubclass(x, np.number)
                  and not issubclass(x, np.timedelta64))]
    a = np.array([], np.bool_)  # not x[0] because it is unordered
    failures = []

    for x in dtypes:
        b = a.astype(x)
        for y in dtypes:
            c = a.astype(y)
            try:
                np.dot(b, c)
            except TypeError:
                failures.append((x, y))

    if failures:
        raise AssertionError("Failures: %r" % failures)
def test_ticket_1538(self):
    """finfo(float32) attributes must be float32 scalars, not Python floats."""
    x = np.finfo(np.float32)
    for name in 'eps epsneg max min resolution tiny'.split():
        assert_equal(type(getattr(x, name)), np.float32,
                     err_msg=name)
def test_ticket_1434(self):
    """var and std must honour the out= argument and return it."""
    # Check that the out= argument in var and std has an effect
    data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))
    out = np.zeros((3,))

    ret = data.var(axis=1, out=out)
    assert_(ret is out)
    assert_array_equal(ret, data.var(axis=1))

    ret = data.std(axis=1, out=out)
    assert_(ret is out)
    assert_array_equal(ret, data.std(axis=1))
def test_complex_nan_maximum(self):
    """maximum must propagate a complex NaN operand."""
    cnan = complex(0, np.nan)
    assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
    """Tuple-index assignment must work on ndarray subclasses (ticket #1563)."""
    # ticket #1563
    class Subclass(np.ndarray):
        def __new__(cls, i):
            return np.ones((i,)).view(cls)

    x = Subclass(5)
    x[(0,)] = 2  # shouldn't raise an exception
    assert_equal(x[0], 2)
def test_ufunc_no_unnecessary_views(self):
    """A ufunc writing into its out= subclass array must return that same object (ticket #1548)."""
    # ticket #1548
    class Subclass(np.ndarray):
        pass
    x = np.array([1, 2, 3]).view(Subclass)
    y = np.add(x, x, x)
    assert_equal(id(x), id(y))
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_take_refcount(self):
    """A failing take with out= must not change the out array's refcount (ticket #939)."""
    # ticket #939
    a = np.arange(16, dtype=float)
    a.shape = (4, 4)
    lut = np.ones((5 + 3, 4), float)
    rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
    c1 = sys.getrefcount(rgba)
    try:
        lut.take(a, axis=0, mode='clip', out=rgba)
    except TypeError:
        pass
    c2 = sys.getrefcount(rgba)
    assert_equal(c1, c2)
def test_fromfile_tofile_seeks(self):
    """fromfile/tofile must keep the Python file position in sync (gh-1610)."""
    # On Python 3, tofile/fromfile used to get (#1610) the Python
    # file handle out of sync
    f0 = tempfile.NamedTemporaryFile()
    f = f0.file
    f.write(np.arange(255, dtype='u1').tobytes())

    f.seek(20)
    ret = np.fromfile(f, count=4, dtype='u1')
    assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))
    # fromfile must leave the handle just past what it consumed.
    assert_equal(f.tell(), 24)

    f.seek(40)
    np.array([1, 2, 3], dtype='u1').tofile(f)
    assert_equal(f.tell(), 43)

    f.seek(40)
    data = f.read(3)
    assert_equal(data, b"\x01\x02\x03")

    f.seek(80)
    f.read(4)
    data = np.fromfile(f, dtype='u1', count=4)
    assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))

    f.close()
def test_complex_scalar_warning(self):
    """float() of a complex scalar must emit ComplexWarning and keep the real part."""
    for tp in [np.csingle, np.cdouble, np.clongdouble]:
        x = tp(1+2j)
        assert_warns(np.ComplexWarning, float, x)
        with suppress_warnings() as sup:
            sup.filter(np.ComplexWarning)
            assert_equal(float(x), float(x.real))
def test_complex_scalar_complex_cast(self):
    """complex() of every complex scalar type must preserve the value."""
    for tp in [np.csingle, np.cdouble, np.clongdouble]:
        x = tp(1+2j)
        assert_equal(complex(x), 1+2j)
def test_complex_boolean_cast(self):
    """Complex values with nonzero real OR imaginary part must be truthy (ticket #2218)."""
    # Ticket #2218
    for tp in [np.csingle, np.cdouble, np.clongdouble]:
        x = np.array([0, 0+0.5j, 0.5+0j], dtype=tp)
        assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))
        assert_(np.any(x))
        assert_(np.all(x[1:]))
def test_uint_int_conversion(self):
x = 2**64 - 1
assert_equal(int(np.uint64(x)), x)
def test_duplicate_field_names_assign(self):
    """Assigning duplicate field names to a dtype must raise; repr must not segfault."""
    ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')
    ra.dtype.names = ('f1', 'f2')
    repr(ra)  # should not cause a segmentation fault
    assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))
def test_eq_string_and_object_array(self):
    """== between object and string arrays must work in both operand orders."""
    # From e-mail thread "__eq__ with str and object" (Keith Goodman)
    a1 = np.array(['a', 'b'], dtype=object)
    a2 = np.array(['a', 'c'])
    assert_array_equal(a1 == a2, [True, False])
    assert_array_equal(a2 == a1, [True, False])
def test_nonzero_byteswap(self):
    """nonzero must respect byte order after byteswap().newbyteorder()."""
    a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)
    a.dtype = np.float32
    assert_equal(a.nonzero()[0], [1])
    a = a.byteswap().newbyteorder()
    assert_equal(a.nonzero()[0], [1])  # [0] if nonzero() ignores swap
def test_find_common_type_boolean(self):
    """find_common_type of two booleans must stay boolean (ticket #1695)."""
    # Ticket #1695
    assert_(np.find_common_type([], ['?', '?']) == '?')
def test_empty_mul(self):
a = np.array([1.])
a[1:1] *= 2
assert_equal(a, [1.])
def test_array_side_effect(self):
    """String-dtype discovery must not leave a stale Python exception behind."""
    # The second use of itemsize was throwing an exception because in
    # ctors.c, discover_itemsize was calling PyObject_Length without
    # checking the return code. This failed to get the length of the
    # number 2, and the exception hung around until something checked
    # PyErr_Occurred() and returned an error.
    assert_equal(np.dtype('S10').itemsize, 10)
    np.array([['abc', 2], ['long   ', '0123456789']], dtype=np.string_)
    assert_equal(np.dtype('S10').itemsize, 10)
def test_any_float(self):
# all and any for floats
a = np.array([0.1, 0.9])
assert_(np.any(a))
assert_(np.all(a))
def test_large_float_sum(self):
a = np.arange(10000, dtype='f')
assert_equal(a.sum(dtype='d'), a.astype('d').sum())
def test_ufunc_casting_out(self):
    """A ufunc with mixed-precision inputs must downcast into a float32 out= array."""
    a = np.array(1.0, dtype=np.float32)
    b = np.array(1.0, dtype=np.float64)
    c = np.array(1.0, dtype=np.float32)
    np.add(a, b, out=c)
    assert_equal(c, 2.0)
def test_array_scalar_contiguous(self):
    """0-d arrays built from scalars must be both C- and F-contiguous."""
    # Array scalars are both C and Fortran contiguous
    assert_(np.array(1.0).flags.c_contiguous)
    assert_(np.array(1.0).flags.f_contiguous)
    assert_(np.array(np.float32(1.0)).flags.c_contiguous)
    assert_(np.array(np.float32(1.0)).flags.f_contiguous)
def test_squeeze_contiguous(self):
    """squeeze must set contiguity flags correctly (related to gh-387)."""
    # Similar to GitHub issue #387
    a = np.zeros((1, 2)).squeeze()
    b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()
    assert_(a.flags.c_contiguous)
    assert_(a.flags.f_contiguous)
    assert_(b.flags.f_contiguous)
def test_squeeze_axis_handling(self):
    """np.squeeze(..., axis=...) must raise TypeError for subclasses whose
    squeeze() does not accept an axis argument (gh-10779)."""
    # Issue #10779
    # Ensure proper handling of objects
    # that don't support axis specification
    # when squeezing

    class OldSqueeze(np.ndarray):

        def __new__(cls,
                    input_array):
            obj = np.asarray(input_array).view(cls)
            return obj

        # it is perfectly reasonable that prior
        # to numpy version 1.7.0 a subclass of ndarray
        # might have been created that did not expect
        # squeeze to have an axis argument
        # NOTE: this example is somewhat artificial;
        # it is designed to simulate an old API
        # expectation to guard against regression
        def squeeze(self):
            return super().squeeze()

    oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))

    # if no axis argument is specified the old API
    # expectation should give the correct result
    assert_equal(np.squeeze(oldsqueeze),
                 np.array([1,2,3]))

    # likewise, axis=None should work perfectly well
    # with the old API expectation
    assert_equal(np.squeeze(oldsqueeze, axis=None),
                 np.array([1,2,3]))

    # however, specification of any particular axis
    # should raise a TypeError in the context of the
    # old API specification, even when using a valid
    # axis specification like 1 for this array
    with assert_raises(TypeError):
        # this would silently succeed for array
        # subclasses / objects that did not support
        # squeeze axis argument handling before fixing
        # Issue #10779
        np.squeeze(oldsqueeze, axis=1)

    # check for the same behavior when using an invalid
    # axis specification -- in this case axis=0 does not
    # have size 1, but the priority should be to raise
    # a TypeError for the axis argument and NOT a
    # ValueError for squeezing a non-empty dimension
    with assert_raises(TypeError):
        np.squeeze(oldsqueeze, axis=0)

    # the new API knows how to handle the axis
    # argument and will return a ValueError if
    # attempting to squeeze an axis that is not
    # of length 1
    with assert_raises(ValueError):
        np.squeeze(np.array([[1],[2],[3]]), axis=0)
def test_reduce_contiguous(self):
# GitHub issue #387
a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))
b = np.add.reduce(np.zeros((2, 1, 2)), 1)
assert_(a.flags.c_contiguous)
assert_(a.flags.f_contiguous)
assert_(b.flags.c_contiguous)
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_self_reference(self):
# Object arrays with references to themselves can cause problems
a = np.array(0, dtype=object)
a[()] = a
assert_raises(RecursionError, int, a)
assert_raises(RecursionError, float, a)
a[()] = None
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
def test_object_array_circular_reference(self):
# Test the same for a circular reference.
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
b[()] = a
assert_raises(RecursionError, int, a)
# NumPy has no tp_traverse currently, so circular references
# cannot be detected. So resolve it:
a[()] = None
# This was causing a to become like the above
a = np.array(0, dtype=object)
a[...] += 1
assert_equal(a, 1)
def test_object_array_nested(self):
# but is fine with a reference to a different array
a = np.array(0, dtype=object)
b = np.array(0, dtype=object)
a[()] = b
assert_equal(int(a), int(0))
assert_equal(float(a), float(0))
def test_object_array_self_copy(self):
# An object array being copied into itself DECREF'ed before INCREF'ing
# causing segmentation faults (gh-3787)
a = np.array(object(), dtype=object)
np.copyto(a, a)
if HAS_REFCOUNT:
assert_(sys.getrefcount(a[()]) == 2)
a[()].__class__ # will segfault if object was deleted
def test_zerosize_accumulate(self):
"Ticket #1733"
x = np.array([[42, 0]], dtype=np.uint32)
assert_equal(np.add.accumulate(x[:-1, 0]), [])
def test_objectarray_setfield(self):
# Setfield should not overwrite Object fields with non-Object data
x = np.array([1, 2, 3], dtype=object)
assert_raises(TypeError, x.setfield, 4, np.int32, 0)
def test_setting_rank0_string(self):
"Ticket #1736"
s1 = b"hello1"
s2 = b"hello2"
a = np.zeros((), dtype="S10")
a[()] = s1
assert_equal(a, np.array(s1))
a[()] = np.array(s2)
assert_equal(a, np.array(s2))
a = np.zeros((), dtype='f4')
a[()] = 3
assert_equal(a, np.array(3))
a[()] = np.array(4)
assert_equal(a, np.array(4))
def test_string_astype(self):
"Ticket #1748"
s1 = b'black'
s2 = b'white'
s3 = b'other'
a = np.array([[s1], [s2], [s3]])
assert_equal(a.dtype, np.dtype('S5'))
b = a.astype(np.dtype('S0'))
assert_equal(b.dtype, np.dtype('S5'))
def test_ticket_1756(self):
# Ticket #1756
s = b'0123456789abcdef'
a = np.array([s]*5)
for i in range(1, 17):
a1 = np.array(a, "|S%d" % i)
a2 = np.array([s[:i]]*5)
assert_equal(a1, a2)
def test_fields_strides(self):
"gh-2355"
r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')
assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])
assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])
assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])
assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)
def test_alignment_update(self):
# Check that alignment flag is updated on stride setting
a = np.arange(10)
assert_(a.flags.aligned)
a.strides = 3
assert_(not a.flags.aligned)
def test_ticket_1770(self):
"Should not segfault on python 3k"
import numpy as np
try:
a = np.zeros((1,), dtype=[('f1', 'f')])
a['f1'] = 1
a['f2'] = 1
except ValueError:
pass
except Exception:
raise AssertionError
def test_ticket_1608(self):
"x.flat shouldn't modify data"
x = np.array([[1, 2], [3, 4]]).T
np.array(x.flat)
assert_equal(x, [[1, 3], [2, 4]])
def test_pickle_string_overwrite(self):
import re
data = np.array([1], dtype='b')
blob = pickle.dumps(data, protocol=1)
data = pickle.loads(blob)
# Check that loads does not clobber interned strings
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
data[0] = 0xbb
s = re.sub("a(.)", "\x01\\1", "a_")
assert_equal(s[0], "\x01")
def test_pickle_bytes_overwrite(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
data = np.array([1], dtype='b')
data = pickle.loads(pickle.dumps(data, protocol=proto))
data[0] = 0xdd
bytestring = "\x01 ".encode('ascii')
assert_equal(bytestring[0:1], '\x01'.encode('ascii'))
def test_pickle_py2_array_latin1_hack(self):
# Check that unpickling hacks in Py3 that support
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(numpy.array([129], dtype='b'))
data = (b"cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\n"
b"tp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'i1'\np8\n"
b"I0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nNNNI-1\nI-1\nI0\ntp12\nbI00\nS'\\x81'\n"
b"p13\ntp14\nb.")
# This should work:
result = pickle.loads(data, encoding='latin1')
assert_array_equal(result, np.array([129], dtype='b'))
# Should not segfault:
assert_raises(Exception, pickle.loads, data, encoding='koi8-r')
def test_pickle_py2_scalar_latin1_hack(self):
# Check that scalar unpickling hack in Py3 that supports
# encoding='latin1' work correctly.
# Python2 output for pickle.dumps(...)
datas = [
# (original, python2_pickle, koi8r_validity)
(np.unicode_('\u6bd2'),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n"
b"(S'U1'\np2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI4\nI4\nI0\n"
b"tp6\nbS'\\xd2k\\x00\\x00'\np7\ntp8\nRp9\n."),
'invalid'),
(np.float64(9e123),
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'f8'\n"
b"p2\nI0\nI1\ntp3\nRp4\n(I3\nS'<'\np5\nNNNI-1\nI-1\nI0\ntp6\n"
b"bS'O\\x81\\xb7Z\\xaa:\\xabY'\np7\ntp8\nRp9\n."),
'invalid'),
(np.bytes_(b'\x9c'), # different 8-bit code point in KOI8-R vs latin1
(b"cnumpy.core.multiarray\nscalar\np0\n(cnumpy\ndtype\np1\n(S'S1'\np2\n"
b"I0\nI1\ntp3\nRp4\n(I3\nS'|'\np5\nNNNI1\nI1\nI0\ntp6\nbS'\\x9c'\np7\n"
b"tp8\nRp9\n."),
'different'),
]
for original, data, koi8r_validity in datas:
result = pickle.loads(data, encoding='latin1')
assert_equal(result, original)
# Decoding under non-latin1 encoding (e.g.) KOI8-R can
# produce bad results, but should not segfault.
if koi8r_validity == 'different':
# Unicode code points happen to lie within latin1,
# but are different in koi8-r, resulting to silent
# bogus results
result = pickle.loads(data, encoding='koi8-r')
assert_(result != original)
elif koi8r_validity == 'invalid':
# Unicode code points outside latin1, so results
# to an encoding exception
assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')
else:
raise ValueError(koi8r_validity)
def test_structured_type_to_object(self):
a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')
a_obj = np.empty((2,), dtype=object)
a_obj[0] = (0, 1)
a_obj[1] = (3, 2)
# astype records -> object
assert_equal(a_rec.astype(object), a_obj)
# '=' records -> object
b = np.empty_like(a_obj)
b[...] = a_rec
assert_equal(b, a_obj)
# '=' object -> records
b = np.empty_like(a_rec)
b[...] = a_obj
assert_equal(b, a_rec)
def test_assign_obj_listoflists(self):
# Ticket # 1870
# The inner list should get assigned to the object elements
a = np.zeros(4, dtype=object)
b = a.copy()
a[0] = [1]
a[1] = [2]
a[2] = [3]
a[3] = [4]
b[...] = [[1], [2], [3], [4]]
assert_equal(a, b)
# The first dimension should get broadcast
a = np.zeros((2, 2), dtype=object)
a[...] = [[1, 2]]
assert_equal(a, [[1, 2], [1, 2]])
@pytest.mark.slow_pypy
def test_memoryleak(self):
# Ticket #1917 - ensure that array data doesn't leak
for i in range(1000):
# 100MB times 1000 would give 100GB of memory usage if it leaks
a = np.empty((100000000,), dtype='i1')
del a
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_ufunc_reduce_memoryleak(self):
a = np.arange(6)
acnt = sys.getrefcount(a)
np.add.reduce(a)
assert_equal(sys.getrefcount(a), acnt)
def test_search_sorted_invalid_arguments(self):
# Ticket #2021, should not segfault.
x = np.arange(0, 4, dtype='datetime64[D]')
assert_raises(TypeError, x.searchsorted, 1)
def test_string_truncation(self):
# Ticket #1990 - Data can be truncated in creation of an array from a
# mixed sequence of numeric values and strings (gh-2583)
for val in [True, 1234, 123.4, complex(1, 234)]:
for tostr, dtype in [(asunicode, "U"), (asbytes, "S")]:
b = np.array([val, tostr('xx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
# test also with longer strings
b = np.array([val, tostr('xxxxxxxxxx')], dtype=dtype)
assert_equal(tostr(b[0]), tostr(val))
b = np.array([tostr('xxxxxxxxxx'), val], dtype=dtype)
assert_equal(tostr(b[1]), tostr(val))
def test_string_truncation_ucs2(self):
# Ticket #2081. Python compiled with two byte unicode
# can lead to truncation if itemsize is not properly
# adjusted for NumPy's four byte unicode.
a = np.array(['abcd'])
assert_equal(a.dtype.itemsize, 16)
def test_unique_stable(self):
# Ticket #2063 must always choose stable sort for argsort to
# get consistent results
v = np.array(([0]*5 + [1]*6 + [2]*6)*4)
res = np.unique(v, return_index=True)
tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))
assert_equal(res, tgt)
def test_unicode_alloc_dealloc_match(self):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
# Ticket #1588
a = np.zeros((2,), dtype='>i2').clip(min=0)
x = a + a
# This used to segfault:
y = str(x)
# Check the final string:
assert_(y == "[0 0]")
def test_searchsorted_wrong_dtype(self):
# Ticket #2189, it used to segfault, so we check that it raises the
# proper exception.
a = np.array([('a', 1)], dtype='S1, int')
assert_raises(TypeError, np.searchsorted, a, 1.2)
# Ticket #2066, similar problem:
dtype = np.format_parser(['i4', 'i4'], [], [])
a = np.recarray((2,), dtype)
a[...] = [(1, 2), (3, 4)]
assert_raises(TypeError, np.searchsorted, a, 1)
def test_complex64_alignment(self):
# Issue gh-2668 (trac 2076), segfault on sparc due to misalignment
dtt = np.complex64
arr = np.arange(10, dtype=dtt)
# 2D array
arr2 = np.reshape(arr, (2, 5))
# Fortran write followed by (C or F) read caused bus error
data_str = arr2.tobytes('F')
data_back = np.ndarray(arr2.shape,
arr2.dtype,
buffer=data_str,
order='F')
assert_array_equal(arr2, data_back)
def test_structured_count_nonzero(self):
arr = np.array([0, 1]).astype('i4, (2)i4')[:1]
count = np.count_nonzero(arr)
assert_equal(count, 0)
def test_copymodule_preserves_f_contiguity(self):
a = np.empty((2, 2), order='F')
b = copy.copy(a)
c = copy.deepcopy(a)
assert_(b.flags.fortran)
assert_(b.flags.f_contiguous)
assert_(c.flags.fortran)
assert_(c.flags.f_contiguous)
def test_fortran_order_buffer(self):
import numpy as np
a = np.array([['Hello', 'Foob']], dtype='U5', order='F')
arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)
arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],
[u'F', u'o', u'o', u'b', u'']]])
assert_array_equal(arr, arr2)
def test_assign_from_sequence_error(self):
# Ticket #4024.
arr = np.array([1, 2, 3])
assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])
arr.__setitem__(slice(None), [9])
assert_equal(arr, [9, 9, 9])
def test_format_on_flex_array_element(self):
# Ticket #4369.
dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])
arr = np.array([('2000-01-01', 1)], dt)
formatted = '{0}'.format(arr[0])
assert_equal(formatted, str(arr[0]))
def test_deepcopy_on_0d_array(self):
# Ticket #3311.
arr = np.array(3)
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
assert_(arr is not arr_cp)
assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
a = {'a': 1}
b = {'b': 2}
arr = np.array([[a, b], [a, b]], order='F')
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
# Deepcopy should succeed
a = np.array([], dtype=object)
b = copy.deepcopy(a)
assert_(a.shape == b.shape)
def test_bool_subscript_crash(self):
# gh-4494
c = np.rec.array([(1, 2, 3), (4, 5, 6)])
masked = c[np.array([True, False])]
base = masked.base
del masked, c
base.dtype
def test_richcompare_crash(self):
# gh-4613
import operator as op
# dummy class where __array__ throws exception
class Foo:
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
raise Exception()
rhs = Foo()
lhs = np.array(1)
for f in [op.lt, op.le, op.gt, op.ge]:
assert_raises(TypeError, f, lhs, rhs)
assert_(not op.eq(lhs, rhs))
assert_(op.ne(lhs, rhs))
def test_richcompare_scalar_and_subclass(self):
# gh-4709
class Foo(np.ndarray):
def __eq__(self, other):
return "OK"
x = np.array([1, 2, 3]).view(Foo)
assert_equal(10 == x, "OK")
assert_equal(np.int32(10) == x, "OK")
assert_equal(np.array([10]) == x, "OK")
def test_pickle_empty_string(self):
# gh-3926
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
test_string = np.string_('')
assert_equal(pickle.loads(
pickle.dumps(test_string, protocol=proto)), test_string)
def test_frompyfunc_many_args(self):
# gh-5672
def passer(*args):
pass
assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
def test_repeat_broadcasting(self):
# gh-5743
a = np.arange(60).reshape(3, 4, 5)
for axis in chain(range(-a.ndim, a.ndim), [None]):
assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
def test_frompyfunc_nout_0(self):
# gh-2014
def f(x):
x[0], x[-1] = x[-1], x[0]
uf = np.frompyfunc(f, 1, 0)
a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)
assert_equal(uf(a), ())
expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)
assert_array_equal(a, expected)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_leak_in_structured_dtype_comparison(self):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
for i in range(100):
a == a
assert_(sys.getrefcount(a) < 10)
# The case in the bug report.
before = sys.getrefcount(a)
u, v = a[0], a[1]
u == v
del u, v
gc.collect()
after = sys.getrefcount(a)
assert_equal(before, after)
def test_empty_percentile(self):
# gh-6530 / gh-6553
assert_array_equal(np.percentile(np.arange(10), []), np.array([]))
def test_void_compare_segfault(self):
# gh-6922. The following should not segfault
a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])
a.sort()
def test_reshape_size_overflow(self):
    # gh-7455: reshape must detect that the product of the new shape
    # overflows intp, instead of silently wrapping and "matching" the
    # old size.
    a = np.ones(20)[::2]
    if np.dtype(np.intp).itemsize == 8:
        # 64 bit. The following are the prime factors of 2**63 + 5,
        # plus a leading 2, so when multiplied together as int64,
        # the result overflows to a total size of 10.
        new_shape = (2, 13, 419, 691, 823, 2977518503)
    else:
        # 32 bit. The following are the prime factors of 2**31 + 5,
        # plus a leading 2, so when multiplied together as int32,
        # the result overflows to a total size of 10.
        new_shape = (2, 7, 7, 43826197)
    assert_raises(ValueError, a.reshape, new_shape)
def test_invalid_structured_dtypes(self):
# gh-2865
# mapping python objects to other dtypes
assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))
assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))
assert_raises(ValueError, np.dtype,
('i8', [('name', [('name', 'O')])]))
assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))
assert_raises(ValueError, np.dtype, ('i8', 'O'))
# wrong number/type of tuple elements in dict
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 0, 'title', 'oops')}))
assert_raises(ValueError, np.dtype,
('i', {'name': ('i', 'wrongtype', 'title')}))
# disallowed as of 1.13
assert_raises(ValueError, np.dtype,
([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))
# allowed as a special case due to existing use, see gh-2798
a = np.ones(1, dtype=('O', [('name', 'O')]))
assert_equal(a[0], 1)
# In particular, the above union dtype (and union dtypes in general)
# should mainly behave like the main (object) dtype:
assert a[0] is a.item()
assert type(a[0]) is int
def test_correct_hash_dict(self):
# gh-8887 - __hash__ would be None despite tp_hash being set
all_types = set(np.sctypeDict.values()) - {np.void}
for t in all_types:
val = t()
try:
hash(val)
except TypeError as e:
assert_equal(t.__hash__, None)
else:
assert_(t.__hash__ != None)
def test_scalar_copy(self):
scalar_types = set(np.sctypeDict.values())
values = {
np.void: b"a",
np.bytes_: b"a",
np.unicode_: "a",
np.datetime64: "2017-08-25",
}
for sctype in scalar_types:
item = sctype(values.get(sctype, 1))
item2 = copy.copy(item)
assert_equal(item, item2)
def test_void_item_memview(self):
va = np.zeros(10, 'V4')
x = va[:1].item()
va[0] = b'\xff\xff\xff\xff'
del va
assert_equal(x, b'\x00\x00\x00\x00')
def test_void_getitem(self):
# Test fix for gh-11668.
assert_(np.array([b'a'], 'V1').astype('O') == b'a')
assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')
assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')
assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')
def test_structarray_title(self):
# The following used to segfault on pypy, due to NPY_TITLE_KEY
# not working properly and resulting to double-decref of the
# structured array field items:
# See: https://bitbucket.org/pypy/pypy/issues/2789
for j in range(5):
structure = np.array([1], dtype=[(('x', 'X'), np.object_)])
structure[0]['x'] = np.array([2])
gc.collect()
def test_dtype_scalar_squeeze(self):
# gh-11384
values = {
'S': b"a",
'M': "2018-06-20",
}
for ch in np.typecodes['All']:
if ch in 'O':
continue
sctype = np.dtype(ch).type
scvalue = sctype(values.get(ch, 3))
for axis in [None, ()]:
squeezed = scvalue.squeeze(axis=axis)
assert_equal(squeezed, scvalue)
assert_equal(type(squeezed), type(scvalue))
def test_field_access_by_title(self):
# gh-11507
s = 'Some long field name'
if HAS_REFCOUNT:
base = sys.getrefcount(s)
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
@pytest.mark.parametrize('val', [
# arrays and scalars
np.ones((10, 10), dtype='int32'),
np.uint64(10),
])
@pytest.mark.parametrize('protocol',
range(2, pickle.HIGHEST_PROTOCOL + 1)
)
def test_pickle_module(self, protocol, val):
# gh-12837
s = pickle.dumps(val, protocol)
assert b'_multiarray_umath' not in s
if protocol == 5 and len(val.shape) > 0:
# unpickling ndarray goes through _frombuffer for protocol 5
assert b'numpy.core.numeric' in s
else:
assert b'numpy.core.multiarray' in s
def test_object_casting_errors(self):
# gh-11993 update to ValueError (see gh-16909), since strings can in
# principle be converted to complex, but this string cannot.
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(ValueError, arr.astype, 'c8')
def test_eff1d_casting(self):
    # gh-12711
    # NOTE(review): "eff1d" in the test name looks like a typo for
    # "ediff1d"; kept as-is to preserve the test id.
    x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
    res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
    # The use of safe casting means, that 1<<20 is cast unsafely, an
    # error may be better, but currently there is no mechanism for it.
    res = np.ediff1d(x, to_begin=(1 << 20), to_end=(1 << 20))
    assert_equal(res, [0, 1, 2, 3, -7, 0])
def test_pickle_datetime64_array(self):
# gh-12745 (would fail with pickle5 installed)
d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
arr = np.array([d])
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
dumped = pickle.dumps(arr, protocol=proto)
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
class T:
__array_interface__ = {}
with assert_raises(ValueError):
np.array([T()])
def test_2d__array__shape(self):
class T:
def __array__(self):
return np.ndarray(shape=(0,0))
# Make sure __array__ is used instead of Sequence methods.
def __iter__(self):
return iter([])
def __getitem__(self, idx):
raise AssertionError("__getitem__ was called")
def __len__(self):
return 0
t = T()
# gh-13659, would raise in broadcasting [x=t for x in result]
arr = np.array([t])
assert arr.shape == (1, 0, 0)
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
reason='overflows on windows, fixed in bpo-16865')
def test_to_ctypes(self):
#gh-14214
arr = np.zeros((2 ** 31 + 1,), 'b')
assert arr.size * arr.itemsize > 2 ** 31
c_arr = np.ctypeslib.as_ctypes(arr)
assert_equal(c_arr._length_, arr.size)
def test_complex_conversion_error(self):
# gh-17068
with pytest.raises(TypeError, match=r"Unable to convert dtype.*"):
complex(np.array("now", np.datetime64))
def test__array_interface__descr(self):
# gh-17068
dt = np.dtype(dict(names=['a', 'b'],
offsets=[0, 0],
formats=[np.int64, np.int64]))
descr = np.array((1, 1), dtype=dt).__array_interface__['descr']
assert descr == [('', '|V8')] # instead of [(b'', '|V8')]
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@requires_memory(free_bytes=9e9)
def test_dot_big_stride(self):
# gh-17111
# blas stride = stride//itemsize > int32 max
int32_max = np.iinfo(np.int32).max
n = int32_max + 3
a = np.empty([n], dtype=np.float32)
b = a[::n-1]
b[...] = 1
assert b.strides[0] > int32_max * b.dtype.itemsize
assert np.dot(b, b) == 2.0
def test_frompyfunc_name(self):
# name conversion was failing for python 3 strings
# resulting in the default '?' name. Also test utf-8
# encoding using non-ascii name.
def cassé(x):
return x
f = np.frompyfunc(cassé, 1, 1)
assert str(f) == "<ufunc 'cassé (vectorized)'>"
@pytest.mark.parametrize("operation", [
'add', 'subtract', 'multiply', 'floor_divide',
'conjugate', 'fmod', 'square', 'reciprocal',
'power', 'absolute', 'negative', 'positive',
'greater', 'greater_equal', 'less',
'less_equal', 'equal', 'not_equal', 'logical_and',
'logical_not', 'logical_or', 'bitwise_and', 'bitwise_or',
'bitwise_xor', 'invert', 'left_shift', 'right_shift',
'gcd', 'lcm'
]
)
@pytest.mark.parametrize("order", [
('b->', 'B->'),
('h->', 'H->'),
('i->', 'I->'),
('l->', 'L->'),
('q->', 'Q->'),
]
)
def test_ufunc_order(self, operation, order):
# gh-18075
# Ensure signed types before unsigned
def get_idx(string, str_lst):
for i, s in enumerate(str_lst):
if string in s:
return i
raise ValueError(f"{string} not in list")
types = getattr(np, operation).types
assert get_idx(order[0], types) < get_idx(order[1], types), (
f"Unexpected types order of ufunc in {operation}"
f"for {order}. Possible fix: Use signed before unsigned"
"in generate_umath.py")
|
import json
import os
import platform
import sys
import unittest
from telemetry.internal.util import find_dependencies
from telemetry.internal.util import path
_TELEMETRY_DEPS_PATH = os.path.join(
path.GetTelemetryDir(), 'telemetry', 'TELEMETRY_DEPS')
def _GetCurrentTelemetryDependencies():
  """Returns the dependency paths telemetry currently pulls in, as computed
  by the find_dependencies command run with default (empty) arguments."""
  parser = find_dependencies.FindDependenciesCommand.CreateParser()
  find_dependencies.FindDependenciesCommand.AddCommandLineArgs(parser, None)
  options, args = parser.parse_args([''])
  options.positional_args = args
  return find_dependencies.FindDependencies([], options=options)
def _GetRestrictedTelemetryDeps():
  """Loads TELEMETRY_DEPS (JSON) and normalizes its path entries to absolute
  real paths rooted at the chromium/src directory."""
  with open(_TELEMETRY_DEPS_PATH, 'r') as f:
    telemetry_deps = json.load(f)
  # Normalize paths in telemetry_deps since TELEMETRY_DEPS file only contain
  # the relative path in chromium/src/.
  def NormalizePath(p):
    # Entries use POSIX separators; convert for the host OS before joining.
    p = p.replace('/', os.path.sep)
    return os.path.realpath(os.path.join(path.GetChromiumSrcDir(), p))
  telemetry_deps['file_deps'] = [
      NormalizePath(p) for p in telemetry_deps['file_deps']]
  telemetry_deps['directory_deps'] = [
      NormalizePath(p) for p in telemetry_deps['directory_deps']]
  return telemetry_deps
class TelemetryDependenciesTest(unittest.TestCase):
  def testNoNewTelemetryDependencies(self):
    # Every dependency that find_dependencies reports must either appear
    # explicitly in TELEMETRY_DEPS ('file_deps') or live under one of its
    # 'directory_deps' directories; anything else is an unapproved new dep.
    telemetry_deps = _GetRestrictedTelemetryDeps()
    current_dependencies = _GetCurrentTelemetryDependencies()
    extra_dep_paths = []
    for dep_path in current_dependencies:
      if not (dep_path in telemetry_deps['file_deps'] or
              any(path.IsSubpath(dep_path, d)
                  for d in telemetry_deps['directory_deps'])):
        extra_dep_paths.append(dep_path)
    # Temporarily ignore failure on Mac because test is failing on Mac 10.8 bot.
    # crbug.com/522335
    if extra_dep_paths:
      if platform.system() != 'Darwin':
        self.fail(
            'Your patch adds new dependencies to telemetry. Please contact '
            'aiolos@,dtu@, or nednguyen@ on how to proceed with this change. '
            'Extra dependencies:\n%s' % '\n'.join(extra_dep_paths))
      else:
        # Deliberately only logs (does not fail) on Mac; see crbug above.
        print ('Dependencies check failed on mac platform. Extra deps: %s\n'
               ' sys.path: %s' % (extra_dep_paths, sys.path))
|
import functools
import gzip
import os
import re
from difflib import SequenceMatcher
from django.conf import settings
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, ValidationError,
)
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils.html import format_html
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _, ungettext
@functools.lru_cache(maxsize=None)
def get_default_password_validators():
    """Build (once, then cache) validators from settings.AUTH_PASSWORD_VALIDATORS."""
    return get_password_validators(settings.AUTH_PASSWORD_VALIDATORS)
def get_password_validators(validator_config):
    """
    Instantiate the validator classes described by validator_config.

    Each entry is a dict with a dotted-path 'NAME' and an optional 'OPTIONS'
    dict that is passed as keyword arguments to the class.

    Raise ImproperlyConfigured if a NAME cannot be imported.
    """
    validators = []
    for validator in validator_config:
        try:
            klass = import_string(validator['NAME'])
        except ImportError as e:
            msg = "The module in NAME could not be imported: %s. Check your AUTH_PASSWORD_VALIDATORS setting."
            # Chain the original ImportError so the real cause stays visible
            # in the traceback instead of being buried as implicit context.
            raise ImproperlyConfigured(msg % validator['NAME']) from e
        validators.append(klass(**validator.get('OPTIONS', {})))
    return validators
def validate_password(password, user=None, password_validators=None):
    """
    Validate whether the password meets all validator requirements.

    If the password is valid, return ``None``.
    If the password is invalid, raise ValidationError with all error messages.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    errors = []
    for validator in password_validators:
        try:
            validator.validate(password, user)
        except ValidationError as error:
            # Collect every failure so the caller sees all messages at once.
            errors.append(error)
    if errors:
        raise ValidationError(errors)
def password_changed(password, user=None, password_validators=None):
    """
    Inform all validators that have implemented a password_changed() method
    that the password has been changed.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    for validator in password_validators:
        handler = getattr(validator, 'password_changed', None)
        # Validators without the hook are simply skipped.
        if handler is not None:
            handler(password, user)
def password_validators_help_texts(password_validators=None):
    """
    Return a list of all help texts of all configured validators.
    """
    if password_validators is None:
        password_validators = get_default_password_validators()
    return [validator.get_help_text() for validator in password_validators]
def _password_validators_help_text_html(password_validators=None):
    """
    Return an HTML string with all help texts of all configured validators
    in an <ul>.
    """
    items = [
        format_html('<li>{}</li>', text)
        for text in password_validators_help_texts(password_validators)
    ]
    if not items:
        return ''
    return '<ul>%s</ul>' % ''.join(items)
password_validators_help_text_html = lazy(_password_validators_help_text_html, str)
class MinimumLengthValidator:
    """
    Validate whether the password is of a minimum length.
    """
    def __init__(self, min_length=8):
        self.min_length = min_length

    def validate(self, password, user=None):
        # Accept immediately when long enough; only the failure path builds
        # the translated error.
        if len(password) >= self.min_length:
            return
        raise ValidationError(
            ungettext(
                "This password is too short. It must contain at least %(min_length)d character.",
                "This password is too short. It must contain at least %(min_length)d characters.",
                self.min_length
            ),
            code='password_too_short',
            params={'min_length': self.min_length},
        )

    def get_help_text(self):
        return ungettext(
            "Your password must contain at least %(min_length)d character.",
            "Your password must contain at least %(min_length)d characters.",
            self.min_length
        ) % {'min_length': self.min_length}
class UserAttributeSimilarityValidator:
    """
    Validate whether the password is sufficiently different from the user's
    attributes.

    If no specific attributes are provided, look at a sensible list of
    defaults. Attributes that don't exist are ignored. Comparison is made to
    not only the full attribute value, but also its components, so that, for
    example, a password is validated against either part of an email address,
    as well as the full address.
    """
    DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')

    def __init__(self, user_attributes=DEFAULT_USER_ATTRIBUTES, max_similarity=0.7):
        self.user_attributes = user_attributes
        self.max_similarity = max_similarity

    def validate(self, password, user=None):
        if not user:
            return
        password_lower = password.lower()
        for attribute_name in self.user_attributes:
            value = getattr(user, attribute_name, None)
            # Missing or non-string attributes are ignored.
            if not value or not isinstance(value, str):
                continue
            # Compare against the full value and each non-word-separated part.
            parts = re.split(r'\W+', value) + [value]
            for part in parts:
                matcher = SequenceMatcher(a=password_lower, b=part.lower())
                if matcher.quick_ratio() < self.max_similarity:
                    continue
                try:
                    verbose_name = force_text(
                        user._meta.get_field(attribute_name).verbose_name)
                except FieldDoesNotExist:
                    # Non-model users: fall back to the raw attribute name.
                    verbose_name = attribute_name
                raise ValidationError(
                    _("The password is too similar to the %(verbose_name)s."),
                    code='password_too_similar',
                    params={'verbose_name': verbose_name},
                )

    def get_help_text(self):
        return _("Your password can't be too similar to your other personal information.")
class CommonPasswordValidator:
    """
    Validate whether the password is a common password.

    The password is rejected if it occurs in a provided list, which may be gzipped.
    The list Django ships with contains 1000 common passwords, created by Mark Burnett:
    https://xato.net/passwords/more-top-worst-passwords/
    """
    DEFAULT_PASSWORD_LIST_PATH = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'common-passwords.txt.gz'
    )

    def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
        try:
            with gzip.open(password_list_path) as f:
                lines = f.read().decode('utf-8').splitlines()
        except IOError:
            # Not readable as gzip: fall back to a plain-text list.
            with open(password_list_path) as f:
                lines = f.readlines()
        self.passwords = {line.strip() for line in lines}

    def validate(self, password, user=None):
        if password.lower().strip() in self.passwords:
            raise ValidationError(
                _("This password is too common."),
                code='password_too_common',
            )

    def get_help_text(self):
        return _("Your password can't be a commonly used password.")
class NumericPasswordValidator:
    """
    Validate whether the password is alphanumeric.
    """

    def validate(self, password, user=None):
        # Only passwords consisting entirely of digits are rejected.
        if not password.isdigit():
            return
        raise ValidationError(
            _("This password is entirely numeric."),
            code='password_entirely_numeric',
        )

    def get_help_text(self):
        return _("Your password can't be entirely numeric.")
|
"""
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
from django.utils import importlib
from django.utils import six
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
    """
    A lazy proxy for either global Django settings or a custom settings object.
    The user can manually configure settings prior to using them. Otherwise,
    Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
    """
    def _setup(self, name):
        """
        Load the settings module pointed to by the environment variable. This
        is used the first time we need any settings at all, if the user has not
        previously configured the settings manually.
        """
        try:
            settings_module = os.environ[ENVIRONMENT_VARIABLE]
            if not settings_module: # If it's set but is an empty string.
                raise KeyError
        except KeyError:
            # `name` is the setting whose access triggered setup; it is
            # included purely to make the error message more helpful.
            raise ImproperlyConfigured(
                "Requested setting %s, but settings are not configured. "
                "You must either define the environment variable %s "
                "or call settings.configure() before accessing settings."
                % (name, ENVIRONMENT_VARIABLE))
        self._wrapped = Settings(settings_module)
        self._configure_logging()
    def __getattr__(self, name):
        # Called only for attributes not found on the proxy itself: the first
        # access triggers lazy setup, afterwards everything is delegated to
        # the wrapped settings object.
        if self._wrapped is empty:
            self._setup(name)
        return getattr(self._wrapped, name)
    def _configure_logging(self):
        """
        Setup logging from LOGGING_CONFIG and LOGGING settings.
        """
        if self.LOGGING_CONFIG:
            from django.utils.log import DEFAULT_LOGGING
            # First find the logging configuration function ...
            logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
            logging_config_module = importlib.import_module(logging_config_path)
            logging_config_func = getattr(logging_config_module, logging_config_func_name)
            # Apply Django's default config before any user-supplied LOGGING.
            logging_config_func(DEFAULT_LOGGING)
            if self.LOGGING:
                # Backwards-compatibility shim for #16288 fix
                compat_patch_logging_config(self.LOGGING)
                # ... then invoke it with the logging settings
                logging_config_func(self.LOGGING)
    def configure(self, default_settings=global_settings, **options):
        """
        Called to manually configure the settings. The 'default_settings'
        parameter sets where to retrieve any unspecified values from (its
        argument must support attribute access (__getattr__)).
        """
        if self._wrapped is not empty:
            raise RuntimeError('Settings already configured.')
        holder = UserSettingsHolder(default_settings)
        for name, value in options.items():
            setattr(holder, name, value)
        self._wrapped = holder
    @property
    def configured(self):
        """
        Returns True if the settings have already been configured.
        """
        return self._wrapped is not empty
class BaseSettings(object):
    """
    Common logic for settings whether set by a module or by the user.
    """
    def __setattr__(self, name, value):
        """Validate a handful of special settings before storing them."""
        if name in ("MEDIA_URL", "STATIC_URL"):
            # These URLs are always joined with a path fragment, so a
            # trailing slash is mandatory when they are non-empty.
            if value and not value.endswith('/'):
                raise ImproperlyConfigured("If set, %s must end with a slash" % name)
        elif name == "ADMIN_MEDIA_PREFIX":
            warnings.warn("The ADMIN_MEDIA_PREFIX setting has been removed; "
                          "use STATIC_URL instead.", DeprecationWarning)
        elif name == "ALLOWED_INCLUDE_ROOTS" and isinstance(value, six.string_types):
            raise ValueError("The ALLOWED_INCLUDE_ROOTS setting must be set "
                             "to a tuple, not a string.")
        object.__setattr__(self, name, value)
class Settings(BaseSettings):
    """Settings read from a named settings module, layered over global_settings."""
    def __init__(self, settings_module):
        # update this dict from global settings (but only for ALL_CAPS settings)
        for setting in dir(global_settings):
            if setting == setting.upper():
                setattr(self, setting, getattr(global_settings, setting))
        # store the settings module in case someone later cares
        self.SETTINGS_MODULE = settings_module
        try:
            mod = importlib.import_module(self.SETTINGS_MODULE)
        except ImportError as e:
            raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
        # Settings that should be converted into tuples if they're mistakenly entered
        # as strings.
        tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
        for setting in dir(mod):
            if setting == setting.upper():
                setting_value = getattr(mod, setting)
                if setting in tuple_settings and \
                        isinstance(setting_value, six.string_types):
                    warnings.warn("The %s setting must be a tuple. Please fix your "
                                  "settings, as auto-correction is now deprecated." % setting,
                                  PendingDeprecationWarning)
                    setting_value = (setting_value,) # In case the user forgot the comma.
                setattr(self, setting, setting_value)
        if not self.SECRET_KEY:
            raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
        if hasattr(time, 'tzset') and self.TIME_ZONE:
            # When we can, attempt to validate the timezone. If we can't find
            # this file, no check happens and it's harmless.
            zoneinfo_root = '/usr/share/zoneinfo'
            if (os.path.exists(zoneinfo_root) and not
                    os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
                raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
            # Move the time zone info into os.environ. See ticket #2315 for why
            # we don't do this unconditionally (breaks Windows).
            os.environ['TZ'] = self.TIME_ZONE
            time.tzset()
class UserSettingsHolder(BaseSettings):
    """
    Holder for user configured settings.
    """
    # SETTINGS_MODULE doesn't make much sense in the manually configured
    # (standalone) case.
    SETTINGS_MODULE = None
    def __init__(self, default_settings):
        """
        Requests for configuration variables not in this class are satisfied
        from the module specified in default_settings (if possible).
        """
        # Write through __dict__ directly so our own __setattr__ (which
        # touches _deleted) isn't invoked before _deleted exists.
        self.__dict__['_deleted'] = set()
        self.default_settings = default_settings
    def __getattr__(self, name):
        # Names that were explicitly deleted must not fall back to defaults.
        if name in self._deleted:
            raise AttributeError
        return getattr(self.default_settings, name)
    def __setattr__(self, name, value):
        # Re-setting a name cancels any earlier deletion of it.
        self._deleted.discard(name)
        return super(UserSettingsHolder, self).__setattr__(name, value)
    def __delattr__(self, name):
        # Record the deletion so __getattr__ stops delegating for this name.
        self._deleted.add(name)
        return super(UserSettingsHolder, self).__delattr__(name)
    def __dir__(self):
        # Combine locally set names with everything the defaults provide.
        return list(self.__dict__) + dir(self.default_settings)
# Module-level singleton through which all Django code accesses settings.
settings = LazySettings()
def compat_patch_logging_config(logging_config):
    """
    Backwards-compatibility shim for #16288 fix. Takes initial value of
    ``LOGGING`` setting and patches it in-place (issuing deprecation warning)
    if "mail_admins" logging handler is configured but has no filters.
    """
    # Nothing to do unless a "mail_admins" handler exists without "filters".
    mail_admins = logging_config.get("handlers", {}).get(
        "mail_admins", {"filters": []})
    if "filters" in mail_admins:
        return
    warnings.warn(
        "You have no filters defined on the 'mail_admins' logging "
        "handler: adding implicit debug-false-only filter. "
        "See http://docs.djangoproject.com/en/dev/releases/1.4/"
        "#request-exceptions-are-now-always-logged",
        DeprecationWarning)
    # Pick a filter name that doesn't collide with an existing one.
    filters = logging_config.setdefault("filters", {})
    filter_name = "require_debug_false"
    while filter_name in filters:
        filter_name += "_"
    filters[filter_name] = {
        "()": "django.utils.log.RequireDebugFalse",
    }
    logging_config["handlers"]["mail_admins"]["filters"] = [filter_name]
|
from __future__ import division
from functools import partial
from pathlib import Path
from menpo.base import MenpoMissingDependencyError
try:
import dlib
except ImportError:
raise MenpoMissingDependencyError('dlib')
from menpodetect.detect import detect
from menpodetect.compatibility import STRING_TYPES
from .conversion import rect_to_pointgraph
class _dlib_detect(object):
    r"""
    A utility callable that allows the caching of a dlib detector.
    This callable is important for presenting the correct parameters to the
    user. It also marshalls the return type of the detector back to
    `menpo.shape.PointDirectedGraph`.

    Parameters
    ----------
    model : `Path` or `str` or `dlib.simple_object_detector`
        Either a path to a `dlib.simple_object_detector` or a
        `dlib.fhog_object_detector` or the detector itself.

    Raises
    ------
    ValueError
        If a path was provided and it does not exist.
    """
    def __init__(self, model):
        if isinstance(model, STRING_TYPES) or isinstance(model, Path):
            m_path = Path(model)
            # m_path is already a Path; the previous code re-wrapped it in
            # Path() again for no effect.
            if not m_path.exists():
                raise ValueError('Model {} does not exist.'.format(m_path))
            # There are two different kinds of object detector, the
            # simple_object_detector and the fhog_object_detector, but we
            # can't tell which is which from the file name. Therefore, try one
            # and then the other. Unfortunately, it throws a runtime error,
            # which we have to catch.
            try:
                model = dlib.simple_object_detector(str(m_path))
            except RuntimeError:
                model = dlib.fhog_object_detector(str(m_path))
        self._dlib_model = model

    def __call__(self, uint8_image, n_upscales=0):
        r"""
        Perform a detection using the cached dlib detector.

        Parameters
        ----------
        uint8_image : `ndarray`
            An RGB (3 Channels) or Greyscale (1 Channel) numpy array of uint8
        n_upscales : `int`, optional
            Number of times to upscale the image when performing the detection,
            may increase the chances of detecting smaller objects.

        Returns
        -------
        bounding_boxes : `list` of `menpo.shape.PointDirectedGraph`
            The detected objects.
        """
        # Dlib doesn't handle the dead last axis
        if uint8_image.shape[-1] == 1:
            uint8_image = uint8_image[..., 0]
        rects = self._dlib_model(uint8_image, n_upscales)
        return [rect_to_pointgraph(r) for r in rects]
class DlibDetector(object):
    r"""
    A generic dlib detector.
    Wraps a dlib object detector inside the menpodetect framework and provides
    a clean interface to expose the dlib arguments.
    """
    def __init__(self, model):
        # Defer model loading/validation to the cached _dlib_detect callable.
        self._detector = _dlib_detect(model)
    def __call__(self, image, greyscale=False, image_diagonal=None,
                 group_prefix='dlib', n_upscales=0):
        r"""
        Perform a detection using the cached dlib detector.
        The detections will also be attached to the image as landmarks.

        Parameters
        ----------
        image : `menpo.image.Image`
            A Menpo image to detect. The bounding boxes of the detected objects
            will be attached to this image.
        greyscale : `bool`, optional
            Convert the image to greyscale or not.
        image_diagonal : `int`, optional
            The total size of the diagonal of the image that should be used for
            detection. This is useful for scaling images up and down for
            detection.
        group_prefix : `str`, optional
            The prefix string to be appended to each landmark group that is
            stored on the image. Each detection will be stored as group_prefix_#
            where # is a count starting from 0.
        n_upscales : `int`, optional
            Number of times to upscale the image when performing the detection,
            may increase the chances of detecting smaller objects.

        Returns
        -------
        bounding_boxes : `list` of `menpo.shape.PointDirectedGraph`
            The detected objects.
        """
        # Bind n_upscales now; `detect` drives the callable per image.
        detect_partial = partial(self._detector, n_upscales=n_upscales)
        return detect(detect_partial, image, greyscale=greyscale,
                      image_diagonal=image_diagonal, group_prefix=group_prefix)
def load_dlib_frontal_face_detector():
    r"""
    Load the dlib frontal face detector.

    Returns
    -------
    detector : `DlibDetector`
        The frontal face detector.
    """
    frontal_face_model = dlib.get_frontal_face_detector()
    return DlibDetector(frontal_face_model)
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
class CaseInsensitiveModelBackend(ModelBackend):
    """
    By default ModelBackend does case _sensitive_ username
    authentication, which isn't what is generally expected. This
    backend supports case insensitive username authentication.
    """

    def authenticate(self, request, username=None, password=None, autologin=None):
        """
        Custom authenticate with bypass for auto-logins

        Args:
            request (Request): Request object.
            username (str, optional): Name of user to authenticate.
            password (str, optional): Password of user
            autologin (Account, optional): If given, assume this is
                an already authenticated account and bypass authentication.
        """
        if autologin:
            # Note: Setting .backend on account is critical in order to
            # be allowed to call django.auth.login(account) later. This
            # is necessary for the auto-login feature of the webclient,
            # but it's important to make sure Django doesn't change this
            # requirement or the name of the property down the line. /Griatch
            autologin.backend = "evennia.web.utils.backends.CaseInsensitiveModelBackend"
            return autologin
        # In this case .backend will be assigned automatically
        # somewhere along the way.
        Account = get_user_model()
        try:
            account = Account.objects.get(username__iexact=username)
        except Account.DoesNotExist:
            return None
        return account if account.check_password(password) else None
|
from pylatex.base_classes import LatexObject
from nose.tools import raises
class BadObject(LatexObject):
    # Subclass that implements nothing; the tests below expect it to be
    # just as uninstantiable as the base class.
    pass
@raises(TypeError)
def test_latex_object():
    # Instantiating the base class directly is expected to raise TypeError.
    LatexObject()
@raises(TypeError)
def test_bad_object():
    # A bare subclass is expected to raise TypeError on instantiation too.
    BadObject()
|
from filer.models.foldermodels import *
from filer.models.filemodels import *
from filer.models.imagemodels import *
from filer.models.clipboardmodels import *
from filer.models.virtualitems import *
|
def keyvault_client_factory(**_):
    """Build a KeyVault management client; any keyword args are accepted and ignored."""
    # Imports are local so the azure SDK is only loaded when actually needed.
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from azure.mgmt.keyvault import KeyVaultManagementClient
    return get_mgmt_service_client(KeyVaultManagementClient)
def keyvault_client_vaults_factory(kwargs):
    """Return the `vaults` operations group of the KeyVault management client."""
    return keyvault_client_factory(**kwargs).vaults
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBaseInfiniiVision import *
class agilent6000(agilentBaseInfiniiVision):
    "Agilent InfiniiVision 6000 series IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # setdefault: keep any _instrument_id already set by a subclass.
        self.__dict__.setdefault('_instrument_id', '')
        super(agilent6000, self).__init__(*args, **kwargs)
        # Channel topology for this family: 4 analog + 16 digital channels.
        self._analog_channel_name = list()
        self._analog_channel_count = 4
        self._digital_channel_name = list()
        self._digital_channel_count = 16
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        # Maximum bandwidth in Hz.
        self._bandwidth = 1e9
        self._identity_description = "Agilent InfiniiVision 6000 series IVI oscilloscope driver"
        self._identity_supported_instrument_models = ['DSO6012A','DSO6014A','DSO6032A',
                'DSO6034A','DSO6052A','DSO6054A','DSO6102A','DSO6104A','MSO6012A','MSO6014A',
                'MSO6032A','MSO6034A','MSO6052A','MSO6054A','MSO6102A','MSO6104A']
        self._init_channels()
|
__author__ = 'sen'
|
"""DBus interface to Veusz document."""
from __future__ import division
import numpy as N
from ..compat import cstr
from ..utils import vzdbus
from . import commandinterpreter
class DBusInterface(vzdbus.Object):
    """DBus interface to Veusz document command interface.

    Each instance exports one document's command interface on the session
    bus under /Windows/<n>/Document, where <n> is a per-process counter.
    """
    _ctr = 1  # per-process counter giving each document a unique path
    interface = 'org.veusz.document'

    def __init__(self, doc):
        root = '/Windows/%i/Document' % DBusInterface._ctr
        vzdbus.Object.__init__(self, vzdbus.sessionbus, root)
        self.index = DBusInterface._ctr
        DBusInterface._ctr += 1
        # The interpreter offers raw python execution (RunPython); self.ci is
        # the safe command interface the remaining methods delegate to.
        self.cmdinter = commandinterpreter.CommandInterpreter(doc)
        self.ci = self.cmdinter.interface

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def RunPython(self, cmdstr):
        """Run an arbitrary python command in the document context."""
        return self.cmdinter.run(cmdstr)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sa{sv}')
    def Action(self, action, optargs):
        """Trigger a widget action."""
        return self.ci.Action(action, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sa{sv}',
                   out_signature='s')
    def Add(self, wtype, optargs):
        """Add a widget of the given type; returns the new widget's name."""
        return self.ci.Add(wtype, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sssa{sv}')
    def AddCustom(self, thetype, name, val, argsv):
        """Add a custom definition to the document."""
        self.ci.AddCustom(thetype, name, val, **argsv)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def AddImportPath(self, dirname):
        """Add a directory to search when importing data files."""
        self.ci.AddImportPath(cstr(dirname))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssa{sv}', out_signature='s')
    def CloneWidget(self, widget, newparent, optargs):
        """Clone a widget under a new parent; returns the clone's path."""
        return self.ci.CloneWidget(cstr(widget), cstr(newparent),
                                   **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sssa{sv}')
    def CreateHistogram(self, inexpr, outbinsds, outvalsds, optargs):
        """Histogram inexpr into the named bins/values datasets."""
        self.ci.CreateHistogram(cstr(inexpr), cstr(outbinsds),
                                cstr(outvalsds), **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sa{sv}a{sv}')
    def DatasetPlugin(self, pluginname, fields, datasetnames):
        """Apply a dataset plugin with the given fields."""
        self.ci.DatasetPlugin(cstr(pluginname), fields, datasetnames)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sa{sv}')
    def Export(self, filename, optargs):
        """Export the document to a file."""
        self.ci.Export(filename, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='v')
    def Get(self, val):
        """Get a setting value."""
        return self.ci.Get(val)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='as')
    def GetChildren(self, where):
        """List child widgets of the given path."""
        return self.ci.GetChildren(where=where)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='adadadad')
    def GetData1D(self, datasetname):
        """Get a numeric dataset. Returns lists of numeric values
        for data, symmetric error, negative error and positive error."""
        def lornull(l):
            """Get blank list if None or convert to list otherwise."""
            if l is None:
                return []
            return list(l)
        data, serr, nerr, perr = self.ci.GetData(cstr(datasetname))
        return lornull(data), lornull(serr), lornull(nerr), lornull(perr)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='iiddddad')
    def GetData2D(self, datasetname):
        """Get a 2D dataset. Returns
        (X dim, Y dim, rangex min, rangex max,
        rangey min, rangey max,
        data (as 1d numeric array))
        """
        data = self.ci.GetData(cstr(datasetname))
        # Array shape is (rows=ny, cols=nx); the data is flattened row-major.
        return ( data[0].shape[1], data[0].shape[0],
                 data[1][0], data[1][1], data[2][0], data[2][1],
                 list(data[0].flat) )

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='as')
    def GetDataText(self, datasetname):
        """Get a text dataset as an array of strings."""
        return self.ci.GetData(cstr(datasetname))

    @vzdbus.method(dbus_interface=interface,
                   out_signature='as')
    def GetDatasets(self):
        """List the names of all datasets in the document."""
        return self.ci.GetDatasets()

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssa{sv}')
    def ImportFile(self, filename, descriptor, optargs):
        """Import 1D data from a file using a descriptor."""
        self.ci.ImportFile(filename, descriptor, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sasa{sv}')
    def ImportFile2D(self, filename, datasetnames, optargs):
        """Import 2D datasets from a file."""
        self.ci.ImportFile2D(filename, datasetnames, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sa{sv}')
    def ImportFileCSV(self, filename, optargs):
        """Import datasets from a CSV file."""
        self.ci.ImportFileCSV(filename, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sssa{sv}')
    def ImportFITSFile(self, dsname, filename, hdu, optargs):
        """Import dataset dsname from the given HDU of a FITS file."""
        # BUGFIX: dsname and hdu were previously dropped, so the dataset name
        # and HDU selection never reached the command interface.
        self.ci.ImportFITSFile(cstr(dsname), cstr(filename), hdu,
                               **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssa{sv}')
    def ImportFilePlugin(self, plugin, filename, optargs):
        """Import a file using a named import plugin."""
        self.ci.ImportFilePlugin(plugin, filename, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssa{sv}')
    def ImportString(self, descriptor, string, optargs):
        """Import 1D data from a string using a descriptor."""
        self.ci.ImportString(cstr(descriptor), cstr(string),
                             **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def Load(self, filename):
        """Load a Veusz document from file."""
        self.cmdinter.Load(filename)

    @vzdbus.method(dbus_interface=interface)
    def Print(self):
        """Print the document."""
        self.ci.Print()

    @vzdbus.method(dbus_interface=interface)
    def ReloadData(self):
        """Reload all linked datasets from their source files."""
        self.ci.ReloadData()

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ss')
    def Rename(self, widget, newname):
        """Rename a widget."""
        self.ci.Rename( cstr(widget), cstr(newname) )

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def Remove(self, name):
        """Remove a widget."""
        self.ci.Remove(cstr(name))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def RemoveCustom(self, name):
        """Remove a custom definition."""
        self.ci.RemoveCustom(cstr(name))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='s')
    def ResolveReference(self, name):
        """Resolve a reference setting to the path it points at."""
        return self.ci.ResolveReference(cstr(name))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def Save(self, filename):
        """Save the document to file."""
        self.ci.Save(cstr(filename))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sv')
    def Set(self, name, val):
        """Set a setting value."""
        return self.ci.Set(cstr(name), val)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ss')
    def SetToReference(self, name, val):
        """Link a setting to another setting by reference."""
        return self.ci.SetToReference(cstr(name), cstr(val))

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sadadadad')
    def SetData(self, name, data, symerr, negerr, poserr):
        """Set a 1D dataset; empty error arrays mean no errors of that kind."""
        if not symerr: symerr = None
        if not negerr: negerr = None
        if not poserr: poserr = None
        self.ci.SetData(cstr(name), data, symerr, negerr, poserr)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssssa{sv}')
    def SetData2DExpressionXYZ(self, name, xexpr, yexpr, zexpr, optargs):
        """Create a 2D dataset from x, y, z expressions."""
        self.ci.SetData2DExpressionXYZ(cstr(name), xexpr, yexpr, zexpr,
                                       **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s(ddd)(ddd)sa{sv}')
    def SetData2DXYFunc(self, name, xstep, ystep, expr, optargs):
        """Create a 2D dataset by evaluating expr on an x/y grid."""
        self.ci.SetData2DXYFunc(cstr(name), xstep, ystep, expr, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sadii(dd)(dd)')
    def SetData2D(self, name, data, nx, ny, xrange, yrange):
        """Set a 2D dataset from a flat, row-major array of nx*ny values."""
        # BUGFIX(review): reshape to (ny, nx) — ny rows of nx columns — so a
        # GetData2D round-trip preserves the array: GetData2D reports
        # nx=shape[1], ny=shape[0] and flattens row-major. The previous
        # reshape(nx, ny) transposed/garbled non-square data.
        data = N.array(data).reshape(ny, nx)
        self.ci.SetData2D(cstr(name), data, xrange=xrange, yrange=yrange)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='ssa{sv}')
    def SetDataExpression(self, name, val, optargs):
        """Create a dataset from an expression."""
        self.ci.SetDataExpression(cstr(name), val, **optargs)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sas')
    def SetDataText(self, name, val):
        """Set a text dataset from an array of strings."""
        val = [cstr(x) for x in val]
        self.ci.SetDataText(cstr(name), val)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='sas')
    def TagDatasets(self, tag, datasets):
        """Apply a tag to the listed datasets."""
        self.ci.TagDatasets(cstr(tag), datasets)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s')
    def To(self, path):
        """Change the current widget path."""
        self.ci.To(path)

    # node interface
    @vzdbus.method(dbus_interface=interface,
                   in_signature='ss', out_signature='as')
    def NodeChildren(self, path, types):
        """List child nodes of a path, filtered by types."""
        return self.ci.NodeChildren(path, types)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='s')
    def NodeType(self, path):
        """Return the node type at the path."""
        return self.ci.NodeType(path)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='s')
    def SettingType(self, path):
        """Return the type of the setting at the path."""
        return self.ci.SettingType(path)

    @vzdbus.method(dbus_interface=interface,
                   in_signature='s', out_signature='s')
    def WidgetType(self, path):
        """Return the widget type at the path."""
        return self.ci.WidgetType(path)
|
"""LatexEU serializer for records."""
from __future__ import absolute_import, division, print_function
from inspirehep.utils.latex import Latex
class LATEXEUSerializer(object):
    """LatexEU serializer for records."""

    def serialize(self, pid, record, links_factory=None):
        """Serialize a single latexeu from a record.

        :param pid: Persistent identifier instance.
        :param record: Record instance.
        :param links_factory: Factory function for the link generation,
            which are added to the response.
        """
        return Latex(record, 'latex_eu').format()

    def serialize_search(self, pid_fetcher, search_result, links=None,
                         item_links_factory=None):
        """Serialize a search result.

        :param pid_fetcher: Persistent identifier fetcher.
        :param search_result: Elasticsearch search result.
        :param links: Dictionary of links to add to response.
        """
        formatted = [
            Latex(hit['_source'], 'latex_eu').format()
            for hit in search_result['hits']['hits']
        ]
        return "\n".join(formatted)
|
import sh
import json
import os
import ConfigParser
root_dir = os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(__file__)), '..', '..', '..'))
class Version:
    # Cached value of package.json's "version" field.
    _source_version = None
    config = ConfigParser.ConfigParser()
    config.read(os.path.join(root_dir, 'utils', 'config.ini'))

    @classmethod
    def get_doc_version(cls):
        """Return the documentation version for the branch being built."""
        git = sh.Command("git")
        # Prefer the branch reported by Travis; fall back to asking git.
        travis_branch = os.environ.get('TRAVIS_BRANCH')
        if travis_branch is None:
            branch = git("rev-parse", "--abbrev-ref", "HEAD").strip()
        else:
            branch = travis_branch
        if branch == "develop":
            return cls.config.get("DEFAULT", "develop-version-mapping")
        # read version
        return cls.get_source_version()

    @classmethod
    def get_source_version(cls):
        """Read (once) and return the version string from package.json."""
        if cls._source_version is None:
            with open(os.path.join(root_dir, "package.json")) as data_file:
                cls._source_version = json.load(data_file)['version']
        return cls._source_version
|
from django.conf import settings
from django.utils import timezone
from pytz import utc
from bedrock import externalfiles
from bedrock.externalfiles.models import ExternalFile as EFModel
from bedrock.mozorg.tests import TestCase
class TestExternalFile(TestCase):
    """Tests for bedrock.externalfiles.ExternalFile backed by the ExternalFile model."""
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Use UTC so timestamp comparisons are timezone-stable.
        timezone.activate(utc)
    def setUp(self):
        # Register a throwaway external file entry for the duration of each test.
        settings.EXTERNAL_FILES["test"] = {"url": "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul", "name": "there.is.no.data.xul"}
    def tearDown(self):
        # Drop both the cached copy and the settings entry added in setUp.
        externalfiles.ExternalFile("test").clear_cache()
        del settings.EXTERNAL_FILES["test"]
    def test_last_modified(self):
        """Should return the modified timestamp."""
        EFModel.objects.create(name="test", content="test")
        efo = EFModel.objects.get(name="test")
        self.assertEqual(externalfiles.ExternalFile("test").last_modified, efo.last_modified)
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
import uuid
class Migration(migrations.Migration):
    """Create the CourseEntitlement model tying users to entitled course runs."""
    dependencies = [
        ('student', '0013_delete_historical_enrollment_records'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='CourseEntitlement',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('course_uuid', models.UUIDField()),
                # Null while the entitlement is still usable.
                ('expired_at', models.DateTimeField(null=True)),
                ('mode', models.CharField(default=b'audit', max_length=100)),
                ('order_number', models.CharField(max_length=128, null=True)),
                # Set once the entitlement is redeemed for a specific course run.
                ('enrollment_course_run', models.ForeignKey(to='student.CourseEnrollment', null=True)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
"""Testcases for cssutils.css.CSSValue and CSSPrimitiveValue."""
__version__ = '$Id: test_cssvalue.py 1473 2008-09-15 21:15:54Z cthedot $'
import xml.dom
import basetest
import cssutils
import types
class XTestCase(basetest.BaseTestCase):
    """Round-trip test for property priority handling in the serializer."""
    def setUp(self):
        # Reset serializer preferences so tests are order-independent.
        cssutils.ser.prefs.useDefaults()
    def tearDown(self):
        cssutils.ser.prefs.useDefaults()
    # NOTE(review): method name has a typo ("prioriy"), but renaming would
    # change the public test id, so it is left as-is.
    def test_prioriy(self):
        "Property.priority"
        s = cssutils.parseString(u'a { color: red }')
        self.assertEqual(s.cssText, u'a {\n color: red\n }'.encode())
if __name__ == '__main__':
    # Allow running this test module directly.
    import unittest
    unittest.main()
|
"""
Test Factory classes for ExternalUserIds
"""
from uuid import uuid4
import factory
from factory.fuzzy import FuzzyChoice, FuzzyText
from openedx.core.djangoapps.external_user_ids.models import ExternalId, ExternalIdType
class ExternalIDTypeFactory(factory.django.DjangoModelFactory): # lint-amnesty, pylint: disable=missing-class-docstring
    class Meta:
        model = ExternalIdType
    # name is drawn from the known ExternalIdType constants; description is random text.
    name = FuzzyChoice([ExternalIdType.MICROBACHELORS_COACHING])
    description = FuzzyText()
class ExternalIdFactory(factory.django.DjangoModelFactory): # lint-amnesty, pylint: disable=missing-class-docstring
    class Meta:
        model = ExternalId
    # Fresh UUID per instance; type is created via its own factory.
    external_user_id = factory.LazyFunction(uuid4)
    external_id_type = factory.SubFactory(ExternalIDTypeFactory)
|
import os
import sys
execfile('/etc/courtlistener')
sys.path.append(INSTALL_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from alert import settings
from alert.corpus_importer.lawbox.import_law_box import get_court_object
from alert.lib.sunburnt import sunburnt
from alert.search.models import Document
from lxml import html
from optparse import OptionParser
def cleaner(simulate=False, verbose=False):
    """Find items that are in californiad and change them to be in caed by using an updated set of regexes.
    """
    # Query Solr for every document currently filed under 'californiad'.
    conn = sunburnt.SolrInterface(settings.SOLR_OPINION_URL, mode='rw')
    q = {'fq': ['court_exact:%s' % 'californiad']}
    results = conn.raw_query(**q)
    for r in results:
        if verbose:
            print "Running tests on item %s" % r['id']
        doc = Document.objects.get(pk=r['id'])
        # Make the HTML element, then figure out the court
        clean_html_tree = html.fromstring(doc.html_lawbox)
        correct_court = get_court_object(clean_html_tree)
        if verbose:
            print " - https://www.courtlistener.com%s" % doc.get_absolute_url()
            print " - Old value was: %s" % doc.court_id
            print " - New value is: %s" % correct_court
        if doc.court_id == correct_court:
            # No change needed, simply move on.
            if verbose:
                print " - Proceeding to next item: Values are equal."
            continue
        elif correct_court != 'caed':
            # Attempting to change to an unexpected value.
            if verbose:
                print " - Proceeding to next item: New value is not what we expected."
            continue
        else:
            if verbose:
                print " - Updating with new value."
            if not simulate:
                doc.court_id = correct_court
                # force_commit=False: defer the (expensive) Solr commit.
                doc.save(index=True, force_commit=False)
    # Do one big commit at the end
    conn.commit()
def main():
usage = "usage: %prog [--verbose] [---simulate]"
parser = OptionParser(usage)
parser.add_option(
'-v',
'--verbose',
action="store_true",
dest='verbose',
default=False,
help="Display log during execution"
)
parser.add_option(
'-s',
'--simulate',
action="store_true",
dest='simulate',
default=False,
help="Simulate the corrections without actually making them."
)
(options, args) = parser.parse_args()
verbose = options.verbose
simulate = options.simulate
if simulate:
print "*******************************************"
print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
print "*******************************************"
return cleaner(simulate, verbose)
if __name__ == '__main__':
    # Entry point when run as a script.
    main()
|
from default import Test, db, with_context
from factories import ProjectFactory, TaskFactory, UserFactory
from mock import patch
from pybossa.repositories import ProjectRepository
# Repository instance shared by all tests in this module.
project_repo = ProjectRepository(db)
def configure_mock_current_user_from(user, mock):
    """Configure *mock* to mimic flask-login's current_user for *user*.

    ``user`` may be None to represent an anonymous visitor; in that case
    the mock reports is_anonymous and has no admin flag or id.
    Returns the configured mock for convenience.
    """
    def is_anonymous():
        return user is None
    mock.is_anonymous.return_value = is_anonymous()
    # `is not None` rather than `!= None`: identity comparison is the
    # correct (PEP 8) way to test for None.
    mock.admin = user.admin if user is not None else None
    mock.id = user.id if user is not None else None
    return mock
class TestProjectPassword(Test):
from pybossa.view.projects import redirect
    @patch('pybossa.view.projects.redirect', wraps=redirect)
    def test_password_view_func_post(self, redirect):
        """Test when posting to /project/short_name/password and password is correct
        the user is redirected to where they came from"""
        project = ProjectFactory.create()
        task = TaskFactory.create(project=project)
        project.set_password('mysecret')
        project_repo.update(project)
        # The 'next' query param records where the user originally came from.
        redirect_url = '/project/%s/task/%s' % (project.short_name, task.id)
        url = '/project/%s/password?next=%s' % (project.short_name, redirect_url)
        res = self.app.post(url, data={'password': 'mysecret'})
        # The wrapped redirect must be invoked with the original target URL.
        redirect.assert_called_with(redirect_url)
def test_password_view_func_post_wrong_passwd(self):
"""Test when posting to /project/short_name/password and password is incorrect
an error message is flashed"""
project = ProjectFactory.create()
task = TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
url = '/project/%s/password?next=/project/%s/task/%s' % (
project.short_name, project.short_name, task.id)
res = self.app.post(url, data={'password': 'bad_passwd'})
assert 'Sorry, incorrect password' in res.data, "No error message shown"
def test_password_view_func_no_project(self):
"""Test when receiving a request to a non-existing project, return 404"""
get_res = self.app.get('/project/noapp/password')
post_res = self.app.post('/project/noapp/password')
assert get_res.status_code == 404, get_res.status_code
assert post_res.status_code == 404, post_res.status_code
def test_password_required_for_anonymous_contributors(self):
"""Test when an anonymous user wants to contribute to a password
protected project is redirected to the password view"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' in res.data
def test_password_not_required_for_anonymous_contributors(self):
"""Test when an anonymous user wants to contribute to a non-password
protected project is able to do it"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
@patch('pybossa.password_manager.current_user')
def test_password_required_for_authenticated_contributors(self, mock_user):
"""Test when an authenticated user wants to contribute to a password
protected project is redirected to the password view"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' in res.data
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_authenticated_contributors(self, mock_user):
"""Test when an authenticated user wants to contribute to a non-password
protected project is able to do it"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_admins(self, mock_user):
"""Test when an admin wants to contribute to a password
protected project is able to do it"""
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
assert mock_user.admin
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_owner(self, mock_user):
"""Test when the owner wants to contribute to a password
protected project is able to do it"""
owner = UserFactory.create_batch(2)[1]
configure_mock_current_user_from(owner, mock_user)
assert owner.admin is False
project = ProjectFactory.create(owner=owner)
assert project.owner.id == owner.id
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
res = self.app.get('/project/%s/newtask' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
res = self.app.get('/project/%s/task/1' % project.short_name, follow_redirects=True)
assert 'Enter the password to contribute' not in res.data
endpoints_requiring_password = ('/', '/tutorial', '/1/results.json',
'/tasks/', '/tasks/browse', '/tasks/export',
'/stats', '/blog')
def test_password_required_for_anonymous_users_to_see_project(self):
"""Test when an anonymous user wants to visit a password
protected project is redirected to the password view"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' in res.data, endpoint
def test_password_not_required_for_anonymous_users_to_see_project(self):
"""Test when an anonymous user wants to visit a non-password
protected project is able to do it"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' not in res.data, endpoint
@patch('pybossa.password_manager.current_user')
def test_password_required_for_authenticated_users_to_see_project(self, mock_user):
"""Test when an authenticated user wants to visit a password
protected project is redirected to the password view"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' in res.data, endpoint
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_authenticated_users_to_see_project(self, mock_user):
"""Test when an authenticated user wants to visit a non-password
protected project is able to do it"""
project = ProjectFactory.create()
TaskFactory.create(project=project)
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' not in res.data, endpoint
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_admins_to_see_project(self, mock_user):
"""Test when an admin wants to visit a password
protected project is able to do it"""
user = UserFactory.create()
configure_mock_current_user_from(user, mock_user)
assert mock_user.admin
project = ProjectFactory.create()
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' not in res.data, endpoint
@patch('pybossa.password_manager.current_user')
def test_password_not_required_for_owner_to_see_project(self, mock_user):
"""Test when the owner wants to visit a password
protected project is able to do it"""
owner = UserFactory.create_batch(2)[1]
configure_mock_current_user_from(owner, mock_user)
assert owner.admin is False
project = ProjectFactory.create(owner=owner)
assert project.owner.id == owner.id
TaskFactory.create(project=project)
project.set_password('mysecret')
project_repo.update(project)
for endpoint in self.endpoints_requiring_password:
res = self.app.get('/project/%s%s' % (project.short_name, endpoint),
follow_redirects=True)
assert 'Enter the password to contribute' not in res.data, endpoint
|
"""Extract pre-computed feature vectors from BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import collections
import json
import re
import modeling
import tokenization
import tensorflow as tf
# Command-line configuration for the feature extractor (tf.flags).
flags = tf.flags

FLAGS = flags.FLAGS

# Input text file: one sentence, or a "text_a ||| text_b" pair, per line.
flags.DEFINE_string("input_file", None, "")

# Output path: one JSON record per input line.
flags.DEFINE_string("output_file", None, "")

# Comma-separated encoder layer indexes to export (negative = from the top).
flags.DEFINE_string("layers", "-1,-2,-3,-4", "")

flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")

flags.DEFINE_integer(
    "max_seq_length", 128,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")

flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained BERT model).")

flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")

flags.DEFINE_bool(
    "do_lower_case", True,
    "Whether to lower case the input text. Should be True for uncased "
    "models and False for cased models.")

flags.DEFINE_integer("batch_size", 32, "Batch size for predictions.")

flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")

flags.DEFINE_string("master", None,
                    "If using a TPU, the address of the master.")

flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")

flags.DEFINE_bool(
    "use_one_hot_embeddings", False,
    "If True, tf.one_hot will be used for embedding lookups, otherwise "
    "tf.nn.embedding_lookup will be used. On TPUs, this should be True "
    "since it is much faster.")
class InputExample(object):
    """One raw input: a unique id plus one or two text segments.

    text_b is None for single-sentence inputs.
    """

    def __init__(self, unique_id, text_a, text_b):
        # Bind all constructor arguments in a single unpacking statement.
        self.unique_id, self.text_a, self.text_b = unique_id, text_a, text_b
class InputFeatures(object):
    """A single set of features of data.

    Holds the padded, id-encoded form of one example: token strings,
    wordpiece ids, attention mask, and segment (type) ids.
    """

    def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
        (self.unique_id,
         self.tokens,
         self.input_ids,
         self.input_mask,
         self.input_type_ids) = (unique_id, tokens, input_ids,
                                 input_mask, input_type_ids)
def input_fn_builder(features, seq_length):
    """Creates an `input_fn` closure to be passed to TPUEstimator.

    The features are materialized once, column-wise, and served as a
    tf.data.Dataset of fixed-shape int32 tensors.
    """
    # Column-wise views of the feature list, extracted up front.
    all_unique_ids = [f.unique_id for f in features]
    all_input_ids = [f.input_ids for f in features]
    all_input_mask = [f.input_mask for f in features]
    all_input_type_ids = [f.input_type_ids for f in features]

    def input_fn(params):
        """The actual input function."""
        batch_size = params["batch_size"]
        num_examples = len(features)

        # This is for demo purposes and does NOT scale to large data sets. We do
        # not use Dataset.from_generator() because that uses tf.py_func which is
        # not TPU compatible. The right way to load data is with TFRecordReader.
        d = tf.data.Dataset.from_tensor_slices({
            "unique_ids":
                tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
            "input_ids":
                tf.constant(
                    all_input_ids, shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_mask":
                tf.constant(
                    all_input_mask,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
            "input_type_ids":
                tf.constant(
                    all_input_type_ids,
                    shape=[num_examples, seq_length],
                    dtype=tf.int32),
        })

        d = d.batch(batch_size=batch_size, drop_remainder=False)
        return d

    return input_fn
def model_fn_builder(bert_config, init_checkpoint, layer_indexes, use_tpu,
                     use_one_hot_embeddings):
    """Returns `model_fn` closure for TPUEstimator.

    The model_fn builds an inference-only BertModel, restores weights from
    init_checkpoint, and emits one prediction tensor per requested encoder
    layer. Only PREDICT mode is supported.
    """

    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""

        unique_ids = features["unique_ids"]
        input_ids = features["input_ids"]
        input_mask = features["input_mask"]
        input_type_ids = features["input_type_ids"]

        # Inference only: is_training=False disables dropout.
        model = modeling.BertModel(
            config=bert_config,
            is_training=False,
            input_ids=input_ids,
            input_mask=input_mask,
            token_type_ids=input_type_ids,
            use_one_hot_embeddings=use_one_hot_embeddings)

        if mode != tf.estimator.ModeKeys.PREDICT:
            raise ValueError("Only PREDICT modes are supported: %s" % (mode))

        tvars = tf.trainable_variables()
        scaffold_fn = None
        (assignment_map,
         initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(
             tvars, init_checkpoint)
        if use_tpu:
            # On TPU the checkpoint restore must happen inside the Scaffold
            # so that it runs on each TPU host.
            def tpu_scaffold():
                tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
                return tf.train.Scaffold()

            scaffold_fn = tpu_scaffold
        else:
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)

        all_layers = model.get_all_encoder_layers()

        predictions = {
            "unique_id": unique_ids,
        }

        # One output per requested layer, keyed by its position in
        # layer_indexes (not by the raw layer index).
        for (i, layer_index) in enumerate(layer_indexes):
            predictions["layer_output_%d" % i] = all_layers[layer_index]

        output_spec = tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
        return output_spec

    return model_fn
def convert_examples_to_features(examples, seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Each InputExample is tokenized, truncated to fit seq_length (including
    the [CLS]/[SEP] markers), converted to wordpiece ids, and zero-padded.
    Returns a list of InputFeatures in the same order as `examples`.
    """

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)

        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > seq_length - 2:
                tokens_a = tokens_a[0:(seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        input_type_ids = []
        tokens.append("[CLS]")
        input_type_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            input_type_ids.append(0)
        tokens.append("[SEP]")
        input_type_ids.append(0)

        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                input_type_ids.append(1)
            tokens.append("[SEP]")
            input_type_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < seq_length:
            input_ids.append(0)
            input_mask.append(0)
            input_type_ids.append(0)

        assert len(input_ids) == seq_length
        assert len(input_mask) == seq_length
        assert len(input_type_ids) == seq_length

        # Log the first few examples so the tokenization can be eyeballed.
        if ex_index < 5:
            tf.logging.info("*** Example ***")
            tf.logging.info("unique_id: %s" % (example.unique_id))
            tf.logging.info("tokens: %s" % " ".join(
                [tokenization.printable_text(x) for x in tokens]))
            tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            tf.logging.info(
                "input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))

        features.append(
            InputFeatures(
                unique_id=example.unique_id,
                tokens=tokens,
                input_ids=input_ids,
                input_mask=input_mask,
                input_type_ids=input_type_ids))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def read_examples(input_file):
    """Read a list of `InputExample`s from an input file.

    Each line is either a single sentence, or "text_a ||| text_b" for a
    sentence pair. Line numbers (0-based) become the unique ids.
    """
    examples = []
    unique_id = 0
    with tf.gfile.GFile(input_file, "r") as reader:
        while True:
            line = tokenization.convert_to_unicode(reader.readline())
            if not line:
                break
            stripped = line.strip()
            # " ||| " splits the line into a text_a / text_b pair.
            match = re.match(r"^(.*) \|\|\| (.*)$", stripped)
            if match:
                text_a, text_b = match.group(1), match.group(2)
            else:
                text_a, text_b = stripped, None
            examples.append(
                InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b))
            unique_id += 1
    return examples
def main(_):
    """Run BERT over FLAGS.input_file and write per-token features as JSON.

    For every input line, writes one JSON object containing the line index
    and, for each token, the requested encoder-layer activations rounded
    to 6 decimals.
    """
    tf.logging.set_verbosity(tf.logging.INFO)

    layer_indexes = [int(x) for x in FLAGS.layers.split(",")]

    bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)

    tokenizer = tokenization.FullTokenizer(
        vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)

    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        master=FLAGS.master,
        tpu_config=tf.contrib.tpu.TPUConfig(
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))

    examples = read_examples(FLAGS.input_file)

    features = convert_examples_to_features(
        examples=examples, seq_length=FLAGS.max_seq_length, tokenizer=tokenizer)

    unique_id_to_feature = {}
    for feature in features:
        unique_id_to_feature[feature.unique_id] = feature

    model_fn = model_fn_builder(
        bert_config=bert_config,
        init_checkpoint=FLAGS.init_checkpoint,
        layer_indexes=layer_indexes,
        use_tpu=FLAGS.use_tpu,
        use_one_hot_embeddings=FLAGS.use_one_hot_embeddings)

    # If TPU is not available, this will fall back to normal Estimator on CPU
    # or GPU.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        model_fn=model_fn,
        config=run_config,
        predict_batch_size=FLAGS.batch_size)

    input_fn = input_fn_builder(
        features=features, seq_length=FLAGS.max_seq_length)

    with codecs.getwriter("utf-8")(tf.gfile.Open(FLAGS.output_file,
                                                 "w")) as writer:
        for result in estimator.predict(input_fn, yield_single_examples=True):
            unique_id = int(result["unique_id"])
            feature = unique_id_to_feature[unique_id]
            output_json = collections.OrderedDict()
            output_json["linex_index"] = unique_id
            all_features = []
            for (i, token) in enumerate(feature.tokens):
                all_layers = []
                for (j, layer_index) in enumerate(layer_indexes):
                    layer_output = result["layer_output_%d" % j]
                    layers = collections.OrderedDict()
                    layers["index"] = layer_index
                    layers["values"] = [
                        round(float(x), 6) for x in layer_output[i:(i + 1)].flat
                    ]
                    all_layers.append(layers)
                # FIX: the original rebound the name `features` (the list of
                # InputFeatures built above) to this per-token dict. Harmless
                # at runtime but a confusing shadow; use a distinct name.
                token_record = collections.OrderedDict()
                token_record["token"] = token
                token_record["layers"] = all_layers
                all_features.append(token_record)
            output_json["features"] = all_features
            writer.write(json.dumps(output_json) + "\n")
if __name__ == "__main__":
    # These flags have no usable defaults; fail fast if any is missing.
    flags.mark_flag_as_required("input_file")
    flags.mark_flag_as_required("vocab_file")
    flags.mark_flag_as_required("bert_config_file")
    flags.mark_flag_as_required("init_checkpoint")
    flags.mark_flag_as_required("output_file")
    tf.app.run()
|
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def ozoneKM():
    """Smoke-test K-Means on the ozone dataset: import, train, and predict."""
    # Connect to a pre-existing cluster
    # connect to localhost:54321

    train = h2o.import_file(path=h2o.locate("smalldata/glm_test/ozone.csv"))

    # See that the data is ready
    print train.describe()

    # Run KMeans
    my_km = h2o.kmeans(x=train,
                       k=10,
                       init = "PlusPlus",
                       max_iterations = 100)

    my_km.show()
    my_km.summary()

    # Score the training frame with the fitted model.
    my_pred = my_km.predict(train)
    my_pred.describe()
if __name__ == "__main__":
    # tests.run_test handles cluster connection and setup/teardown.
    tests.run_test(sys.argv, ozoneKM)
|
import struct
from ryu import exception
from ryu.lib import mac
from ryu.lib.pack_utils import msg_pack_into
from . import ofproto_parser
from . import ofproto_v1_0
from . import inet
import logging
LOG = logging.getLogger('ryu.ofproto.nx_match')

# Maximum values for fixed-width unsigned fields.
UINT64_MAX = (1 << 64) - 1
UINT32_MAX = (1 << 32) - 1
UINT16_MAX = (1 << 16) - 1

# Flow wildcard bits (FWW_*): a set bit means the field is wildcarded.
FWW_IN_PORT = 1 << 0
FWW_DL_TYPE = 1 << 4
FWW_NW_PROTO = 1 << 5
FWW_NW_DSCP = 1 << 1
FWW_NW_ECN = 1 << 2
FWW_ARP_SHA = 1 << 3
FWW_ARP_THA = 1 << 6
FWW_IPV6_LABEL = 1 << 7
FWW_NW_TTL = 1 << 8
FWW_ALL = (1 << 13) - 1

# IP fragment handling bits.
FLOW_NW_FRAG_ANY = 1 << 0
FLOW_NW_FRAG_LATER = 1 << 1
FLOW_NW_FRAG_MASK = FLOW_NW_FRAG_ANY | FLOW_NW_FRAG_LATER

# The IPv4 ToS byte is split into ECN (low 2 bits) and DSCP (high 6 bits).
IP_ECN_MASK = 0x03
IP_DSCP_MASK = 0xfc

# struct pack format strings (network byte order) for NXM field payloads.
MF_PACK_STRING_BE64 = '!Q'
MF_PACK_STRING_BE32 = '!I'
MF_PACK_STRING_BE16 = '!H'
MF_PACK_STRING_8 = '!B'
MF_PACK_STRING_MAC = '!6s'
MF_PACK_STRING_IPV6 = '!8H'

# Registry mapping NXM header -> field-class factory; filled by
# the @_register_make class decorator below.
_MF_FIELDS = {}

FLOW_N_REGS = 8  # ovs 1.5
class Flow(ofproto_parser.StringifyMixin):
    """Concrete match values for every field a ClsRule can set."""

    def __init__(self):
        # Ingress port.
        self.in_port = 0
        # Ethernet / VLAN.
        self.dl_src = mac.DONTCARE
        self.dl_dst = mac.DONTCARE
        self.dl_type = 0
        self.dl_vlan = 0
        self.dl_vlan_pcp = 0
        self.vlan_tci = 0
        # IPv4 / IPv6.
        self.nw_src = 0
        self.nw_dst = 0
        self.nw_proto = 0
        self.nw_tos = 0
        self.nw_ttl = 0
        self.nw_frag = 0
        self.ipv6_src = []
        self.ipv6_dst = []
        self.ipv6_label = 0
        # Transport ports (also carry ICMP type/code).
        self.tp_src = 0
        self.tp_dst = 0
        # ARP / IPv6 neighbour discovery.
        self.arp_sha = 0
        self.arp_tha = 0
        self.arp_spa = 0
        self.arp_tpa = 0
        self.nd_target = []
        # Nicira extensions: tunnel id, registers, packet mark.
        self.tun_id = 0
        self.regs = [0] * FLOW_N_REGS
        self.pkt_mark = 0
class FlowWildcards(ofproto_parser.StringifyMixin):
    """Masks/wildcard state describing which Flow fields are significant."""

    def __init__(self):
        # Ethernet masks.
        self.dl_src_mask = 0
        self.dl_dst_mask = 0
        self.vlan_tci_mask = 0
        # Transport-port masks.
        self.tp_src_mask = 0
        self.tp_dst_mask = 0
        # IPv4 / ARP address masks.
        self.nw_src_mask = 0
        self.nw_dst_mask = 0
        self.arp_spa_mask = 0
        self.arp_tpa_mask = 0
        # IPv6 masks (lists of 16-bit words, empty = unset).
        self.ipv6_src_mask = []
        self.ipv6_dst_mask = []
        self.nd_target_mask = []
        # Fragment bits and Nicira extensions.
        self.nw_frag_mask = 0
        self.tun_id_mask = 0
        self.regs_bits = 0
        self.regs_mask = [0] * FLOW_N_REGS
        self.pkt_mark_mask = 0
        # Classic OpenFlow 1.0 wildcard bitmap; starts fully wildcarded.
        self.wildcards = ofproto_v1_0.OFPFW_ALL
class ClsRule(ofproto_parser.StringifyMixin):
    """describe a matching rule for OF 1.0 OFPMatch (and NX).

    Field values live in self.flow; which fields are significant is tracked
    in self.wc (masks and the classic OFPFW wildcard bitmap). Each set_*
    method records a value and clears/sets the corresponding wildcard state.
    """

    def __init__(self, **kwargs):
        self.wc = FlowWildcards()
        self.flow = Flow()
        for key, value in kwargs.items():
            # 'regN' kwargs are dispatched to set_reg with the register index.
            if key[:3] == 'reg':
                register = int(key[3:] or -1)
                self.set_reg(register, value)
                continue
            setter = getattr(self, 'set_' + key, None)
            if not setter:
                LOG.error('Invalid kwarg specified to ClsRule (%s)', key)
                continue
            # Tuples/lists are spread as positional args (e.g. masked setters).
            if not isinstance(value, (tuple, list)):
                value = (value, )
            setter(*value)

    def set_in_port(self, port):
        self.wc.wildcards &= ~FWW_IN_PORT
        self.flow.in_port = port

    def set_dl_vlan(self, dl_vlan):
        self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN
        self.flow.dl_vlan = dl_vlan

    def set_dl_vlan_pcp(self, dl_vlan_pcp):
        self.wc.wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP
        self.flow.dl_vlan_pcp = dl_vlan_pcp

    def set_dl_dst(self, dl_dst):
        self.flow.dl_dst = dl_dst

    def set_dl_dst_masked(self, dl_dst, mask):
        self.wc.dl_dst_mask = mask
        # bit-wise and of the corresponding elements of dl_dst and mask
        self.flow.dl_dst = mac.haddr_bitand(dl_dst, mask)

    def set_dl_src(self, dl_src):
        self.flow.dl_src = dl_src

    def set_dl_src_masked(self, dl_src, mask):
        self.wc.dl_src_mask = mask
        self.flow.dl_src = mac.haddr_bitand(dl_src, mask)

    def set_dl_type(self, dl_type):
        self.wc.wildcards &= ~FWW_DL_TYPE
        self.flow.dl_type = dl_type

    def set_dl_tci(self, tci):
        self.set_dl_tci_masked(tci, UINT16_MAX)

    def set_dl_tci_masked(self, tci, mask):
        self.wc.vlan_tci_mask = mask
        self.flow.vlan_tci = tci

    def set_tp_src(self, tp_src):
        self.set_tp_src_masked(tp_src, UINT16_MAX)

    def set_tp_src_masked(self, tp_src, mask):
        self.wc.tp_src_mask = mask
        self.flow.tp_src = tp_src & mask

    def set_tp_dst(self, tp_dst):
        self.set_tp_dst_masked(tp_dst, UINT16_MAX)

    def set_tp_dst_masked(self, tp_dst, mask):
        self.wc.tp_dst_mask = mask
        self.flow.tp_dst = tp_dst & mask

    def set_nw_proto(self, nw_proto):
        self.wc.wildcards &= ~FWW_NW_PROTO
        self.flow.nw_proto = nw_proto

    def set_nw_src(self, nw_src):
        self.set_nw_src_masked(nw_src, UINT32_MAX)

    def set_nw_src_masked(self, nw_src, mask):
        self.flow.nw_src = nw_src
        self.wc.nw_src_mask = mask

    def set_nw_dst(self, nw_dst):
        self.set_nw_dst_masked(nw_dst, UINT32_MAX)

    def set_nw_dst_masked(self, nw_dst, mask):
        self.flow.nw_dst = nw_dst
        self.wc.nw_dst_mask = mask

    def set_nw_dscp(self, nw_dscp):
        # DSCP occupies the high 6 bits of the ToS byte; keep the ECN bits.
        self.wc.wildcards &= ~FWW_NW_DSCP
        self.flow.nw_tos &= ~IP_DSCP_MASK
        self.flow.nw_tos |= nw_dscp & IP_DSCP_MASK

    # ICMP type/code are carried in the transport-port fields.
    def set_icmp_type(self, icmp_type):
        self.set_tp_src(icmp_type)

    def set_icmp_code(self, icmp_code):
        self.set_tp_dst(icmp_code)

    def set_tun_id(self, tun_id):
        self.set_tun_id_masked(tun_id, UINT64_MAX)

    def set_tun_id_masked(self, tun_id, mask):
        self.wc.tun_id_mask = mask
        self.flow.tun_id = tun_id & mask

    def set_nw_ecn(self, nw_ecn):
        # ECN occupies the low 2 bits of the ToS byte; keep the DSCP bits.
        self.wc.wildcards &= ~FWW_NW_ECN
        self.flow.nw_tos &= ~IP_ECN_MASK
        self.flow.nw_tos |= nw_ecn & IP_ECN_MASK

    def set_nw_ttl(self, nw_ttl):
        self.wc.wildcards &= ~FWW_NW_TTL
        self.flow.nw_ttl = nw_ttl

    def set_nw_frag(self, nw_frag):
        self.wc.nw_frag_mask |= FLOW_NW_FRAG_MASK
        self.flow.nw_frag = nw_frag

    def set_nw_frag_masked(self, nw_frag, mask):
        self.wc.nw_frag_mask = mask
        self.flow.nw_frag = nw_frag & mask

    def set_arp_spa(self, spa):
        self.set_arp_spa_masked(spa, UINT32_MAX)

    def set_arp_spa_masked(self, spa, mask):
        self.flow.arp_spa = spa
        self.wc.arp_spa_mask = mask

    def set_arp_tpa(self, tpa):
        self.set_arp_tpa_masked(tpa, UINT32_MAX)

    def set_arp_tpa_masked(self, tpa, mask):
        self.flow.arp_tpa = tpa
        self.wc.arp_tpa_mask = mask

    def set_arp_sha(self, sha):
        self.wc.wildcards &= ~FWW_ARP_SHA
        self.flow.arp_sha = sha

    def set_arp_tha(self, tha):
        self.wc.wildcards &= ~FWW_ARP_THA
        self.flow.arp_tha = tha

    def set_icmpv6_type(self, icmp_type):
        self.set_tp_src(icmp_type)

    def set_icmpv6_code(self, icmp_code):
        self.set_tp_dst(icmp_code)

    def set_ipv6_label(self, label):
        self.wc.wildcards &= ~FWW_IPV6_LABEL
        self.flow.ipv6_label = label

    # IPv6 addresses are lists of 16-bit words; masked setters AND
    # value and mask element-wise.
    def set_ipv6_src_masked(self, src, mask):
        self.wc.ipv6_src_mask = mask
        self.flow.ipv6_src = [x & y for (x, y) in zip(src, mask)]

    def set_ipv6_src(self, src):
        self.flow.ipv6_src = src

    def set_ipv6_dst_masked(self, dst, mask):
        self.wc.ipv6_dst_mask = mask
        self.flow.ipv6_dst = [x & y for (x, y) in zip(dst, mask)]

    def set_ipv6_dst(self, dst):
        self.flow.ipv6_dst = dst

    def set_nd_target_masked(self, target, mask):
        self.wc.nd_target_mask = mask
        self.flow.nd_target = [x & y for (x, y) in
                               zip(target, mask)]

    def set_nd_target(self, target):
        self.flow.nd_target = target

    def set_reg(self, reg_idx, value):
        self.set_reg_masked(reg_idx, value, 0)

    def set_reg_masked(self, reg_idx, value, mask):
        self.wc.regs_mask[reg_idx] = mask
        self.flow.regs[reg_idx] = value
        self.wc.regs_bits |= (1 << reg_idx)

    def set_pkt_mark_masked(self, pkt_mark, mask):
        self.flow.pkt_mark = pkt_mark
        self.wc.pkt_mark_mask = mask

    def flow_format(self):
        """Return the wire format (NXM vs classic OF 1.0) this rule needs."""
        # Tunnel ID is only supported by NXM
        if self.wc.tun_id_mask != 0:
            return ofproto_v1_0.NXFF_NXM

        # Masking DL_DST is only supported by NXM
        if self.wc.dl_dst_mask:
            return ofproto_v1_0.NXFF_NXM

        # Masking DL_SRC is only supported by NXM
        if self.wc.dl_src_mask:
            return ofproto_v1_0.NXFF_NXM

        # ECN is only supported by NXM
        if not self.wc.wildcards & FWW_NW_ECN:
            return ofproto_v1_0.NXFF_NXM

        if self.wc.regs_bits > 0:
            return ofproto_v1_0.NXFF_NXM

        return ofproto_v1_0.NXFF_OPENFLOW10

    def match_tuple(self):
        """return a tuple which can be used as *args for
        ofproto_v1_0_parser.OFPMatch.__init__().
        see Datapath.send_flow_mod.
        """
        assert self.flow_format() == ofproto_v1_0.NXFF_OPENFLOW10
        wildcards = ofproto_v1_0.OFPFW_ALL

        if not self.wc.wildcards & FWW_IN_PORT:
            wildcards &= ~ofproto_v1_0.OFPFW_IN_PORT

        if self.flow.dl_src != mac.DONTCARE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_SRC

        if self.flow.dl_dst != mac.DONTCARE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_DST

        if not self.wc.wildcards & FWW_DL_TYPE:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_TYPE

        if self.flow.dl_vlan != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN

        if self.flow.dl_vlan_pcp != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_DL_VLAN_PCP

        if self.flow.nw_tos != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_NW_TOS

        if self.flow.nw_proto != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_NW_PROTO

        # OF 1.0 can only express contiguous /n CIDR masks; "01" appearing
        # in the binary form means the mask is non-contiguous.
        if self.wc.nw_src_mask != 0 and "01" not in bin(self.wc.nw_src_mask):
            wildcards &= ~ofproto_v1_0.OFPFW_NW_SRC_MASK
            maskbits = (bin(self.wc.nw_src_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_SRC_SHIFT)

        if self.wc.nw_dst_mask != 0 and "01" not in bin(self.wc.nw_dst_mask):
            wildcards &= ~ofproto_v1_0.OFPFW_NW_DST_MASK
            maskbits = (bin(self.wc.nw_dst_mask).count("0") - 1)
            wildcards |= (maskbits << ofproto_v1_0.OFPFW_NW_DST_SHIFT)

        if self.flow.tp_src != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_TP_SRC

        if self.flow.tp_dst != 0:
            wildcards &= ~ofproto_v1_0.OFPFW_TP_DST

        return (wildcards, self.flow.in_port, self.flow.dl_src,
                self.flow.dl_dst, self.flow.dl_vlan, self.flow.dl_vlan_pcp,
                self.flow.dl_type, self.flow.nw_tos & IP_DSCP_MASK,
                self.flow.nw_proto, self.flow.nw_src, self.flow.nw_dst,
                self.flow.tp_src, self.flow.tp_dst)
def _set_nxm_headers(nxm_headers):
'''Annotate corresponding NXM header'''
def _set_nxm_headers_dec(self):
self.nxm_headers = nxm_headers
return self
return _set_nxm_headers_dec
def _register_make(cls):
    '''class decorator to Register mf make

    Registers cls.make in the module-level _MF_FIELDS registry under each
    of the class's NXM headers. Raises AssertionError if the class declares
    no headers or a header is already registered.
    '''
    assert cls.nxm_headers is not None
    # BUG FIX: the original asserted `cls.nxm_headers is not []`, which is
    # always true (identity comparison against a fresh list object).
    # Assert non-emptiness instead, which is what was clearly intended.
    assert cls.nxm_headers != []
    for nxm_header in cls.nxm_headers:
        assert nxm_header not in _MF_FIELDS
        _MF_FIELDS[nxm_header] = cls.make
    return cls
def mf_from_nxm_header(nxm_header):
    """Return a match-field instance for *nxm_header*, or None if unknown.

    Looks the header up in the _MF_FIELDS registry populated by
    @_register_make and calls the registered factory.
    """
    # Single dict lookup instead of the original `in` check followed by
    # .get() and an assert (three lookups of the same key).
    make = _MF_FIELDS.get(nxm_header)
    if make is None:
        return None
    return make(nxm_header)
class MFField(object):
    """Base class for NXM match-field (de)serializers.

    Subclasses register themselves per NXM header and implement put();
    this base provides struct packing helpers and the TLV parser.
    """

    # NXM header -> parser class, filled by @register_field_header.
    _FIELDS_HEADERS = {}

    @staticmethod
    def register_field_header(headers):
        """Decorator: register cls as the parser for each given header."""
        def _register_field_header(cls):
            for header in headers:
                MFField._FIELDS_HEADERS[header] = cls
            return cls
        return _register_field_header

    def __init__(self, nxm_header, pack_str):
        self.nxm_header = nxm_header
        self.pack_str = pack_str
        # Payload size in bytes/bits, derived from the struct format.
        self.n_bytes = struct.calcsize(pack_str)
        self.n_bits = self.n_bytes * 8

    @classmethod
    def parser(cls, buf, offset):
        """Parse one NXM TLV at buf[offset]; returns the field instance."""
        (header,) = struct.unpack_from('!I', buf, offset)
        cls_ = MFField._FIELDS_HEADERS.get(header)
        if cls_:
            field = cls_.field_parser(header, buf, offset)
        else:
            # print 'unknown field type'
            # NOTE(review): bare `raise` with no active exception is an
            # error in itself; looks like a placeholder for an "unknown
            # field type" exception — confirm intended behavior.
            raise
        # Total TLV length: payload length (low byte of header) + 4-byte header.
        field.length = (header & 0xff) + 4
        return field

    @classmethod
    def field_parser(cls, header, buf, offset):
        # Bit 8 of the header indicates a value+mask payload.
        hasmask = (header >> 8) & 1
        mask = None
        if hasmask:
            # value and mask are packed back to back with the same format.
            pack_str = '!' + cls.pack_str[1:] * 2
            (value, mask) = struct.unpack_from(pack_str, buf,
                                               offset + 4)
        else:
            (value,) = struct.unpack_from(cls.pack_str, buf,
                                          offset + 4)
        return cls(header, value, mask)

    def _put(self, buf, offset, value):
        # Pack a single value; returns the number of bytes written.
        msg_pack_into(self.pack_str, buf, offset, value)
        return self.n_bytes

    def putw(self, buf, offset, value, mask):
        # Write value followed by mask.
        len_ = self._put(buf, offset, value)
        return len_ + self._put(buf, offset + len_, mask)

    def _is_all_ones(self, value):
        return value == (1 << self.n_bits) - 1

    def putm(self, buf, offset, value, mask):
        # mask == 0: fully wildcarded, write nothing; all-ones: exact
        # match, value only; otherwise value + mask.
        if mask == 0:
            return 0
        elif self._is_all_ones(mask):
            return self._put(buf, offset, value)
        else:
            return self.putw(buf, offset, value, mask)

    def _putv6(self, buf, offset, value):
        # IPv6 values are sequences of 16-bit words, hence the *value spread.
        msg_pack_into(self.pack_str, buf, offset, *value)
        return self.n_bytes

    def putv6(self, buf, offset, value, mask):
        len_ = self._putv6(buf, offset, value)
        if len(mask):
            return len_ + self._putv6(buf, offset + len_, mask)
        return len_
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IN_PORT])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IN_PORT])
class MFInPort(MFField):
    """NXM field: 16-bit ingress port."""

    pack_str = MF_PACK_STRING_BE16

    def __init__(self, header, value, mask=None):
        super(MFInPort, self).__init__(header, MFInPort.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        # Serializer factory; pack_str is passed as a placeholder `value`
        # (the value argument is not used when writing via put()).
        return cls(header, MFInPort.pack_str)

    def put(self, buf, offset, rule):
        return self._put(buf, offset, rule.flow.in_port)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_DST, ofproto_v1_0.NXM_OF_ETH_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_DST,
                                ofproto_v1_0.NXM_OF_ETH_DST_W])
class MFEthDst(MFField):
    """Match field for the Ethernet destination MAC address."""

    pack_str = MF_PACK_STRING_MAC

    def __init__(self, header, value, mask=None):
        super(MFEthDst, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write dl_dst, using the wildcarded form when a mask is set."""
        mask = rule.wc.dl_dst_mask
        if mask:
            return self.putw(buf, offset, rule.flow.dl_dst, mask)
        return self._put(buf, offset, rule.flow.dl_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_SRC, ofproto_v1_0.NXM_OF_ETH_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_SRC,
                                ofproto_v1_0.NXM_OF_ETH_SRC_W])
class MFEthSrc(MFField):
    """Match field for the Ethernet source MAC address."""

    pack_str = MF_PACK_STRING_MAC

    def __init__(self, header, value, mask=None):
        super(MFEthSrc, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write dl_src, using the wildcarded form when a mask is set."""
        mask = rule.wc.dl_src_mask
        if mask:
            return self.putw(buf, offset, rule.flow.dl_src, mask)
        return self._put(buf, offset, rule.flow.dl_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ETH_TYPE])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_ETH_TYPE])
class MFEthType(MFField):
    """Match field for the Ethernet type (NXM_OF_ETH_TYPE)."""

    pack_str = MF_PACK_STRING_BE16

    def __init__(self, header, value, mask=None):
        super(MFEthType, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write the flow's dl_type; return the number of bytes written."""
        return self._put(buf, offset, rule.flow.dl_type)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_VLAN_TCI,
                   ofproto_v1_0.NXM_OF_VLAN_TCI_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_VLAN_TCI,
                                ofproto_v1_0.NXM_OF_VLAN_TCI_W])
class MFVlan(MFField):
    """Match field for the 802.1Q VLAN TCI."""

    pack_str = MF_PACK_STRING_BE16

    def __init__(self, header, value, mask=None):
        super(MFVlan, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write vlan_tci via the masked helper (exact/wildcard/omit)."""
        tci = rule.flow.vlan_tci
        return self.putm(buf, offset, tci, rule.wc.vlan_tci_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_TOS])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_TOS])
class MFIPDSCP(MFField):
    """Match field for the IP DSCP bits of the ToS byte."""

    pack_str = MF_PACK_STRING_8

    def __init__(self, header, value, mask=None):
        super(MFIPDSCP, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write the DSCP portion of nw_tos (ECN bits masked out)."""
        dscp = rule.flow.nw_tos & IP_DSCP_MASK
        return self._put(buf, offset, dscp)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_TUN_ID,
                   ofproto_v1_0.NXM_NX_TUN_ID_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_TUN_ID,
                                ofproto_v1_0.NXM_NX_TUN_ID_W])
class MFTunId(MFField):
    """Match field for the tunnel id (NXM_NX_TUN_ID)."""

    pack_str = MF_PACK_STRING_BE64

    def __init__(self, header, value, mask=None):
        super(MFTunId, self).__init__(header, self.pack_str)
        self.value = value

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write tun_id via the masked helper (exact/wildcard/omit)."""
        tun_id = rule.flow.tun_id
        return self.putm(buf, offset, tun_id, rule.wc.tun_id_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_SRC, ofproto_v1_0.NXM_OF_IP_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_SRC,
                                ofproto_v1_0.NXM_OF_IP_SRC_W])
class MFIPSrc(MFField):
    """Match field for the IPv4 source address."""

    pack_str = MF_PACK_STRING_BE32

    def __init__(self, header, value, mask=None):
        super(MFIPSrc, self).__init__(header, self.pack_str)
        self.value = value
        self.mask = mask

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write nw_src via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.nw_src,
                         rule.wc.nw_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_DST, ofproto_v1_0.NXM_OF_IP_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_OF_IP_DST,
                                ofproto_v1_0.NXM_OF_IP_DST_W])
class MFIPDst(MFField):
    """Match field for the IPv4 destination address."""

    pack_str = MF_PACK_STRING_BE32

    def __init__(self, header, value, mask=None):
        super(MFIPDst, self).__init__(header, self.pack_str)
        self.value = value
        self.mask = mask

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write nw_dst via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.nw_dst,
                         rule.wc.nw_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_ECN])
class MFIPECN(MFField):
    """Match field for the IP ECN bits of the ToS byte."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        """Write the ECN portion of nw_tos (DSCP bits masked out)."""
        ecn = rule.flow.nw_tos & IP_ECN_MASK
        return self._put(buf, offset, ecn)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_TTL])
class MFIPTTL(MFField):
    """Match field for the IP TTL."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        """Write the flow's nw_ttl; return the number of bytes written."""
        return self._put(buf, offset, rule.flow.nw_ttl)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_IP_PROTO])
class MFIPProto(MFField):
    """Match field for the IP protocol number."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        """Write the flow's nw_proto; return the number of bytes written."""
        return self._put(buf, offset, rule.flow.nw_proto)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_SRC, ofproto_v1_0.NXM_OF_TCP_SRC_W,
                   ofproto_v1_0.NXM_OF_UDP_SRC, ofproto_v1_0.NXM_OF_UDP_SRC_W])
class MFTPSRC(MFField):
    """Match field for the TCP/UDP source port."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 16-bit port."""
        return cls(header, MF_PACK_STRING_BE16)

    def put(self, buf, offset, rule):
        """Write tp_src via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.tp_src,
                         rule.wc.tp_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_TCP_DST, ofproto_v1_0.NXM_OF_TCP_DST_W,
                   ofproto_v1_0.NXM_OF_UDP_DST, ofproto_v1_0.NXM_OF_UDP_DST_W])
class MFTPDST(MFField):
    """Match field for the TCP/UDP destination port."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 16-bit port."""
        return cls(header, MF_PACK_STRING_BE16)

    def put(self, buf, offset, rule):
        """Write tp_dst via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.tp_dst,
                         rule.wc.tp_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_SPA, ofproto_v1_0.NXM_OF_ARP_SPA_W])
class MFArpSpa(MFField):
    """Match field for the ARP sender protocol (IPv4) address."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 32-bit address."""
        return cls(header, MF_PACK_STRING_BE32)

    def put(self, buf, offset, rule):
        """Write arp_spa via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.arp_spa,
                         rule.wc.arp_spa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ARP_TPA, ofproto_v1_0.NXM_OF_ARP_TPA_W])
class MFArpTpa(MFField):
    """Match field for the ARP target protocol (IPv4) address."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 32-bit address."""
        return cls(header, MF_PACK_STRING_BE32)

    def put(self, buf, offset, rule):
        """Write arp_tpa via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.arp_tpa,
                         rule.wc.arp_tpa_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_SHA])
class MFArpSha(MFField):
    """Match field for the ARP sender hardware (MAC) address."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a 6-byte MAC address."""
        return cls(header, MF_PACK_STRING_MAC)

    def put(self, buf, offset, rule):
        """Write the flow's arp_sha; return the number of bytes written."""
        return self._put(buf, offset, rule.flow.arp_sha)
class MFIPV6(object):
    """Mixin providing the IPv6-payload parser shared by the v6 fields."""

    pack_str = MF_PACK_STRING_IPV6

    @classmethod
    def field_parser(cls, header, buf, offset):
        """Parse an IPv6 value (and optional mask) following the header."""
        if (header >> 8) & 1:
            # Masked form: value words and mask words are back to back.
            double_fmt = '!' + cls.pack_str[1:] * 2
            words = struct.unpack_from(double_fmt, buf, offset + 4)
            return cls(header, list(words[:8]), list(words[8:]))
        words = struct.unpack_from(cls.pack_str, buf, offset + 4)
        return cls(header, list(words))
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_SRC,
                   ofproto_v1_0.NXM_NX_IPV6_SRC_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_SRC,
                                ofproto_v1_0.NXM_NX_IPV6_SRC_W])
class MFIPV6Src(MFIPV6, MFField):
    """Match field for the IPv6 source address."""

    def __init__(self, header, value, mask=None):
        super(MFIPV6Src, self).__init__(header, self.pack_str)
        self.value = value
        self.mask = mask

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write ipv6_src (and its mask, when one is present)."""
        return self.putv6(buf, offset, rule.flow.ipv6_src,
                          rule.wc.ipv6_src_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_DST,
                   ofproto_v1_0.NXM_NX_IPV6_DST_W])
@MFField.register_field_header([ofproto_v1_0.NXM_NX_IPV6_DST,
                                ofproto_v1_0.NXM_NX_IPV6_DST_W])
class MFIPV6Dst(MFIPV6, MFField):
    """Match field for the IPv6 destination address."""

    def __init__(self, header, value, mask=None):
        super(MFIPV6Dst, self).__init__(header, self.pack_str)
        self.value = value
        self.mask = mask

    @classmethod
    def make(cls, header):
        """Build an instance used for serializing *header*."""
        return cls(header, cls.pack_str)

    def put(self, buf, offset, rule):
        """Write ipv6_dst (and its mask, when one is present)."""
        return self.putv6(buf, offset, rule.flow.ipv6_dst,
                          rule.wc.ipv6_dst_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ND_TARGET,
                   ofproto_v1_0.NXM_NX_ND_TARGET_W])
class MFNdTarget(MFField):
    """Match field for the IPv6 neighbour-discovery target address."""

    @classmethod
    def make(cls, header):
        # Four big-endian 32-bit words form the 128-bit address.
        return cls(header, '!4I')

    def put(self, buf, offset, rule):
        """Write nd_target (and its mask, when one is present)."""
        return self.putv6(buf, offset, rule.flow.nd_target,
                          rule.wc.nd_target_mask)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IP_FRAG,
                   ofproto_v1_0.NXM_NX_IP_FRAG_W])
class MFIpFrag(MFField):
    """Match field for the IP fragmentation bits."""

    @classmethod
    def make(cls, header):
        # One unsigned byte carries the frag bits.
        return cls(header, '!B')

    def put(self, buf, offset, rule):
        """Write nw_frag exactly when fully masked, else with a mask."""
        mask = rule.wc.nw_frag_mask
        if mask == FLOW_NW_FRAG_MASK:
            return self._put(buf, offset, rule.flow.nw_frag)
        return self.putw(buf, offset, rule.flow.nw_frag,
                         mask & FLOW_NW_FRAG_MASK)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ARP_THA])
class MFArpTha(MFField):
    """Match field for the ARP target hardware (MAC) address."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a 6-byte MAC address."""
        return cls(header, MF_PACK_STRING_MAC)

    def put(self, buf, offset, rule):
        """Write the flow's arp_tha; return the number of bytes written."""
        return self._put(buf, offset, rule.flow.arp_tha)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_TYPE])
class MFICMPType(MFField):
    """Match field for the ICMP type (carried in tp_src)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        # The flow structure reuses tp_src for the ICMP type.
        return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_OF_ICMP_CODE])
class MFICMPCode(MFField):
    """Match field for the ICMP code (carried in tp_dst)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        # The flow structure reuses tp_dst for the ICMP code.
        return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_TYPE])
class MFICMPV6Type(MFField):
    """Match field for the ICMPv6 type (carried in tp_src)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        # The flow structure reuses tp_src for the ICMPv6 type.
        return self._put(buf, offset, rule.flow.tp_src)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_ICMPV6_CODE])
class MFICMPV6Code(MFField):
    """Match field for the ICMPv6 code (carried in tp_dst)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a single unsigned byte."""
        return cls(header, MF_PACK_STRING_8)

    def put(self, buf, offset, rule):
        # The flow structure reuses tp_dst for the ICMPv6 code.
        return self._put(buf, offset, rule.flow.tp_dst)
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_IPV6_LABEL])
class MFICMPV6Label(MFField):
    """Match field for the IPv6 flow label (NXM_NX_IPV6_LABEL)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 32-bit value."""
        return cls(header, MF_PACK_STRING_BE32)

    def put(self, buf, offset, rule):
        """Write the flow's ipv6_label; return the bytes written."""
        return self._put(buf, offset, rule.flow.ipv6_label)
@_register_make
@_set_nxm_headers([ofproto_v1_0.nxm_nx_reg(i) for i in range(FLOW_N_REGS)]
                  + [ofproto_v1_0.nxm_nx_reg_w(i) for i in range(FLOW_N_REGS)])
class MFRegister(MFField):
    """Match field for the Nicira extension registers reg0..regN."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 32-bit register."""
        return cls(header, MF_PACK_STRING_BE32)

    def put(self, buf, offset, rule):
        """Find which register this header addresses and write it."""
        for idx in range(FLOW_N_REGS):
            reg_headers = (ofproto_v1_0.nxm_nx_reg(idx),
                           ofproto_v1_0.nxm_nx_reg_w(idx))
            if self.nxm_header not in reg_headers:
                continue
            mask = rule.wc.regs_mask[idx]
            if mask:
                return self.putm(buf, offset, rule.flow.regs[idx], mask)
            return self._put(buf, offset, rule.flow.regs[idx])
@_register_make
@_set_nxm_headers([ofproto_v1_0.NXM_NX_PKT_MARK,
                   ofproto_v1_0.NXM_NX_PKT_MARK_W])
class MFPktMark(MFField):
    """Match field for the packet mark (NXM_NX_PKT_MARK)."""

    @classmethod
    def make(cls, header):
        """Build an instance packing a big-endian 32-bit mark."""
        return cls(header, MF_PACK_STRING_BE32)

    def put(self, buf, offset, rule):
        """Write pkt_mark via the masked helper (exact/wildcard/omit)."""
        return self.putm(buf, offset, rule.flow.pkt_mark,
                         rule.wc.pkt_mark_mask)
def serialize_nxm_match(rule, buf, offset):
    """Serialize *rule* as a sequence of NXM TLVs into *buf* at *offset*.

    Fields are emitted in a fixed order (port, L2, 802.1Q, L3, L4, IPv6,
    ARP, frag, mark, tunnel, registers).  The buffer is zero-padded to a
    multiple of 8 bytes, but the returned match length excludes the pad.
    """
    old_offset = offset
    if not rule.wc.wildcards & FWW_IN_PORT:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IN_PORT, rule)
    # Ethernet.
    if rule.flow.dl_dst != mac.DONTCARE:
        if rule.wc.dl_dst_mask:
            header = ofproto_v1_0.NXM_OF_ETH_DST_W
        else:
            header = ofproto_v1_0.NXM_OF_ETH_DST
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.dl_src != mac.DONTCARE:
        if rule.wc.dl_src_mask:
            header = ofproto_v1_0.NXM_OF_ETH_SRC_W
        else:
            header = ofproto_v1_0.NXM_OF_ETH_SRC
        offset += nxm_put(buf, offset, header, rule)
    if not rule.wc.wildcards & FWW_DL_TYPE:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ETH_TYPE, rule)
    # 802.1Q
    if rule.wc.vlan_tci_mask != 0:
        # An all-ones mask means an exact match; anything else wildcarded.
        if rule.wc.vlan_tci_mask == UINT16_MAX:
            header = ofproto_v1_0.NXM_OF_VLAN_TCI
        else:
            header = ofproto_v1_0.NXM_OF_VLAN_TCI_W
        offset += nxm_put(buf, offset, header, rule)
    # L3
    if not rule.wc.wildcards & FWW_NW_DSCP:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_TOS, rule)
    if not rule.wc.wildcards & FWW_NW_ECN:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_ECN, rule)
    if not rule.wc.wildcards & FWW_NW_TTL:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IP_TTL, rule)
    if not rule.wc.wildcards & FWW_NW_PROTO:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_IP_PROTO, rule)
    # ICMP type/code live in the tp_src/tp_dst slots of the flow.
    if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
                                                 == inet.IPPROTO_ICMP):
        if rule.wc.tp_src_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_TYPE, rule)
        if rule.wc.tp_dst_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_OF_ICMP_CODE, rule)
    # L4 ports: 6 == TCP, 17 == UDP; other protocols are skipped.
    if rule.flow.tp_src != 0:
        if rule.flow.nw_proto == 6:
            if rule.wc.tp_src_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_TCP_SRC
            else:
                header = ofproto_v1_0.NXM_OF_TCP_SRC_W
        elif rule.flow.nw_proto == 17:
            if rule.wc.tp_src_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_UDP_SRC
            else:
                header = ofproto_v1_0.NXM_OF_UDP_SRC_W
        else:
            header = 0
        if header != 0:
            offset += nxm_put(buf, offset, header, rule)
    if rule.flow.tp_dst != 0:
        if rule.flow.nw_proto == 6:
            if rule.wc.tp_dst_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_TCP_DST
            else:
                header = ofproto_v1_0.NXM_OF_TCP_DST_W
        elif rule.flow.nw_proto == 17:
            if rule.wc.tp_dst_mask == UINT16_MAX:
                header = ofproto_v1_0.NXM_OF_UDP_DST
            else:
                header = ofproto_v1_0.NXM_OF_UDP_DST_W
        else:
            header = 0
        if header != 0:
            offset += nxm_put(buf, offset, header, rule)
    # IP Source and Destination
    if rule.flow.nw_src != 0:
        if rule.wc.nw_src_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_IP_SRC
        else:
            header = ofproto_v1_0.NXM_OF_IP_SRC_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.nw_dst != 0:
        if rule.wc.nw_dst_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_IP_DST
        else:
            header = ofproto_v1_0.NXM_OF_IP_DST_W
        offset += nxm_put(buf, offset, header, rule)
    # IPv6
    if not rule.wc.wildcards & FWW_NW_PROTO and (rule.flow.nw_proto
                                                 == inet.IPPROTO_ICMPV6):
        if rule.wc.tp_src_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_TYPE,
                              rule)
        if rule.wc.tp_dst_mask != 0:
            offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ICMPV6_CODE,
                              rule)
    if not rule.wc.wildcards & FWW_IPV6_LABEL:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_IPV6_LABEL, rule)
    # IPv6 addresses are word lists; emptiness means "not set".
    if len(rule.flow.ipv6_src):
        if len(rule.wc.ipv6_src_mask):
            header = ofproto_v1_0.NXM_NX_IPV6_SRC_W
        else:
            header = ofproto_v1_0.NXM_NX_IPV6_SRC
        offset += nxm_put(buf, offset, header, rule)
    if len(rule.flow.ipv6_dst):
        if len(rule.wc.ipv6_dst_mask):
            header = ofproto_v1_0.NXM_NX_IPV6_DST_W
        else:
            header = ofproto_v1_0.NXM_NX_IPV6_DST
        offset += nxm_put(buf, offset, header, rule)
    if len(rule.flow.nd_target):
        if len(rule.wc.nd_target_mask):
            header = ofproto_v1_0.NXM_NX_ND_TARGET_W
        else:
            header = ofproto_v1_0.NXM_NX_ND_TARGET
        offset += nxm_put(buf, offset, header, rule)
    # ARP
    if rule.flow.arp_spa != 0:
        if rule.wc.arp_spa_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_ARP_SPA
        else:
            header = ofproto_v1_0.NXM_OF_ARP_SPA_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.arp_tpa != 0:
        if rule.wc.arp_tpa_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_OF_ARP_TPA
        else:
            header = ofproto_v1_0.NXM_OF_ARP_TPA_W
        offset += nxm_put(buf, offset, header, rule)
    if not rule.wc.wildcards & FWW_ARP_SHA:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_SHA, rule)
    if not rule.wc.wildcards & FWW_ARP_THA:
        offset += nxm_put(buf, offset, ofproto_v1_0.NXM_NX_ARP_THA, rule)
    if rule.flow.nw_frag:
        if rule.wc.nw_frag_mask == FLOW_NW_FRAG_MASK:
            header = ofproto_v1_0.NXM_NX_IP_FRAG
        else:
            header = ofproto_v1_0.NXM_NX_IP_FRAG_W
        offset += nxm_put(buf, offset, header, rule)
    if rule.flow.pkt_mark != 0:
        if rule.wc.pkt_mark_mask == UINT32_MAX:
            header = ofproto_v1_0.NXM_NX_PKT_MARK
        else:
            header = ofproto_v1_0.NXM_NX_PKT_MARK_W
        offset += nxm_put(buf, offset, header, rule)
    # Tunnel Id
    if rule.wc.tun_id_mask != 0:
        if rule.wc.tun_id_mask == UINT64_MAX:
            header = ofproto_v1_0.NXM_NX_TUN_ID
        else:
            header = ofproto_v1_0.NXM_NX_TUN_ID_W
        offset += nxm_put(buf, offset, header, rule)
    # XXX: Cookie
    for i in range(FLOW_N_REGS):
        if rule.wc.regs_bits & (1 << i):
            if rule.wc.regs_mask[i]:
                header = ofproto_v1_0.nxm_nx_reg_w(i)
            else:
                header = ofproto_v1_0.nxm_nx_reg(i)
            offset += nxm_put(buf, offset, header, rule)
    # Pad
    pad_len = round_up(offset) - offset
    msg_pack_into("%dx" % pad_len, buf, offset)
    # The returned length, the match_len, does not include the pad
    return offset - old_offset
def nxm_put(buf, offset, header, rule):
    """Write one NXM TLV (header then field payload); return its length."""
    match = NXMatch(header)
    written = match.put_header(buf, offset)
    field = mf_from_nxm_header(match.header)
    return written + field.put(buf, offset + written, rule)
def round_up(length):
    """Round *length* up to the next multiple of 8."""
    remainder = length % 8
    return length if remainder == 0 else length + 8 - remainder
class NXMatch(object):
    """Wrapper around a 32-bit NXM TLV header.

    Header layout (Open vSwitch Nicira extended match):
    ``vendor(16) | field(7) | hasmask(1) | length(8)``.
    """
    def __init__(self, header):
        self.header = header
    @classmethod
    def parser(cls, buf, offset, match_len):
        """Parse one NXM header from *buf*; raise on truncated input."""
        if match_len < 4:
            raise exception.OFPMalformedMessage
        (header,) = struct.unpack_from(ofproto_v1_0.NXM_HEADER_PACK_STRING,
                                       buf, offset)
        instance = cls(header)
        payload_len = instance.length()
        if payload_len == 0 or match_len < payload_len + 4:
            raise exception.OFPMalformedMessage
        return instance
    def vendor(self):
        """Return the 16-bit vendor part of the header."""
        return self.header >> 16
    def field(self):
        # Bug fix: the original used '%' (modulo) instead of '&' (bit
        # mask), which leaks vendor bits into the 7-bit field id whenever
        # the vendor is non-zero (all NXM_NX_* headers).
        return (self.header >> 9) & 0x7f
    def type(self):
        # Bug fix: '%' replaced with '&' as above; this is the combined
        # vendor+field value (everything above the hasmask bit).
        return (self.header >> 9) & 0x7fffff
    def hasmask(self):
        """Return 1 when the payload carries a value/mask pair."""
        return (self.header >> 8) & 1
    def length(self):
        """Return the payload length encoded in the low byte."""
        return self.header & 0xff
    def show(self):
        return ('%08x (vendor=%x, field=%x, hasmask=%x len=%x)' %
                (self.header, self.vendor(), self.field(),
                 self.hasmask(), self.length()))
    def put_header(self, buf, offset):
        """Pack the 4-byte header into *buf*; return the bytes written."""
        msg_pack_into(ofproto_v1_0.NXM_HEADER_PACK_STRING,
                      buf, offset, self.header)
        return struct.calcsize(ofproto_v1_0.NXM_HEADER_PACK_STRING)
|
from functools import wraps
import os
from docker.utils.ports import split_port
import json
from jsonschema import Draft4Validator, FormatChecker, ValidationError
from .errors import ConfigurationError
# Map of common misspellings / singular forms of docker-compose config
# keys to the canonical option name, used to build "did you mean" hints.
DOCKER_CONFIG_HINTS = {
    'cpu_share': 'cpu_shares',
    'add_host': 'extra_hosts',
    'hosts': 'extra_hosts',
    'extra_host': 'extra_hosts',
    'device': 'devices',
    'link': 'links',
    'memory_swap': 'memswap_limit',
    'port': 'ports',
    'privilege': 'privileged',
    'priviliged': 'privileged',
    'privilige': 'privileged',
    'volume': 'volumes',
    'workdir': 'working_dir',
}

# Regex character class of characters allowed in service names.  Raw
# string so the backslashes are literal and do not trigger Python's
# invalid-escape-sequence deprecation warning; the value is unchanged.
VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
@FormatChecker.cls_checks(format="ports", raises=ValidationError("Invalid port formatting, it should be '[[remote_ip:]remote_port:]port[/protocol]'"))
def format_ports(instance):
    """Return True when *instance* is a valid port mapping string."""
    try:
        split_port(instance)
    except ValueError:
        return False
    else:
        return True
def validate_service_names(func):
    """Decorator: reject configs whose service names parsed as integers.

    YAML parses bare numeric keys (e.g. ``8080:``) as ints; catch that
    early with a helpful message.  Uses ``isinstance`` rather than an
    exact ``type`` comparison, which is the idiomatic type check.
    """
    @wraps(func)
    def func_wrapper(config):
        for service_name in config.keys():
            if isinstance(service_name, int):
                raise ConfigurationError(
                    "Service name: {} needs to be a string, eg '{}'".format(service_name, service_name)
                )
        return func(config)
    return func_wrapper
def validate_top_level_object(func):
    """Decorator: ensure the parsed config is a dict before *func* runs."""
    @wraps(func)
    def func_wrapper(config):
        if isinstance(config, dict):
            return func(config)
        raise ConfigurationError(
            "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level."
        )
    return func_wrapper
def get_unsupported_config_msg(service_name, error_key):
    """Build the error message for an unknown option, with a spelling hint."""
    msg = "Unsupported config option for '{}' service: '{}'".format(service_name, error_key)
    hint = DOCKER_CONFIG_HINTS.get(error_key)
    if hint is not None:
        msg += " (did you mean '{}'?)".format(hint)
    return msg
def process_errors(errors):
    """
    jsonschema gives us an error tree full of information to explain what has
    gone wrong. Process each error and pull out relevant information and re-write
    helpful error messages that are relevant.
    """
    def _parse_key_from_error_msg(error):
        # jsonschema quotes the offending key in its message text.
        return error.message.split("'")[1]
    def _clean_error_message(message):
        # Strip Python 2 unicode-literal prefixes from jsonschema output.
        return message.replace("u'", "'")
    def _parse_valid_types_from_schema(schema):
        """
        Our defined types using $ref in the schema require some extra parsing
        retrieve a helpful type for error message display.
        """
        if '$ref' in schema:
            return schema['$ref'].replace("#/definitions/", "").replace("_", " ")
        else:
            return str(schema['type'])
    root_msgs = []
    invalid_keys = []
    required = []
    type_errors = []
    other_errors = []
    for error in errors:
        # handle root level errors
        if len(error.path) == 0:
            if error.validator == 'type':
                msg = "Top level object needs to be a dictionary. Check your .yml file that you have defined a service at the top level."
                root_msgs.append(msg)
            elif error.validator == 'additionalProperties':
                invalid_service_name = _parse_key_from_error_msg(error)
                msg = "Invalid service name '{}' - only {} characters are allowed".format(invalid_service_name, VALID_NAME_CHARS)
                root_msgs.append(msg)
            else:
                root_msgs.append(_clean_error_message(error.message))
        else:
            # handle service level errors
            service_name = error.path[0]
            # pop the service name off our path
            error.path.popleft()
            if error.validator == 'additionalProperties':
                invalid_config_key = _parse_key_from_error_msg(error)
                invalid_keys.append(get_unsupported_config_msg(service_name, invalid_config_key))
            elif error.validator == 'anyOf':
                if 'image' in error.instance and 'build' in error.instance:
                    required.append("Service '{}' has both an image and build path specified. A service can either be built to image or use an existing image, not both.".format(service_name))
                elif 'image' not in error.instance and 'build' not in error.instance:
                    required.append("Service '{}' has neither an image nor a build path specified. Exactly one must be provided.".format(service_name))
                else:
                    required.append(_clean_error_message(error.message))
            elif error.validator == 'oneOf':
                config_key = error.path[0]
                valid_types = [_parse_valid_types_from_schema(schema) for schema in error.schema['oneOf']]
                valid_type_msg = " or ".join(valid_types)
                type_errors.append("Service '{}' configuration key '{}' contains an invalid type, valid types are {}".format(
                    service_name, config_key, valid_type_msg)
                )
            elif error.validator == 'type':
                # Pick the right article for the expected type name.
                msg = "a"
                if error.validator_value == "array":
                    msg = "an"
                if len(error.path) > 0:
                    config_key = " ".join(["'%s'" % k for k in error.path])
                    type_errors.append("Service '{}' configuration key {} contains an invalid type, it should be {} {}".format(service_name, config_key, msg, error.validator_value))
                else:
                    root_msgs.append("Service '{}' doesn\'t have any configuration options. All top level keys in your docker-compose.yml must map to a dictionary of configuration options.'".format(service_name))
            elif error.validator == 'required':
                config_key = error.path[0]
                required.append("Service '{}' option '{}' is invalid, {}".format(service_name, config_key, _clean_error_message(error.message)))
            elif error.validator == 'dependencies':
                # Bug fix: dict.keys() is not indexable on Python 3;
                # next(iter(...)) works on both Python 2 and 3.
                dependency_key = next(iter(error.validator_value))
                required_keys = ",".join(error.validator_value[dependency_key])
                required.append("Invalid '{}' configuration for '{}' service: when defining '{}' you must set '{}' as well".format(
                    dependency_key, service_name, dependency_key, required_keys))
            else:
                config_key = " ".join(["'%s'" % k for k in error.path])
                err_msg = "Service '{}' configuration key {} value {}".format(service_name, config_key, error.message)
                other_errors.append(err_msg)
    return "\n".join(root_msgs + invalid_keys + required + type_errors + other_errors)
def validate_against_schema(config):
    """Validate *config* against schema.json next to this module.

    Raises ConfigurationError with a processed, human-readable message
    when any validation error is found.
    """
    schema_dir = os.path.dirname(os.path.abspath(__file__))
    schema_path = os.path.join(schema_dir, "schema.json")
    with open(schema_path, "r") as schema_fh:
        schema = json.load(schema_fh)
    validator = Draft4Validator(schema, format_checker=FormatChecker(["ports"]))
    errors = sorted(validator.iter_errors(config), key=str)
    if errors:
        raise ConfigurationError(
            "Validation failed, reason(s):\n{}".format(process_errors(errors)))
|
from ryu.base import app_manager
from ryu.topology import event
def get_switch(app, dpid=None):
    """Request the known switches (optionally a single dpid) from the
    topology application and return them."""
    reply = app.send_request(event.EventSwitchRequest(dpid))
    return reply.switches
def get_all_switch(app):
    """Return every switch known to the topology application."""
    return get_switch(app, dpid=None)
def get_link(app, dpid=None):
    """Request the known links (optionally for one dpid) from the
    topology application and return them."""
    reply = app.send_request(event.EventLinkRequest(dpid))
    return reply.links
def get_all_link(app):
    """Return every link known to the topology application."""
    return get_link(app, dpid=None)
# Ensure the switches discovery app is loaded so the topology events
# requested above are actually produced by something.
app_manager.require_app('ryu.topology.switches')
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetEntitiesWithRelationship(Choreography):
    """Choreo wrapper for the LittleSis GetEntitiesWithRelationship API."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the GetEntitiesWithRelationship Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(GetEntitiesWithRelationship, self).__init__(temboo_session, '/Library/LittleSis/Entity/GetEntitiesWithRelationship')
    def new_input_set(self):
        # Factory for the Choreo-specific InputSet.
        return GetEntitiesWithRelationshipInputSet()
    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific ResultSet.
        return GetEntitiesWithRelationshipResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # Wrap an in-flight execution handle for this Choreo.
        return GetEntitiesWithRelationshipChoreographyExecution(session, exec_id, path)
class GetEntitiesWithRelationshipInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the GetEntitiesWithRelationship
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_APIKey(self, value):
        """
        Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from LittleSis.org.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('APIKey', value)
    def set_CategoryIDs(self, value):
        """
        Set the value of the CategoryIDs input for this Choreo. ((optional, string) Comma delimited list of category IDs of the categories to which the resulting Entities should belong.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('CategoryIDs', value)
    def set_Current(self, value):
        """
        Set the value of the Current input for this Choreo. ((optional, integer) Set to 1 to limit the relationships returned to only past relationships. Set to 0 to limit relationships returned to only current relationships. Defaults to all.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('Current', value)
    def set_EntityID(self, value):
        """
        Set the value of the EntityID input for this Choreo. ((required, integer) The ID of the person or organization for which a record is to be returned.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('EntityID', value)
    def set_Number(self, value):
        """
        Set the value of the Number input for this Choreo. ((optional, integer) Specifies what number of results to show. Used in conjunction with Page parameter, a Number of 20 and a Page of 6 will show results 100-120.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('Number', value)
    def set_Order(self, value):
        """
        Set the value of the Order input for this Choreo. ((optional, integer) Specifies what order the given entity must have in the relationship.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('Order', value)
    def set_Page(self, value):
        """
        Set the value of the Page input for this Choreo. ((optional, integer) Specifies what page of results to show. Used in conjunction with Number parameter. A number of 20 and a Page of 6 will show results 100-120.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('Page', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) Format of the response returned by LittleSis.org. Acceptable inputs: xml or json. Defaults to xml)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('ResponseFormat', value)
    def set_SortBy(self, value):
        """
        Set the value of the SortBy input for this Choreo. ((optional, string) Defaults to sorting by entity, which returns a list of relationships grouped by related entity. Specify another sort order for the results. Acceptable inputs: category or relationship.)
        """
        super(GetEntitiesWithRelationshipInputSet, self)._set_input('SortBy', value)
class GetEntitiesWithRelationshipResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the GetEntitiesWithRelationship Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        """Deserialize a JSON response string into Python objects.

        NOTE(review): the parameter name shadows the builtin ``str``;
        kept as-is to preserve the generated public signature.
        """
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from LittleSis.org.)
        """
        return self._output.get('Response', None)
class GetEntitiesWithRelationshipChoreographyExecution(ChoreographyExecution):
    """Execution handle for the GetEntitiesWithRelationship Choreo."""
    def _make_result_set(self, response, path):
        # Wrap the raw response in the Choreo-specific ResultSet.
        return GetEntitiesWithRelationshipResultSet(response, path)
|
"""The tests for the Scene component."""
import io
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import light, scene
from homeassistant.util import yaml
from tests.common import get_test_home_assistant
from tests.components.light import common as common_light
from tests.components.scene import common
class TestScene(unittest.TestCase):
    """Test the scene component."""
    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        # Use the stub 'test' light platform so no real devices are needed.
        test_light = getattr(self.hass.components, 'test.light')
        test_light.init()
        assert setup_component(self.hass, light.DOMAIN, {
            light.DOMAIN: {'platform': 'test'}
        })
        self.light_1, self.light_2 = test_light.DEVICES[0:2]
        # Start every test from a known all-off state.
        common_light.turn_off(
            self.hass, [self.light_1.entity_id, self.light_2.entity_id])
        self.hass.block_till_done()
        assert not self.light_1.is_on
        assert not self.light_2.is_on
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_config_yaml_alias_anchor(self):
        """Test the usage of YAML aliases and anchors.
        The following test scene configuration is equivalent to:
        scene:
          - name: test
            entities:
                light_1: &light_1_state
                    state: 'on'
                    brightness: 100
                light_2: *light_1_state
        When encountering a YAML alias/anchor, the PyYAML parser will use a
        reference to the original dictionary, instead of creating a copy, so
        care needs to be taken to not modify the original.
        """
        # The same dict is deliberately shared between both entities,
        # mimicking what PyYAML does for an alias.
        entity_state = {
            'state': 'on',
            'brightness': 100,
        }
        assert setup_component(self.hass, scene.DOMAIN, {
            'scene': [{
                'name': 'test',
                'entities': {
                    self.light_1.entity_id: entity_state,
                    self.light_2.entity_id: entity_state,
                }
            }]
        })
        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()
        assert self.light_1.is_on
        assert self.light_2.is_on
        assert 100 == self.light_1.last_call('turn_on')[1].get('brightness')
        assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
    def test_config_yaml_bool(self):
        """Test parsing of booleans in yaml config."""
        # Bare 'on' is parsed by YAML as boolean True; the scene component
        # must accept it as a shorthand entity state.
        config = (
            'scene:\n'
            '  - name: test\n'
            '    entities:\n'
            '      {0}: on\n'
            '      {1}:\n'
            '        state: on\n'
            '        brightness: 100\n').format(
                self.light_1.entity_id, self.light_2.entity_id)
        with io.StringIO(config) as file:
            doc = yaml.yaml.safe_load(file)
        assert setup_component(self.hass, scene.DOMAIN, doc)
        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()
        assert self.light_1.is_on
        assert self.light_2.is_on
        assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
    def test_activate_scene(self):
        """Test active scene."""
        assert setup_component(self.hass, scene.DOMAIN, {
            'scene': [{
                'name': 'test',
                'entities': {
                    self.light_1.entity_id: 'on',
                    self.light_2.entity_id: {
                        'state': 'on',
                        'brightness': 100,
                    }
                }
            }]
        })
        common.activate(self.hass, 'scene.test')
        self.hass.block_till_done()
        assert self.light_1.is_on
        assert self.light_2.is_on
        assert 100 == self.light_2.last_call('turn_on')[1].get('brightness')
|
import os
import sys
import logging
import crontab
from website import settings
# Module-level logger; basicConfig ensures INFO-level messages are actually
# emitted when this module runs as a standalone script.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def app_prefix(path):
    """Return *path* joined onto the application root (``settings.APP_PATH``)."""
    root = settings.APP_PATH
    return os.path.join(root, path)
def ensure_item(cron, command):
    """Return the existing cron job for *command*, creating one if absent.

    If multiple jobs match, the first one found is returned.
    """
    for job in cron.find_command(command):
        return job
    return cron.new(command)
def main(dry_run=True):
    """Ensure all recurring maintenance scripts are registered in the
    crontab of ``settings.CRON_USER``.

    :param bool dry_run: When True (the default), only log the rendered
        crontab; the user's actual crontab is left untouched.
    """
    cron = crontab.CronTab(user=settings.CRON_USER)

    # (script path relative to APP_PATH, day-of-week, hour, minute).
    # A day-of-week of None means the job runs every day; 0 is Sunday.
    schedule = [
        ('scripts/analytics.sh', None, 2, 0),                   # Daily 2:00 a.m.
        ('scripts/send_digest.sh', None, 2, 0),                 # Daily 2:00 a.m.
        ('scripts/refresh_box_tokens.sh', None, 2, 0),          # Daily 2:00 a.m.
        ('scripts/retract_registrations.sh', None, 0, 0),       # Daily 12 a.m.
        ('scripts/embargo_registrations.sh', None, 0, 0),       # Daily 12 a.m.
        ('scripts/approve_registrations.sh', None, 0, 0),       # Daily 12 a.m.
        ('scripts/osfstorage/files_audit.sh', 0, 2, 0),         # Sunday 2:00 a.m.
        ('scripts/osfstorage/glacier_inventory.sh', 0, 0, 0),   # Sunday 12:00 a.m.
        ('scripts/osfstorage/glacier_audit.sh', 0, 6, 0),       # Sunday 6:00 a.m.
    ]

    for script, dow, hour, minute in schedule:
        item = ensure_item(cron, 'bash {}'.format(app_prefix(script)))
        if dow is not None:
            item.dow.on(dow)
        item.hour.on(hour)
        item.minute.on(minute)

    logger.info('Updating crontab file:')
    logger.info(cron.render())

    if not dry_run:
        cron.write_to_user(settings.CRON_USER)
if __name__ == '__main__':
    # Passing 'dry' anywhere on the command line keeps the run read-only.
    main(dry_run='dry' in sys.argv)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.