gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
import mock
import datetime as dt
from nose.tools import * # noqa (PEP8 asserts)
import pytest
from osf_tests.factories import (
ProjectFactory,
UserFactory,
RegistrationFactory,
NodeFactory,
CollectionFactory,
)
from osf.models import NodeRelation
from tests.base import OsfTestCase, get_default_metaschema
from framework.auth import Auth
from website.project.views.node import _view_project, _serialize_node_search, _get_children, _get_readable_descendants
from website.views import serialize_node_summary
from website.profile import utils
from website import filters, settings
from website.util import permissions
pytestmark = pytest.mark.django_db
class TestUserSerializers(OsfTestCase):
    """Tests for serializing users via website.profile.utils."""

    def test_serialize_user(self):
        # A merged user still serializes with its own identity fields.
        primary = UserFactory()
        merged = UserFactory()
        primary.merge_user(merged)
        serialized = utils.serialize_user(merged)
        assert_equal(serialized['id'], merged._primary_key)
        assert_equal(serialized['url'], merged.url)
        assert_equal(serialized.get('username', None), None)
        assert_equal(serialized['fullname'], merged.fullname)
        assert_equal(serialized['registered'], merged.is_registered)
        assert_equal(serialized['absolute_url'], merged.absolute_url)
        assert_equal(serialized['date_registered'], merged.date_registered.strftime('%Y-%m-%d'))
        assert_equal(serialized['active'], merged.is_active)

    def test_serialize_user_merged(self):
        # full=True exposes merge metadata for a merged user.
        primary = UserFactory()
        merged = UserFactory()
        primary.merge_user(merged)
        serialized = utils.serialize_user(merged, full=True)
        assert_true(serialized['is_merged'])
        assert_equal(serialized['merged_by']['url'], merged.merged_by.url)
        assert_equal(serialized['merged_by']['absolute_url'], merged.merged_by.absolute_url)

    def test_serialize_user_full(self):
        creator = UserFactory()
        ProjectFactory(creator=creator, is_public=False)
        NodeFactory(creator=creator)
        ProjectFactory(creator=creator, is_public=True)
        CollectionFactory(creator=creator)
        serialized = utils.serialize_user(creator, full=True, include_node_counts=True)
        expected_image_url = filters.profile_image_url(
            settings.PROFILE_IMAGE_PROVIDER,
            creator,
            use_ssl=True,
            size=settings.PROFILE_IMAGE_LARGE,
        )
        assert_equal(serialized['id'], creator._primary_key)
        assert_equal(serialized['url'], creator.url)
        assert_equal(serialized.get('username'), None)
        assert_equal(serialized['fullname'], creator.fullname)
        assert_equal(serialized['registered'], creator.is_registered)
        assert_equal(serialized['profile_image_url'], expected_image_url)
        assert_equal(serialized['absolute_url'], creator.absolute_url)
        assert_equal(serialized['date_registered'], creator.date_registered.strftime('%Y-%m-%d'))
        # Only non-deleted, non-registration project-category nodes count.
        visible_projects = []
        for contributed_node in creator.contributed:
            if (contributed_node.category == 'project'
                    and not contributed_node.is_registration
                    and not contributed_node.is_deleted):
                visible_projects.append(contributed_node)
        public_count = sum(1 for project in visible_projects if project.is_public)
        assert_equal(serialized['number_projects'], len(visible_projects))
        assert_equal(serialized['number_public_projects'], public_count)
class TestNodeSerializers(OsfTestCase):
    """Tests for node summary serialization and child-node helpers."""

    # Regression test for #489
    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/489
    def test_serialize_node_summary_private_node_should_include_id_and_primary_boolean_reg_and_fork(self):
        viewer = UserFactory()
        # The viewer is not a contributor, so this node is hidden from them.
        hidden = ProjectFactory(is_public=False)
        summary = serialize_node_summary(
            hidden,
            auth=Auth(viewer),
            primary=True,
        )
        # Identifying fields are present even when the node is not viewable.
        assert_equal(summary['id'], hidden._primary_key)
        assert_true(summary['primary'], True)
        assert_equal(summary['is_registration'], hidden.is_registration)
        assert_equal(summary['is_fork'], hidden.is_fork)

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/668
    def test_serialize_node_summary_for_registration_uses_correct_date_format(self):
        registration = RegistrationFactory()
        summary = serialize_node_summary(registration, auth=Auth(registration.creator))
        expected_date = registration.registered_date.strftime('%Y-%m-%d %H:%M UTC')
        assert_equal(summary['registered_date'], expected_date)

    # https://github.com/CenterForOpenScience/openscienceframework.org/issues/858
    def test_serialize_node_summary_private_registration_should_include_is_registration(self):
        outsider = UserFactory()
        # A non-contributor cannot see the private registration of a public project.
        public_project = ProjectFactory(is_public=True)
        registration = RegistrationFactory(project=public_project, user=public_project.creator)
        summary = serialize_node_summary(registration, auth=Auth(outsider))
        # The serialized result should still flag it as a registration.
        assert_true(summary['is_registration'])

    # https://openscience.atlassian.net/browse/OSF-4618
    def test_get_children_only_returns_child_nodes_with_admin_permissions(self):
        user = UserFactory()
        admin_project = ProjectFactory()
        admin_project.add_contributor(
            user,
            auth=Auth(admin_project.creator),
            permissions=permissions.expand_permissions(permissions.ADMIN),
        )
        admin_project.save()
        # One child per permission level; only the ADMIN one should be returned.
        for perm in (permissions.ADMIN, permissions.WRITE, permissions.READ):
            component = NodeFactory(parent=admin_project)
            component.add_contributor(
                user,
                auth=Auth(component.creator),
                permissions=permissions.expand_permissions(perm),
            )
            component.save()
        # A child the user does not contribute to at all.
        NodeFactory(parent=admin_project)
        components = _get_children(admin_project, Auth(user))
        assert_equal(len(components), 1)

    def test_serialize_node_summary_private_fork_should_include_is_fork(self):
        outsider = UserFactory()
        # A non-contributor cannot see a private fork of a public project.
        public_project = ProjectFactory(is_public=True)
        creator_auth = Auth(user=public_project.creator)
        fork = public_project.fork_node(creator_auth)
        summary = serialize_node_summary(
            fork,
            auth=Auth(outsider),
            primary=True,
        )
        # The serialized result should still flag it as a fork.
        assert_true(summary['is_fork'])

    def test_serialize_node_summary_private_fork_private_project_should_include_is_fork(self):
        # Contributor on a private project...
        contributor = UserFactory()
        private_project = ProjectFactory(is_public=False)
        private_project.add_contributor(contributor)
        # ...cannot see a private fork of that project.
        creator_auth = Auth(user=private_project.creator)
        fork = private_project.fork_node(creator_auth)
        summary = serialize_node_summary(
            fork,
            auth=Auth(contributor),
            primary=True,
        )
        assert_false(summary['can_view'])
        assert_true(summary['is_fork'])

    def test_serialize_node_summary_child_exists(self):
        user = UserFactory()
        parent_node = ProjectFactory(creator=user)
        linked_node = ProjectFactory(creator=user)
        # No children yet.
        assert_equal(_view_project(parent_node, Auth(user))['node']['child_exists'], False)
        # A node link does not count as a child.
        parent_node.add_node_link(linked_node, Auth(user), save=True)
        assert_equal(_view_project(parent_node, Auth(user))['node']['child_exists'], False)
        # A real component does.
        NodeFactory(creator=user, parent=parent_node)
        assert_equal(_view_project(parent_node, Auth(user))['node']['child_exists'], True)

    def test_serialize_node_search_returns_only_visible_contributors(self):
        node = NodeFactory()
        hidden_contributor = UserFactory()
        node.add_contributor(hidden_contributor, visible=False)
        serialized = _serialize_node_search(node)
        # Only the visible contributor should drive the author display.
        assert_equal(serialized['firstAuthor'], node.visible_contributors[0].family_name)
        assert_equal(len(node.visible_contributors), 1)
        assert_false(serialized['etal'])
class TestViewProject(OsfTestCase):
    """Tests for _view_project on pending registrations and child detection."""

    def setUp(self):
        super(TestViewProject, self).setUp()
        self.user = UserFactory()
        self.node = ProjectFactory(creator=self.user)

    def test_view_project_pending_registration_for_admin_contributor_does_contain_cancel_link(self):
        pending = RegistrationFactory(project=self.node, archive=True)
        assert_true(pending.is_pending_registration)
        serialized = _view_project(pending, Auth(self.user))
        cancel_link = serialized['node']['disapproval_link']
        # Admin contributors get a tokenized cancellation (disapproval) link.
        assert_not_equal(cancel_link, '')
        assert_in('/?token=', cancel_link)
        pending.remove()

    def test_view_project_pending_registration_for_write_contributor_does_not_contain_cancel_link(self):
        write_contrib = UserFactory()
        self.node.add_contributor(
            write_contrib,
            permissions=permissions.WRITE,
            auth=Auth(self.user),
            save=True,
        )
        pending = RegistrationFactory(project=self.node, archive=True)
        assert_true(pending.is_pending_registration)
        serialized = _view_project(pending, Auth(write_contrib))
        # Write-level contributors must not see a cancellation link.
        assert_equal(serialized['node']['disapproval_link'], '')
        pending.remove()

    def test_view_project_child_exists(self):
        linked_node = ProjectFactory(creator=self.user)
        assert_equal(
            _view_project(self.node, Auth(self.user))['node']['child_exists'], False)
        # A node link does not count as a child.
        self.node.add_node_link(linked_node, Auth(self.user), save=True)
        assert_equal(
            _view_project(self.node, Auth(self.user))['node']['child_exists'], False)
        # A real component does.
        NodeFactory(creator=self.user, parent=self.node)
        assert_equal(
            _view_project(self.node, Auth(self.user))['node']['child_exists'], True)
class TestViewProjectEmbeds(OsfTestCase):
    """Tests for the embed_* options of _view_project."""

    def setUp(self):
        super(TestViewProjectEmbeds, self).setUp()
        self.user = UserFactory()
        self.project = ProjectFactory(creator=self.user)

    def test_view_project_embed_forks_excludes_registrations(self):
        project = ProjectFactory()
        fork = project.fork_node(Auth(project.creator))
        RegistrationFactory(project=fork)
        serialized = _view_project(project, auth=Auth(project.creator), embed_forks=True)
        assert_in('forks', serialized['node'])
        embedded_forks = serialized['node']['forks']
        # Only the fork itself is embedded; its registration is excluded.
        assert_equal(len(embedded_forks), 1)
        assert_equal(embedded_forks[0]['id'], fork._id)

    # Regression test for https://github.com/CenterForOpenScience/osf.io/issues/1478
    @mock.patch('website.archiver.tasks.archive')
    def test_view_project_embed_registrations_includes_contribution_count(self, mock_archive):
        self.project.register_node(
            get_default_metaschema(), Auth(user=self.project.creator), '', None)
        serialized = _view_project(
            node=self.project, auth=Auth(self.project.creator), embed_registrations=True)
        assert_is_not_none(serialized['node']['registrations'][0]['nlogs'])

    # Regression test
    def test_view_project_embed_registrations_sorted_by_registered_date_descending(self):
        # Register the project several times with scattered registered_dates.
        created = []
        for days_ago in (21, 3, 2, 8, 13, 5, 1):
            registration = RegistrationFactory(project=self.project)
            registration.registered_date -= dt.timedelta(days_ago)
            registration.save()
            created.append(registration)
        expected = [
            reg._id
            for reg in sorted(created, key=lambda reg: reg.registered_date, reverse=True)
        ]
        serialized = _view_project(
            node=self.project, auth=Auth(self.project.creator), embed_registrations=True)
        actual = [embedded['id'] for embedded in serialized['node']['registrations']]
        assert_equal(actual, expected)

    def test_view_project_embed_descendants(self):
        child = NodeFactory(parent=self.project, creator=self.user)
        serialized = _view_project(
            self.project, auth=Auth(self.project.creator), embed_descendants=True)
        assert_in('descendants', serialized['node'])
        descendants = serialized['node']['descendants']
        assert_equal(len(descendants), 1)
        assert_equal(descendants[0]['id'], child._id)
class TestGetReadableDescendants(OsfTestCase):
    """Tests for _get_readable_descendants visibility traversal."""

    def setUp(self):
        super(TestGetReadableDescendants, self).setUp()
        self.user = UserFactory()

    def test__get_readable_descendants(self):
        project = ProjectFactory(creator=self.user)
        child = NodeFactory(parent=project, creator=self.user)
        nodes, all_readable = _get_readable_descendants(
            auth=Auth(project.creator), node=project)
        assert_equal(nodes[0]._id, child._id)
        assert_true(all_readable)

    def test__get_readable_descendants_includes_pointers(self):
        project = ProjectFactory(creator=self.user)
        pointed = ProjectFactory()
        project.add_pointer(pointed, auth=Auth(self.user))
        project.save()
        nodes, all_readable = _get_readable_descendants(
            auth=Auth(project.creator), node=project)
        assert_equal(len(nodes), 1)
        assert_equal(nodes[0].title, pointed.title)
        assert_equal(nodes[0]._id, pointed._id)
        assert_true(all_readable)

    def test__get_readable_descendants_masked_by_permissions(self):
        # Users should be able to see through components they do not have
        # permissions to.
        # Users should not be able to see through links to nodes they do not
        # have permissions to.
        #
        #          1(AB)
        #         /  |  \
        #        *   |   \
        #       /    |    \
        #   2(A)   4(B)   7(A)
        #     |      |     | \
        #     |      |     |  \
        #  3(AB)  5(B)  8(AB) 9(B)
        #            |
        #            |
        #          6(A)
        #
        userA = UserFactory(fullname='User A')
        userB = UserFactory(fullname='User B')

        def build(title, *readers):
            # Create a node owned by self.user with read-only contributors.
            node = ProjectFactory(creator=self.user, title=title)
            for reader in readers:
                node.add_contributor(reader, auth=Auth(self.user), permissions=['read'])
            return node

        project1 = build('One', userA, userB)
        component2 = build('Two', userA)
        component3 = build('Three', userA, userB)
        component4 = build('Four', userB)
        component5 = build('Five', userB)
        component6 = build('Six', userA)
        component7 = build('Seven', userA)
        component8 = build('Eight', userA, userB)
        component9 = build('Nine', userB)

        # 1 -> 2 is a pointer (node link); all other edges are parent/child.
        project1.add_pointer(component2, Auth(self.user))
        for parent, child in (
            (project1, component4),
            (project1, component7),
            (component2, component3),
            (component4, component5),
            (component5, component6),
            (component7, component8),
            (component7, component9),
        ):
            NodeRelation.objects.create(parent=parent, child=child)

        nodes, all_readable = _get_readable_descendants(auth=Auth(userA), node=project1)
        assert_equal(len(nodes), 3)
        assert_false(all_readable)
        for node in nodes:
            assert_in(node.title, ['Two', 'Six', 'Seven'])

        nodes, all_readable = _get_readable_descendants(auth=Auth(userB), node=project1)
        assert_equal(len(nodes), 3)
        assert_false(all_readable)
        for node in nodes:
            assert_in(node.title, ['Four', 'Eight', 'Nine'])
class TestNodeLogSerializers(OsfTestCase):
    """Tests for Node.serialize as consumed by node logs."""

    def test_serialize_node_for_logs(self):
        node = NodeFactory()
        serialized = node.serialize()
        # Every log-relevant field mirrors the model attribute.
        expected = {
            'id': node._primary_key,
            'category': node.category_display,
            'node_type': node.project_or_component,
            'url': node.url,
            'title': node.title,
            'api_url': node.api_url,
            'is_public': node.is_public,
            'is_registration': node.is_registration,
        }
        for key, value in expected.items():
            assert_equal(serialized[key], value)
class TestAddContributorJson(OsfTestCase):
    """Tests for utils.add_contributor_json serialization.

    The four test cases differ only in whether the user has employment
    and/or education history set; the eight assertions they all share are
    factored into _assert_common_fields instead of being duplicated
    verbatim in each test.
    """

    def setUp(self):
        super(TestAddContributorJson, self).setUp()
        self.user = UserFactory()
        self.profile = self.user.profile_url
        self.user_id = self.user._primary_key
        self.fullname = self.user.fullname
        self.username = self.user.username
        # One employment entry, shaped like the user's `jobs` field.
        self.jobs = [{
            'institution': 'School of Lover Boys',
            'department': 'Fancy Patter',
            'title': 'Lover Boy',
            'start': None,
            'end': None,
        }]
        # One education entry, shaped like the user's `schools` field.
        self.schools = [{
            'degree': 'Vibing',
            'institution': 'Queens University',
            'department': '',
            'location': '',
            'start': None,
            'end': None,
        }]

    def _assert_common_fields(self, user_info):
        # Assert the serialized fields that do not depend on jobs/schools.
        assert_equal(user_info['fullname'], self.fullname)
        assert_equal(user_info['email'], self.username)
        assert_equal(user_info['id'], self.user_id)
        assert_equal(user_info['n_projects_in_common'], 0)
        assert_equal(user_info['registered'], True)
        assert_equal(user_info['active'], True)
        assert_in('secure.gravatar.com', user_info['profile_image_url'])
        assert_equal(user_info['profile_url'], self.profile)

    def test_add_contributor_json(self):
        # User with no employment or education info listed
        user_info = utils.add_contributor_json(self.user)
        self._assert_common_fields(user_info)
        assert_equal(user_info['employment'], None)
        assert_equal(user_info['education'], None)

    def test_add_contributor_json_with_edu(self):
        # Test user with only education information
        self.user.schools = self.schools
        user_info = utils.add_contributor_json(self.user)
        self._assert_common_fields(user_info)
        assert_equal(user_info['employment'], None)
        assert_equal(user_info['education'], self.user.schools[0]['institution'])

    def test_add_contributor_json_with_job(self):
        # Test user with only employment information
        self.user.jobs = self.jobs
        user_info = utils.add_contributor_json(self.user)
        self._assert_common_fields(user_info)
        assert_equal(user_info['employment'], self.user.jobs[0]['institution'])
        assert_equal(user_info['education'], None)

    def test_add_contributor_json_with_job_and_edu(self):
        # User with both employment and education information
        self.user.jobs = self.jobs
        self.user.schools = self.schools
        user_info = utils.add_contributor_json(self.user)
        self._assert_common_fields(user_info)
        assert_equal(user_info['employment'], self.user.jobs[0]['institution'])
        assert_equal(user_info['education'], self.user.schools[0]['institution'])
| |
"""Support for statistics for sensor values."""
from collections import deque
import logging
import statistics
import voluptuous as vol
from homeassistant.components.recorder.models import States
from homeassistant.components.recorder.util import execute, session_scope
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.event import (
async_track_point_in_utc_time,
async_track_state_change_event,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util import dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)

# Extra state attribute names exposed by the sensor.
ATTR_AVERAGE_CHANGE = "average_change"
ATTR_CHANGE = "change"
ATTR_CHANGE_RATE = "change_rate"
ATTR_COUNT = "count"
ATTR_MAX_AGE = "max_age"
ATTR_MAX_VALUE = "max_value"
ATTR_MEAN = "mean"
ATTR_MEDIAN = "median"
ATTR_MIN_AGE = "min_age"
ATTR_MIN_VALUE = "min_value"
ATTR_QUANTILES = "quantiles"
ATTR_SAMPLING_SIZE = "sampling_size"
ATTR_STANDARD_DEVIATION = "standard_deviation"
ATTR_TOTAL = "total"
ATTR_VARIANCE = "variance"

# Configuration option keys.
CONF_SAMPLING_SIZE = "sampling_size"
CONF_MAX_AGE = "max_age"
CONF_PRECISION = "precision"
CONF_QUANTILE_INTERVALS = "quantile_intervals"
CONF_QUANTILE_METHOD = "quantile_method"

# Defaults applied when an option is omitted from the configuration.
DEFAULT_NAME = "Stats"
DEFAULT_SIZE = 20
DEFAULT_PRECISION = 2
DEFAULT_QUANTILE_INTERVALS = 4
DEFAULT_QUANTILE_METHOD = "exclusive"

ICON = "mdi:calculator"

# Platform schema: a source entity_id is required; every other option falls
# back to the defaults above.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_SAMPLING_SIZE, default=DEFAULT_SIZE): vol.All(
            vol.Coerce(int), vol.Range(min=1)
        ),
        vol.Optional(CONF_MAX_AGE): cv.time_period,
        vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
        vol.Optional(
            CONF_QUANTILE_INTERVALS, default=DEFAULT_QUANTILE_INTERVALS
        ): vol.All(vol.Coerce(int), vol.Range(min=2)),
        vol.Optional(CONF_QUANTILE_METHOD, default=DEFAULT_QUANTILE_METHOD): vol.In(
            ["exclusive", "inclusive"]
        ),
    }
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Statistics sensor."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    # The schema supplies defaults, so only CONF_MAX_AGE may be absent.
    sensor = StatisticsSensor(
        config.get(CONF_ENTITY_ID),
        config.get(CONF_NAME),
        config.get(CONF_SAMPLING_SIZE),
        config.get(CONF_MAX_AGE),
        config.get(CONF_PRECISION),
        config.get(CONF_QUANTILE_INTERVALS),
        config.get(CONF_QUANTILE_METHOD),
    )
    # update_before_add=True triggers an initial async_update.
    async_add_entities([sensor], True)
    return True
class StatisticsSensor(SensorEntity):
    """Representation of a Statistics sensor.

    Maintains a bounded sliding window of samples taken from another
    entity's state changes and exposes aggregate statistics (mean, median,
    quantiles, change rate, ...) as state attributes. For binary_sensor
    sources, only the sample count is meaningful.
    """

    def __init__(
        self,
        entity_id,
        name,
        sampling_size,
        max_age,
        precision,
        quantile_intervals,
        quantile_method,
    ):
        """Initialize the Statistics sensor.

        entity_id: source entity whose state changes are sampled.
        name: friendly name of this sensor.
        sampling_size: maximum number of samples retained (deque maxlen).
        max_age: optional timedelta; samples older than this are purged.
        precision: decimal places used when rounding computed statistics.
        quantile_intervals: `n` passed to statistics.quantiles.
        quantile_method: "exclusive" or "inclusive" (statistics.quantiles).
        """
        self._entity_id = entity_id
        # binary_sensor sources have no numeric value; only the count is
        # exposed as the state (see native_value).
        self.is_binary = self._entity_id.split(".")[0] == "binary_sensor"
        self._name = name
        self._sampling_size = sampling_size
        self._max_age = max_age
        self._precision = precision
        self._quantile_intervals = quantile_intervals
        self._quantile_method = quantile_method
        self._unit_of_measurement = None
        # Parallel deques: states[i] was recorded at ages[i]. maxlen makes
        # them self-trimming once sampling_size is reached.
        self.states = deque(maxlen=self._sampling_size)
        self.ages = deque(maxlen=self._sampling_size)
        self.count = 0
        self.mean = self.median = self.quantiles = self.stdev = self.variance = None
        self.total = self.min = self.max = None
        # NOTE: self.max_age (timestamp of the newest retained sample) is
        # distinct from self._max_age (the configured purge window).
        self.min_age = self.max_age = None
        self.change = self.average_change = self.change_rate = None
        # Cancel callback for the pending scheduled re-update, if any.
        self._update_listener = None

    async def async_added_to_hass(self):
        """Register callbacks."""

        @callback
        def async_stats_sensor_state_listener(event):
            """Handle the sensor state changes."""
            new_state = event.data.get("new_state")
            if new_state is None:
                return
            # Mirror the source entity's unit on every update.
            self._unit_of_measurement = new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            )
            self._add_state_to_queue(new_state)
            self.async_schedule_update_ha_state(True)

        @callback
        def async_stats_sensor_startup(_):
            """Add listener and get recorded state."""
            _LOGGER.debug("Startup for %s", self.entity_id)
            self.async_on_remove(
                async_track_state_change_event(
                    self.hass, [self._entity_id], async_stats_sensor_state_listener
                )
            )
            if "recorder" in self.hass.config.components:
                # Only use the database if it's configured
                self.hass.async_create_task(self._async_initialize_from_database())

        # Defer subscription until Home Assistant has fully started.
        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_stats_sensor_startup
        )

    def _add_state_to_queue(self, new_state):
        """Add the state to the queue."""
        # Unknown/unavailable states carry no usable value.
        if new_state.state in (STATE_UNKNOWN, STATE_UNAVAILABLE):
            return
        try:
            if self.is_binary:
                # Binary states are stored verbatim; only the count is used.
                self.states.append(new_state.state)
            else:
                self.states.append(float(new_state.state))
            self.ages.append(new_state.last_updated)
        except ValueError:
            _LOGGER.error(
                "%s: parsing error, expected number and received %s",
                self.entity_id,
                new_state.state,
            )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def native_value(self):
        """Return the state of the sensor: mean, or count for binary sources."""
        return self.mean if not self.is_binary else self.count

    @property
    def native_unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement if not self.is_binary else None

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def extra_state_attributes(self):
        """Return the state attributes of the sensor.

        Binary sources expose no numeric statistics (implicitly returns None).
        """
        if not self.is_binary:
            return {
                ATTR_SAMPLING_SIZE: self._sampling_size,
                ATTR_COUNT: self.count,
                ATTR_MEAN: self.mean,
                ATTR_MEDIAN: self.median,
                ATTR_QUANTILES: self.quantiles,
                ATTR_STANDARD_DEVIATION: self.stdev,
                ATTR_VARIANCE: self.variance,
                ATTR_TOTAL: self.total,
                ATTR_MIN_VALUE: self.min,
                ATTR_MAX_VALUE: self.max,
                ATTR_MIN_AGE: self.min_age,
                ATTR_MAX_AGE: self.max_age,
                ATTR_CHANGE: self.change,
                ATTR_AVERAGE_CHANGE: self.average_change,
                ATTR_CHANGE_RATE: self.change_rate,
            }

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON

    def _purge_old(self):
        """Remove states which are older than self._max_age."""
        now = dt_util.utcnow()
        _LOGGER.debug(
            "%s: purging records older then %s(%s)",
            self.entity_id,
            dt_util.as_local(now - self._max_age),
            self._max_age,
        )
        # ages[0] is always the oldest sample; pop both deques in lockstep.
        while self.ages and (now - self.ages[0]) > self._max_age:
            _LOGGER.debug(
                "%s: purging record with datetime %s(%s)",
                self.entity_id,
                dt_util.as_local(self.ages[0]),
                (now - self.ages[0]),
            )
            self.ages.popleft()
            self.states.popleft()

    def _next_to_purge_timestamp(self):
        """Find the timestamp when the next purge would occur.

        Returns None when no samples are held or no max_age is configured.
        """
        if self.ages and self._max_age:
            # Take the oldest entry from the ages list and add the configured max_age.
            # If executed after purging old states, the result is the next timestamp
            # in the future when the oldest state will expire.
            return self.ages[0] + self._max_age
        return None

    async def async_update(self):
        """Get the latest data and updates the states."""
        _LOGGER.debug("%s: updating statistics", self.entity_id)
        if self._max_age is not None:
            self._purge_old()
        self.count = len(self.states)
        if not self.is_binary:
            try:  # require only one data point
                self.mean = round(statistics.mean(self.states), self._precision)
                self.median = round(statistics.median(self.states), self._precision)
            except statistics.StatisticsError as err:
                _LOGGER.debug("%s: %s", self.entity_id, err)
                self.mean = self.median = STATE_UNKNOWN
            try:  # require at least two data points
                self.stdev = round(statistics.stdev(self.states), self._precision)
                self.variance = round(statistics.variance(self.states), self._precision)
                # statistics.quantiles needs more data points than intervals.
                if self._quantile_intervals < self.count:
                    self.quantiles = [
                        round(quantile, self._precision)
                        for quantile in statistics.quantiles(
                            self.states,
                            n=self._quantile_intervals,
                            method=self._quantile_method,
                        )
                    ]
            except statistics.StatisticsError as err:
                _LOGGER.debug("%s: %s", self.entity_id, err)
                self.stdev = self.variance = self.quantiles = STATE_UNKNOWN
            if self.states:
                self.total = round(sum(self.states), self._precision)
                self.min = round(min(self.states), self._precision)
                self.max = round(max(self.states), self._precision)
                # Deques are time-ordered: index 0 is oldest, -1 is newest.
                self.min_age = self.ages[0]
                self.max_age = self.ages[-1]
                self.change = self.states[-1] - self.states[0]
                self.average_change = self.change
                self.change_rate = 0
                if len(self.states) > 1:
                    self.average_change /= len(self.states) - 1
                    time_diff = (self.max_age - self.min_age).total_seconds()
                    if time_diff > 0:
                        self.change_rate = self.change / time_diff
                self.change = round(self.change, self._precision)
                self.average_change = round(self.average_change, self._precision)
                self.change_rate = round(self.change_rate, self._precision)
            else:
                # Empty window: report unknown values with a "now" timestamp.
                self.total = self.min = self.max = STATE_UNKNOWN
                self.min_age = self.max_age = dt_util.utcnow()
                self.change = self.average_change = STATE_UNKNOWN
                self.change_rate = STATE_UNKNOWN
        # If max_age is set, ensure to update again after the defined interval.
        next_to_purge_timestamp = self._next_to_purge_timestamp()
        if next_to_purge_timestamp:
            _LOGGER.debug(
                "%s: scheduling update at %s", self.entity_id, next_to_purge_timestamp
            )
            # Cancel any previously scheduled update before re-arming.
            if self._update_listener:
                self._update_listener()
                self._update_listener = None

            @callback
            def _scheduled_update(now):
                """Timer callback for sensor update."""
                _LOGGER.debug("%s: executing scheduled update", self.entity_id)
                self.async_schedule_update_ha_state(True)
                self._update_listener = None

            self._update_listener = async_track_point_in_utc_time(
                self.hass, _scheduled_update, next_to_purge_timestamp
            )

    async def _async_initialize_from_database(self):
        """Initialize the list of states from the database.

        The query will get the list of states in DESCENDING order so that we
        can limit the result to self._sample_size. Afterwards reverse the
        list so that we get it in the right order again.

        If MaxAge is provided then the query will restrict to entries younger
        than current datetime - MaxAge.
        """
        _LOGGER.debug("%s: initializing values from the database", self.entity_id)
        with session_scope(hass=self.hass) as session:
            query = session.query(States).filter(
                States.entity_id == self._entity_id.lower()
            )
            if self._max_age is not None:
                records_older_then = dt_util.utcnow() - self._max_age
                _LOGGER.debug(
                    "%s: retrieve records not older then %s",
                    self.entity_id,
                    records_older_then,
                )
                query = query.filter(States.last_updated >= records_older_then)
            else:
                _LOGGER.debug("%s: retrieving all records", self.entity_id)
            query = query.order_by(States.last_updated.desc()).limit(
                self._sampling_size
            )
            states = execute(query, to_native=True, validate_entity_ids=False)
        # Oldest first, so the deques end up in chronological order.
        for state in reversed(states):
            self._add_state_to_queue(state)
        self.async_schedule_update_ha_state(True)
        _LOGGER.debug("%s: initializing from database completed", self.entity_id)
| |
# coding=utf-8
# Copyright 2021 The Reach ML Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluates TF-Agents policies."""
import functools
import os
import shutil
from absl import app
from absl import flags
from absl import logging
import gin
# Need import to get env registration.
from ibc.environments.block_pushing import block_pushing # pylint: disable=unused-import
from ibc.environments.block_pushing import block_pushing_discontinuous
from ibc.environments.block_pushing import block_pushing_multimodal
from ibc.environments.collect.utils import get_oracle as get_oracle_module
from ibc.environments.particle import particle # pylint: disable=unused-import
from ibc.environments.particle import particle_oracles
from tf_agents.drivers import py_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import wrappers
from tf_agents.metrics import py_metrics
# Need import to get tensorflow_probability registration.
from tf_agents.policies import greedy_policy # pylint: disable=unused-import
from tf_agents.policies import py_tf_eager_policy
from tf_agents.policies import random_py_policy
from tf_agents.system import system_multiprocessing as multiprocessing
from tf_agents.utils import example_encoding_dataset
# Gin configuration flags.
flags.DEFINE_multi_string('gin_file', None, 'Paths to the gin-config files.')
flags.DEFINE_multi_string('gin_bindings', None, 'Gin binding parameters.')
# Evaluation run settings.
flags.DEFINE_integer('num_episodes', 5, 'Number of episodes to evaluate.')
flags.DEFINE_integer('history_length', None,
                     'If set the previous n observations are stacked.')
# Output / visualization options.
flags.DEFINE_bool('video', False,
                  'If true record a video of the evaluations.')
flags.DEFINE_bool('viz_img', False,
                  'If true records an img of evaluation trajectories.')
flags.DEFINE_string('output_path', '/tmp/ibc/policy_eval/',
                    'Path to save videos at.')
# Environment selection.
flags.DEFINE_enum(
    'task', None,
    ['REACH', 'PUSH', 'INSERT', 'REACH_NORMALIZED', 'PUSH_NORMALIZED',
     'PARTICLE', 'PUSH_DISCONTINUOUS', 'PUSH_MULTIMODAL'],
    'Which task of the enum to evaluate.')
flags.DEFINE_bool('use_image_obs', False,
                  'Whether to include image observations.')
flags.DEFINE_bool('flatten_env', False,
                  'If True the environment observations are flattened.')
flags.DEFINE_bool('shared_memory', False,
                  'If True the connection to pybullet uses shared memory.')
# Policy selection: either a saved model plus checkpoint, or a static policy.
flags.DEFINE_string('saved_model_path', None,
                    'Path to the saved_model policy to eval.')
flags.DEFINE_string('checkpoint_path', None,
                    'Path to the checkpoint to evaluate.')
flags.DEFINE_enum('policy', None, [
    'random', 'oracle_reach', 'oracle_push', 'oracle_reach_normalized',
    'oracle_push_normalized', 'particle_green_then_blue'
], 'Static policies to evaluate.')
# Dataset recording and parallelism.
flags.DEFINE_string(
    'dataset_path', None,
    'If set a dataset of the policy evaluation will be saved '
    'to the given path.')
flags.DEFINE_integer('replicas', None,
                     'Number of parallel replicas generating evaluations.')
def evaluate(num_episodes,
             task,
             use_image_obs,
             shared_memory,
             flatten_env,
             saved_model_path=None,
             checkpoint_path=None,
             static_policy=None,
             dataset_path=None,
             history_length=None,
             video=False,
             viz_img=False,
             output_path=None):
  """Evaluates the given policy for NUM_EPISODES episodes.

  Args:
    num_episodes: Number of episodes to run; also sizes the metric buffers.
    task: Task enum name (e.g. 'REACH', 'PUSH', 'PARTICLE').
    use_image_obs: Whether the env includes image observations.
    shared_memory: Whether the pybullet connection uses shared memory.
    flatten_env: If True, wrap the env to flatten observations.
    saved_model_path: Path to a saved_model policy to evaluate.
    checkpoint_path: Checkpoint restored into the saved_model policy.
    static_policy: Name of a static policy ('random' or an oracle).
    dataset_path: If set, record the evaluation to a TFRecord dataset here.
    history_length: If set, stack the previous n observations.
    video: If True, compute an output path for evaluation videos.
    viz_img: If True, save images of trajectories (Particle envs only).
    output_path: Directory used for video output.

  Raises:
    ValueError: For an unknown task/policy or inconsistent policy arguments.
  """
  # Validate the policy arguments up front, before building anything, so a
  # missing checkpoint_path fails with a clear error rather than an
  # AttributeError in the video-path computation below.
  if saved_model_path and static_policy:
    raise ValueError(
        'Only pass in either a `saved_model_path` or a `static_policy`.')
  if saved_model_path and not checkpoint_path:
    raise ValueError('Must provide a `checkpoint_path` with a saved_model.')

  if task in ['REACH', 'PUSH', 'INSERT', 'REACH_NORMALIZED', 'PUSH_NORMALIZED']:
    # Options are supported through flags to build_env_name, and different
    # registered envs.
    env_name = block_pushing.build_env_name(task, shared_memory, use_image_obs)
  elif task in ['PUSH_DISCONTINUOUS']:
    env_name = block_pushing_discontinuous.build_env_name(
        task, shared_memory, use_image_obs)
  elif task in ['PUSH_MULTIMODAL']:
    env_name = block_pushing_multimodal.build_env_name(
        task, shared_memory, use_image_obs)
  elif task == 'PARTICLE':
    # Options are supported through gin, registered env is the same.
    env_name = 'Particle-v0'
    assert not (shared_memory or use_image_obs)  # Not supported.
  else:
    raise ValueError("I don't recognize this task to eval.")

  if flatten_env:
    env = suite_gym.load(
        env_name, env_wrappers=[wrappers.FlattenObservationsWrapper])
  else:
    env = suite_gym.load(env_name)

  if history_length:
    env = wrappers.HistoryWrapper(
        env, history_length=history_length, tile_first_step_obs=True)

  if video:
    # NOTE(review): video_path is computed but no video observer is attached
    # in this function -- confirm whether recording is wired up elsewhere.
    video_path = output_path
    if saved_model_path:
      policy_name = os.path.basename(os.path.normpath(saved_model_path))
      checkpoint_ref = checkpoint_path.split('_')[-1]
      video_path = os.path.join(video_path,
                                policy_name + '_' + checkpoint_ref + 'vid.mp4')
    if static_policy:
      video_path = os.path.join(video_path, static_policy, 'vid.mp4')

  if saved_model_path:
    policy = py_tf_eager_policy.SavedModelPyTFEagerPolicy(
        saved_model_path, load_specs_from_pbtxt=True)
    policy.update_from_checkpoint(checkpoint_path)
  else:
    if static_policy == 'random':
      policy = random_py_policy.RandomPyPolicy(env.time_step_spec(),
                                               env.action_spec())
    elif task == 'PARTICLE':
      if static_policy == 'particle_green_then_blue':
        # TODO(peteflorence): support more particle oracle options.
        policy = particle_oracles.ParticleOracle(env)
      else:
        raise ValueError('Unknown policy for given task: %s: ' % static_policy)
    else:
      # Get an oracle. Use the `task` argument (not flags.FLAGS.task) so this
      # function also works when called programmatically, e.g. from the
      # replica subprocesses spawned by main().
      policy = get_oracle_module.get_oracle(env, task)

  metrics = [
      py_metrics.AverageReturnMetric(buffer_size=num_episodes),
      py_metrics.AverageEpisodeLengthMetric(buffer_size=num_episodes),
  ]
  env_metrics, success_metric = env.get_metrics(num_episodes)
  metrics += env_metrics
  observers = metrics[:]

  if viz_img and ('Particle' in env_name):
    visualization_dir = '/tmp/particle_oracle'
    shutil.rmtree(visualization_dir, ignore_errors=True)
    env.set_img_save_dir(visualization_dir)
    observers += [env.save_image]

  if dataset_path:
    # TODO(oars, peteflorence): Consider a custom observer to filter only
    # positive examples.
    observers.append(
        example_encoding_dataset.TFRecordObserver(
            dataset_path,
            policy.collect_data_spec,
            py_mode=True,
            compress_image=True))

  driver = py_driver.PyDriver(env, policy, observers, max_episodes=num_episodes)
  time_step = env.reset()
  initial_policy_state = policy.get_initial_state(1)
  driver.run(time_step, initial_policy_state)
  log = ['{0} = {1}'.format(m.name, m.result()) for m in metrics]
  logging.info('\n\t\t '.join(log))
  env.close()
def main(_):
  """Parses gin config and runs the evaluation, optionally in replicas."""
  logging.set_verbosity(logging.INFO)
  gin.add_config_file_search_path(os.getcwd())
  gin.parse_config_files_and_bindings(flags.FLAGS.gin_file,
                                      flags.FLAGS.gin_bindings)
  # Keyword arguments shared by both the replicated and single-run paths.
  common_kwargs = dict(
      num_episodes=flags.FLAGS.num_episodes,
      task=flags.FLAGS.task,
      use_image_obs=flags.FLAGS.use_image_obs,
      shared_memory=flags.FLAGS.shared_memory,
      flatten_env=flags.FLAGS.flatten_env,
      saved_model_path=flags.FLAGS.saved_model_path,
      checkpoint_path=flags.FLAGS.checkpoint_path,
      static_policy=flags.FLAGS.policy,
      history_length=flags.FLAGS.history_length,
  )
  if flags.FLAGS.replicas:
    if not flags.FLAGS.dataset_path:
      raise ValueError(
          'A dataset_path must be provided when replicas are specified.')
    # Give each replica its own numbered dataset file.
    root, ext = os.path.splitext(flags.FLAGS.dataset_path)
    context = multiprocessing.get_context()
    jobs = []
    for replica in range(flags.FLAGS.replicas):
      replica_kwargs = dict(common_kwargs,
                            dataset_path=root + '_%d' % replica + ext)
      job = context.Process(target=evaluate, kwargs=replica_kwargs)
      job.start()
      jobs.append(job)
    for job in jobs:
      job.join()
  else:
    evaluate(
        dataset_path=flags.FLAGS.dataset_path,
        video=flags.FLAGS.video,
        viz_img=flags.FLAGS.viz_img,
        output_path=flags.FLAGS.output_path,
        **common_kwargs)
# Entry point: delegate to app.run through the tf_agents multiprocessing
# wrapper (the `replicas` path above spawns subprocesses).
if __name__ == '__main__':
  multiprocessing.handle_main(functools.partial(app.run, main))
| |
"""A Scheme interpreter and its read-eval-print loop."""
from scheme_primitives import *
from scheme_reader import *
from ucb import main, trace
##############
# Eval/Apply #
##############
def scheme_eval(expr, env, _=None):  # Optional third argument is ignored
    """Evaluate Scheme expression EXPR in environment ENV.

    >>> expr = read_line("(+ 2 2)")
    >>> expr
    Pair('+', Pair(2, Pair(2, nil)))
    >>> scheme_eval(expr, create_global_frame())
    4
    """
    assert expr is not None
    # Atoms: symbols are looked up; self-evaluating values are returned as-is.
    if scheme_symbolp(expr):
        return env.lookup(expr)
    if self_evaluating(expr):
        return expr
    # Anything else must be a well-formed list (a combination).
    if not scheme_listp(expr):
        raise SchemeError("malformed list: {0}".format(str(expr)))
    operator, operands = expr.first, expr.second
    # Special forms are dispatched on their keyword without evaluating operands.
    if scheme_symbolp(operator) and operator in SPECIAL_FORMS:
        return SPECIAL_FORMS[operator](operands, env)
    # Otherwise: evaluate operator and operands, then apply.
    procedure = scheme_eval(operator, env)
    arguments = operands.map(lambda operand: scheme_eval(operand, env))
    return scheme_apply(procedure, arguments, env)
def self_evaluating(expr):
    """Return whether EXPR evaluates to itself."""
    if expr is okay:
        return True
    return scheme_atomp(expr) or scheme_stringp(expr)
def scheme_apply(procedure, args, env):
    """Apply Scheme PROCEDURE to argument values ARGS in environment ENV."""
    # Primitives are delegated to Python; user-defined procedures get a
    # new call frame in which their body is evaluated.
    if isinstance(procedure, PrimitiveProcedure):
        return apply_primitive(procedure, args, env)
    if not isinstance(procedure, UserDefinedProcedure):
        raise SchemeError("cannot call: {0}".format(str(procedure)))
    call_frame = make_call_frame(procedure, args, env)
    return eval_all(procedure.body, call_frame)
def apply_primitive(procedure, args_scheme_list, env):
    """Apply PrimitiveProcedure PROCEDURE to ARGS_SCHEME_LIST in ENV.

    >>> env = create_global_frame()
    >>> plus = env.bindings["+"]
    >>> twos = Pair(2, Pair(2, nil))
    >>> apply_primitive(plus, twos, env)
    4
    """
    # Convert a Scheme list to a Python list
    args = []
    while args_scheme_list is not nil:
        args.append(args_scheme_list.first)
        args_scheme_list = args_scheme_list.second
    # BEGIN Question 4
    if procedure.use_env:
        # Some primitives (e.g. eval, load) also receive the current env.
        args.append(env)
    try:
        return procedure.fn(*args)
    except TypeError:
        # A TypeError from calling fn typically means a wrong argument count.
        # Include a message rather than raising a bare SchemeError, which
        # hides the cause from the user.
        raise SchemeError(
            "invalid number of arguments: {0}".format(procedure.fn))
    # END Question 4
def eval_all(expressions, env):
    """Evaluate a Scheme list of EXPRESSIONS & return the value of the last.

    An empty list of expressions evaluates to okay.
    """
    # BEGIN Question 7
    # (Removed leftover "*** REPLACE THIS LINE ***" template scaffold -- it
    # was a dead no-op expression statement.)
    if expressions == nil:
        return okay
    val = scheme_eval(expressions.first, env)
    if expressions.second == nil:
        return val
    return eval_all(expressions.second, env)
    # END Question 7
def make_call_frame(procedure, args, env):
    """Make a frame that binds the formal parameters of PROCEDURE to ARGS."""
    # BEGIN Question 12
    # Mu procedures are dynamically scoped: extend the *calling* environment.
    # Lambda procedures are lexically scoped: extend the defining environment.
    parent = env if isinstance(procedure, MuProcedure) else procedure.env
    return parent.make_child_frame(procedure.formals, args)
    # END Question 12
################
# Environments #
################
class Frame:
    """An environment frame binds Scheme symbols to Scheme values."""
    def __init__(self, parent):
        """An empty frame with a PARENT frame (which may be None)."""
        self.bindings = {}  # Maps symbol names to Scheme values.
        self.parent = parent  # Enclosing frame; None only for the global frame.
    def __repr__(self):
        if self.parent is None:
            return "<Global Frame>"
        else:
            s = sorted('{0}: {1}'.format(k,v) for k,v in self.bindings.items())
            return "<{{{0}}} -> {1}>".format(', '.join(s), repr(self.parent))
    def lookup(self, symbol):
        """Return the value bound to SYMBOL. Errors if SYMBOL is not found."""
        # BEGIN Question 3
        # Walk the chain of parent frames until the symbol is found
        # (lexical scoping).
        frame = self
        while frame is not None and symbol not in frame.bindings:
            frame = frame.parent
        if frame is not None:
            return frame.bindings[symbol]
        # END Question 3
        raise SchemeError("unknown identifier: {0}".format(symbol))
    def make_child_frame(self, formals, vals):
        """Return a new local frame whose parent is SELF, in which the symbols
        in a Scheme list of formal parameters FORMALS are bound to the Scheme
        values in the Scheme list VALS. Raise an error if too many or too few
        vals are given.
        >>> env = create_global_frame()
        >>> formals, expressions = read_line("(a b c)"), read_line("(1 2 3)")
        >>> env.make_child_frame(formals, expressions)
        <{a: 1, b: 2, c: 3} -> <Global Frame>>
        """
        child = Frame(self) # Create a new child with self as the parent
        # BEGIN Question 10
        if len(formals) != len(vals):
            raise SchemeError("Formal parameter and Arguments sizes don't match")
        # NOTE(review): zip() below relies on Pair supporting iteration --
        # confirm scheme_reader.Pair defines __iter__ (or __getitem__).
        if not (formals == nil and vals == nil):
            for para, argu in zip(formals, vals):
                child.bindings[para] = argu
        # END Question 10
        return child
    def define(self, symbol, value):
        """Define Scheme SYMBOL to have VALUE."""
        self.bindings[symbol] = value
class UserDefinedProcedure:
    """A procedure defined by an expression.

    Base class for LambdaProcedure and MuProcedure; scheme_apply evaluates
    the body of any UserDefinedProcedure in a new call frame.
    """
class LambdaProcedure(UserDefinedProcedure):
    """A procedure defined by a lambda expression or a define form.

    Lexically scoped: calls extend the environment ENV in which the
    procedure was defined (see make_call_frame).
    """
    def __init__(self, formals, body, env):
        """Store FORMALS (a Scheme list of parameters), BODY (a Scheme list
        of expressions), and the defining environment ENV."""
        self.formals, self.body, self.env = formals, body, env
    def __str__(self):
        # Print as the equivalent lambda expression.
        return str(Pair("lambda", Pair(self.formals, self.body)))
    def __repr__(self):
        return "LambdaProcedure(%r, %r, %r)" % (
            self.formals, self.body, self.env)
#################
# Special forms #
#################
def do_define_form(expressions, env):
    """Evaluate a define form, binding either a symbol or a new procedure."""
    check_form(expressions, 2)
    target = expressions.first
    if scheme_symbolp(target):
        # (define <name> <expr>): bind the evaluated expression.
        check_form(expressions, 2, 2)
        # BEGIN Question 5A
        env.define(target, scheme_eval(expressions.second.first, env))
        return target
        # END Question 5A
    if isinstance(target, Pair) and scheme_symbolp(target.first):
        # (define (<name> <formals>...) <body>...): shorthand for a lambda.
        # BEGIN Question 9A
        procedure = LambdaProcedure(target.second, expressions.second, env)
        env.define(target.first, procedure)
        return target.first
        # END Question 9A
    bad = target.first if isinstance(target, Pair) else target
    raise SchemeError("Non-symbol: {}".format(bad))
def do_quote_form(expressions, env):
    """Evaluate a quote form by returning its single operand unevaluated."""
    check_form(expressions, 1, 1)
    # BEGIN Question 6B
    return expressions.first
    # END Question 6B
def do_begin_form(expressions, env):
    """Evaluate begin form.

    Evaluates each expression in order and returns the value of the last.
    """
    check_form(expressions, 1)
    return eval_all(expressions, env)
def do_lambda_form(expressions, env):
    """Evaluate a lambda form into a LambdaProcedure closed over ENV."""
    check_form(expressions, 2)
    formals = expressions.first
    check_formals(formals)
    # BEGIN Question 8
    return LambdaProcedure(formals, expressions.second, env)
    # END Question 8
def do_if_form(expressions, env):
    """Evaluate an if form: (if <predicate> <consequent> [<alternative>])."""
    check_form(expressions, 2, 3)
    # BEGIN Question 13
    if scheme_true(scheme_eval(expressions.first, env)):
        return scheme_eval(expressions.second.first, env)
    alternative = expressions.second.second
    # A missing alternative branch evaluates to okay.
    if alternative == nil:
        return okay
    return scheme_eval(alternative.first, env)
    # END Question 13
def do_and_form(expressions, env):
    """Evaluate a short-circuited and form.

    Returns True for an empty form, False on the first false value, and
    otherwise the value of the last expression.
    """
    # BEGIN Question 14B
    result = True
    while expressions != nil:
        result = scheme_eval(expressions.first, env)
        if not scheme_true(result):
            return False
        expressions = expressions.second
    return result
    # END Question 14B
def do_or_form(expressions, env):
    """Evaluate a short-circuited or form.

    Returns the first true value, or False if every expression is false
    (including the empty form).
    """
    # BEGIN Question 14B
    while expressions != nil:
        value = scheme_eval(expressions.first, env)
        if scheme_true(value):
            return value
        expressions = expressions.second
    return False
    # END Question 14B
def do_cond_form(expressions, env):
    """Evaluate a cond form.

    Returns the value of the body of the first clause whose test is true;
    a clause with no body evaluates to its test value. Returns okay if no
    clause matches.
    """
    num_clauses = len(expressions)
    i = 0
    while expressions is not nil:
        clause = expressions.first
        check_form(clause, 1)
        if clause.first == "else":
            # An else clause is only legal in the final position.
            if i < num_clauses-1:
                raise SchemeError("else must be last")
            test = True
        else:
            test = scheme_eval(clause.first, env)
        # We don't need to declare test to use it in
        # current environment!
        if scheme_true(test):
            # BEGIN Question 15A
            # A clause with no body evaluates to its test value.
            if clause.second == nil:
                return test
            return eval_all(clause.second, env)
            # END Question 15A
        expressions = expressions.second
        i += 1
    return okay
def do_let_form(expressions, env):
    """Evaluate a let form by running its body in a frame of let bindings."""
    check_form(expressions, 2)
    bindings, body = expressions.first, expressions.second
    return eval_all(body, make_let_frame(bindings, env))
def make_let_frame(bindings, env):
    """Create a frame containing bindings from a let expression."""
    if not scheme_listp(bindings):
        raise SchemeError("bad bindings list in let form")
    # BEGIN Question 16
    # Build two Scheme lists in lockstep: the symbols (formals) and their
    # evaluated values (args). Each starts with a dummy head node so new
    # cells can be appended through f.second / a.second; the dummy heads are
    # dropped with .second when the child frame is created below.
    formals = Pair(nil, nil)
    args = Pair(nil, nil)
    f, a = formals, args
    while bindings is not nil:
        clause = bindings.first
        check_form(clause, 2, 2)  # Each binding must be exactly (symbol expr).
        if not scheme_symbolp(clause.first):
            raise SchemeError("{0} is Not a symbol for let expression".format(clause.first))
        f.second = Pair(clause.first, nil)
        f = f.second
        # Binding values are evaluated in the enclosing ENV, not the new frame.
        a.second = Pair(scheme_eval(clause.second.first, env), nil)
        a = a.second
        bindings = bindings.second
    return env.make_child_frame(formals.second, args.second)
    # END Question 16
# Dispatch table mapping special-form keywords to their handler functions;
# scheme_eval consults this before treating a combination as a procedure call.
SPECIAL_FORMS = {
    "and": do_and_form,
    "begin": do_begin_form,
    "cond": do_cond_form,
    "define": do_define_form,
    "if": do_if_form,
    "lambda": do_lambda_form,
    "let": do_let_form,
    "or": do_or_form,
    "quote": do_quote_form,
}
# Utility methods for checking the structure of Scheme programs
def check_form(expr, min, max=float('inf')):
    """Check EXPR is a proper list whose length is at least MIN and no more
    than MAX (default: no maximum). Raises a SchemeError if this is not the
    case.
    """
    # NOTE: parameter names min/max shadow builtins but are kept for
    # interface compatibility.
    if not scheme_listp(expr):
        raise SchemeError("badly formed expression: " + str(expr))
    if len(expr) < min:
        raise SchemeError("too few operands in form")
    if len(expr) > max:
        raise SchemeError("too many operands in form")
def check_formals(formals):
    """Check that FORMALS is a valid parameter list, a Scheme list of symbols
    in which each symbol is distinct. Raise a SchemeError if the list of
    formals is not a well-formed list of symbols or if any symbol is repeated.
    >>> check_formals(read_line("(a b c)"))
    """
    # BEGIN Question 11B
    # Iterative walk; the check order (well-formed rest, then symbol,
    # then duplicate) matches the error precedence callers expect.
    seen = set()
    while formals != nil:
        rest = formals.second
        if not (isinstance(rest, Pair) or rest == nil):
            raise SchemeError("Not a well-formed list of parameters")
        symbol = formals.first
        if not scheme_symbolp(symbol):
            raise SchemeError("{0} is Not a valid symbol in parameters".format(symbol))
        if symbol in seen:
            raise SchemeError("{0} is already in previous parameters list".format(symbol))
        seen.add(symbol)
        formals = rest
    # END Question 11B
#################
# Dynamic Scope #
#################
class MuProcedure(UserDefinedProcedure):
    """A procedure defined by a mu expression, which has dynamic scope:
    its body is evaluated in a child of the *calling* environment
    (see make_call_frame), so no defining environment is stored.
    """
    def __init__(self, formals, body):
        """Store FORMALS (a Scheme list of parameters) and BODY (a Scheme
        list of expressions)."""
        self.formals, self.body = formals, body
    def __str__(self):
        # Print as the equivalent mu expression.
        return str(Pair("mu", Pair(self.formals, self.body)))
    def __repr__(self):
        return "MuProcedure(%r, %r)" % (self.formals, self.body)
def do_mu_form(expressions, env):
    """Evaluate a mu form into a dynamically scoped MuProcedure."""
    check_form(expressions, 2)
    formals = expressions.first
    check_formals(formals)
    # BEGIN Question 17
    return MuProcedure(formals, expressions.second)
    # END Question 17
# Register the mu special form alongside the standard forms.
SPECIAL_FORMS["mu"] = do_mu_form
##################
# Tail Recursion #
##################
class Evaluate:
    """A deferred evaluation: expression EXPR paired with environment ENV.

    Used by scheme_optimized_eval as a trampoline marker for tail calls.
    """
    def __init__(self, expr, env):
        self.expr, self.env = expr, env
def scheme_optimized_eval(expr, env, tail=False):
    """Evaluate Scheme expression EXPR in environment ENV.

    Tail-call-optimized variant of scheme_eval: rather than recursing in
    tail position, it loops while the result is an Evaluate placeholder.
    """
    # Evaluate Atoms
    assert expr is not None
    if scheme_symbolp(expr):
        return env.lookup(expr)
    elif self_evaluating(expr):
        return expr
    if tail:
        # BEGIN Extra Credit
        # NOTE(review): unimplemented scaffold -- this branch is a no-op
        # string expression, so `result` is left unbound and calling with
        # tail=True would raise NameError below.
        "*** REPLACE THIS LINE ***"
        # END Extra Credit
    else:
        # Wrap the expression so the trampoline loop below evaluates it.
        result = Evaluate(expr, env)
    while isinstance(result, Evaluate):
        expr, env = result.expr, result.env
        # All non-atomic expressions are lists (combinations)
        if not scheme_listp(expr):
            raise SchemeError("malformed list: {0}".format(str(expr)))
        first, rest = expr.first, expr.second
        if (scheme_symbolp(first) and first in SPECIAL_FORMS):
            result = SPECIAL_FORMS[first](rest, env)
        else:
            procedure = scheme_eval(first, env)
            args = rest.map(lambda operand: scheme_eval(operand, env))
            result = scheme_apply(procedure, args, env)
    return result
################################################################
# Uncomment the following line to apply tail call optimization #
################################################################
# scheme_eval = scheme_optimized_eval
################
# Input/Output #
################
def read_eval_print_loop(next_line, env, interactive=False, quiet=False,
                         startup=False, load_files=()):
    """Read and evaluate input until an end of file or keyboard interrupt.

    NEXT_LINE is a zero-argument function producing the next input source;
    ENV is the evaluation frame. If STARTUP, each file in LOAD_FILES is
    loaded into ENV first. QUIET suppresses printing of results.
    """
    if startup:
        # Load any requested files into ENV before entering the loop.
        for filename in load_files:
            scheme_load(filename, True, env)
    while True:
        try:
            src = next_line()
            while src.more_on_line:
                expression = scheme_read(src)
                result = scheme_eval(expression, env)
                if not quiet and result is not None:
                    print(result)
        except (SchemeError, SyntaxError, ValueError, RuntimeError) as err:
            # Report recursion-depth RuntimeErrors nicely; re-raise all other
            # RuntimeErrors. NOTE(review): getattr(err, 'args')[0] assumes a
            # non-empty args tuple -- confirm for all RuntimeErrors seen here.
            if (isinstance(err, RuntimeError) and
                'maximum recursion depth exceeded' not in getattr(err, 'args')[0]):
                raise
            elif isinstance(err, RuntimeError):
                print("Error: maximum recursion depth exceeded")
            else:
                print("Error:", err)
        except KeyboardInterrupt: # <Control>-C
            # Only swallow Ctrl-C when the loop was entered via startup
            # loading; otherwise propagate it to the caller.
            if not startup:
                raise
            print()
            print("KeyboardInterrupt")
            if not interactive:
                return
        except EOFError: # <Control>-D, etc.
            print()
            return
def scheme_load(*args):
    """Load a Scheme source file. ARGS should be of the form (SYM, ENV) or (SYM,
    QUIET, ENV). The file named SYM is loaded in environment ENV, with verbosity
    determined by QUIET (default true)."""
    if not (2 <= len(args) <= 3):
        expressions = args[:-1]
        raise SchemeError('"load" given incorrect number of arguments: '
                          '{0}'.format(len(expressions)))
    sym = args[0]
    quiet = args[1] if len(args) > 2 else True
    env = args[-1]
    if (scheme_stringp(sym)):
        # Unwrap a Scheme string literal into its Python string contents.
        # NOTE(review): this uses Python eval() on the token text --
        # presumably safe because scheme_stringp accepted it, but confirm;
        # ast.literal_eval would be a safer choice.
        sym = eval(sym)
    check_type(sym, scheme_symbolp, 0, "load")
    with scheme_open(sym) as infile:
        lines = infile.readlines()
    # NOTE(review): the second argument to buffer_lines presumably
    # suppresses the prompt when quiet -- confirm in the buffer module.
    args = (lines, None) if quiet else (lines,)
    def next_line():
        return buffer_lines(*args)
    read_eval_print_loop(next_line, env, quiet=quiet)
    return okay
def scheme_open(filename):
    """If either FILENAME or FILENAME.scm is the name of a valid file,
    return a Python file opened to it. Otherwise, raise an error."""
    last_error = None
    for candidate in (filename, filename + '.scm'):
        try:
            return open(candidate)
        except IOError as exc:
            last_error = exc
            # A name already ending in .scm gets no second chance.
            if filename.endswith('.scm'):
                break
    raise SchemeError(str(last_error))
def create_global_frame():
    """Initialize and return a single-frame environment with built-in names."""
    env = Frame(None)
    # These three primitives need access to the current environment.
    env_using_primitives = (
        ("eval", scheme_eval),
        ("apply", scheme_apply),
        ("load", scheme_load),
    )
    for name, fn in env_using_primitives:
        env.define(name, PrimitiveProcedure(fn, True))
    add_primitives(env)
    return env
@main
def run(*argv):
    """Command-line entry point: parse arguments and start the REPL."""
    import argparse
    parser = argparse.ArgumentParser(description='CS 61A Scheme interpreter')
    parser.add_argument('-load', '-i', action='store_true',
                        help='run file interactively')
    parser.add_argument('file', nargs='?',
                        type=argparse.FileType('r'), default=None,
                        help='Scheme file to run')
    args = parser.parse_args()
    next_line = buffer_input
    interactive = True
    load_files = []
    if args.file is not None:
        if args.load:
            # -load/-i: start the REPL with the file preloaded.
            load_files.append(getattr(args.file, 'name'))
        else:
            # Otherwise run the file's contents non-interactively.
            lines = args.file.readlines()
            def next_line():
                return buffer_lines(lines)
            interactive = False
    read_eval_print_loop(next_line, create_global_frame(), startup=True,
                         interactive=interactive, load_files=load_files)
    # NOTE(review): presumably closes a turtle-graphics window on exit --
    # confirm in scheme_primitives.
    tscheme_exitonclick()
| |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import Gaffer
import GafferTest
class CompoundDataPlugTest( GafferTest.TestCase ) :

	"""Tests for Gaffer.CompoundDataPlug member management and serialisation.

	Modernized to use assertTrue/assertFalse instead of the deprecated
	unittest aliases failUnless/failIf (removed in Python 3.12), and to use
	assertEqual where assertTrue was mistakenly given an expected value as
	its (ignored) message argument.
	"""

	def test( self ) :

		p = Gaffer.CompoundDataPlug()

		m1 = p.addMember( "a", IECore.IntData( 10 ) )
		self.assertTrue( isinstance( m1, Gaffer.CompoundPlug ) )
		self.assertEqual( m1.getName(), "member1" )
		self.assertEqual( m1["name"].getValue(), "a" )
		self.assertEqual( m1["value"].getValue(), 10 )
		self.assertFalse( "enabled" in m1 )

		d, n = p.memberDataAndName( m1 )
		self.assertEqual( d, IECore.IntData( 10 ) )
		self.assertEqual( n, "a" )

		m1["name"].setValue( "b" )
		d, n = p.memberDataAndName( m1 )
		self.assertEqual( d, IECore.IntData( 10 ) )
		self.assertEqual( n, "b" )

		m2 = p.addMember( "c", IECore.FloatData( .5 ) )
		self.assertTrue( isinstance( m2, Gaffer.CompoundPlug ) )
		self.assertEqual( m2.getName(), "member2" )
		self.assertEqual( m2["name"].getValue(), "c" )
		self.assertEqual( m2["value"].getValue(), .5 )
		self.assertFalse( "enabled" in m2 )

		d, n = p.memberDataAndName( m2 )
		self.assertEqual( d, IECore.FloatData( .5 ) )
		self.assertEqual( n, "c" )

		m3 = p.addOptionalMember( "o", IECore.StringData( "--" ), plugName = "m", enabled = True )
		self.assertTrue( isinstance( m3, Gaffer.CompoundPlug ) )
		self.assertEqual( m3.getName(), "m" )
		self.assertEqual( m3["name"].getValue(), "o" )
		self.assertEqual( m3["value"].getValue(), "--" )
		self.assertTrue( "enabled" in m3 )
		self.assertEqual( m3["enabled"].getValue(), True )

		d, n = p.memberDataAndName( m3 )
		self.assertEqual( d, IECore.StringData( "--" ) )
		self.assertEqual( n, "o" )

		# Disabled members report no data and an empty name.
		m3["enabled"].setValue( False )
		d, n = p.memberDataAndName( m3 )
		self.assertEqual( d, None )
		self.assertEqual( n, "" )

	def testVectorData( self ) :

		p = Gaffer.CompoundDataPlug()

		m1 = p.addMember( "a", IECore.FloatVectorData( [ 1, 2, 3 ] ) )
		self.assertTrue( isinstance( m1, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m1 )
		self.assertEqual( d, IECore.FloatVectorData( [ 1, 2, 3 ] ) )
		self.assertEqual( n, "a" )

		m2 = p.addMember( "b", IECore.IntVectorData( [ 1, 2, 3 ] ) )
		self.assertTrue( isinstance( m2, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m2 )
		self.assertEqual( d, IECore.IntVectorData( [ 1, 2, 3 ] ) )
		self.assertEqual( n, "b" )

		m3 = p.addMember( "c", IECore.StringVectorData( [ "1", "2", "3" ] ) )
		self.assertTrue( isinstance( m3, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m3 )
		self.assertEqual( d, IECore.StringVectorData( [ "1", "2", "3" ] ) )
		self.assertEqual( n, "c" )

		m4 = p.addMember( "d", IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 5 ) ] ) )
		self.assertTrue( isinstance( m4, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m4 )
		self.assertEqual( d, IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 5 ) ] ) )
		self.assertEqual( n, "d" )

		m5 = p.addMember( "e", IECore.Color3fVectorData( [ IECore.Color3f( x ) for x in range( 1, 5 ) ] ) )
		self.assertTrue( isinstance( m5, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m5 )
		self.assertEqual( d, IECore.Color3fVectorData( [ IECore.Color3f( x ) for x in range( 1, 5 ) ] ) )
		self.assertEqual( n, "e" )

	def testImathVectorData( self ) :

		p = Gaffer.CompoundDataPlug()

		m1 = p.addMember( "a", IECore.V3fData( IECore.V3f( 1, 2, 3 ) ) )
		self.assertTrue( isinstance( m1, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m1 )
		self.assertEqual( d, IECore.V3fData( IECore.V3f( 1, 2, 3 ) ) )
		self.assertEqual( n, "a" )

		m2 = p.addMember( "b", IECore.V2fData( IECore.V2f( 1, 2 ) ) )
		self.assertTrue( isinstance( m2, Gaffer.CompoundPlug ) )

		d, n = p.memberDataAndName( m2 )
		self.assertEqual( d, IECore.V2fData( IECore.V2f( 1, 2 ) ) )
		self.assertEqual( n, "b" )

	def testPlugFlags( self ) :

		p = Gaffer.CompoundDataPlug()
		m1 = p.addMember( "a", IECore.V3fData( IECore.V3f( 1, 2, 3 ) ), plugFlags = Gaffer.Plug.Flags.Default )
		self.assertEqual( m1.getFlags(), Gaffer.Plug.Flags.Default )
		self.assertEqual( m1["name"].getFlags(), Gaffer.Plug.Flags.Default)
		self.assertEqual( m1["value"].getFlags(), Gaffer.Plug.Flags.Default )

		m2 = p.addOptionalMember( "a", IECore.V3fData( IECore.V3f( 1, 2, 3 ) ), plugFlags = Gaffer.Plug.Flags.Default )
		self.assertEqual( m2.getFlags(), Gaffer.Plug.Flags.Default )
		self.assertEqual( m2["name"].getFlags(), Gaffer.Plug.Flags.Default )
		self.assertEqual( m2["value"].getFlags(), Gaffer.Plug.Flags.Default )
		self.assertEqual( m2["enabled"].getFlags(), Gaffer.Plug.Flags.Default )

	def testCreateCounterpart( self ) :

		p1 = Gaffer.CompoundDataPlug()
		m1 = p1.addMember( "a", IECore.V3fData( IECore.V3f( 1, 2, 3 ) ), plugFlags = Gaffer.Plug.Flags.Default )

		p2 = p1.createCounterpart( "c", Gaffer.Plug.Direction.Out )
		self.assertEqual( p2.typeName(), p1.typeName() )
		self.assertEqual( p2.getName(), "c" )
		self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
		self.assertEqual( len( p2 ), len( p1 ) )
		self.assertEqual( p2.getFlags(), p1.getFlags() )

		m2 = p2["member1"]
		self.assertEqual( m2.typeName(), m1.typeName() )
		self.assertEqual( m2.getFlags(), m1.getFlags() )
		self.assertEqual( m2.direction(), Gaffer.Plug.Direction.Out )
		self.assertEqual( m2.keys(), m1.keys() )

	def testCreateWithValuePlug( self ) :

		p = Gaffer.CompoundDataPlug()

		v = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, minValue = -10, maxValue = 10 )
		m1 = p.addMember( "a", v )
		self.assertTrue( v.parent().isSame( m1 ) )
		self.assertEqual( v.getName(), "value" )
		self.assertEqual( m1.getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		self.assertEqual( p.memberDataAndName( m1 ), ( IECore.IntData( 0 ), "a" ) )

		v2 = Gaffer.IntPlug( defaultValue = 5, minValue = -10, maxValue = 10 )
		m2 = p.addOptionalMember( "b", v2, plugName = "blah", enabled = True )
		self.assertTrue( v2.parent().isSame( m2 ) )
		self.assertEqual( v2.getName(), "value" )
		self.assertEqual( m2.getFlags(), Gaffer.Plug.Flags.Default )

		self.assertEqual( p.memberDataAndName( m2 ), ( IECore.IntData( 5 ), "b" ) )

	def testAdditionalChildrenRejected( self ) :

		p = Gaffer.CompoundDataPlug()

		self.assertRaises( RuntimeError, p.addChild, Gaffer.IntPlug() )
		self.assertRaises( RuntimeError, p.addChild, Gaffer.CompoundPlug() )

		m = p.addMember( "a", IECore.IntData( 10 ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug() )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.StringPlug( "name" ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "name" ) )
		self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "value" ) )

	def testSerialisation( self ) :

		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["p"] = Gaffer.CompoundDataPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		s["n"]["p"].addMember( "a", IECore.IntData( 10 ), "a" )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		self.assertEqual(
			s["n"]["p"].memberDataAndName( s["n"]["p"]["a"] ),
			s2["n"]["p"].memberDataAndName( s2["n"]["p"]["a"] ),
		)

	def testMemberPlugRepr( self ) :

		p = Gaffer.CompoundDataPlug.MemberPlug( "mm", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		p2 = eval( repr( p ) )

		self.assertEqual( p.getName(), p2.getName() )
		self.assertEqual( p.direction(), p2.direction() )
		self.assertEqual( p.getFlags(), p2.getFlags() )

	def testDefaultValues( self ) :

		p = Gaffer.CompoundDataPlug()

		# Fixed: these previously used assertTrue( value, expected ), which
		# treats the expected value as a message and always passes.
		m = p.addMember( "a", IECore.IntData( 10 ) )
		self.assertEqual( m["value"].defaultValue(), 10 )
		self.assertEqual( m["value"].getValue(), 10 )

		m = p.addMember( "b", IECore.FloatData( 20 ) )
		self.assertEqual( m["value"].defaultValue(), 20 )
		self.assertEqual( m["value"].getValue(), 20 )

		m = p.addMember( "c", IECore.StringData( "abc" ) )
		self.assertEqual( m["value"].defaultValue(), "abc" )
		self.assertEqual( m["value"].getValue(), "abc" )

	def testAddMembers( self ) :

		p = Gaffer.CompoundDataPlug()

		p.addMembers( IECore.CompoundData( { "one" : 1, "two" : 2 } ) )
		self.assertEqual( len( p ), 2 )
		self.assertEqual( p[0].getName(), "member1" )
		self.assertEqual( p[1].getName(), "member2" )

		c = IECore.CompoundData()
		p.fillCompoundData( c )
		self.assertEqual( c, IECore.CompoundData( { "one" : 1, "two" : 2 } ) )

	def testAddMembersWithSpecificNames( self ) :

		p = Gaffer.CompoundDataPlug()
		p.addMembers( IECore.CompoundData( { "one" : 1 } ), useNameAsPlugName=True )

		self.assertEqual( len( p ), 1 )
		self.assertEqual( p[0].getName(), "one" )

		o = IECore.CompoundObject()
		p.fillCompoundObject( o )
		self.assertEqual( o, IECore.CompoundObject( { "one" : IECore.IntData( 1 ) } ) )

	def testBoxTypes( self ) :

		p = Gaffer.CompoundDataPlug()
		for name, value in [
			( "b2f", IECore.Box2fData( IECore.Box2f( IECore.V2f( 0, 1 ), IECore.V2f( 1, 2 ) ) ) ),
			( "b2i", IECore.Box2iData( IECore.Box2i( IECore.V2i( -1, 10 ), IECore.V2i( 11, 20 ) ) ) ),
			( "b3f", IECore.Box3fData( IECore.Box3f( IECore.V3f( 0, 1, 2 ), IECore.V3f( 3, 4, 5 ) ) ) ),
			( "b3i", IECore.Box3iData( IECore.Box3i( IECore.V3i( 0, 1, 2 ), IECore.V3i( 3, 4, 5 ) ) ) ),
		] :
			m = p.addMember( name, value )
			self.assertEqual( p.memberDataAndName( m ), ( value, name ) )

	def testBoxPromotion( self ) :

		s = Gaffer.ScriptNode()
		s["b"] = Gaffer.Box()
		s["b"]["n"] = Gaffer.Node()
		s["b"]["n"]["p"] = Gaffer.CompoundDataPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		p = s["b"].promotePlug( s["b"]["n"]["p"], asUserPlug=False )
		p.setName( "p" )

		def assertPreconditions( script ) :

			self.assertEqual( script["b"]["n"]["p"].keys(), [] )
			self.assertEqual( script["b"]["p"].keys(), [] )
			self.assertTrue( script["b"]["n"]["p"].getInput().isSame( script["b"]["p"] ) )

		def assertPostconditions( script ) :

			self.assertEqual( script["b"]["p"].keys(), [ "test" ] )
			self.assertEqual( script["b"]["n"]["p"].keys(), [ "test" ] )

			self.assertEqual( script["b"]["p"]["test"].keys(), [ "name", "value" ] )
			self.assertEqual( script["b"]["n"]["p"]["test"].keys(), [ "name", "value" ] )

			self.assertTrue( script["b"]["n"]["p"].getInput().isSame( script["b"]["p"] ) )
			self.assertTrue( script["b"]["n"]["p"]["test"].getInput().isSame( script["b"]["p"]["test"] ) )
			self.assertTrue( script["b"]["n"]["p"]["test"]["name"].getInput().isSame( script["b"]["p"]["test"]["name"] ) )
			self.assertTrue( script["b"]["n"]["p"]["test"]["value"].getInput().isSame( script["b"]["p"]["test"]["value"] ) )

		assertPreconditions( s )

		with Gaffer.UndoContext( s ) :
			p.addMember( "test", 10, "test" )

		assertPostconditions( s )

		s.undo()
		assertPreconditions( s )

		s.redo()
		assertPostconditions( s )

		s2 = Gaffer.ScriptNode()
		s2.execute( s.serialise() )

		assertPostconditions( s2 )
# Allow this test file to be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Security Model
@copyright: 2012-14 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3SecurityModel",)
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3SecurityModel(S3Model):
    """
    Security Zones & Security-related Staff.

    Defines lookup tables for zone types and staff types, the
    security_zone table (a typed, mapped polygon) and the
    security_staff table (a human resource with one or more staff
    types, an optional zone and an optional facility).
    """

    names = ("security_zone_type",
             "security_zone",
             "security_staff_type",
             "security_staff",
             )

    def model(self):
        """Define the tables; returns names for the global scope (s3.*)."""

        T = current.T
        db = current.db
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table

        # -----------------------------------------------------------
        # Security Zone Types
        #
        tablename = "security_zone_type"
        define_table(tablename,
                     Field("name",
                           label=T("Name")),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        ADD_ZONE_TYPE = T("Create Zone Type")
        crud_strings[tablename] = Storage(
            label_create = ADD_ZONE_TYPE,
            title_display = T("Zone Type Details"),
            title_list = T("Zone Types"),
            title_update = T("Edit Zone Type"),
            title_upload = T("Import Zone Types"),
            label_list_button = T("List Zone Types"),
            label_delete_button = T("Delete Zone Type"),
            msg_record_created = T("Zone Type added"),
            msg_record_modified = T("Zone Type updated"),
            msg_record_deleted = T("Zone Type deleted"),
            msg_list_empty = T("No Zone Types currently registered"))

        zone_type_represent = S3Represent(lookup=tablename)

        # De-duplicate imported zone types by name
        self.configure(tablename,
                       deduplicate = self.security_zone_type_duplicate,
                       )

        # -----------------------------------------------------------
        # Security Zones
        #
        tablename = "security_zone"
        define_table(tablename,
                     Field("name",
                           label=T("Name")),
                     Field("zone_type_id", db.security_zone_type,
                           requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db, "security_zone_type.id",
                                                  zone_type_represent,
                                                  sort=True)),
                           represent = zone_type_represent,
                           comment = S3AddResourceLink(c="security",
                                                       f="zone_type",
                                                       label=ADD_ZONE_TYPE,
                                                       tooltip=T("Select a Zone Type from the list or click 'Add Zone Type'")),
                           label=T("Type")),
                     # Zones are drawn on the map as polygons
                     self.gis_location_id(
                         widget = S3LocationSelectorWidget2(
                             catalog_layers = True,
                             points = False,
                             polygons = True,
                         )
                     ),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        ADD_ZONE = T("Create Zone")
        crud_strings[tablename] = Storage(
            label_create = ADD_ZONE,
            title_display = T("Zone Details"),
            title_list = T("Zones"),
            title_update = T("Edit Zone"),
            title_upload = T("Import Zones"),
            label_list_button = T("List Zones"),
            label_delete_button = T("Delete Zone"),
            msg_record_created = T("Zone added"),
            msg_record_modified = T("Zone updated"),
            msg_record_deleted = T("Zone deleted"),
            msg_list_empty = T("No Zones currently registered"))

        zone_represent = S3Represent(lookup=tablename)

        # -----------------------------------------------------------
        # Security Staff Types
        #
        tablename = "security_staff_type"
        define_table(tablename,
                     Field("name",
                           label=T("Name")),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        ADD_STAFF = T("Add Staff Type")
        crud_strings[tablename] = Storage(
            label_create = ADD_STAFF,
            title_display = T("Staff Type Details"),
            title_list = T("Staff Types"),
            title_update = T("Edit Staff Type"),
            title_upload = T("Import Staff Types"),
            label_list_button = T("List Staff Types"),
            label_delete_button = T("Delete Staff Type"),
            msg_record_created = T("Staff Type added"),
            msg_record_modified = T("Staff Type updated"),
            msg_record_deleted = T("Staff Type deleted"),
            msg_list_empty = T("No Staff Types currently registered"))

        staff_type_represent = S3Represent(lookup=tablename)

        # -----------------------------------------------------------
        # Security Staff
        #
        tablename = "security_staff"
        define_table(tablename,
                     self.hrm_human_resource_id(),
                     # A staff member can have multiple staff types
                     Field("staff_type_id", "list:reference security_staff_type",
                           requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db, "security_staff_type.id",
                                                  staff_type_represent,
                                                  sort=True,
                                                  multiple=True)),
                           represent = self.security_staff_type_multirepresent,
                           comment = S3AddResourceLink(c="security",
                                                       f="staff_type",
                                                       label=ADD_STAFF,
                                                       tooltip=T("Select a Staff Type from the list or click 'Add Staff Type'")),
                           label=T("Type")),
                     Field("zone_id", db.security_zone,
                           requires = IS_EMPTY_OR(
                                        IS_ONE_OF(db, "security_zone.id",
                                                  zone_represent,
                                                  sort=True)),
                           represent = zone_represent,
                           comment = S3AddResourceLink(c="security",
                                                       f="zone",
                                                       label=ADD_ZONE,
                                                       tooltip=T("For wardens, select a Zone from the list or click 'Add Zone'")),
                           label=T("Zone")),
                     self.super_link("site_id", "org_site",
                                     label = T("Facility"),
                                     represent=self.org_site_represent,
                                     readable=True,
                                     writable=True),
                     s3_comments(),
                     *s3_meta_fields())

        # CRUD strings
        ADD_STAFF = T("Add Security-Related Staff")
        crud_strings[tablename] = Storage(
            label_create = ADD_STAFF,
            title_display = T("Security-Related Staff Details"),
            title_list = T("Security-Related Staff"),
            title_update = T("Edit Security-Related Staff"),
            title_upload = T("Import Security-Related Staff"),
            label_list_button = T("List Security-Related Staff"),
            label_delete_button = T("Delete Security-Related Staff"),
            msg_record_created = T("Security-Related Staff added"),
            msg_record_modified = T("Security-Related Staff updated"),
            msg_record_deleted = T("Security-Related Staff deleted"),
            msg_list_empty = T("No Security-Related Staff currently registered"))

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict()

    # -------------------------------------------------------------------------
    @staticmethod
    def security_zone_type_duplicate(item):
        """
        Zone Type record duplicate detection, used for the deduplicate hook

        @param item: the S3ImportItem to check
        """

        if item.tablename == "security_zone_type":
            table = item.table
            # Zone types are considered duplicates when names match exactly
            query = (table.name == item.data.name)
            row = current.db(query).select(table.id,
                                           limitby=(0, 1)).first()
            if row:
                item.id = row.id
                item.method = item.METHOD.UPDATE

    # -----------------------------------------------------------------------------
    @staticmethod
    def security_staff_type_multirepresent(opt):
        """
        Represent one or more staff types in list views.

        @param opt: a staff type id, a list/tuple of ids, or a value
                    coercible to an id (e.g. a string)
        @return: the name(s) as a comma-separated string, or the
                 NONE-representation when the value cannot be resolved
        """

        db = current.db
        table = db.security_staff_type
        # Build an id->row lookup once rather than querying per-id.
        # NB renamed from "set" so the builtin is not shadowed.
        rows = db(table.id > 0).select(table.id,
                                       table.name).as_dict()

        NONE = current.messages["NONE"]

        def type_name(staff_type_id):
            # Guard against stale references: a missing id is represented
            # as NONE instead of raising TypeError on rows.get() => None
            row = rows.get(staff_type_id)
            return str(row["name"]) if row else NONE

        if isinstance(opt, (list, tuple)):
            opts = opt
            vals = [type_name(o) for o in opts]
            multiple = True
        elif isinstance(opt, int):
            opts = [opt]
            vals = type_name(opt)
            multiple = False
        else:
            try:
                opt = int(opt)
            except (ValueError, TypeError):
                # Not coercible to an id (e.g. None or empty string)
                return NONE
            else:
                opts = [opt]
                vals = type_name(opt)
                multiple = False

        if multiple:
            if len(opts) > 1:
                vals = ", ".join(vals)
            else:
                vals = vals[0] if vals else ""
        return vals
# END =========================================================================
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Move the 'rating' column off ProductImage and onto the review tables."""
    int_field = self.gf('django.db.models.fields.IntegerField')

    # ProductImage no longer carries a rating.
    db.delete_column(u'catalog_productimage', 'rating')

    # Reviews gain an integer rating; existing rows default to 3,
    # but the default is not kept on the column afterwards.
    db.add_column(u'catalog_productreview', 'rating',
                  int_field(default=3),
                  keep_default=False)
    db.add_column(u'catalog_shopreview', 'rating',
                  int_field(default=3),
                  keep_default=False)
def backwards(self, orm):
    """Restore ProductImage.rating and drop the review ratings again."""
    int_field = self.gf('django.db.models.fields.IntegerField')

    # Recreate the column removed by forwards(); default 3 fills
    # existing rows but is not kept on the column afterwards.
    db.add_column(u'catalog_productimage', 'rating',
                  int_field(default=3),
                  keep_default=False)

    # Remove the rating columns added by forwards().
    db.delete_column(u'catalog_productreview', 'rating')
    db.delete_column(u'catalog_shopreview', 'rating')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'catalog.cfistoreitem': {
'Meta': {'object_name': 'CfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalog.Product']", 'unique': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cfi_store_item_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeCfiStoreItem']", 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.like': {
'Meta': {'object_name': 'Like'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likecfistoreitem': {
'Meta': {'object_name': 'LikeCfiStoreItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'cfi_store_item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.CfiStoreItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductDescription']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.ProductImage']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likeproducttutorial': {
'Meta': {'object_name': 'LikeProductTutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.listitem': {
'Meta': {'object_name': 'ListItem'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'createdby': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.location': {
'Meta': {'object_name': 'Location'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.logidenticalproduct': {
'Meta': {'object_name': 'LogIdenticalProduct'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product1'", 'to': "orm['catalog.Product']"}),
'product2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product2'", 'to': "orm['catalog.Product']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.makey': {
'Meta': {'object_name': 'Makey'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'collaborators': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborators'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeycomments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Comment']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'documentations': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeydocumentations'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Documentation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeyimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'makeynotes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Note']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.note': {
'Meta': {'object_name': 'Note'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identicalto': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likers': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'product_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeProduct']", 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'makeys': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'partsused'", 'blank': 'True', 'to': "orm['catalog.Makey']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('django.db.models.fields.IntegerField', [], {}),
'tutorials': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalog.Tutorial']", 'symmetrical': 'False', 'blank': 'True'})
},
'catalog.productdescription': {
'Meta': {'object_name': 'ProductDescription'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productdescriptions'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'blank': 'True'}),
'user_or_shop': ('django.db.models.fields.BooleanField', [], {})
},
'catalog.productimage': {
'Meta': {'object_name': 'ProductImage'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productimages'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.productreview': {
'Meta': {'object_name': 'ProductReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'product_reviews'", 'to': "orm['catalog.Product']"}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.productshopurl': {
'Meta': {'object_name': 'ProductShopUrl'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'productshopurls'", 'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.searchlog': {
'Meta': {'object_name': 'SearchLog'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
'catalog.shop': {
'Meta': {'object_name': 'Shop'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'shopimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shop_likes'", 'symmetrical': 'False', 'through': "orm['catalog.LikeShop']", 'to': u"orm['django_facebook.FacebookCustomUser']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.shopreview': {
'Meta': {'object_name': 'ShopReview'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'rating': ('django.db.models.fields.IntegerField', [], {}),
'review': ('django.db.models.fields.CharField', [], {'max_length': '100000'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shop_reviews'", 'to': "orm['catalog.Shop']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.toindexstore': {
'Meta': {'object_name': 'ToIndexStore'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'catalog.topmakeys': {
'Meta': {'object_name': 'TopMakeys'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Makey']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topproducts': {
'Meta': {'object_name': 'TopProducts'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.topshops': {
'Meta': {'object_name': 'TopShops'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Shop']"})
},
'catalog.toptutorials': {
'Meta': {'object_name': 'TopTutorials'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Tutorial']"})
},
'catalog.topusers': {
'Meta': {'object_name': 'TopUsers'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
'catalog.tutorial': {
'Meta': {'object_name': 'Tutorial'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tutorialimages'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['catalog.Image']"}),
'is_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'django_facebook.facebookcustomuser': {
'Meta': {'object_name': 'FacebookCustomUser'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'new_token_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['catalog']
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author Pradeep Jairamani; github.com/pradeepjairamani
import socket
import socks
import time
import json
import threading
import string
import requests
import random
import os
from core.alert import *
from core.targets import target_type
from core.targets import target_to_host
from core.load_modules import load_file_path
from lib.socks_resolver.engine import getaddrinfo
from core._time import now
from core.log import __log_into_file
from core._die import __die_failure
from lib.scan.wp_timthumbs import wp_timthumbs
def extra_requirements_dict():
    """Return the default extra-requirement settings for this scan module."""
    defaults = {}
    defaults["wp_timthumb_scan_http_method"] = ["GET"]
    defaults["wp_timthumb_scan_random_agent"] = ["True"]
    return defaults
def check(target, user_agent, timeout_sec, log_in_file, language, time_sleep, thread_tmp_filename, retries,
          http_method, socks_proxy, scan_id, scan_cmd):
    """Probe one timthumb candidate URL and log anything interesting.

    Issues a GET or HEAD request (optionally through a SOCKS4/5 proxy),
    reports responses whose status is 200/401/403, and for 200 responses
    additionally flags directory-listing pages.

    Returns:
        True  on normal completion,
        1     when the request kept failing after `retries` attempts,
        False on any unexpected error (best-effort scanner semantics).
    """
    status_codes = [200, 401, 403]
    # markers commonly present in auto-generated directory-index pages
    directory_listing_msgs = ["<title>Index of /", "<a href=\"\\?C=N;O=D\">Name</a>", "Directory Listing for",
                              "Parent Directory</a>", "Last modified</a>", "<TITLE>Folder Listing.",
                              "- Browsing directory "]
    time.sleep(time_sleep)
    try:
        if socks_proxy is not None:
            socks_version = socks.SOCKS5 if socks_proxy.startswith(
                'socks5://') else socks.SOCKS4
            socks_proxy = socks_proxy.rsplit('://')[1]
            if '@' in socks_proxy:
                # proxy spec of the form user:pass@host:port
                socks_username = socks_proxy.rsplit(':')[0]
                socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
                socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                        int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                        password=socks_password)
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
            else:
                socks.set_default_proxy(socks_version, str(
                    socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
                socket.socket = socks.socksocket
                socket.getaddrinfo = getaddrinfo
        n = 0
        while 1:
            try:
                if http_method == "GET":
                    r = requests.get(
                        target, timeout=timeout_sec, headers=user_agent)
                elif http_method == "HEAD":
                    r = requests.head(
                        target, timeout=timeout_sec, headers=user_agent)
                content = r.content
                break
            except Exception:
                n += 1
                # BUG FIX: was `n is retries` — identity comparison between
                # ints only "works" for CPython's small-int cache; use ==.
                if n == retries:
                    warn(messages(language, "http_connection_timeout").format(target))
                    return 1
        if version() == 3:  # BUG FIX: was `version() is 3`
            content = content.decode('utf8')
        if r.status_code in status_codes:
            info(messages(language, "found").format(
                target, r.status_code, r.reason))
            __log_into_file(thread_tmp_filename, 'w', '0', language)
            data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
                               'PORT': "", 'TYPE': 'wp_timthumb_scan',
                               'DESCRIPTION': messages(language, "found").format(target, r.status_code, r.reason),
                               'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
            __log_into_file(log_in_file, 'a', data, language)
            if r.status_code == 200:  # BUG FIX: was `r.status_code is 200`
                for dlmsg in directory_listing_msgs:
                    if dlmsg in content:
                        # NOTE: "directoy_listing" is the (misspelled) message
                        # key used by the messages() catalog — do not "fix" it.
                        info(messages(language, "directoy_listing").format(target))
                        data = json.dumps({'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '',
                                           'PORT': "", 'TYPE': 'wp_timthumb_scan',
                                           'DESCRIPTION': messages(language, "directoy_listing").format(target), 'TIME': now(),
                                           'CATEGORY': "scan", 'SCAN_ID': scan_id, 'SCAN_CMD': scan_cmd})
                        __log_into_file(log_in_file, 'a', data, language)
                        break
        return True
    except Exception:
        # best-effort scanner: any unexpected failure just means "no result"
        return False
def test(target, retries, timeout_sec, user_agent, http_method, socks_proxy, verbose_level, trying, total_req, total,
         num, language):
    """Connectivity pre-check: can the base target be reached at all?

    Optionally routes through a SOCKS proxy, then retries the request up
    to `retries` times.

    Returns:
        0 when a request eventually succeeds, 1 after `retries`
        consecutive failures.
    """
    if verbose_level > 3:
        info(messages(language, "trying_message").format(trying, total_req, num, total, target_to_host(target), "default_port",
                                                         'wp_timthumb_scan'))
    if socks_proxy is not None:
        socks_version = socks.SOCKS5 if socks_proxy.startswith(
            'socks5://') else socks.SOCKS4
        socks_proxy = socks_proxy.rsplit('://')[1]
        if '@' in socks_proxy:
            # proxy spec of the form user:pass@host:port
            socks_username = socks_proxy.rsplit(':')[0]
            socks_password = socks_proxy.rsplit(':')[1].rsplit('@')[0]
            socks.set_default_proxy(socks_version, str(socks_proxy.rsplit('@')[1].rsplit(':')[0]),
                                    int(socks_proxy.rsplit(':')[-1]), username=socks_username,
                                    password=socks_password)
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
        else:
            socks.set_default_proxy(socks_version, str(
                socks_proxy.rsplit(':')[0]), int(socks_proxy.rsplit(':')[1]))
            socket.socket = socks.socksocket
            socket.getaddrinfo = getaddrinfo
    n = 0
    while 1:
        try:
            # response body is irrelevant here — only reachability matters,
            # so the return value is deliberately discarded
            if http_method == "GET":
                requests.get(target, timeout=timeout_sec,
                             headers=user_agent)
            elif http_method == "HEAD":
                requests.head(target, timeout=timeout_sec,
                              headers=user_agent)
            return 0
        except Exception:
            n += 1
            # BUG FIX: was `n is retries` — use equality, not identity
            if n == retries:
                return 1
def start(target, users, passwds, ports, timeout_sec, thread_number, num, total, log_in_file, time_sleep, language,
          verbose_level, socks_proxy, retries, methods_args, scan_id, scan_cmd):  # Main function
    """Entry point of the wp_timthumb scan: fan out one `check` thread per
    known timthumb path against the target.

    `users`, `passwds` and `ports` are part of the common module signature
    but unused by this scanner.
    """
    # BUG FIX: the original chained `!=` conditions with `or`, which is a
    # tautology (always True), so the unsupported-target branch was dead
    # code. Accept only the target types this module can actually handle.
    if target_type(target) in ('SINGLE_IPv4', 'DOMAIN', 'HTTP', 'SINGLE_IPv6'):
        # pool of user agents used to avoid trivial fingerprinting
        user_agent_list = [
            "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.5) Gecko/20060719 Firefox/1.5.0.5",
            "Googlebot/2.1 ( http://www.googlebot.com/bot.html)",
            "Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Ubuntu/10.04"
            " Chromium/9.0.595.0 Chrome/9.0.595.0 Safari/534.13",
            "Mozilla/5.0 (compatible; MSIE 7.0; Windows NT 5.2; WOW64; .NET CLR 2.0.50727)",
            "Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51",
            "Mozilla/5.0 (compatible; 008/0.83; http://www.80legs.com/webcrawler.html) Gecko/2008032620",
            "Debian APT-HTTP/1.3 (0.8.10.3)",
            "Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
            "Googlebot/2.1 (+http://www.googlebot.com/bot.html)",
            "Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
            "YahooSeeker/1.2 (compatible; Mozilla 4.0; MSIE 5.5; yahooseeker at yahoo-inc dot com ; "
            "http://help.yahoo.com/help/us/shop/merchant/)",
            "Mozilla/5.0 (compatible; YandexBot/3.0; +http://yandex.com/bots)",
            "Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)",
            "msnbot/1.1 (+http://search.msn.com/msnbot.htm)"
        ]
        http_methods = ["GET", "HEAD"]
        user_agent = {'User-agent': random.choice(user_agent_list)}
        # merge module defaults with any user-supplied method args
        new_extra_requirements = extra_requirements_dict()
        if methods_args is not None:
            for extra_requirement in extra_requirements_dict():
                if extra_requirement in methods_args:
                    new_extra_requirements[
                        extra_requirement] = methods_args[extra_requirement]
        extra_requirements = new_extra_requirements
        if extra_requirements["wp_timthumb_scan_http_method"][0] not in http_methods:
            warn(messages(language, "wp_timthumb_scan_get"))
            extra_requirements["wp_timthumb_scan_http_method"] = ["GET"]
        random_agent_flag = True
        if extra_requirements["wp_timthumb_scan_random_agent"][0] == "False":
            random_agent_flag = False
        threads = []
        total_req = len(wp_timthumbs.timthumb())
        # per-scan scratch file shared with worker threads: '1' means
        # "nothing found yet"; check() overwrites it with '0' on a hit
        thread_tmp_filename = '{}/tmp/thread_tmp_'.format(load_file_path()) + ''.join(
            random.choice(string.ascii_letters + string.digits) for _ in range(20))
        __log_into_file(thread_tmp_filename, 'w', '1', language)
        trying = 0
        if target_type(target) != "HTTP":
            target = 'https://' + target
        # probe the base URL once; only fan out threads if the host answers
        if test(str(target), retries, timeout_sec, user_agent, extra_requirements["wp_timthumb_scan_http_method"][0],
                socks_proxy, verbose_level, trying, total_req, total, num, language) == 0:  # BUG FIX: was `is 0`
            keyboard_interrupt_flag = False
            scan_list = wp_timthumbs.timthumb()
            for idir in scan_list:
                if random_agent_flag:
                    user_agent = {'User-agent': random.choice(user_agent_list)}
                t = threading.Thread(target=check,
                                     args=(
                                         target + '/' + idir, user_agent, timeout_sec, log_in_file, language,
                                         time_sleep, thread_tmp_filename, retries,
                                         extra_requirements[
                                             "wp_timthumb_scan_http_method"][0],
                                         socks_proxy, scan_id, scan_cmd))
                threads.append(t)
                t.start()
                trying += 1
                if verbose_level > 3:
                    info(messages(language, "trying_message").format(trying, total_req, num, total, target + "/" + idir,
                                                                     "default_port", 'wp_timthumb_scan'))
                # throttle thread creation to the configured pool size
                while 1:
                    try:
                        if threading.activeCount() >= thread_number:
                            time.sleep(0.01)
                        else:
                            break
                    except KeyboardInterrupt:
                        keyboard_interrupt_flag = True
                        break
                if keyboard_interrupt_flag:
                    break
        else:
            warn(messages(language, "open_error").format(target))
        # wait for worker threads to drain, bounded by roughly timeout_sec
        kill_switch = 0
        kill_time = int(
            timeout_sec / 0.1) if int(timeout_sec / 0.1) != 0 else 1  # BUG FIX: was `is not 0`
        while 1:
            time.sleep(0.1)
            kill_switch += 1
            try:
                # BUG FIX: was `is 1` / `is kill_time` identity comparisons
                if threading.activeCount() == 1 or kill_switch == kill_time:
                    break
            except KeyboardInterrupt:
                break
        thread_write = int(open(thread_tmp_filename).read().rsplit()[0])
        if thread_write == 1:  # BUG FIX: was `is 1`; 1 means nothing was found
            info(messages(language, "directory_file_404").format(
                target, "default_port"))
            if verbose_level != 0:  # BUG FIX: was `is not 0`
                data = json.dumps(
                    {'HOST': target_to_host(target), 'USERNAME': '', 'PASSWORD': '', 'PORT': '', 'TYPE': 'wp_timthumb_scan',
                     'DESCRIPTION': messages(language, "no_open_ports"), 'TIME': now(), 'CATEGORY': "scan", 'SCAN_ID': scan_id,
                     'SCAN_CMD': scan_cmd})
                __log_into_file(log_in_file, 'a', data, language)
        os.remove(thread_tmp_filename)
    else:
        warn(messages(language, "input_target_error").format(
            'wp_timthumb_scan', target))
| |
import getpass, json, random, os, sys
import h2o_args
from h2o_objects import RemoteHost
# some circular import issues, so go with the full import
import h2o_bc # write_flatfile, get_base_port
import h2o2 as h2o # build_cloud
from h2o_test import verboseprint, clean_sandbox, find_file
def find_config(base):
    """Locate a host-config file, also probing the testdir_hosts directories.

    Tries `base` as given, then under testdir_hosts/ and py/testdir_hosts/.
    Raises Exception when no candidate exists.
    """
    candidates = (base, 'testdir_hosts/' + base, 'py/testdir_hosts/' + base)
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise Exception("unable to find config %s" % base)
#************************************************************
def upload_jar_to_remote_hosts(hosts, slow_connection=False):
    """Copy build/h2o.jar plus the flatfile onto every remote host.

    With slow_connection=True the files are uploaded once to hosts[0] and
    then fanned out host-to-host via push_file_to_remotes; otherwise each
    host gets its own direct upload.
    """
    def show_progress(sofar, total):
        # progress-bar output pollutes the jenkins console, so suppress it there
        if getpass.getuser() != 'jenkins':
            filled = int((10.0 * sofar) / total)
            sys.stdout.write('\rUploading jar [%s%s] %02d%%' %
                             ('#' * filled, ' ' * (10 - filled), (100 * sofar) / total))
            sys.stdout.flush()
    if slow_connection:
        jar_path = find_file('build/h2o.jar')
        hosts[0].upload_file(jar_path, progress=show_progress)
        hosts[0].push_file_to_remotes(jar_path, hosts[1:])
        flatfile_path = find_file(h2o_bc.flatfile_pathname())
        hosts[0].upload_file(flatfile_path, progress=show_progress)
        hosts[0].push_file_to_remotes(flatfile_path, hosts[1:])
    else:
        for remote in hosts:
            remote.upload_file(find_file('build/h2o.jar'), progress=show_progress)
            # the flatfile is tiny: no progress callback needed
            remote.upload_file(h2o_bc.flatfile_pathname())
#************************************************************
# node_count is sometimes used positionally...break that out. all others are keyword args
def build_cloud_with_hosts(node_count=None, **kwargs):
    """Build an H2O cloud across the remote hosts described by a config json.

    Parameter precedence (lowest to highest): allParamsDefault below, then
    the host-config json, then whatever the test passes in **kwargs.
    Special case: ip == ["127.0.0.1"] falls through to a plain local
    build_cloud (hosts=None).

    NOTE(review): this is Python 2 code (iteritems, print statement) —
    keep any edits py2-compatible.
    """
    # legacy: we allow node_count to be positional.
    # if it's used positionally, stick it in kwargs (overwrite if there too)
    if node_count is not None:
        # we use h2o_per_host in the config file. will translate to node_count for build_cloud
        kwargs['h2o_per_host'] = node_count
        # set node_count to None to make sure we don't use it below. 'h2o_per_host' should be used
        node_count = None
    # for new params:
    # Just update this list with the param name and default and you're done
    allParamsDefault = {
        # any combination of force_ip/network could be interesting
        # network may mean you don't need force_ip
        'force_ip': False,
        'network': None,
        'use_flatfile': False,
        # default to true, so when we flip import folder to hdfs+s3n import on ec2,
        # the cloud is built correctly
        'use_hdfs': True,
        'hdfs_name_node': None,
        'hdfs_config': None,
        'hdfs_version': None,
        'java_heap_GB': None,
        'java_heap_MB': None,
        'java_extra_args': None,
        'timeoutSecs': 60,
        'retryDelaySecs': 2,
        'cleanup': True,
        'slow_connection': False,
        'h2o_per_host': 2,
        'ip':'["127.0.0.1"]', # this is for creating the hosts list
        'base_port': None,
        'username':'0xdiag',
        'password': None,
        'rand_shuffle': True,
        'use_home_for_ice': False,
        'key_filename': None,
        'aws_credentials': None,
        'redirect_import_folder_to_s3_path': None,
        'redirect_import_folder_to_s3n_path': None,
        'disable_h2o_log': False,
        'enable_benchmark_log': False,
        'h2o_remote_buckets_root': None,
        'conservative': False,
        'create_json': False,
        # pass this from cloud building to the common "release" h2o_test.py classes
        # for deciding whether keys should be deleted when a test ends.
        'delete_keys_at_teardown': False,
        'clone_cloud': False,
        'cloud_name': None,
        'force_tcp': None,
        'random_udp_drop': None,
        'sandbox_ignore_errors': None,
    }
    # initialize the default values
    paramsToUse = {}
    for k,v in allParamsDefault.iteritems():
        paramsToUse[k] = allParamsDefault.setdefault(k, v)
    # allow user to specify the config json at the command line. config_json is a global.
    if h2o_args.config_json:
        configFilename = find_config(h2o_args.config_json)
    else:
        # configs may be in the testdir_hosts
        configFilename = find_config(h2o_bc.default_hosts_file())
    verboseprint("Loading host config from", configFilename)
    with open(configFilename, 'rb') as fp:
        hostDict = json.load(fp)
    for k,v in hostDict.iteritems():
        # Don't take in params that we don't have in the list above
        # Because michal has extra params in here for ec2! and comments!
        if k in paramsToUse:
            paramsToUse[k] = hostDict.setdefault(k, v)
    # Now overwrite with anything passed by the test
    # whatever the test passes, always overrules the config json
    for k,v in kwargs.iteritems():
        paramsToUse[k] = kwargs.setdefault(k, v)
    # Let's assume we should set the h2o_remote_buckets_root (only affects
    # schema=local), to the home directory of whatever remote user
    # is being used for the hosts. Better than living with a decision
    # we made from scanning locally (remote might not match local)
    # assume the remote user has a /home/<username> (linux targets?)
    # This only affects import folder path name generation by python tests
    if paramsToUse['username']:
        paramsToUse['h2o_remote_buckets_root'] = "/home/" + paramsToUse['username']
    verboseprint("All build_cloud_with_hosts params:", paramsToUse)
    #********************
    # module-level 'hosts' is shared with other helpers in this module
    global hosts
    hosts = []
    # Update: special case paramsToUse['ip'] = ["127.0.0.1"] and use the normal build_cloud
    # this allows all tests in testdir_host to be run with a special config that points to 127.0.0.1
    # hosts should be None for everyone if normal build_cloud is desired
    if paramsToUse['ip']== ["127.0.0.1"]:
        hosts = None
    else:
        verboseprint("About to RemoteHost, likely bad ip if hangs")
        hosts = []
        for h in paramsToUse['ip']:
            verboseprint("Connecting to:", h)
            # expand any ~ or ~user in the string
            key_filename = paramsToUse['key_filename']
            if key_filename: # don't try to expand if None
                key_filename=os.path.expanduser(key_filename)
            hosts.append(RemoteHost(addr=h,
                username=paramsToUse['username'], password=paramsToUse['password'], key_filename=key_filename))
    # done with these, don't pass to build_cloud
    # this was the list of ip's from the config file, replaced by 'hosts' to build_cloud
    paramsToUse.pop('ip')
    # we want to save username in the node info. don't pop
    # paramsToUse.pop('username')
    paramsToUse.pop('password')
    paramsToUse.pop('key_filename')
    # flatfile is going into sandbox (LOG_DIR) now..so clean it first
    # (will make sandbox dir if it doesn't exist already)
    clean_sandbox()
    # handles hosts=None correctly
    base_port = h2o_bc.get_base_port(base_port=paramsToUse['base_port'])
    h2o_bc.write_flatfile(
        node_count=paramsToUse['h2o_per_host'],
        # let the env variable H2O_PORT_OFFSET add in there
        base_port=base_port,
        hosts=hosts,
        rand_shuffle=paramsToUse['rand_shuffle'],
    )
    if hosts is not None:
        # this uploads the flatfile too
        upload_jar_to_remote_hosts(hosts, slow_connection=paramsToUse['slow_connection'])
        # timeout wants to be larger for large numbers of hosts * h2oPerHost
        # NOTE(review): the comment says 5 sec per node but the code uses 8 — confirm intent
        timeoutSecs = max(60, 8*(len(hosts) * paramsToUse['h2o_per_host']))
    else: # for 127.0.0.1 case
        timeoutSecs = 60
    paramsToUse.pop('slow_connection')
    # sandbox gets cleaned in build_cloud
    # legacy param issue
    node_count = paramsToUse['h2o_per_host']
    paramsToUse.pop('h2o_per_host')
    print "java_heap_GB", paramsToUse['java_heap_GB']
    # don't wipe out or create the sandbox. already did here, and put flatfile there
    nodes = h2o_bc.build_cloud(node_count, hosts=hosts, init_sandbox=False, **paramsToUse)
    # we weren't doing this before, but since build_cloud returns nodes
    # people might expect this works similarly
    return nodes
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import mock
import netaddr
from oslo_config import cfg
from rally.plugins.openstack.scenarios.vm import utils
from tests.unit import test
VMTASKS_UTILS = "rally.plugins.openstack.scenarios.vm.utils"
CONF = cfg.CONF
class VMScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for utils.VMScenario.

    Every test fully mocks SSH/network interaction; assertions pin the
    exact call sequences, so statement order in these tests is load-bearing.
    """
    @mock.patch("%s.open" % VMTASKS_UTILS,
                side_effect=mock.mock_open(), create=True)
    def test__run_command_over_ssh_script_file(self, mock_open):
        # script_file: the file is opened locally and streamed as stdin
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "script_file": "foobar",
                "interpreter": ["interpreter", "interpreter_arg"],
                "command_args": ["arg1", "arg2"]
            }
        )
        mock_ssh.execute.assert_called_once_with(
            ["interpreter", "interpreter_arg", "arg1", "arg2"],
            stdin=mock_open.side_effect())
        mock_open.assert_called_once_with("foobar", "rb")
    @mock.patch("%s.six.moves.StringIO" % VMTASKS_UTILS)
    def test__run_command_over_ssh_script_inline(self, mock_string_io):
        # script_inline: the literal script is wrapped in StringIO for stdin
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "script_inline": "foobar",
                "interpreter": ["interpreter", "interpreter_arg"],
                "command_args": ["arg1", "arg2"]
            }
        )
        mock_ssh.execute.assert_called_once_with(
            ["interpreter", "interpreter_arg", "arg1", "arg2"],
            stdin=mock_string_io.return_value)
        mock_string_io.assert_called_once_with("foobar")
    def test__run_command_over_ssh_remote_path(self):
        # remote_path with no local_path: execute directly, nothing uploaded
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "remote_path": ["foo", "bar"],
                "command_args": ["arg1", "arg2"]
            }
        )
        mock_ssh.execute.assert_called_once_with(
            ["foo", "bar", "arg1", "arg2"],
            stdin=None)
    def test__run_command_over_ssh_remote_path_copy(self):
        # local_path present: file is first pushed (chmod 755) to the last
        # remote_path element, then executed
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "remote_path": ["foo", "bar"],
                "local_path": "/bin/false",
                "command_args": ["arg1", "arg2"]
            }
        )
        mock_ssh.put_file.assert_called_once_with(
            "/bin/false", "bar", mode=0o755
        )
        mock_ssh.execute.assert_called_once_with(
            ["foo", "bar", "arg1", "arg2"],
            stdin=None)
    def test__run_command_over_ssh_fails(self):
        # an empty command dict is invalid input
        vm_scenario = utils.VMScenario(self.context)
        self.assertRaises(ValueError,
                          vm_scenario._run_command_over_ssh,
                          None, command={})
    def test__wait_for_ssh(self):
        ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._wait_for_ssh(ssh)
        ssh.wait.assert_called_once_with()
    def test__wait_for_ping(self):
        # ping-wait delegates to the generic wait_for_status helper with a
        # Host resource and the ICMP_UP ready status
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._ping_ip_address = mock.Mock(return_value=True)
        vm_scenario._wait_for_ping(netaddr.IPAddress("1.2.3.4"))
        self.mock_wait_for_status.mock.assert_called_once_with(
            utils.Host("1.2.3.4"),
            ready_statuses=[utils.Host.ICMP_UP_STATUS],
            update_resource=utils.Host.update_status,
            timeout=CONF.benchmark.vm_ping_timeout,
            check_interval=CONF.benchmark.vm_ping_poll_interval)
    @mock.patch(VMTASKS_UTILS + ".VMScenario._run_command_over_ssh")
    @mock.patch("rally.common.sshutils.SSH")
    def test__run_command(self, mock_sshutils_ssh,
                          mock_vm_scenario__run_command_over_ssh):
        # _run_command: builds an SSH client with the keypair from context,
        # waits for it, then delegates to _run_command_over_ssh
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario.context = {"user": {"keypair": {"private": "ssh"}}}
        vm_scenario._run_command("1.2.3.4", 22, "username", "password",
                                 command={"script_file": "foo",
                                          "interpreter": "bar"})
        mock_sshutils_ssh.assert_called_once_with(
            "username", "1.2.3.4", port=22, pkey="ssh", password="password")
        mock_sshutils_ssh.return_value.wait.assert_called_once_with()
        mock_vm_scenario__run_command_over_ssh.assert_called_once_with(
            mock_sshutils_ssh.return_value,
            {"script_file": "foo", "interpreter": "bar"})
    def get_scenario(self):
        """Build a VMScenario with boot/delete/fip/ping mocked, plus a fake
        server carrying one network and address; returns (scenario, server)."""
        server = mock.Mock(
            networks={"foo_net": "foo_data"},
            addresses={"foo_net": [{"addr": "foo_ip"}]},
            tenant_id="foo_tenant"
        )
        scenario = utils.VMScenario(self.context)
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._delete_server = mock.Mock()
        scenario._associate_floating_ip = mock.Mock()
        scenario._wait_for_ping = mock.Mock()
        return scenario, server
    def test__boot_server_with_fip_without_networks(self):
        # a server without networks cannot get an IP -> RuntimeError,
        # but the boot itself must still have been attempted
        scenario, server = self.get_scenario()
        server.networks = {}
        self.assertRaises(RuntimeError,
                          scenario._boot_server_with_fip,
                          "foo_image", "foo_flavor", foo_arg="foo_value")
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            foo_arg="foo_value", auto_assign_nic=True)
    def test__boot_server_with_fixed_ip(self):
        # use_floating_ip=False: fixed address returned, no FIP attached
        scenario, server = self.get_scenario()
        scenario._attach_floating_ip = mock.Mock()
        server, ip = scenario._boot_server_with_fip(
            "foo_image", "foo_flavor", floating_network="ext_network",
            use_floating_ip=False, foo_arg="foo_value")
        self.assertEqual(ip, {"ip": "foo_ip", "id": None,
                              "is_floating": False})
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            auto_assign_nic=True, foo_arg="foo_value")
        self.assertEqual(scenario._attach_floating_ip.mock_calls, [])
    def test__boot_server_with_fip(self):
        # use_floating_ip=True: FIP is created on the floating network
        scenario, server = self.get_scenario()
        scenario._attach_floating_ip = mock.Mock(
            return_value={"id": "foo_id", "ip": "foo_ip"})
        server, ip = scenario._boot_server_with_fip(
            "foo_image", "foo_flavor", floating_network="ext_network",
            use_floating_ip=True, foo_arg="foo_value")
        self.assertEqual(ip, {"ip": "foo_ip", "id": "foo_id",
                              "is_floating": True})
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            auto_assign_nic=True, foo_arg="foo_value")
        scenario._attach_floating_ip.assert_called_once_with(
            server, "ext_network")
    def test__delete_server_with_fixed_ip(self):
        # fixed IP: only the server is deleted, no FIP cleanup
        ip = {"ip": "foo_ip", "id": None, "is_floating": False}
        scenario, server = self.get_scenario()
        scenario._delete_floating_ip = mock.Mock()
        scenario._delete_server_with_fip(server, ip, force_delete=True)
        self.assertEqual(scenario._delete_floating_ip.mock_calls, [])
        scenario._delete_server.assert_called_once_with(server, force=True)
    def test__delete_server_with_fip(self):
        # floating IP: FIP is released before the server is deleted
        fip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True}
        scenario, server = self.get_scenario()
        scenario._delete_floating_ip = mock.Mock()
        scenario._delete_server_with_fip(server, fip, force_delete=True)
        scenario._delete_floating_ip.assert_called_once_with(server, fip)
        scenario._delete_server.assert_called_once_with(server, force=True)
    @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap")
    def test__attach_floating_ip(self, mock_wrap):
        scenario, server = self.get_scenario()
        netwrap = mock_wrap.return_value
        netwrap.create_floating_ip.return_value = {
            "id": "foo_id", "ip": "foo_ip"}
        scenario._attach_floating_ip(
            server, floating_network="bar_network")
        mock_wrap.assert_called_once_with(scenario.clients, scenario)
        netwrap.create_floating_ip.assert_called_once_with(
            ext_network="bar_network",
            tenant_id="foo_tenant", fixed_ip="foo_ip")
        scenario._associate_floating_ip.assert_called_once_with(
            server, "foo_ip", fixed_address="foo_ip", atomic_action=False)
    @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap")
    def test__delete_floating_ip(self, mock_wrap):
        # deletion path: verify address ownership, dissociate, then delete
        scenario, server = self.get_scenario()
        _check_addr = mock.Mock(return_value=True)
        scenario.check_ip_address = mock.Mock(return_value=_check_addr)
        scenario._dissociate_floating_ip = mock.Mock()
        scenario._delete_floating_ip(
            server, fip={"id": "foo_id", "ip": "foo_ip"})
        scenario.check_ip_address.assert_called_once_with(
            "foo_ip")
        _check_addr.assert_called_once_with(server)
        scenario._dissociate_floating_ip.assert_called_once_with(
            server, "foo_ip", atomic_action=False)
        mock_wrap.assert_called_once_with(scenario.clients, scenario)
        mock_wrap.return_value.delete_floating_ip.assert_called_once_with(
            "foo_id", wait=True)
class HostTestCase(test.TestCase):
    """Unit tests for utils.Host.update_status ping behavior.

    Covers the four combinations of platform (linux vs other) and address
    family (IPv4 vs IPv6): linux adds a -w1 deadline flag, IPv6 addresses
    switch the binary to ping6.
    """
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_linux(self, mock_popen, mock_sys):
        # returncode 0 => host is reported ICMP-up
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "linux2"
        host = utils.Host("1.2.3.4")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        mock_popen.assert_called_once_with(
            ["ping", "-c1", "-w1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_linux_ipv6(self, mock_popen, mock_sys):
        # IPv6 on linux: ping6 binary, -w1 deadline kept
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "linux2"
        host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        mock_popen.assert_called_once_with(
            ["ping6", "-c1", "-w1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_other_os(self, mock_popen, mock_sys):
        # non-linux platforms: no -w1 flag (BSD ping has no such option)
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "freebsd10"
        host = utils.Host("1.2.3.4")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        mock_popen.assert_called_once_with(
            ["ping", "-c1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_other_os_ipv6(self, mock_popen, mock_sys):
        # IPv6 on non-linux: ping6 binary, no -w1 flag
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "freebsd10"
        host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        mock_popen.assert_called_once_with(
            ["ping6", "-c1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six # pylint: disable=unused-import
from tensorflow.core.protobuf import tensorflow_server_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.util import compat
def _make_server_def(server_or_cluster_def, job_name, task_index, protocol,
                     config):
  """Builds a `tf.train.ServerDef` protocol buffer.

  Args:
    server_or_cluster_def: A `tf.train.ServerDef` or `tf.train.ClusterDef`
      protocol buffer, or a `tf.train.ClusterSpec` object, describing the
      server to be defined and/or the cluster of which it is a member.
    job_name: (Optional.) Name of the job the server belongs to. Defaults
      to the value in `server_or_cluster_def`, if specified.
    task_index: (Optional.) Task index of the server within its job.
      Defaults to the value in `server_or_cluster_def`, if specified;
      otherwise 0 when the job has exactly one task.
    protocol: (Optional.) Protocol for the server to use. Acceptable values
      include `"grpc"`. Defaults to the value in `server_or_cluster_def`,
      if specified; otherwise `"grpc"`.
    config: (Optional.) A `tf.ConfigProto` that specifies default
      configuration options for all sessions that run on this server.

  Returns:
    A `tf.train.ServerDef`.

  Raises:
    TypeError: If the arguments do not have the appropriate type.
    ValueError: If an argument is not specified and cannot be inferred.
  """
  if isinstance(server_or_cluster_def, tensorflow_server_pb2.ServerDef):
    # Start from a copy of the supplied ServerDef and overlay any explicit
    # arguments on top of it.
    server_def = tensorflow_server_pb2.ServerDef()
    server_def.MergeFrom(server_or_cluster_def)
    if job_name is not None:
      server_def.job_name = job_name
    if task_index is not None:
      server_def.task_index = task_index
    if protocol is not None:
      server_def.protocol = protocol
    if config is not None:
      server_def.default_session_config.MergeFrom(config)
    return server_def

  # Otherwise treat the argument as a cluster description and infer any
  # missing fields where that is unambiguous.
  try:
    cluster_spec = ClusterSpec(server_or_cluster_def)
  except TypeError:
    raise TypeError("Could not convert `server_or_cluster_def` to a "
                    "`tf.train.ServerDef` or `tf.train.ClusterSpec`.")
  if job_name is None:
    if len(cluster_spec.jobs) != 1:
      raise ValueError("Must specify an explicit `job_name`.")
    job_name = cluster_spec.jobs[0]
  if task_index is None:
    if len(cluster_spec.job_tasks(job_name)) != 1:
      raise ValueError("Must specify an explicit `task_index`.")
    task_index = 0
  if protocol is None:
    protocol = "grpc"
  server_def = tensorflow_server_pb2.ServerDef(
      cluster=cluster_spec.as_cluster_def(),
      job_name=job_name, task_index=task_index, protocol=protocol)
  if config is not None:
    server_def.default_session_config.MergeFrom(config)
  return server_def
class Server(object):
  """An in-process TensorFlow server, for use in distributed training.

  A `tf.train.Server` instance encapsulates a set of devices and a
  [`tf.Session`](../../api_docs/python/client.md#Session) target that
  can participate in distributed training. A server belongs to a
  cluster (specified by a [`tf.train.ClusterSpec`](#ClusterSpec)) and
  corresponds to a particular task in a named job. The server can
  communicate with any other server in the same cluster.

  @@__init__
  @@create_local_server
  @@target
  @@server_def
  @@start
  @@join
  """

  def __init__(self,
               server_or_cluster_def,
               job_name=None,
               task_index=None,
               protocol=None,
               config=None,
               start=True):
    """Creates a new server with the given definition.

    The optional `job_name`, `task_index`, and `protocol` arguments
    override any corresponding information in `server_or_cluster_def`.

    Args:
      server_or_cluster_def: A `tf.train.ServerDef` or `tf.train.ClusterDef`
        protocol buffer, or a `tf.train.ClusterSpec` object, describing the
        server to be created and/or the cluster of which it is a member.
      job_name: (Optional.) Name of the job the server belongs to. Defaults
        to the value in `server_or_cluster_def`, if specified.
      task_index: (Optional.) Task index of the server within its job.
        Defaults to the value in `server_or_cluster_def`, if specified;
        otherwise 0 when the job has exactly one task.
      protocol: (Optional.) Protocol for the server to use. Acceptable
        values include `"grpc"`. Defaults to the value in
        `server_or_cluster_def`, if specified; otherwise `"grpc"`.
      config: (Optional.) A `tf.ConfigProto` that specifies default
        configuration options for all sessions that run on this server.
      start: (Optional.) Boolean; whether to start the server immediately
        after creating it. Defaults to `True`.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        creating the TensorFlow server.
    """
    self._server_def = _make_server_def(server_or_cluster_def, job_name,
                                        task_index, protocol, config)
    with errors.raise_exception_on_not_ok_status() as status:
      self._server = pywrap_tensorflow.PyServer_New(
          self._server_def.SerializeToString(), status)
    if start:
      self.start()

  def start(self):
    """Starts this server.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        starting the TensorFlow server.
    """
    with errors.raise_exception_on_not_ok_status() as status:
      pywrap_tensorflow.PyServer_Start(self._server, status)

  def join(self):
    """Blocks until the server has shut down.

    This method currently blocks forever.

    Raises:
      tf.errors.OpError: Or one of its subclasses if an error occurs while
        joining the TensorFlow server.
    """
    with errors.raise_exception_on_not_ok_status() as status:
      pywrap_tensorflow.PyServer_Join(self._server, status)

  @property
  def server_def(self):
    """The `tf.train.ServerDef` describing this server's configuration."""
    return self._server_def

  @property
  def target(self):
    """The target for a `tf.Session` to connect to this server.

    To create a [`tf.Session`](../../api_docs/python/client.md#Session)
    that connects to this server:

    ```python
    server = tf.train.Server(...)
    with tf.Session(server.target):
      # ...
    ```

    Returns:
      A string containing a session target for this server.
    """
    return self._server.target()

  @staticmethod
  def create_local_server(config=None, start=True):
    """Creates a new single-process cluster running on the local host.

    Convenience wrapper that builds a `tf.train.Server` from a
    `tf.train.ServerDef` describing a single-process cluster with a
    single task in a job called `"local"`.

    Args:
      config: (Optional.) A `tf.ConfigProto` that specifies default
        configuration options for all sessions that run on this server.
      start: (Optional.) Boolean; whether to start the server after
        creating it. Defaults to `True`.

    Returns:
      A local `tf.train.Server`.
    """
    # Port 0 asks the OS to pick a free port for the server.
    return Server({"local": ["localhost:0"]}, protocol="grpc", config=config,
                  start=start)
class ClusterSpec(object):
  """Represents a cluster as a set of "tasks", organized into "jobs".

  A `tf.train.ClusterSpec` represents the set of processes that
  participate in a distributed TensorFlow computation. Every
  [`tf.train.Server`](#Server) is constructed in a particular cluster.

  To create a cluster with two jobs and five tasks, you specify the
  mapping from job names to lists of network addresses (typically
  hostname-port pairs).

  ```
  cluster = tf.train.ClusterSpec({"worker": ["worker0.example.com:2222",
                                             "worker1.example.com:2222",
                                             "worker2.example.com:2222"],
                                  "ps": ["ps0.example.com:2222",
                                         "ps1.example.com:2222"]})
  ```

  @@as_cluster_def
  @@as_dict
  """

  def __init__(self, cluster):
    """Creates a `ClusterSpec`.

    Args:
      cluster: A dictionary mapping one or more job names to lists of network
        addresses, or a `tf.train.ClusterDef` protocol buffer, or another
        `ClusterSpec` (which is copied).

    Raises:
      TypeError: If `cluster` is not a dictionary mapping strings to lists
        of strings, and not a `tf.train.ClusterDef` protobuf.
    """
    if isinstance(cluster, dict):
      self._cluster_spec = cluster
      self._make_cluster_def()
    elif isinstance(cluster, tensorflow_server_pb2.ClusterDef):
      self._cluster_def = cluster
      self._cluster_spec = self._spec_from_cluster_def(self._cluster_def)
    elif isinstance(cluster, ClusterSpec):
      self._cluster_def = tensorflow_server_pb2.ClusterDef()
      self._cluster_def.MergeFrom(cluster.as_cluster_def())
      self._cluster_spec = self._spec_from_cluster_def(self._cluster_def)
    else:
      raise TypeError("`cluster` must be a dictionary mapping one or more "
                      "job names to lists of network addresses, or a "
                      "`ClusterDef` protocol buffer")

  @staticmethod
  def _spec_from_cluster_def(cluster_def):
    """Builds the job-name -> task-address-list dict from a `ClusterDef`.

    Protobuf map fields have no guaranteed iteration order, so tasks are
    sorted by their integer task index. This makes the resulting lists
    deterministic and ordered by index, as `job_tasks` documents.
    """
    return {
        job_def.name: [job_def.tasks[index]
                       for index in sorted(job_def.tasks)]
        for job_def in cluster_def.job
    }

  def as_dict(self):
    """Returns a dictionary from job names to lists of network addresses."""
    return self._cluster_spec

  def as_cluster_def(self):
    """Returns a `tf.train.ClusterDef` protocol buffer based on this cluster."""
    return self._cluster_def

  @property
  def jobs(self):
    """Returns a list of job names in this cluster.

    Returns:
      A list of strings, corresponding to the names of jobs in this cluster.
    """
    return list(self._cluster_spec.keys())

  def job_tasks(self, job_name):
    """Returns a list of tasks in the given job.

    Args:
      job_name: The string name of a job in this cluster.

    Returns:
      A list of strings, corresponding to the network addresses of tasks in
      the given job, ordered by task index.

    Raises:
      ValueError: If `job_name` does not name a job in this cluster.
    """
    try:
      return [task for task in self._cluster_spec[job_name]]
    except KeyError:
      # Fixed: a missing dict key raises KeyError, not IndexError; the
      # original `except IndexError` let KeyError escape instead of the
      # documented ValueError.
      raise ValueError("No such job in cluster: %r" % job_name)

  def _make_cluster_def(self):
    """Creates a `tf.train.ClusterDef` based on the given `cluster_spec`.

    Raises:
      TypeError: If `cluster_spec` is not a dictionary mapping strings to
        lists of strings.
    """
    self._cluster_def = tensorflow_server_pb2.ClusterDef()
    # NOTE(mrry): Sort by job_name to produce deterministic protobufs.
    for job_name, task_list in sorted(self._cluster_spec.items()):
      try:
        job_name = compat.as_bytes(job_name)
      except TypeError:
        raise TypeError("Job name %r must be bytes or unicode" % job_name)
      job_def = self._cluster_def.job.add()
      job_def.name = job_name
      for i, task_address in enumerate(task_list):
        try:
          task_address = compat.as_bytes(task_address)
        except TypeError:
          raise TypeError(
              "Task address %r must be bytes or unicode" % task_address)
        job_def.tasks[i] = task_address
| |
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import logging
from unittest import TestCase
from hamcrest import equal_to, assert_that, not_none, none, raises
from storops.vnx.enums import VNXSPEnum
from storops.vnx.parsers import VNXCliParser, VNXPropDescriptor, \
VNXParserConfigFactory
from storops.vnx.resource import get_vnx_parser
from storops_test.vnx.cli_mock import MockCli
from storops_test.vnx.resource.fakes import STORAGE_GROUP_HBA
log = logging.getLogger(__name__)
# Shared property descriptors reused by the demo parsers below.
# Signature appears to be (cli_option, output_label, attr_name) — the third
# argument is optional and presumably derived from the label when omitted.
A = VNXPropDescriptor('-a', 'Prop A (name):', 'prop_a')
B = VNXPropDescriptor('-b', 'Prop B:')
C = VNXPropDescriptor('-c', 'Prop C:')
# Index descriptor: no CLI option; marks the property that keys each record.
ID = VNXPropDescriptor(None, 'ID:', is_index=True)
class DemoParser(VNXCliParser):
    """Demo parser with three option-backed properties and an index."""

    def __init__(self):
        super(DemoParser, self).__init__()
        self.add_property(A, B, C, ID)
class DemoParserNonIndex(VNXCliParser):
    """Demo parser with a single property and no index descriptor."""

    def __init__(self):
        super(DemoParserNonIndex, self).__init__()
        self.add_property(VNXPropDescriptor('-b', 'Prop B:'))
class DemoParserRegexIndex(VNXCliParser):
    """Demo parser whose labels are regular expressions with capture groups.

    The first descriptor is both regex-based and the record index; its
    captured group is converted to ``int``.
    """

    def __init__(self):
        super(DemoParserRegexIndex, self).__init__()
        self.add_property(
            VNXPropDescriptor(None,
                              r'\s*\w+:(\d+)',
                              'id',
                              is_index=True,
                              is_regex=True,
                              converter=int),
            VNXPropDescriptor(None,
                              r'\s*value:\s*(\w+)',
                              'value',
                              is_regex=True))
class DemoParserMultiIndices(VNXCliParser):
    """Demo parser with a composite index (properties A and B together)."""

    def __init__(self):
        super(DemoParserMultiIndices, self).__init__()
        self.add_property(
            VNXPropDescriptor(None, 'A:', is_index=True),
            VNXPropDescriptor(None, 'B:', is_index=True),
            VNXPropDescriptor(None, 'C:'),
            VNXPropDescriptor(None, 'D:'))
class VNXCliParserTest(TestCase):
    """Covers option collection, index lookup and parsing of VNXCliParser."""

    def test_get_property_options(self):
        # Only descriptors created with a CLI option string contribute.
        options = DemoParser().property_options
        assert_that(' '.join(options), equal_to('-a -b -c'))

    def test_get_index_descriptor(self):
        assert_that(DemoParser().index_property.label, equal_to('ID:'))

    def test_get_index_descriptor_none(self):
        assert_that(DemoParserNonIndex().index_property, none())

    def test_parse(self):
        # NOTE(review): label casing differs from descriptor A
        # ('Name' vs 'name') — matching is presumably case-insensitive.
        output = """
        ID: test
        Prop A (Name): ab (c)
        Prop B: d ef
        """
        parser = DemoParser()
        # Only the requested descriptors (A, ID, C) become attributes.
        parsed = parser.parse(output, [A, ID, C])
        assert_that(parsed.prop_a, equal_to('ab (c)'))
        assert_that(parsed.prop_c, none())
        assert_that(parsed.id, equal_to('test'))

        def f():
            log.debug(parsed.prop_b)

        # B was not requested, so accessing it raises AttributeError.
        assert_that(f, raises(AttributeError))

    def test_parse_empty_prop(self):
        output = """
        ID: test
        Prop A (Name): ab (c)
        Prop B:
        Prop C: abc
        """
        parser = DemoParser()
        parsed = parser.parse(output, [A, ID, B, C])
        assert_that(parsed.id, equal_to('test'))
        assert_that(parsed.prop_a, equal_to('ab (c)'))
        # A label with no value parses to an empty string, not None.
        assert_that(parsed.prop_b, equal_to(''))

    def test_parse_regex_label(self):
        output = """
        id:123
        value:abcde

        id:456
        value:ghijk
        """
        parsed = DemoParserRegexIndex().parse_all(output)
        assert_that(len(parsed), equal_to(2))
        for i in parsed:
            if i.id == 123:
                assert_that(i.value, equal_to('abcde'))
            elif i.id == 456:
                assert_that(i.value, equal_to('ghijk'))
            else:
                self.fail('id not recognized.')

    def test_all_options(self):
        options = DemoParser().all_options
        assert_that(options, equal_to(['-a', '-b', '-c']))

    def test_parse_multi_index(self):
        # Records sharing the same (A, B) composite index are merged,
        # so the three raw records collapse into two parsed objects.
        output = """
        A: a0
        B: b0
        C: c0

        A: a0
        B: b0
        D: d0

        A: a0
        B: b1
        C: c1
        """
        parsed = DemoParserMultiIndices().parse_all(output)
        assert_that(len(parsed), equal_to(2))
        a0b0 = next(i for i in parsed if i.b == 'b0')
        assert_that(a0b0, not_none())
        assert_that(a0b0.a, equal_to('a0'))
        assert_that(a0b0.b, equal_to('b0'))
        assert_that(a0b0.c, equal_to('c0'))
        assert_that(a0b0.d, equal_to('d0'))
        a0b1 = next(i for i in parsed if i.b == 'b1')
        assert_that(a0b1, not_none())
        assert_that(a0b1.a, equal_to('a0'))
        assert_that(a0b1.b, equal_to('b1'))
        assert_that(a0b1.c, equal_to('c1'))
class VNXStorageGroupHBAParserTest(TestCase):
    """Parsing of a single storage-group HBA record."""

    def test_parse(self):
        # STORAGE_GROUP_HBA is a canned CLI-output fixture.
        data = get_vnx_parser("VNXStorageGroupHBA").parse(STORAGE_GROUP_HBA)
        assert_that(data.host_name, equal_to('abc.def.dev'))
        assert_that(data.sp_port, equal_to('A-3v1'))
        assert_that(data.initiator_ip, equal_to('10.244.209.72'))
        assert_that(data.tpgt, equal_to('1'))
        assert_that(data.isid, equal_to('10000000000'))
        # hba is a (initiator-iqn, sp-name, port-id) tuple.
        assert_that(
            data.hba,
            equal_to(('iqn.1991-05.com.microsoft:abc.def.dev',
                      'SP A', '3')))

    def test_parse_no_header(self):
        # Same record shape but without the leading header block.
        output = """
        iqn.1991-05.com.microsoft:abc.def.dev  SP A  1
        Host name:  abc.def.dev
        SPPort:  A-1v0
        Initiator IP:  10.244.209.72
        TPGT:  1
        ISID:  10000000000
        """
        data = get_vnx_parser("VNXStorageGroupHBA").parse(output)
        assert_that(data.host_name, equal_to('abc.def.dev'))
        assert_that(data.sp_port, equal_to('A-1v0'))
        assert_that(data.initiator_ip, equal_to('10.244.209.72'))
        assert_that(data.tpgt, equal_to('1'))
        assert_that(data.isid, equal_to('10000000000'))
        assert_that(data.hba,
                    equal_to(('iqn.1991-05.com.microsoft:abc.def.dev',
                              'SP A',
                              '1')))
class VNXStorageGroupParserTest(TestCase):
    def test_parse(self):
        """Parses a storage-group listing: scalars, ALU/HLU map, HBA pairs."""
        raw = MockCli.read_file('storagegroup_-messner_-list_-host_'
                                '-iscsiAttributes_-gname_microsoft.txt')
        sg = get_vnx_parser('VNXStorageGroup').parse(raw)
        assert_that(sg.shareable, equal_to(True))
        assert_that(sg.name, equal_to('microsoft'))
        expected_wwn = '12:34:56:78:9A:BC:DE:F1:23:45:67:89:AB:CD:EF:01'
        assert_that(sg.wwn, equal_to(expected_wwn))
        # ALU -> HLU mapping: only the listed LUNs are present.
        assert_that(sg.alu_hlu_map[4], equal_to(0))
        assert_that(sg.alu_hlu_map[456], equal_to(123))
        assert_that(sg.alu_hlu_map.get(3, None), none())
        # assert for hba members
        assert_that(len(sg.hba_sp_pairs), equal_to(2))
        first_pair = sg.hba_sp_pairs[0]
        assert_that(first_pair.host_name, equal_to('abc.def.dev'))
class VNXConsistencyGroupParserTest(TestCase):
    def test_parse(self):
        """Both consistency groups in the fixture are found with their state."""
        raw = MockCli.read_file('snap_-group_-list_-detail.txt')
        cgs = get_vnx_parser('VNXConsistencyGroup').parse_all(raw)
        for cg_name, cg_state in (('test cg name', 'Ready'),
                                  ('another cg', 'Offline')):
            cg = next(c for c in cgs if c.name == cg_name)
            assert_that(cg, not_none())
            assert_that(cg.state, equal_to(cg_state))
class VNXPoolPropertiesTest(TestCase):
    def test_parse(self):
        """Scalar pool properties are parsed with the expected types."""
        raw = MockCli.read_file('storagepool_-list_-all_-id_1.txt')
        pool = get_vnx_parser('VNXPool').parse(raw)
        assert_that(pool.name, equal_to('Pool_daq'))
        assert_that(pool.pool_id, equal_to(1))
        assert_that(pool.state, equal_to('Ready'))
        # Capacities are converted to floats (GB).
        assert_that(pool.user_capacity_gbs, equal_to(2329.792))
        assert_that(pool.available_capacity_gbs, equal_to(1473.623))
        assert_that(pool.total_subscribed_capacity_gbs, equal_to(2701.767))
        assert_that(pool.percent_full_threshold, equal_to(70))
        # FAST cache information is absent from this fixture.
        assert_that(pool.fast_cache, none())
class VNXPoolFeatureParserTest(TestCase):
    """Parsing of the pool-feature summary output."""

    # command: storagepool -feature -info
    output = """
        Is Virtual Provisioning Supported:  true
        Max. Pools:  60
        Max. Disks Per Pool:  1496
        Max. Disks for all Pools:  1496
        Max. Disks Per Operation:  180
        Max. Pool LUNs:  4000
        Min. Pool LUN Size(Blocks):  1
        Max. Pool LUN Size(Blocks):  549755813888
        Max. Pool LUN Size(GBs):  262144.000
        Total Number of Pools:  2
        Total Number of Pool LUNs:  4
        Total Number of all Pool LUNs that are thin:  3
        Total Number of all Pool LUNs that are non-thin:  1
        Number of Disks used in Pools:  5
        Available Disks:
        Bus 0 Enclosure 0 Disk 24
        Bus 0 Enclosure 0 Disk 16
        Bus 0 Enclosure 0 Disk 5
        Bus 0 Enclosure 0 Disk 4
        """

    def test_parse(self):
        parser = get_vnx_parser('VNXPoolFeature')
        parsed = parser.parse(self.output)
        # Only a couple of the numeric properties are spot-checked here.
        assert_that(parsed.max_pool_luns, equal_to(4000))
        assert_that(parsed.total_pool_luns, equal_to(4))
class VNXLunPropertiesTest(TestCase):
    def test_parse(self):
        """Every parsed LUN property matches the fixture's expected value."""
        raw = MockCli.read_file('lun_-list_-all_-l_19.txt')
        parsed = get_vnx_parser('VNXLun').parse(raw)
        expected = {
            'wwn': '60:06:01:60:1A:50:35:00:CC:22:61:D6:76:B1:E4:11',
            'name': 'test_lun',
            'lun_id': 19,
            'total_capacity_gb': 1.0,
            'is_thin_lun': True,
            'is_compressed': False,
            'deduplication_state': 'Off',
            'tiering_policy': 'No Movement',
            'initial_tier': 'Optimize Pool',
            'state': 'Ready',
            'status': 'OK(0x0)',
            'operation': 'None',
            'current_owner': VNXSPEnum.SP_A,
        }
        for prop_name, expected_value in expected.items():
            assert_that(getattr(parsed, prop_name), equal_to(expected_value))
        assert_that(parsed.attached_snapshot, none())
class VNXParserConfigFactoryTest(TestCase):
    def test_read_properties(self):
        """Looked-up parser carries its resource name and data source."""
        rsc_name = 'VNXConsistencyGroup'
        parser = get_vnx_parser(rsc_name)
        assert_that(parser.resource_class_name, equal_to(rsc_name))
        assert_that(parser.data_src, equal_to('cli'))

    def test_properties_sequence_should_align_with_file(self):
        """Descriptor sequence numbers follow their order in the config file."""
        system_parser = get_vnx_parser('VNXSystem')
        assert_that(system_parser.MODEL.sequence, equal_to(0))
        assert_that(system_parser.NAME.sequence, equal_to(5))

    def test_get_rsc_pkg_name(self):
        pkg = VNXParserConfigFactory.get_rsc_pkg_name()
        assert_that(pkg, equal_to('storops.vnx.resource'))
| |
# Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
This module contains the main interface to the botocore package, the
Session object.
"""
import copy
import logging
import os
import platform
import re
import socket
import warnings
from botocore import __version__
from botocore import UNSIGNED
import botocore.configloader
import botocore.credentials
import botocore.client
from botocore.configprovider import ConfigValueStore
from botocore.configprovider import ConfigChainFactory
from botocore.configprovider import create_botocore_default_config_mapping
from botocore.configprovider import BOTOCORE_DEFAUT_SESSION_VARIABLES
from botocore.exceptions import (
ConfigNotFound, ProfileNotFound, UnknownServiceError,
PartialCredentialsError,
)
from botocore.errorfactory import ClientExceptionsFactory
from botocore import handlers
from botocore.hooks import HierarchicalEmitter, first_non_none_response
from botocore.hooks import EventAliaser
from botocore.loaders import create_loader
from botocore.parsers import ResponseParserFactory
from botocore.regions import EndpointResolver
from botocore.model import ServiceModel
from botocore import monitoring
from botocore import paginate
from botocore import waiter
from botocore import retryhandler, translate
from botocore import utils
from botocore.utils import EVENT_ALIASES, validate_region_name
from botocore.compat import MutableMapping, HAS_CRT
logger = logging.getLogger(__name__)
class Session(object):
    """
    The Session object collects together useful functionality
    from `botocore` as well as important data such as configuration
    information and credentials into a single, easy-to-use object.

    :ivar available_profiles: A list of profiles defined in the config
        file associated with this session.
    :ivar profile: The current profile.
    """

    # Copied so per-class/per-instance mutation does not touch the
    # botocore-wide default mapping.
    SESSION_VARIABLES = copy.copy(BOTOCORE_DEFAUT_SESSION_VARIABLES)

    #: The default format string to use when configuring the botocore logger.
    LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
def __init__(self, session_vars=None, event_hooks=None,
             include_builtin_handlers=True, profile=None):
    """
    Create a new Session object.

    :type session_vars: dict
    :param session_vars: A dictionary that is used to override some or all
        of the environment variables associated with this session. The
        key/value pairs defined in this dictionary will override the
        corresponding variables defined in ``SESSION_VARIABLES``.

    :type event_hooks: BaseEventHooks
    :param event_hooks: The event hooks object to use. If one is not
        provided, an event hooks object will be automatically created
        for you.

    :type include_builtin_handlers: bool
    :param include_builtin_handlers: Indicates whether or not to
        automatically register builtin handlers.

    :type profile: str
    :param profile: The name of the profile to use for this
        session. Note that the profile can only be set when
        the session is created.
    """
    if event_hooks is None:
        self._original_handler = HierarchicalEmitter()
    else:
        self._original_handler = event_hooks
    # All event registration/emission goes through the aliaser so that
    # legacy event names keep working.
    self._events = EventAliaser(self._original_handler)
    if include_builtin_handlers:
        self._register_builtin_handlers(self._events)
    self.user_agent_name = 'Botocore'
    self.user_agent_version = __version__
    self.user_agent_extra = ''
    # The _profile attribute is just used to cache the value
    # of the current profile to avoid going through the normal
    # config lookup process each access time.
    self._profile = None
    self._config = None
    self._credentials = None
    self._profile_map = None
    # This is a dict that stores per session specific config variable
    # overrides via set_config_variable().
    self._session_instance_vars = {}
    if profile is not None:
        self._session_instance_vars['profile'] = profile
    self._client_config = None
    self._last_client_region_used = None
    # Two component registries: only self._components backs the public
    # get_component() API; _internal_components holds private pieces.
    self._components = ComponentLocator()
    self._internal_components = ComponentLocator()
    self._register_components()
    self.session_var_map = SessionVarDict(self, self.SESSION_VARIABLES)
    if session_vars is not None:
        self.session_var_map.update(session_vars)
def _register_components(self):
    """Wire up every lazily-created subcomponent of this session."""
    for register in (self._register_credential_provider,
                     self._register_data_loader,
                     self._register_endpoint_resolver,
                     self._register_event_emitter,
                     self._register_response_parser_factory,
                     self._register_exceptions_factory,
                     self._register_config_store,
                     self._register_monitor):
        register()
def _register_event_emitter(self):
    # Expose the (aliased) event emitter as a public session component.
    self._components.register_component('event_emitter', self._events)
def _register_credential_provider(self):
    # Lazily built: the credential resolver is only constructed on first
    # access, since building it can read config/environment state.
    self._components.lazy_register_component(
        'credential_provider', self._create_credential_resolver)
def _create_credential_resolver(self):
    # region_name reflects the region of the most recently created client,
    # which credential providers may use for regional endpoints.
    return botocore.credentials.create_credential_resolver(
        self, region_name=self._last_client_region_used
    )
def _register_data_loader(self):
    # Deferred so the 'data_path' config variable is resolved at first use,
    # not at session construction time.
    self._components.lazy_register_component(
        'data_loader',
        lambda: create_loader(self.get_config_variable('data_path')))
def _register_endpoint_resolver(self):
    def _build_default_resolver():
        # Load the endpoints model through the shared data loader.
        endpoint_data = self.get_component('data_loader').load_data(
            'endpoints')
        return EndpointResolver(endpoint_data)

    # Internal component: not part of the public get_component() surface.
    self._internal_components.lazy_register_component(
        'endpoint_resolver', _build_default_resolver)
def _register_response_parser_factory(self):
    # Eagerly registered; the factory itself is cheap to construct.
    self._components.register_component('response_parser_factory',
                                        ResponseParserFactory())
def _register_exceptions_factory(self):
    # Internal component used when building per-client exception classes.
    self._internal_components.register_component(
        'exceptions_factory', ClientExceptionsFactory())
def _register_builtin_handlers(self, events):
    """Attach botocore's built-in event handlers to this session.

    NOTE(review): the ``events`` argument is not consulted here;
    registration goes through ``self.register`` / ``self._events``
    directly — confirm whether the parameter is vestigial.
    """
    for spec in handlers.BUILTIN_HANDLERS:
        if len(spec) == 2:
            # Plain (event_name, handler) pair.
            self.register(*spec)
        else:
            # Triple carries an ordering hint for the emitter.
            event_name, handler, register_type = spec
            if register_type is handlers.REGISTER_FIRST:
                self._events.register_first(event_name, handler)
            elif register_type is handlers.REGISTER_LAST:
                self._events.register_last(event_name, handler)
def _register_config_store(self):
    # The config store resolves logical config variables through the
    # default botocore lookup chain (instance vars, env, config file, ...).
    config_store_component = ConfigValueStore(
        mapping=create_botocore_default_config_mapping(self)
    )
    self._components.register_component('config_store',
                                        config_store_component)
def _register_monitor(self):
    # Lazily built: the CSM monitor is only created if/when requested.
    self._internal_components.lazy_register_component(
        'monitor', self._create_csm_monitor)
def _create_csm_monitor(self):
    """Build the client-side-monitoring Monitor, or None when CSM is off."""
    if not self.get_config_variable('csm_enabled'):
        return None
    serializer = monitoring.CSMSerializer(
        csm_client_id=self.get_config_variable('csm_client_id'))
    # CSM events are published over a UDP socket to the configured agent.
    publisher = monitoring.SocketPublisher(
        socket=socket.socket(socket.AF_INET, socket.SOCK_DGRAM),
        host=self.get_config_variable('csm_host'),
        port=self.get_config_variable('csm_port'),
        serializer=serializer,
    )
    return monitoring.Monitor(
        adapter=monitoring.MonitorEventAdapter(),
        publisher=publisher,
    )
def _get_crt_version(self):
    """Best-effort lookup of the installed awscrt version; never raises."""
    try:
        import pkg_resources
        return pkg_resources.get_distribution("awscrt").version
    except Exception:
        # Deliberately broad: pkg_resources problems must not break the
        # session, and awscrt may legitimately be absent. Fall back to a
        # sentinel instead of taking a hard dependency on the package.
        return "Unknown"
@property
def available_profiles(self):
    # Names of all profiles found in the config/credentials files.
    return list(self._build_profile_map().keys())
def _build_profile_map(self):
    # This will build the profile map if it has not been created,
    # otherwise it will return the cached value. The profile map
    # is a mapping of profile name to the config values for that
    # profile (the original comment said "list", but full_config
    # stores profiles as a dict).
    if self._profile_map is None:
        self._profile_map = self.full_config['profiles']
    return self._profile_map
@property
def profile(self):
    # Resolve and cache the configured profile name. When the lookup
    # yields None, nothing is cached and the lookup repeats on next
    # access (matching the original behavior).
    if self._profile is None:
        self._profile = self.get_config_variable('profile')
    return self._profile
def get_config_variable(self, logical_name, methods=None):
    """Resolve a logical config variable through the config store.

    ``methods`` preserves a legacy override of the lookup sources; when
    supplied, resolution goes through a temporary chain instead.
    """
    if methods is None:
        store = self.get_component('config_store')
        return store.get_config_variable(logical_name)
    return self._get_config_variable_with_custom_methods(
        logical_name, methods)
def _get_config_variable_with_custom_methods(self, logical_name, methods):
    """Resolve ``logical_name`` using only the given lookup ``methods``.

    ``methods`` is a subset of ('instance', 'env', 'config').
    """
    # If a custom list of methods was supplied we need to perserve the
    # behavior with the new system. To do so a new chain that is a copy of
    # the old one will be constructed, but only with the supplied methods
    # being added to the chain. This chain will be consulted for a value
    # and then thrown out. This is not efficient, nor is the methods arg
    # used in botocore, this is just for backwards compatibility.
    chain_builder = SubsetChainConfigFactory(session=self, methods=methods)
    mapping = create_botocore_default_config_mapping(self)
    for name, config_options in self.session_var_map.items():
        # Each session variable unpacks to its lookup sources + default.
        config_name, env_vars, default, typecast = config_options
        build_chain_config_args = {
            'conversion_func': typecast,
            'default': default,
        }
        # Only the requested lookup sources participate in the chain.
        if 'instance' in methods:
            build_chain_config_args['instance_name'] = name
        if 'env' in methods:
            build_chain_config_args['env_var_names'] = env_vars
        if 'config' in methods:
            build_chain_config_args['config_property_name'] = config_name
        mapping[name] = chain_builder.create_config_chain(
            **build_chain_config_args
        )
    # Throwaway store: consulted once, then discarded.
    config_store_component = ConfigValueStore(
        mapping=mapping
    )
    value = config_store_component.get_config_variable(logical_name)
    return value
def set_config_variable(self, logical_name, value):
    """Override a session variable with an explicit per-session value.

    After this call, ``get_config_variable(logical_name)`` returns
    ``value`` for this session, bypassing the normal environment /
    config-file lookup chain::

        >>> # Assume logical name 'foo' maps to env var 'FOO'
        >>> os.environ['FOO'] = 'myvalue'
        >>> s.get_config_variable('foo')
        'myvalue'
        >>> s.set_config_variable('foo', 'othervalue')
        >>> s.get_config_variable('foo')
        'othervalue'

    :type logical_name: str
    :param logical_name: The logical name of the session variable to
        set. These are the keys in ``SESSION_VARIABLES``.

    :param value: The value to associate with the config variable.
    """
    logger.debug(
        "Setting config variable for %s to %r",
        logical_name,
        value,
    )
    self._session_instance_vars[logical_name] = value
def instance_variables(self):
    # Shallow copy of the per-session overrides set via
    # set_config_variable(), so callers cannot mutate internal state.
    return copy.copy(self._session_instance_vars)
def get_scoped_config(self):
    """
    Return the config-file values for the current profile only.

    The data comes **only** from the config file; no environment or
    per-session overrides are consulted (use ``get_config_variable``
    for the full lookup behavior).

    When no profile is explicitly configured, the ``default`` profile's
    values (or an empty dict) are returned. When a profile is configured
    but missing from the file, ``ProfileNotFound`` is raised — even for
    an explicitly requested ``default`` profile.

    :raises: ConfigNotFound, ConfigParseError, ProfileNotFound
    :rtype: dict
    """
    profile_map = self._build_profile_map()
    profile_name = self.get_config_variable('profile')
    if profile_name is None:
        return profile_map.get('default', {})
    if profile_name not in profile_map:
        raise ProfileNotFound(profile=profile_name)
    return profile_map[profile_name]
@property
def full_config(self):
    """Return the parsed config file.

    The ``get_config`` method returns the config associated with the
    specified profile.  This property returns the contents of the
    **entire** config file.

    :rtype: dict
    """
    if self._config is None:
        # First access: parse the config file, falling back to an empty
        # profile map when there is no config file at all.
        try:
            config_file = self.get_config_variable('config_file')
            self._config = botocore.configloader.load_config(config_file)
        except ConfigNotFound:
            self._config = {'profiles': {}}
        try:
            # Now we need to inject the profiles from the
            # credentials file.  We don't actually need the values
            # in the creds file, only the profile names so that we
            # can validate the user is not referring to a nonexistent
            # profile.
            cred_file = self.get_config_variable('credentials_file')
            cred_profiles = botocore.configloader.raw_config_parse(
                cred_file)
            for profile in cred_profiles:
                cred_vars = cred_profiles[profile]
                if profile not in self._config['profiles']:
                    self._config['profiles'][profile] = cred_vars
                else:
                    # Config-file values win; credentials-file values
                    # fill in anything the config file did not define.
                    self._config['profiles'][profile].update(cred_vars)
        except ConfigNotFound:
            # No credentials file is fine; the config file alone is used.
            pass
    return self._config
def get_default_client_config(self):
    """Return the session-wide default client config.

    :rtype: botocore.client.Config
    :returns: The default config object used when creating clients, or
        ``None`` when no default config is attached to the session.
    """
    return self._client_config
def set_default_client_config(self, client_config):
    """Attach *client_config* as the session-wide default client config.

    :type client_config: botocore.client.Config
    :param client_config: The default config object used when creating
        clients.  Pass ``None`` to clear the default.
    """
    self._client_config = client_config
def set_credentials(self, access_key, secret_key, token=None):
    """Manually establish credentials for this session.

    Use this when you would rather not rely on a config file,
    environment variables, or IAM roles.

    :type access_key: str
    :param access_key: The access key part of the credentials.
    :type secret_key: str
    :param secret_key: The secret key part of the credentials.
    :type token: str
    :param token: An optional session token used by STS session
        credentials.
    """
    self._credentials = botocore.credentials.Credentials(
        access_key, secret_key, token)
def get_credentials(self):
    """Return the credentials associated with this session.

    Credentials are loaded lazily through the ``credential_provider``
    component on first access and cached for subsequent calls.
    """
    if self._credentials is None:
        provider = self._components.get_component('credential_provider')
        self._credentials = provider.load_credentials()
    return self._credentials
def user_agent(self):
    """
    Return a string suitable for use as a User-Agent header.

    The string will be of the form:

    <agent_name>/<agent_version> Python/<py_ver> <plat_name>/<plat_ver> <exec_env>

    Where:

    - agent_name is the value of the `user_agent_name` attribute
      of the session object (`Botocore` by default).
    - agent_version is the value of the `user_agent_version`
      attribute of the session object (the botocore version by default).
    - py_ver is the version of the Python interpreter being used.
    - plat_name is the name of the platform (e.g. Darwin)
    - plat_ver is the version of the platform
    - exec_env is exec-env/$AWS_EXECUTION_ENV

    If ``user_agent_extra`` is not empty, then this value will be
    appended to the end of the user agent string.
    """
    base = '%s/%s Python/%s %s/%s' % (self.user_agent_name,
                                      self.user_agent_version,
                                      platform.python_version(),
                                      platform.system(),
                                      platform.release())
    # Advertise the CRT version only when the awscrt extension is present.
    if HAS_CRT:
        base += ' awscrt/%s' % self._get_crt_version()
    if os.environ.get('AWS_EXECUTION_ENV') is not None:
        base += ' exec-env/%s' % os.environ.get('AWS_EXECUTION_ENV')
    if self.user_agent_extra:
        base += ' %s' % self.user_agent_extra
    return base
def get_data(self, data_path):
    """Load and return the data stored at *data_path*.

    :type data_path: str
    :param data_path: The path to the data you wish to retrieve.
    """
    loader = self.get_component('data_loader')
    return loader.load_data(data_path)
def get_service_model(self, service_name, api_version=None):
    """Build the service model object for a service.

    :type service_name: string
    :param service_name: The service name

    :type api_version: string
    :param api_version: The API version of the service.  If none is
        provided, then the latest API version will be used.

    :rtype: L{botocore.model.ServiceModel}
    :return: The botocore service model for the service.
    """
    description = self.get_service_data(service_name, api_version)
    return ServiceModel(description, service_name=service_name)
def get_waiter_model(self, service_name, api_version=None):
    """Load the 'waiters-2' model for *service_name* and wrap it in a
    ``WaiterModel``."""
    waiter_config = self.get_component('data_loader').load_service_model(
        service_name, 'waiters-2', api_version)
    return waiter.WaiterModel(waiter_config)
def get_paginator_model(self, service_name, api_version=None):
    """Load the 'paginators-1' model for *service_name* and wrap it in
    a ``PaginatorModel``."""
    paginator_config = self.get_component('data_loader').load_service_model(
        service_name, 'paginators-1', api_version)
    return paginate.PaginatorModel(paginator_config)
def get_service_data(self, service_name, api_version=None):
    """
    Retrieve the fully merged data associated with a service.
    """
    loader = self.get_component('data_loader')
    service_data = loader.load_service_model(
        service_name,
        type_name='service-2',
        api_version=api_version
    )
    # Emit under the service's event alias so renamed services keep
    # triggering handlers registered against the legacy name.
    service_id = EVENT_ALIASES.get(service_name, service_name)
    self._events.emit('service-data-loaded.%s' % service_id,
                      service_data=service_data,
                      service_name=service_name, session=self)
    return service_data
def get_available_services(self):
    """Return a list of names of available services."""
    loader = self.get_component('data_loader')
    return loader.list_available_services(type_name='service-2')
def set_debug_logger(self, logger_name='botocore'):
    """Convenience method: route full DEBUG output for *logger_name*
    to the console."""
    self.set_stream_logger(logger_name, logging.DEBUG)
def set_stream_logger(self, logger_name, log_level, stream=None,
                      format_string=None):
    """Attach a stream handler to *logger_name*.

    :type logger_name: str
    :param logger_name: The name of the logger to configure.

    :type log_level: str
    :param log_level: Level for the new handler; any value supported by
        ``Handler.setLevel``.

    :type stream: file
    :param stream: A file-like object to log to.  If none is provided
        then ``sys.stderr`` will be used.

    :type format_string: str
    :param format_string: Format string for the handler's formatter.
        Defaults to ``self.LOG_FORMAT``.
    """
    target = logging.getLogger(logger_name)
    # The logger itself is left wide open; filtering happens on the
    # handler, so several handlers can use different levels.
    target.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(stream)
    handler.setLevel(log_level)
    if format_string is None:
        format_string = self.LOG_FORMAT
    handler.setFormatter(logging.Formatter(format_string))
    target.addHandler(handler)
def set_file_logger(self, log_level, path, logger_name='botocore'):
    """Attach a file handler to *logger_name*.

    :type log_level: int
    :param log_level: A log level as specified in the `logging` module.

    :type path: string
    :param path: Path to the log file.  The file will be created if it
        doesn't already exist.
    """
    target = logging.getLogger(logger_name)
    # Keep the logger itself at DEBUG; the handler level filters.
    target.setLevel(logging.DEBUG)
    handler = logging.FileHandler(path)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(self.LOG_FORMAT))
    target.addHandler(handler)
def register(self, event_name, handler, unique_id=None,
             unique_id_uses_count=False):
    """Register *handler* for *event_name* on the session's event system.

    :type event_name: str
    :param event_name: The name of the event.

    :type handler: callable
    :param handler: Callback invoked when the event is emitted.  It must
        be callable and accept ``**kwargs``; otherwise a ``ValueError``
        is raised.

    :type unique_id: str
    :param unique_id: Optional identifier for this registration.  A
        unique_id may be used only once per session (unless it is
        unregistered), which prevents a handler being registered twice.

    :param unique_id_uses_count: When True, a count is maintained per
        ``unique_id`` and the handler is fully unregistered only after a
        matching number of ``unregister`` calls.  All ``register`` calls
        for a given ``unique_id`` must agree on this flag.

    :raises ValueError: If ``unique_id`` is reused with a different
        ``unique_id_uses_count`` than its first registration.
    """
    self._events.register(
        event_name, handler, unique_id,
        unique_id_uses_count=unique_id_uses_count)
def unregister(self, event_name, handler=None, unique_id=None,
               unique_id_uses_count=False):
    """Remove a handler registration for *event_name*.

    :type event_name: str
    :param event_name: The name of the event.

    :type handler: callable
    :param handler: The callback to unregister.

    :type unique_id: str
    :param unique_id: Identifier of the registration to remove.  Either
        ``handler`` or ``unique_id`` is sufficient; both are not needed.

    :param unique_id_uses_count: Must match the value used when the
        ``unique_id`` was registered; the handler is fully removed only
        after every counted registration has been unregistered.

    :raises ValueError: If ``unique_id_uses_count`` differs from the
        value declared by the first ``register`` call for ``unique_id``.
    """
    self._events.unregister(
        event_name, handler=handler, unique_id=unique_id,
        unique_id_uses_count=unique_id_uses_count)
def emit(self, event_name, **kwargs):
    """Emit *event_name* on the session's event system, forwarding
    ``kwargs``, and return whatever the underlying emitter returns."""
    return self._events.emit(event_name, **kwargs)
def emit_first_non_none_response(self, event_name, **kwargs):
    """Emit *event_name* and reduce the handler responses through
    ``first_non_none_response``."""
    all_responses = self._events.emit(event_name, **kwargs)
    return first_non_none_response(all_responses)
def get_component(self, name):
    """Look up *name* in the public component locator.

    For two historically-public components that are now internal, fall
    back to the internal locator and emit a ``DeprecationWarning``.
    """
    try:
        return self._components.get_component(name)
    except ValueError:
        if name not in ('endpoint_resolver', 'exceptions_factory'):
            raise
        warnings.warn(
            'Fetching the %s component with the get_component() '
            'method is deprecated as the component has always been '
            'considered an internal interface of botocore' % name,
            DeprecationWarning)
        return self._internal_components.get_component(name)
def _get_internal_component(self, name):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.get_component(name)
def _register_internal_component(self, name, component):
# While this method may be called by botocore classes outside of the
# Session, this method should **never** be used by a class that lives
# outside of botocore.
return self._internal_components.register_component(name, component)
def register_component(self, name, component):
    """Register *component* under *name* on the session's public
    component locator."""
    self._components.register_component(name, component)
def lazy_register_component(self, name, component):
    """Register a zero-argument factory for *name*; the locator invokes
    it on first lookup."""
    self._components.lazy_register_component(name, component)
def create_client(self, service_name, region_name=None, api_version=None,
                  use_ssl=True, verify=None, endpoint_url=None,
                  aws_access_key_id=None, aws_secret_access_key=None,
                  aws_session_token=None, config=None):
    """Create a botocore client.

    :type service_name: string
    :param service_name: The name of the service for which a client will
        be created.  You can use the ``Session.get_available_services()``
        method to get a list of all available service names.

    :type region_name: string
    :param region_name: The name of the region associated with the client.
        A client is associated with a single region.

    :type api_version: string
    :param api_version: The API version to use.  By default, botocore will
        use the latest API version when creating a client.  You only need
        to specify this parameter if you want to use a previous API version
        of the client.

    :type use_ssl: boolean
    :param use_ssl: Whether or not to use SSL.  By default, SSL is used.
        Note that not all services support non-ssl connections.

    :type verify: boolean/string
    :param verify: Whether or not to verify SSL certificates.
        By default SSL certificates are verified.  You can provide the
        following values:

        * False - do not validate SSL certificates.  SSL will still be
          used (unless use_ssl is False), but SSL certificates
          will not be verified.
        * path/to/cert/bundle.pem - A filename of the CA cert bundle to
          uses.  You can specify this argument if you want to use a
          different CA cert bundle than the one used by botocore.

    :type endpoint_url: string
    :param endpoint_url: The complete URL to use for the constructed
        client.  Normally, botocore will automatically construct the
        appropriate URL to use when communicating with a service.  You can
        specify a complete URL (including the "http/https" scheme) to
        override this behavior.  If this value is provided, then
        ``use_ssl`` is ignored.

    :type aws_access_key_id: string
    :param aws_access_key_id: The access key to use when creating
        the client.  This is entirely optional, and if not provided,
        the credentials configured for the session will automatically
        be used.  You only need to provide this argument if you want
        to override the credentials used for this specific client.

    :type aws_secret_access_key: string
    :param aws_secret_access_key: The secret key to use when creating
        the client.  Same semantics as aws_access_key_id above.

    :type aws_session_token: string
    :param aws_session_token: The session token to use when creating
        the client.  Same semantics as aws_access_key_id above.

    :type config: botocore.client.Config
    :param config: Advanced client configuration options.  If a value
        is specified in the client config, its value will take precedence
        over environment variables and configuration values, but not over
        a value passed explicitly to the method.  If a default config
        object is set on the session, the config object used when creating
        the client will be the result of calling ``merge()`` on the
        default config with the config provided to this call.

    :rtype: botocore.client.BaseClient
    :return: A botocore client instance
    """
    default_client_config = self.get_default_client_config()
    # If a config is provided and a default config is set, then
    # use the config resulting from merging the two.
    if config is not None and default_client_config is not None:
        config = default_client_config.merge(config)
    # If a config was not provided then use the default
    # client config from the session
    elif default_client_config is not None:
        config = default_client_config
    region_name = self._resolve_region_name(region_name, config)
    # Figure out the verify value based on the various
    # configuration options.
    if verify is None:
        verify = self.get_config_variable('ca_bundle')
    if api_version is None:
        api_version = self.get_config_variable('api_versions').get(
            service_name, None)
    loader = self.get_component('data_loader')
    event_emitter = self.get_component('event_emitter')
    response_parser_factory = self.get_component(
        'response_parser_factory')
    # Credential resolution: UNSIGNED configs get no credentials at all;
    # a full explicit key pair wins over session credentials; a partial
    # explicit key pair is an error.
    if config is not None and config.signature_version is UNSIGNED:
        credentials = None
    elif aws_access_key_id is not None and aws_secret_access_key is not None:
        credentials = botocore.credentials.Credentials(
            access_key=aws_access_key_id,
            secret_key=aws_secret_access_key,
            token=aws_session_token)
    elif self._missing_cred_vars(aws_access_key_id,
                                 aws_secret_access_key):
        raise PartialCredentialsError(
            provider='explicit',
            cred_var=self._missing_cred_vars(aws_access_key_id,
                                            aws_secret_access_key))
    else:
        credentials = self.get_credentials()
    endpoint_resolver = self._get_internal_component('endpoint_resolver')
    exceptions_factory = self._get_internal_component('exceptions_factory')
    config_store = self.get_component('config_store')
    client_creator = botocore.client.ClientCreator(
        loader, endpoint_resolver, self.user_agent(), event_emitter,
        retryhandler, translate, response_parser_factory,
        exceptions_factory, config_store)
    client = client_creator.create_client(
        service_name=service_name, region_name=region_name,
        is_secure=use_ssl, endpoint_url=endpoint_url, verify=verify,
        credentials=credentials, scoped_config=self.get_scoped_config(),
        client_config=config, api_version=api_version)
    monitor = self._get_internal_component('monitor')
    if monitor is not None:
        # Client-side monitoring is opt-in; when enabled, hook the
        # monitor into the new client's event system.
        monitor.register(client.meta.events)
    return client
def _resolve_region_name(self, region_name, config):
    # Determine the effective region: an explicit argument wins, then
    # the client config, then the session's 'region' config variable.
    resolved = region_name
    if resolved is None:
        if config and config.region_name is not None:
            resolved = config.region_name
        else:
            resolved = self.get_config_variable('region')
    validate_region_name(resolved)
    # For any client that we create in retrieving credentials
    # we want to create it using the same region as specified in
    # creating this client. It is important to note though that the
    # credentials client is only created once per session. So if a new
    # client is created with a different region, its credential resolver
    # will use the region of the first client. However, that is not an
    # issue as of now because the credential resolver uses only STS and
    # the credentials returned at regional endpoints are valid across
    # all regions in the partition.
    self._last_client_region_used = resolved
    return resolved
def _missing_cred_vars(self, access_key, secret_key):
if access_key is not None and secret_key is None:
return 'aws_secret_access_key'
if secret_key is not None and access_key is None:
return 'aws_access_key_id'
return None
def get_available_partitions(self):
    """List the partitions found on disk.

    :rtype: list
    :return: A list of partition names (e.g., ``["aws", "aws-cn"]``).
    """
    resolver = self._get_internal_component('endpoint_resolver')
    return resolver.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
                          allow_non_regional=False):
    """List the region and endpoint names of a particular partition.

    :type service_name: string
    :param service_name: Name of a service to list endpoints for
        (e.g., s3).  Accepts a service name (e.g., "elb") or an endpoint
        prefix (e.g., "elasticloadbalancing").

    :type partition_name: string
    :param partition_name: Name of the partition to limit endpoints to
        (e.g., aws for public AWS, aws-cn for AWS China, aws-us-gov for
        AWS GovCloud (US), etc.).

    :type allow_non_regional: bool
    :param allow_non_regional: Set to True to also include endpoints
        that are not regional (e.g., s3-external-1,
        fips-us-gov-west-1, etc).

    :return: A list of endpoint names (e.g., ``["us-east-1"]``).
    """
    resolver = self._get_internal_component('endpoint_resolver')
    endpoint_names = []
    try:
        service_data = self.get_service_data(service_name)
        prefix = service_data['metadata'].get(
            'endpointPrefix', service_name)
        endpoint_names = resolver.get_available_endpoints(
            prefix, partition_name, allow_non_regional)
    except UnknownServiceError:
        # Unknown services simply have no endpoints.
        pass
    return endpoint_names
class ComponentLocator(object):
    """Service locator for session components.

    Components are registered either eagerly (an instance) or lazily (a
    zero-argument factory invoked on first lookup, then cached).
    """

    def __init__(self):
        self._components = {}
        self._deferred = {}

    def get_component(self, name):
        if name in self._deferred:
            factory = self._deferred[name]
            self._components[name] = factory()
            # Drop the factory only after it has run successfully and
            # its result is stored, so a failing factory can be retried.
            del self._deferred[name]
        try:
            return self._components[name]
        except KeyError:
            raise ValueError("Unknown component: %s" % name)

    def register_component(self, name, component):
        # An eager registration supersedes any pending lazy factory.
        self._components[name] = component
        self._deferred.pop(name, None)

    def lazy_register_component(self, name, no_arg_factory):
        # A lazy registration supersedes any existing instance.
        self._deferred[name] = no_arg_factory
        self._components.pop(name, None)
class SessionVarDict(MutableMapping):
    """Mapping of session variables that mirrors every write into the
    session's ``config_store`` component for backwards compatibility."""

    def __init__(self, session, session_vars):
        self._session = session
        self._store = copy.copy(session_vars)

    def __getitem__(self, key):
        return self._store[key]

    def __setitem__(self, key, value):
        self._store[key] = value
        self._update_config_store_from_session_vars(key, value)

    def __delitem__(self, key):
        del self._store[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def _update_config_store_from_session_vars(self, logical_name,
                                               config_options):
        # Backwards-compatibility shim.  The preferred way to change
        # configuration logic is to fetch the config_store component and
        # register a custom provider; this translates the legacy
        # (config_name, env_vars, default, typecast) tuple into exactly
        # such a provider chain.
        config_name, env_vars, default, typecast = config_options
        chain_builder = ConfigChainFactory(session=self._session)
        chain = chain_builder.create_config_chain(
            instance_name=logical_name,
            env_var_names=env_vars,
            config_property_names=config_name,
            default=default,
            conversion_func=typecast,
        )
        self._session.get_component('config_store').set_config_provider(
            logical_name, chain)
class SubsetChainConfigFactory(object):
    """Backwards-compatible builder of filtered configuration chains.

    Wraps :class:`botocore.configprovider.ConfigChainFactory` so that it
    honors the ``methods`` argument of ``get_config_variable``: any
    lookup source whose method is not listed in ``methods`` is omitted
    from the chain it builds.
    """

    def __init__(self, session, methods, environ=None):
        self._factory = ConfigChainFactory(session, environ)
        self._supported_methods = methods

    def create_config_chain(self, instance_name=None, env_var_names=None,
                            config_property_name=None, default=None,
                            conversion_func=None):
        """Build a config chain following the standard botocore pattern,
        omitting providers whose method was not enabled.

        For example, given the methods tuple ``('instance', 'config')``
        the environment provider is left out of the chain, which is how
        the session supports the ``methods`` argument for all default
        botocore config variables.
        """
        supported = self._supported_methods
        # Nulling a source name makes the underlying factory skip that
        # provider entirely.
        if 'instance' not in supported:
            instance_name = None
        if 'env' not in supported:
            env_var_names = None
        if 'config' not in supported:
            config_property_name = None
        return self._factory.create_config_chain(
            instance_name=instance_name,
            env_var_names=env_var_names,
            config_property_names=config_property_name,
            default=default,
            conversion_func=conversion_func,
        )
def get_session(env_vars=None):
    """Create and return a new :class:`Session`.

    :param env_vars: Optional session-variable mapping, forwarded
        verbatim to the ``Session`` constructor.
    """
    return Session(env_vars)
| |
from __future__ import absolute_import, division, print_function
import warnings
import re
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
    # End-to-end check that the ``recwarn`` fixture records warnings
    # emitted inside a test body and exposes them via ``pop()``.
    testdir.makepyfile(
        """
        import warnings
        def test_method(recwarn):
            warnings.warn("hello")
            warn = recwarn.pop()
            assert isinstance(warn.message, UserWarning)
        """
    )
    reprec = testdir.inline_run()
    reprec.assertoutcome(passed=1)
class TestWarningsRecorderChecker(object):
    """Unit tests for the WarningsRecorder helper itself."""

    def test_recording(self):
        rec = WarningsRecorder()
        with rec:
            assert not rec.list
            warnings.warn_explicit("hello", UserWarning, "xyz", 13)
            assert len(rec.list) == 1
            warnings.warn(DeprecationWarning("hello"))
            assert len(rec.list) == 2
            warn = rec.pop()
            assert str(warn.message) == "hello"
            values = rec.list
            rec.clear()
            assert len(rec.list) == 0
            # clear() empties the existing list in place rather than
            # replacing it.
            assert values is rec.list
            # NOTE(review): the string form of pytest.raises is
            # deprecated in later pytest versions; kept as-is here.
            pytest.raises(AssertionError, "rec.pop()")

    def test_typechecking(self):
        # WarningsChecker only accepts warning classes (or tuples of
        # them); anything else raises TypeError.
        from _pytest.recwarn import WarningsChecker
        with pytest.raises(TypeError):
            WarningsChecker(5)
        with pytest.raises(TypeError):
            WarningsChecker(("hi", RuntimeWarning))
        with pytest.raises(TypeError):
            WarningsChecker([DeprecationWarning, RuntimeWarning])

    def test_invalid_enter_exit(self):
        # wrap this test in WarningsRecorder to ensure warning state gets reset
        with WarningsRecorder():
            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                rec.__exit__(None, None, None)  # can't exit before entering
            with pytest.raises(RuntimeError):
                rec = WarningsRecorder()
                with rec:
                    with rec:
                        pass  # can't enter twice
class TestDeprecatedCall(object):
    """test pytest.deprecated_call()"""

    def dep(self, i, j=None):
        # Helper: warns only when i == 0, so tests can exercise both the
        # warning and non-warning paths.
        if i == 0:
            warnings.warn("is deprecated", DeprecationWarning, stacklevel=1)
        return 42

    def dep_explicit(self, i):
        # Same idea as dep(), but via warnings.warn_explicit.
        if i == 0:
            warnings.warn_explicit(
                "dep_explicit", category=DeprecationWarning, filename="hello", lineno=3
            )

    def test_deprecated_call_raises(self):
        with pytest.raises(pytest.fail.Exception, match="No warnings of type"):
            pytest.deprecated_call(self.dep, 3, 5)

    def test_deprecated_call(self):
        pytest.deprecated_call(self.dep, 0, 5)

    def test_deprecated_call_ret(self):
        # deprecated_call(f, ...) forwards f's return value.
        ret = pytest.deprecated_call(self.dep, 0)
        assert ret == 42

    def test_deprecated_call_preserves(self):
        # deprecated_call must restore the global warnings state
        # (registry, filters, and the warn functions) after running.
        onceregistry = warnings.onceregistry.copy()
        filters = warnings.filters[:]
        warn = warnings.warn
        warn_explicit = warnings.warn_explicit
        self.test_deprecated_call_raises()
        self.test_deprecated_call()
        assert onceregistry == warnings.onceregistry
        assert filters == warnings.filters
        assert warn is warnings.warn
        assert warn_explicit is warnings.warn_explicit

    def test_deprecated_explicit_call_raises(self):
        with pytest.raises(pytest.fail.Exception):
            pytest.deprecated_call(self.dep_explicit, 3)

    def test_deprecated_explicit_call(self):
        pytest.deprecated_call(self.dep_explicit, 0)
        pytest.deprecated_call(self.dep_explicit, 0)

    @pytest.mark.parametrize("mode", ["context_manager", "call"])
    def test_deprecated_call_no_warning(self, mode):
        """Ensure deprecated_call() raises the expected failure when its block/function does
        not raise a deprecation warning.
        """

        def f():
            pass

        msg = "No warnings of type (.*DeprecationWarning.*, .*PendingDeprecationWarning.*)"
        with pytest.raises(pytest.fail.Exception, match=msg):
            if mode == "call":
                pytest.deprecated_call(f)
            else:
                with pytest.deprecated_call():
                    f()

    @pytest.mark.parametrize(
        "warning_type", [PendingDeprecationWarning, DeprecationWarning]
    )
    @pytest.mark.parametrize("mode", ["context_manager", "call"])
    @pytest.mark.parametrize("call_f_first", [True, False])
    @pytest.mark.filterwarnings("ignore")
    def test_deprecated_call_modes(self, warning_type, mode, call_f_first):
        """Ensure deprecated_call() captures a deprecation warning as expected inside its
        block/function.
        """

        def f():
            warnings.warn(warning_type("hi"))
            return 10

        # ensure deprecated_call() can capture the warning even if it has already been triggered
        if call_f_first:
            assert f() == 10
        if mode == "call":
            assert pytest.deprecated_call(f) == 10
        else:
            with pytest.deprecated_call():
                assert f() == 10

    @pytest.mark.parametrize("mode", ["context_manager", "call"])
    def test_deprecated_call_exception_is_raised(self, mode):
        """If the block of the code being tested by deprecated_call() raises an exception,
        it must raise the exception undisturbed.
        """

        def f():
            raise ValueError("some exception")

        with pytest.raises(ValueError, match="some exception"):
            if mode == "call":
                pytest.deprecated_call(f)
            else:
                with pytest.deprecated_call():
                    f()

    def test_deprecated_call_specificity(self):
        # Non-deprecation warnings must NOT satisfy deprecated_call().
        other_warnings = [
            Warning,
            UserWarning,
            SyntaxWarning,
            RuntimeWarning,
            FutureWarning,
            ImportWarning,
            UnicodeWarning,
        ]
        for warning in other_warnings:

            def f():
                warnings.warn(warning("hi"))

            with pytest.raises(pytest.fail.Exception):
                pytest.deprecated_call(f)
            with pytest.raises(pytest.fail.Exception):
                with pytest.deprecated_call():
                    f()

    def test_deprecated_call_supports_match(self):
        with pytest.deprecated_call(match=r"must be \d+$"):
            warnings.warn("value must be 42", DeprecationWarning)
        with pytest.raises(pytest.fail.Exception):
            with pytest.deprecated_call(match=r"must be \d+$"):
                warnings.warn("this is not here", DeprecationWarning)
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(
SyntaxWarning, lambda msg: warnings.warn(msg, SyntaxWarning), "syntax"
)
def test_warning_tuple(self):
pytest.warns(
(RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w1", RuntimeWarning)
)
pytest.warns(
(RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w2", SyntaxWarning)
)
pytest.raises(
pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn("w3", UserWarning),
),
)
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
excinfo.match(
r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[UserWarning\('user',?\)\]."
)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
excinfo.match(
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\]."
)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(UserWarning):
pass
excinfo.match(
r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. "
r"The list of emitted warnings is: \[\]."
)
warning_classes = (UserWarning, FutureWarning)
with pytest.raises(pytest.fail.Exception) as excinfo:
with pytest.warns(warning_classes) as warninfo:
warnings.warn("runtime", RuntimeWarning)
warnings.warn("import", ImportWarning)
message_template = (
"DID NOT WARN. No warnings of type {0} was emitted. "
"The list of emitted warnings is: {1}."
)
excinfo.match(
re.escape(
message_template.format(
warning_classes, [each.message for each in warninfo]
)
)
)
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_record_by_subclass(self):
with pytest.warns(Warning) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
class MyUserWarning(UserWarning):
pass
class MyRuntimeWarning(RuntimeWarning):
pass
with pytest.warns((UserWarning, RuntimeWarning)) as record:
warnings.warn("user", MyUserWarning)
warnings.warn("runtime", MyRuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
    """If a test is run again, the warning should still be raised"""
    # Regression test: each parametrized re-run of the same test must see the
    # warning again (guards against __warningregistry__ state leaking between
    # runs and suppressing the second emission).
    testdir.makepyfile(
        """
        import pytest
        import warnings
        @pytest.mark.parametrize('run', [1, 2])
        def test(run):
            with pytest.warns(RuntimeWarning):
                warnings.warn("runtime", RuntimeWarning)
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(["*2 passed in*"])
def test_match_regex(self):
    """The match= argument filters the captured warnings by message regex."""
    pattern = r"must be \d+$"
    # Matching category and message: succeeds.
    with pytest.warns(UserWarning, match=pattern):
        warnings.warn("value must be 42", UserWarning)
    # Right category, message does not match: pytest.warns fails.
    with pytest.raises(pytest.fail.Exception):
        with pytest.warns(UserWarning, match=pattern):
            warnings.warn("this is not here", UserWarning)
    # Message matches but the category is wrong: still fails.
    with pytest.raises(pytest.fail.Exception):
        with pytest.warns(FutureWarning, match=pattern):
            warnings.warn("value must be 42", UserWarning)
def test_one_from_multiple_warns(self):
    """pytest.warns passes if any one of several emitted warnings matches."""
    with pytest.warns(UserWarning, match=r"aaa"):
        for message in ("cccccccccc", "bbbbbbbbbb", "aaaaaaaaaa"):
            warnings.warn(message, UserWarning)
def test_none_of_multiple_warns(self):
    """pytest.warns fails when no emitted warning matches the pattern."""
    with pytest.raises(pytest.fail.Exception):
        with pytest.warns(UserWarning, match=r"aaa"):
            for message in ("bbbbbbbbbb", "cccccccccc"):
                warnings.warn(message, UserWarning)
@pytest.mark.filterwarnings("ignore")
def test_can_capture_previously_warned(self):
    """A warning that was already emitted (and ignored) is still captured on
    later calls; the per-module warning registry must not suppress it."""
    def emit():
        warnings.warn(UserWarning("ohai"))
        return 10

    assert emit() == 10
    # pytest.warns in callable form returns the callable's result.
    assert pytest.warns(UserWarning, emit) == 10
    assert pytest.warns(UserWarning, emit) == 10
| |
"""
Cross Site Request Forgery Middleware.
This module provides a middleware that implements protection
against request forgeries from other sites.
"""
import itertools
import re
import random
from django.conf import settings
from django.core.urlresolvers import get_callable
from django.utils.cache import patch_vary_headers
from django.utils.hashcompat import md5_constructor
from django.utils.log import getLogger
from django.utils.safestring import mark_safe
from django.utils.crypto import constant_time_compare
# Matches an opening <form> tag whose method attribute is POST (any quoting
# style, case-insensitive); used by CsrfResponseMiddleware to inject tokens.
_POST_FORM_RE = \
    re.compile(r'(<form\W[^>]*\bmethod\s*=\s*(\'|"|)POST(\'|"|)\b[^>]*>)', re.IGNORECASE)
# Content types that may contain HTML forms and are safe to rewrite.
_HTML_TYPES = ('text/html', 'application/xhtml+xml')

logger = getLogger('django.request')

# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
    randrange = random.SystemRandom().randrange
else:
    randrange = random.randrange

# Upper bound (exclusive) for the random part of a new CSRF key.
_MAX_CSRF_KEY = 18446744073709551616L # 2 << 63

# Human-readable rejection reasons, passed to the failure view and logged.
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = "Referer checking failed - %s does not match %s."
REASON_NO_COOKIE = "No CSRF or session cookie."
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
def _get_failure_view():
    """
    Returns the view to be used for CSRF rejections
    (settings.CSRF_FAILURE_VIEW, resolved through get_callable).
    """
    return get_callable(settings.CSRF_FAILURE_VIEW)
def _get_new_csrf_key():
    # New token: MD5 of a random 64-bit value mixed with SECRET_KEY, so the
    # token is unpredictable even if the fallback RNG is weak.
    return md5_constructor("%s%s"
        % (randrange(0, _MAX_CSRF_KEY), settings.SECRET_KEY)).hexdigest()
def _make_legacy_session_token(session_id):
    # Django 1.1-style token: MD5 of SECRET_KEY + session id. Used as a
    # fallback for forms issued before the cookie-based scheme existed.
    return md5_constructor(settings.SECRET_KEY + session_id).hexdigest()
def get_token(request):
    """
    Returns the CSRF token required for a POST form. The token is an
    alphanumeric value.

    A side effect of calling this function is to make the csrf_protect
    decorator and the CsrfViewMiddleware add a CSRF cookie and a
    'Vary: Cookie' header to the outgoing response. For this reason, you
    may need to use this function lazily, as is done by the csrf context
    processor.
    """
    # Mark the cookie as used so the response side knows to (re)send it.
    request.META["CSRF_COOKIE_USED"] = True
    return request.META.get("CSRF_COOKIE")
def _sanitize_token(token):
    # Allow only alphanum, and ensure we return a 'str' for the sake of the post
    # processing middleware.
    # NOTE(review): .decode('ascii', 'ignore') assumes a Python 2 byte string;
    # on Python 3 a str cookie value would raise AttributeError here.
    token = re.sub('[^a-zA-Z0-9]', '', str(token.decode('ascii', 'ignore')))
    if token == "":
        # In case the cookie has been truncated to nothing at some point.
        # Re-issue a fresh key rather than accepting an empty token.
        return _get_new_csrf_key()
    else:
        return token
class CsrfViewMiddleware(object):
    """
    Middleware that requires a present and correct csrfmiddlewaretoken
    for POST requests that have a CSRF cookie, and sets an outgoing
    CSRF cookie.

    This middleware should be used in conjunction with the csrf_token template
    tag.
    """
    def process_view(self, request, callback, callback_args, callback_kwargs):
        """Validate the CSRF token on POST requests.

        Returns None to accept the request, or the configured failure view's
        response (HTTP 403) to reject it.
        """
        if getattr(request, 'csrf_processing_done', False):
            return None

        reject = lambda s: _get_failure_view()(request, reason=s)

        def accept():
            # Avoid checking the request twice by adding a custom attribute to
            # request. This will be relevant when both decorator and middleware
            # are used.
            request.csrf_processing_done = True
            return None

        # If the user doesn't have a CSRF cookie, generate one and store it in the
        # request, so it's available to the view. We'll store it in a cookie when
        # we reach the response.
        try:
            # In case of cookies from untrusted sources, we strip anything
            # dangerous at this point, so that the cookie + token will have the
            # same, sanitized value.
            request.META["CSRF_COOKIE"] = _sanitize_token(request.COOKIES[settings.CSRF_COOKIE_NAME])
            cookie_is_new = False
        except KeyError:
            # No cookie, so create one. This will be sent with the next
            # response.
            request.META["CSRF_COOKIE"] = _get_new_csrf_key()
            # Set a flag to allow us to fall back and allow the session id in
            # place of a CSRF cookie for this request only.
            cookie_is_new = True

        # Wait until request.META["CSRF_COOKIE"] has been manipulated before
        # bailing out, so that get_token still works
        if getattr(callback, 'csrf_exempt', False):
            return None

        if request.method == 'POST':
            if getattr(request, '_dont_enforce_csrf_checks', False):
                # Mechanism to turn off CSRF checks for test suite. It comes after
                # the creation of CSRF cookies, so that everything else continues to
                # work exactly the same (e.g. cookies are sent etc), but before the
                # any branches that call reject()
                return accept()

            if request.is_ajax():
                # .is_ajax() is based on the presence of X-Requested-With. In
                # the context of a browser, this can only be sent if using
                # XmlHttpRequest. Browsers implement careful policies for
                # XmlHttpRequest:
                #
                # * Normally, only same-domain requests are allowed.
                #
                # * Some browsers (e.g. Firefox 3.5 and later) relax this
                # carefully:
                #
                # * if it is a 'simple' GET or POST request (which can
                # include no custom headers), it is allowed to be cross
                # domain. These requests will not be recognized as AJAX.
                #
                # * if a 'preflight' check with the server confirms that the
                # server is expecting and allows the request, cross domain
                # requests even with custom headers are allowed. These
                # requests will be recognized as AJAX, but can only get
                # through when the developer has specifically opted in to
                # allowing the cross-domain POST request.
                #
                # So in all cases, it is safe to allow these requests through.
                return accept()

            if request.is_secure():
                # Suppose user visits http://example.com/
                # An active network attacker,(man-in-the-middle, MITM) sends a
                # POST form which targets https://example.com/detonate-bomb/ and
                # submits it via javascript.
                #
                # The attacker will need to provide a CSRF cookie and token, but
                # that is no problem for a MITM and the session independent
                # nonce we are using. So the MITM can circumvent the CSRF
                # protection. This is true for any HTTP connection, but anyone
                # using HTTPS expects better! For this reason, for
                # https://example.com/ we need additional protection that treats
                # http://example.com/ as completely untrusted. Under HTTPS,
                # Barth et al. found that the Referer header is missing for
                # same-domain requests in only about 0.2% of cases or less, so
                # we can use strict Referer checking.
                referer = request.META.get('HTTP_REFERER')
                if referer is None:
                    # BUG FIX: previously logged REASON_NO_COOKIE here even
                    # though the request is rejected for a missing Referer;
                    # log the same reason we reject with.
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_REFERER, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return reject(REASON_NO_REFERER)

                # The following check ensures that the referer is HTTPS,
                # the domains match and the ports match - the same origin policy.
                good_referer = 'https://%s/' % request.get_host()
                if not referer.startswith(good_referer):
                    reason = REASON_BAD_REFERER % (referer, good_referer)
                    logger.warning('Forbidden (%s): %s' % (reason, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return reject(reason)

            # If the user didn't already have a CSRF cookie, then fall back to
            # the Django 1.1 method (hash of session ID), so a request is not
            # rejected if the form was sent to the user before upgrading to the
            # Django 1.2 method (session independent nonce)
            if cookie_is_new:
                try:
                    session_id = request.COOKIES[settings.SESSION_COOKIE_NAME]
                    csrf_token = _make_legacy_session_token(session_id)
                except KeyError:
                    # No CSRF cookie and no session cookie. For POST requests,
                    # we insist on a CSRF cookie, and in this way we can avoid
                    # all CSRF attacks, including login CSRF.
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_COOKIE, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return reject(REASON_NO_COOKIE)
            else:
                csrf_token = request.META["CSRF_COOKIE"]

            # check incoming token
            request_csrf_token = request.POST.get('csrfmiddlewaretoken', '')
            # constant_time_compare avoids leaking token contents via timing.
            if not constant_time_compare(request_csrf_token, csrf_token):
                if cookie_is_new:
                    # probably a problem setting the CSRF cookie
                    logger.warning('Forbidden (%s): %s' % (REASON_NO_CSRF_COOKIE, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return reject(REASON_NO_CSRF_COOKIE)
                else:
                    logger.warning('Forbidden (%s): %s' % (REASON_BAD_TOKEN, request.path),
                        extra={
                            'status_code': 403,
                            'request': request,
                        }
                    )
                    return reject(REASON_BAD_TOKEN)

        return accept()

    def process_response(self, request, response):
        """Send/refresh the CSRF cookie and mark the response as Vary: Cookie
        when the token was used during request processing."""
        if getattr(response, 'csrf_processing_done', False):
            return response

        # If CSRF_COOKIE is unset, then CsrfViewMiddleware.process_view was
        # never called, probaby because a request middleware returned a response
        # (for example, contrib.auth redirecting to a login page).
        if request.META.get("CSRF_COOKIE") is None:
            return response

        if not request.META.get("CSRF_COOKIE_USED", False):
            return response

        # Set the CSRF cookie even if it's already set, so we renew the expiry timer.
        response.set_cookie(settings.CSRF_COOKIE_NAME,
                request.META["CSRF_COOKIE"], max_age = 60 * 60 * 24 * 7 * 52,
                domain=settings.CSRF_COOKIE_DOMAIN)
        # Content varies with the CSRF cookie, so set the Vary header.
        patch_vary_headers(response, ('Cookie',))
        response.csrf_processing_done = True
        return response
class CsrfResponseMiddleware(object):
    """
    DEPRECATED
    Middleware that post-processes a response to add a csrfmiddlewaretoken.

    This exists for backwards compatibility and as an interim measure until
    applications are converted to using the csrf_token template tag
    instead. It will be removed in Django 1.4.
    """
    def __init__(self):
        import warnings
        warnings.warn(
            "CsrfResponseMiddleware and CsrfMiddleware are deprecated; use CsrfViewMiddleware and the template tag instead (see CSRF documentation).",
            DeprecationWarning
        )

    def process_response(self, request, response):
        """Inject a hidden csrfmiddlewaretoken <input> into every POST form of
        an HTML response (unless the response is marked csrf_exempt)."""
        if getattr(response, 'csrf_exempt', False):
            return response

        if response['Content-Type'].split(';')[0] in _HTML_TYPES:
            csrf_token = get_token(request)
            # If csrf_token is None, we have no token for this request, which probably
            # means that this is a response from a request middleware.
            if csrf_token is None:
                return response

            # ensure we don't add the 'id' attribute twice (HTML validity)
            # The first form gets the id attribute; later forms get ''.
            # NOTE(review): idattributes.next() is Python 2 iterator protocol.
            idattributes = itertools.chain(("id='csrfmiddlewaretoken'",),
                                            itertools.repeat(''))

            def add_csrf_field(match):
                """Returns the matched <form> tag plus the added <input> element"""
                return mark_safe(match.group() + "<div style='display:none;'>" + \
                "<input type='hidden' " + idattributes.next() + \
                " name='csrfmiddlewaretoken' value='" + csrf_token + \
                "' /></div>")

            # Modify any POST forms
            response.content, n = _POST_FORM_RE.subn(add_csrf_field, response.content)
            if n > 0:
                # Content varies with the CSRF cookie, so set the Vary header.
                patch_vary_headers(response, ('Cookie',))

                # Since the content has been modified, any Etag will now be
                # incorrect. We could recalculate, but only if we assume that
                # the Etag was set by CommonMiddleware. The safest thing is just
                # to delete. See bug #9163
                del response['ETag']
        return response
class CsrfMiddleware(object):
    """
    Django middleware that adds protection against Cross Site
    Request Forgeries by adding hidden form fields to POST forms and
    checking requests for the correct value.

    Composes CsrfViewMiddleware (request-side token checking) and
    CsrfResponseMiddleware (response-side form rewriting), which can be used
    independently. It is recommended to use only CsrfViewMiddleware and the
    csrf_token template tag in templates for inserting the token.
    """
    # We can't just inherit from CsrfViewMiddleware and CsrfResponseMiddleware
    # because both have process_response methods.
    def __init__(self):
        self.response_middleware = CsrfResponseMiddleware()
        self.view_middleware = CsrfViewMiddleware()

    def process_response(self, request, resp):
        # The response post-processing must run first, because it calls
        # get_token(), which sets the flag telling
        # CsrfViewMiddleware.process_response that the CSRF cookie needs
        # to be sent.
        intermediate = self.response_middleware.process_response(request, resp)
        return self.view_middleware.process_response(request, intermediate)

    def process_view(self, request, callback, callback_args, callback_kwargs):
        # Token validation is delegated entirely to the view middleware.
        return self.view_middleware.process_view(
            request, callback, callback_args, callback_kwargs)
| |
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
import logging
from androguard.decompiler.dad.opcode_ins import INSTRUCTION_SET
from androguard.decompiler.dad.instruction import Variable
from androguard.decompiler.dad.node import Node
logger = logging.getLogger('dad.basic_blocks')
class BasicBlock(Node):
    """A decompiler basic block: a straight-line list of instructions, with an
    instruction-number range assigned by number_ins()."""
    def __init__(self, name, block_ins):
        super(BasicBlock, self).__init__(name)
        self.ins = block_ins    # instructions belonging to this block
        self.ins_range = None   # [first, last) instruction numbers, set by number_ins()
        self.loc_ins = None     # cached (location, instruction) pairs

    def get_ins(self):
        """Return the block's instruction list."""
        return self.ins

    def get_loc_with_ins(self):
        """Return (and cache) the (location, instruction) pairs of the block.
        NOTE(review): relies on Python 2 zip() returning a list —
        remove_ins() calls .remove() on the cached value."""
        if self.loc_ins is None:
            self.loc_ins = zip(range(*self.ins_range), self.ins)
        return self.loc_ins

    def remove_ins(self, loc, ins):
        """Delete an instruction and its cached location pair."""
        self.ins.remove(ins)
        self.loc_ins.remove((loc, ins))

    def add_ins(self, new_ins_list):
        """Append instructions to the block and grow its number range."""
        for new_ins in new_ins_list:
            self.ins.append(new_ins)
        self.ins_range[1] += len(new_ins_list)

    def number_ins(self, num):
        """Assign instruction numbers [num, num + len(ins)) to this block;
        invalidates the location cache and returns the next free number."""
        last_ins_num = num + len(self.ins)
        self.ins_range = [num, last_ins_num]
        self.loc_ins = None
        return last_ins_num
class StatementBlock(BasicBlock):
    """Basic block of ordinary straight-line statements (no branch)."""
    def __init__(self, name, block_ins):
        super(StatementBlock, self).__init__(name, block_ins)

    def visit(self, visitor):
        # Visitor dispatch: statement-node handler.
        return visitor.visit_statement_node(self)

    def __str__(self):
        return '%d-Statement(%s)' % (self.num, self.name)
class ReturnBlock(BasicBlock):
    """Basic block ending in a return instruction."""
    def __init__(self, name, block_ins):
        super(ReturnBlock, self).__init__(name, block_ins)

    def visit(self, visitor):
        # Visitor dispatch: return-node handler.
        return visitor.visit_return_node(self)

    def __str__(self):
        return '%d-Return(%s)' % (self.num, self.name)
class ThrowBlock(BasicBlock):
    """Basic block ending in a throw instruction."""
    def __init__(self, name, block_ins):
        super(ThrowBlock, self).__init__(name, block_ins)

    def visit(self, visitor):
        # Visitor dispatch: throw-node handler.
        return visitor.visit_throw_node(self)

    def __str__(self):
        return '%d-Throw(%s)' % (self.num, self.name)
class SwitchBlock(BasicBlock):
    """Basic block ending in a packed/sparse-switch instruction."""
    def __init__(self, name, switch, block_ins):
        super(SwitchBlock, self).__init__(name, block_ins)
        self.switch = switch      # the switch payload (provides case values)
        self.cases = []           # successor nodes, one per case
        self.default = None       # default successor, set by order_cases()
        self.node_to_case = {}    # successor node -> list of case values

    def add_case(self, case):
        """Register a case successor node."""
        self.cases.append(case)

    def visit(self, visitor):
        # Visitor dispatch: switch-node handler.
        return visitor.visit_switch_node(self)

    def copy_from(self, node):
        """Copy block contents plus the switch-specific attributes of *node*."""
        super(SwitchBlock, self).copy_from(node)
        self.cases = node.cases
        self.switch = node.switch

    def update_attribute_with(self, n_map):
        """Remap case successors (and node_to_case keys) through n_map.
        NOTE(review): dict.iteritems() is Python 2 only."""
        super(SwitchBlock, self).update_attribute_with(n_map)
        self.cases = [n_map.get(n, n) for n in self.cases]
        for node1, node2 in n_map.iteritems():
            if node1 in self.node_to_case:
                self.node_to_case[node2] = self.node_to_case.pop(node1)

    def order_cases(self):
        """Associate case values with successor nodes. If there are more
        successors than values, the first successor is the default branch."""
        values = self.switch.get_values()
        if len(values) < len(self.cases):
            self.default = self.cases.pop(0)
        for case, node in zip(values, self.cases):
            self.node_to_case.setdefault(node, []).append(case)

    def __str__(self):
        return '%d-Switch(%s)' % (self.num, self.name)
class CondBlock(BasicBlock):
    """Basic block ending in a two-way conditional branch."""
    def __init__(self, name, block_ins):
        super(CondBlock, self).__init__(name, block_ins)
        self.true = None    # successor taken when the condition holds
        self.false = None   # successor taken otherwise

    def set_true(self, node):
        self.true = node

    def set_false(self, node):
        self.false = node

    def update_attribute_with(self, n_map):
        """Remap the true/false successors through the node map."""
        super(CondBlock, self).update_attribute_with(n_map)
        self.true = n_map.get(self.true, self.true)
        self.false = n_map.get(self.false, self.false)

    def neg(self):
        """Negate the branch condition in place."""
        if len(self.ins) > 1:
            # BUG FIX: `raise ('...')` raised a bare string, which is itself
            # a TypeError on Python 2.6+. Raise a real exception instead.
            raise RuntimeError('Condition should have only 1 instruction !')
        self.ins[0].neg()

    def visit(self, visitor):
        # Visitor dispatch: conditional-node handler.
        return visitor.visit_cond_node(self)

    def visit_cond(self, visitor):
        """Visit only the (single) condition instruction."""
        if len(self.ins) > 1:
            # BUG FIX: see neg() above.
            raise RuntimeError('Condition should have only 1 instruction !')
        return visitor.visit_ins(self.ins[0])

    def __str__(self):
        return '%d-If(%s)' % (self.num, self.name)
class Condition(object):
    """Short-circuit combination of two sub-conditions:
    (cond1 AND/OR cond2), optionally negated as a whole."""
    def __init__(self, cond1, cond2, isand, isnot):
        self.cond1 = cond1
        self.cond2 = cond2
        self.isand = isand   # True => '&&', False => '||'
        self.isnot = isnot   # True => the whole expression is negated

    def neg(self):
        """Negate via De Morgan: flip the connective and both operands."""
        self.isand = not self.isand
        self.cond1.neg()
        self.cond2.neg()

    def get_ins(self):
        """Instructions of both sub-conditions, left side first."""
        return list(self.cond1.get_ins()) + list(self.cond2.get_ins())

    def get_loc_with_ins(self):
        """(location, instruction) pairs of both sub-conditions."""
        return (list(self.cond1.get_loc_with_ins())
                + list(self.cond2.get_loc_with_ins()))

    def visit(self, visitor):
        # Visitor dispatch: short-circuit condition handler.
        return visitor.visit_short_circuit_condition(self.isnot, self.isand,
                                                     self.cond1, self.cond2)

    def __str__(self):
        template = '!%s %s %s' if self.isnot else '%s %s %s'
        connective = ['||', '&&'][self.isand]
        return template % (self.cond1, connective, self.cond2)
class ShortCircuitBlock(CondBlock):
    """Conditional block whose condition is a short-circuit Condition tree
    rather than a single instruction; delegates everything to the tree."""
    def __init__(self, name, cond):
        super(ShortCircuitBlock, self).__init__(name, None)
        self.cond = cond  # a Condition instance

    def get_ins(self):
        return self.cond.get_ins()

    def get_loc_with_ins(self):
        return self.cond.get_loc_with_ins()

    def neg(self):
        return self.cond.neg()

    def visit_cond(self, visitor):
        return self.cond.visit(visitor)

    def __str__(self):
        return '%d-SC(%s)' % (self.num, self.cond)
class LoopBlock(CondBlock):
    """Conditional block that heads a loop; wraps a condition node/tree.

    NOTE(review): __str__ reads self.looptype and self.loop_nodes, which are
    set by a loop-structuring pass elsewhere, not by this class.
    """
    def __init__(self, name, cond):
        super(LoopBlock, self).__init__(name, None)
        self.cond = cond  # the loop's condition (delegated to below)

    def get_ins(self):
        return self.cond.get_ins()

    def neg(self):
        self.cond.neg()

    def get_loc_with_ins(self):
        return self.cond.get_loc_with_ins()

    def visit(self, visitor):
        # Visitor dispatch: loop-node handler.
        return visitor.visit_loop_node(self)

    def visit_cond(self, visitor):
        return self.cond.visit_cond(visitor)

    def update_attribute_with(self, n_map):
        super(LoopBlock, self).update_attribute_with(n_map)
        self.cond.update_attribute_with(n_map)

    def __str__(self):
        # Render according to the loop type decided by the structuring pass.
        if self.looptype.pretest():
            if self.false in self.loop_nodes:
                return '%d-While(!%s)[%s]' % (self.num, self.name, self.cond)
            return '%d-While(%s)[%s]' % (self.num, self.name, self.cond)
        elif self.looptype.posttest():
            return '%d-DoWhile(%s)[%s]' % (self.num, self.name, self.cond)
        elif self.looptype.endless():
            return '%d-WhileTrue(%s)[%s]' % (self.num, self.name, self.cond)
        return '%dWhileNoType(%s)' % (self.num, self.name)
class TryBlock(BasicBlock):
    """Basic block covered by a try region; holds its catch handlers."""
    def __init__(self, name, block_ins):
        super(TryBlock, self).__init__(name, block_ins)
        self.catch = []  # CatchBlock handlers attached to this try

    def add_catch(self, node):
        """Attach a catch handler block."""
        self.catch.append(node)

    def __str__(self):
        return 'Try(%s)' % self.name
class CatchBlock(BasicBlock):
    """Exception-handler block; remembers the caught exception type."""
    def __init__(self, name, block_ins, typeh):
        super(CatchBlock, self).__init__(name, block_ins)
        self.exception_type = typeh  # type handled by this catch

    def __str__(self):
        return 'Catch(%s)' % self.name
class GenInvokeRetName(object):
    """Generator of temporary variables for invoke-instruction return values."""
    def __init__(self):
        self.num = 0     # counter used to build unique 'tmpN' names
        self.ret = None  # most recently produced/assigned return variable

    def new(self):
        """Create, remember and return a fresh 'tmpN' Variable."""
        self.num += 1
        self.ret = Variable('tmp%d' % self.num)
        return self.ret

    def set_to(self, ret):
        """Record an externally chosen return variable."""
        self.ret = ret

    def last(self):
        """Return the most recent return variable (None if there is none)."""
        return self.ret
def build_node_from_block(block, vmap, gen_ret):
    """Translate a raw bytecode basic block into a decompiler node.

    :param block: bytecode block providing instructions and offsets
    :param vmap: variable map passed through to the instruction builders
    :param gen_ret: GenInvokeRetName used to chain invoke/move-result values
    :returns: a ReturnBlock, SwitchBlock, CondBlock, ThrowBlock or
        StatementBlock, chosen from the block's final opcode
    NOTE(review): assumes the block contains at least one instruction;
    otherwise `opcode` is unbound after the loop (same as before).
    """
    ins, lins = None, []
    idx = block.get_start()
    for ins in block.get_instructions():
        opcode = ins.get_op_value()
        if opcode == 0x1f:  # check-cast: no decompiled counterpart, skip
            idx += ins.get_length()
            continue
        _ins = INSTRUCTION_SET.get(ins.get_name().lower())
        if _ins is None:
            # BUG FIX: log the *instruction's* name; `_ins` is None here, so
            # the old `_ins.get_name()` raised AttributeError while logging.
            logger.error('Unknown instruction : %s.', ins.get_name().lower())
        # fill-array-data
        if opcode == 0x26:
            fillaray = block.get_special_ins(idx)
            lins.append(_ins(ins, vmap, fillaray))
        # invoke-kind[/range]
        elif (0x6e <= opcode <= 0x72 or 0x74 <= opcode <= 0x78):
            lins.append(_ins(ins, vmap, gen_ret))
        # filled-new-array[/range]
        elif 0x24 <= opcode <= 0x25:
            lins.append(_ins(ins, vmap, gen_ret.new()))
        # move-result* (consumes the value of the previous invoke)
        elif 0xa <= opcode <= 0xc:
            lins.append(_ins(ins, vmap, gen_ret.last()))
        # monitor-{enter,exit}: synchronization, not represented
        elif 0x1d <= opcode <= 0x1e:
            idx += ins.get_length()
            continue
        else:
            lins.append(_ins(ins, vmap))
        idx += ins.get_length()
    name = block.get_name()
    # Choose the node type from the final opcode of the block.
    # return*
    if 0xe <= opcode <= 0x11:
        node = ReturnBlock(name, lins)
        node.set_return()
    # {packed,sparse}-switch
    elif 0x2b <= opcode <= 0x2c:
        idx -= ins.get_length()
        values = block.get_special_ins(idx)
        node = SwitchBlock(name, values, lins)
        node.set_switch()
    # if-test[z]
    elif 0x32 <= opcode <= 0x3d:
        node = CondBlock(name, lins)
        node.set_cond()
        node.off_last_ins = ins.get_ref_off()
    # throw
    elif opcode == 0x27:
        node = ThrowBlock(name, lins)
        node.set_throw()
    else:
        # goto*: pure control flow, drop the final instruction
        if 0x28 <= opcode <= 0x2a:
            lins.pop()
        node = StatementBlock(name, lins)
        node.set_stmt()
    return node
| |
"""
This version of nmrmath features speed-optimized hamiltonian, simsignals,
and transition_matrix functions. Up to at least 8 spins, the new non-sparse
Hamilton code is about 10x faster. The overall performance is dramatically
better than the original code.
"""
import numpy as np
from math import sqrt
from scipy.linalg import eigh
from scipy.sparse import kron, csc_matrix, csr_matrix, lil_matrix, bmat
##############################################################################
# Second-order, Quantum Mechanics routines
##############################################################################
def popcount(n=0):
    """Return the popcount (binary Hamming weight) of integer *n*.

    :param n: an integer
    :returns: the number of '1' digits in n's binary representation
    """
    return format(n, 'b').count('1')
def is_allowed(m=0, n=0):
    """Determine if a transition between two spin states is allowed.

    The transition is allowed when one and only one spin (i.e. bit) changes
    between the binary representations of *m* and *n*.

    :param m: integer coding for a spin state
    :param n: integer coding for a spin state
    :returns: True if allowed, False if forbidden
    """
    # XOR isolates the differing bits; exactly one set bit => allowed.
    return bin(m ^ n).count('1') == 1
def transition_matrix(n):
    """Create the n x n matrix of allowed single-quantum transitions.

    The integers 0..n-1, in binary, code for spin states (alpha/beta); cell
    (i, j) is 1 when states i and j differ by exactly one spin flip, else 0.

    :param n: size of the n,n matrix (number of possible spin states)
    :returns: a symmetric sparse transition matrix
    """
    T = lil_matrix((n, n))  # sparse accumulator
    # Fill only the upper triangle, then symmetrize with the transpose.
    for i in range(n - 1):
        for j in range(i + 1, n):
            # Allowed iff the two states differ in exactly one bit.
            if bin(i ^ j).count('1') == 1:
                T[i, j] = 1
    return T + T.T
def hamiltonian(freqlist, couplings):
    """
    Computes the spin Hamiltonian for spin-1/2 nuclei.

    inputs for n nuclei:
    :param freqlist: a list of frequencies in Hz of length n
    :param couplings: an n x n array of coupling constants in Hz
    Returns: a Hamiltonian array of shape (2**n, 2**n)
    """
    nspins = len(freqlist)

    # Define Pauli matrices (single-spin operators in the up/down basis)
    sigma_x = np.matrix([[0, 1 / 2], [1 / 2, 0]])
    sigma_y = np.matrix([[0, -1j / 2], [1j / 2, 0]])
    sigma_z = np.matrix([[1 / 2, 0], [0, -1 / 2]])
    unit = np.matrix([[1, 0], [0, 1]])

    # The following empty arrays will be used to store the
    # Cartesian spin operators.
    Lx = np.empty((1, nspins), dtype='object')
    Ly = np.empty((1, nspins), dtype='object')
    Lz = np.empty((1, nspins), dtype='object')

    # Build each nucleus' operator as a chain of Kronecker products placing
    # the Pauli matrix at position n and the identity everywhere else.
    for n in range(nspins):
        Lx[0, n] = 1
        Ly[0, n] = 1
        Lz[0, n] = 1
        for k in range(nspins):
            if k == n:  # Diagonal element
                Lx[0, n] = np.kron(Lx[0, n], sigma_x)
                Ly[0, n] = np.kron(Ly[0, n], sigma_y)
                Lz[0, n] = np.kron(Lz[0, n], sigma_z)
            else:  # Off-diagonal element
                Lx[0, n] = np.kron(Lx[0, n], unit)
                Ly[0, n] = np.kron(Ly[0, n], unit)
                Lz[0, n] = np.kron(Lz[0, n], unit)

    Lcol = np.vstack((Lx, Ly, Lz)).real
    Lrow = Lcol.T  # As opposed to sparse version of code, this works!
    # Lproduct[n, k] = Lx_n.Lx_k + Ly_n.Ly_k + Lz_n.Lz_k (scalar products)
    Lproduct = np.dot(Lrow, Lcol)

    # Hamiltonian operator
    H = np.zeros((2**nspins, 2**nspins))

    # Add Zeeman interactions:
    for n in range(nspins):
        H = H + freqlist[n] * Lz[0, n]

    # Scalar couplings

    # Testing with MATLAB discovered J must be /2.
    # Believe it is related to the fact that in the SpinDynamics.org simulation
    # freqs are *2pi, but Js by pi only.
    scalars = 0.5 * couplings
    scalars = np.multiply(scalars, Lproduct)
    for n in range(nspins):
        for k in range(nspins):
            H += scalars[n, k].real
    return H
def simsignals(H, nspins):
    """
    Solves the spin Hamiltonian H and returns a list of (frequency, intensity)
    tuples. Nuclei must be spin-1/2.

    Inputs:
    :param H: a Hamiltonian array
    :param nspins: number of nuclei
    :return spectrum: a list of (frequency, intensity) tuples.
    """
    # This routine was optimized for speed by vectorizing the intensity
    # calculations, replacing a nested-for signal-by-signal calculation.
    # Considering that hamiltonian was dramatically faster when refactored to
    # use arrays instead of sparse matrices, consider an array refactor to this
    # function as well.

    # The eigensolution calculation apparently must be done on a dense matrix,
    # because eig functions on sparse matrices can't return all answers?!

    # Using eigh so that answers have only real components and no residual small
    # unreal components b/c of rounding errors
    E, V = np.linalg.eigh(H)    # V will be eigenvectors, v will be frequencies

    # Eigh still leaves residual 0j terms, so:
    V = np.asmatrix(V.real)

    # Calculate signal intensities
    # Intensity of transition i->j is |<j| T |i>|^2 with T the transition
    # (matrix) operator; computed for all pairs at once as V^T * T * V.
    Vcol = csc_matrix(V)
    Vrow = csr_matrix(Vcol.T)
    m = 2 ** nspins
    T = transition_matrix(m)
    I = Vrow * T * Vcol
    I = np.square(I.todense())

    spectrum = []
    for i in range(m - 1):
        for j in range(i + 1, m):
            if I[i, j] > 0.01:  # consider making this minimum intensity
                                # cutoff a function arg, for flexibility
                # Transition frequency is the eigenvalue difference.
                v = abs(E[i] - E[j])
                spectrum.append((v, I[i, j]))
    return spectrum
def nspinspec(freqs, couplings, normalize=True):
    """Calculate the spectrum of n coupled spin-half nuclei.

    Inputs:
    :param freqs: a list of n nuclei frequencies in Hz
    :param couplings: an n x n array of couplings in Hz; the order of nuclei
        in the list corresponds to the column and row order in the matrix,
        e.g. couplings[0][1] and couplings[1][0] are the J coupling between
        the nuclei of freqs[0] and freqs[1].
    :param normalize: (bool) True if the intensities should be normalized so
        that total intensity equals the total number of nuclei.
    Returns:
        a list of (frequency, intensity) tuples.
    Dependencies: hamiltonian, simsignals, normalize_spectrum
    """
    count = len(freqs)
    peaks = simsignals(hamiltonian(freqs, couplings), count)
    return normalize_spectrum(peaks, count) if normalize else peaks
##############################################################################
# Non-QM solutions for specific multiplets
##############################################################################
# doublet, multiplet, add_peaks, and reduce_peaks are used to generate
# first-order splitting patterns
def doublet(plist, J):
    """Split every peak in *plist* into a doublet separated by J Hz.

    :param plist: a list of (frequency{Hz}, intensity) tuples
    :param J: a coupling constant {Hz}
    :returns: a new peak list where each input peak becomes two
        half-intensity peaks at v - J/2 and v + J/2
    """
    return [peak
            for v, i in plist
            for peak in ((v - J / 2, i / 2), (v + J / 2, i / 2))]
def multiplet(plist, couplings):
    """Split the peaks in *plist* by each coupling in *couplings*.

    :param plist: a list of (frequency{Hz}, intensity) tuples
    :param couplings: one or more (J, # of nuclei) tuples, e.g. to split a
        signal into a dt with J = 8, 5 Hz use couplings = [(8, 2), (5, 3)].
        The order of the tuples does not matter.
    :returns: the peak list of the resulting multiplet
    """
    res = plist
    for J, n_nuclei in couplings:
        # Splitting by n equivalent nuclei = n successive doublet splits.
        for _ in range(n_nuclei):
            res = [p for v, i in res
                   for p in ((v - J / 2, i / 2), (v + J / 2, i / 2))]
    return res
def add_peaks(plist):
    """Condense a list of peaks into a single combined peak.

    :param plist: a list of (frequency, intensity) tuples
    :returns: a (mean frequency, total intensity) tuple
    """
    if len(plist) == 1:
        return plist[0]  # single peak: nothing to combine
    frequencies = [v for v, _ in plist]
    heights = [i for _, i in plist]
    return sum(frequencies) / len(plist), sum(heights)
def reduce_peaks(plist, tolerance=0):
    """Merge adjacent peaks whose x values lie within *tolerance*.

    Takes an ordered list of (x, y) tuples and adds together (via add_peaks)
    tuples whose first values are within the tolerance limit.

    Input:
        plist: a *sorted* list of (x, y) tuples (sorted by x)
        tolerance: tuples that differ in x by <= tolerance are combined
    Output:
        a list of (x, y) tuples where all x values differ by > tolerance
    Dependency: add_peaks
    """
    res = []
    work = [plist[0]]  # accumulator of close-lying peaks to merge
    for i in range(1, len(plist)):
        if not work:
            # BUG FIX: accumulate the current peak, not the entire list
            # (was `work.append(plist)`). Note: with the current flow `work`
            # is never empty, so this branch is defensive only.
            work.append(plist[i])
            continue
        if plist[i][0] - work[-1][0] <= tolerance:
            work.append(plist[i])  # accumulate close peaks
            continue
        else:
            res.append(add_peaks(work))
            work = [plist[i]]
    if work:
        res.append(add_peaks(work))
    return res
def normalize(intensities, n=1):
    """Scale a list of intensities in place so that they sum to the total
    number of nuclei.

    :param intensities: [float] A list of intensities (modified in place).
    :param n: (int) Number of nuclei (target total intensity)."""
    scale = n / sum(intensities)
    for idx in range(len(intensities)):
        intensities[idx] *= scale
def first_order(signal, couplings):  # Wa, RightHz, WdthHz not implemented yet
    """Split a signal into a first-order multiplet.

    Input:
        -signal: a (frequency, intensity) tuple
        -couplings: a list of (J, # of nuclei) tuples. See multiplet
         docstring for more info.
    Output:
        a plist-style spectrum (list of (frequency, intensity) tuples) with
        coincident lines merged
    Dependencies: multiplet, reduce_peaks (and through them doublet/add_peaks)
    """
    # Possible future refactor: if function used a list of signals,
    # may be useful in other situations?
    split_peaks = multiplet([signal], couplings)
    return reduce_peaks(sorted(split_peaks))
def normalize_spectrum(spectrum, n=1):
    """Normalize the intensities in a spectrum so that total intensity equals
    value n (nominally the number of nuclei giving rise to the signal).

    :param spectrum: [(float, float)...] a list of (frequency, intensity)
        tuples.
    :param n: total intensity to normalize to.
    :returns: a new list of (frequency, intensity) tuples."""
    frequencies = [f for f, _ in spectrum]
    heights = [y for _, y in spectrum]
    normalize(heights, n)  # rescales in place
    return list(zip(frequencies, heights))
def AB(Jab, Vab, Vcentr, **kwargs):  # Wa, RightHz, WdthHz not implemented yet
    """
    Reich-style inputs for AB quartet.

    Jab is the A-B coupling constant (Hz)
    Vab is the difference in nuclei frequencies in the absence of coupling (Hz)
    Vcentr is the frequency for the center of the AB quartet
    Wa is width of peak at half-height (not implemented yet)
    RightHz is the lower frequency limit for the window
    WdthHz is the width of the window in Hz
    return: peaklist of (frequency, intensity) tuples, total intensity 2
    """
    J, dv, center = Jab, Vab, Vcentr
    # Half the distance between the outer/inner line pairs.
    c = ((dv ** 2 + J ** 2) ** 0.5) / 2
    v1 = center - c - (J / 2)
    v2 = v1 + J
    v3 = center + c - (J / 2)
    v4 = v3 + J
    # "Roofing": inner lines gain what the outer lines lose.
    dI = J / (2 * c)
    frequencies = [v1, v2, v3, v4]
    intensities = [1 - dI, 1 + dI, 1 + dI, 1 - dI]
    # Normalize so the total intensity equals 2 (two nuclei).
    scale = 2 / sum(intensities)
    intensities = [height * scale for height in intensities]
    return list(zip(frequencies, intensities))
def AB2(Jab, Vab, Vcentr, **kwargs):  # Wa, RightHz, WdthHz not implemented yet
    """
    Reich-style inputs for an AB2 spin system.
    Jab is the A-B coupling constant (Hz)
    Vab is the difference in nuclei frequencies in the absence of coupling (Hz)
    Vcentr is the frequency for the center of the AB2 signal
    Wa is width of peak at half-height (not implemented yet)
    RightHz is the lower frequency limit for the window (not implemented yet)
    WdthHz is the width of the window in Hz (not implemented yet)
    return: peaklist of (frequency, intensity) tuples
    """
    # Currently, there is a disconnect between the variable names in the GUI
    # and the variable names in this function. The following code provides a
    # temporary interface.
    J, dV, Vab = Jab, Vab, Vcentr
    # for now, old Jupyter code using Pople equations kept hashed out for now
    # Reich vs. Pople variable names are confused, e.g. Vab
    # So, variables being placed by position in the def header--CAUTION
    # From main passed in order of: Jab, Vab, Vcentr, Wa, RightHz, WdthHz
    # Here read in as: J, dV, Vab, " " "
    # dV = va - vb # Reich: used d = Vb - vA and then mucked with sign of d
    # Vab = (va + vb) / 2 # Reich: ABOff
    # Sign flip matches the Reich convention noted above (d = Vb - Va).
    dV = - dV
    va = Vab + (dV / 2)
    vb = va - dV
    Jmod = J * (3 / 4)  # This factor used in frequency calculations
    # In Reich's code, the definitions of cp/cm (for C_plus/C_minus) were
    # swapped, and then modifications using sign of d were employed. This
    # code hews closer to Pople definitions
    C_plus = sqrt(dV ** 2 + dV * J + (9 / 4) * (J ** 2)) / 2
    C_minus = sqrt(dV ** 2 - dV * J + (9 / 4) * (J ** 2)) / 2
    cos2theta_plus = (dV / 2 + J / 4) / C_plus  # Reich: cos2x
    cos2theta_minus = (dV / 2 - J / 4) / C_minus  # Reich: cos2y
    # This code differs from Reich's in the calculation of
    # the sin/cos x/y values
    sintheta_plus = sqrt((1 - cos2theta_plus) / 2)  # Reich: sinx
    sintheta_minus = sqrt((1 - cos2theta_minus) / 2)  # Reich: siny
    costheta_plus = sqrt((1 + cos2theta_plus) / 2)  # Reich: cosx
    costheta_minus = sqrt((1 + cos2theta_minus) / 2)  # Reich: cosy
    # Intensity formulas use the sin and cos of (theta_plus - theta_minus)
    # sin_dtheta is Reich's qq; cos_dtheta is Reich's rr
    sin_dtheta = sintheta_plus * costheta_minus - costheta_plus * sintheta_minus
    cos_dtheta = costheta_plus * costheta_minus + sintheta_plus * sintheta_minus
    # Calculate the frequencies and intensities.
    # V1-V4 are "Origin: A" (PSB Table 6-8);
    # V5-V8 are "Origin: B";
    # V9-V12 are "Origin: Comb."
    # NOTE(review): only V9 of the combination lines is computed here —
    # presumably V10-V12 are negligible; confirm against PSB Table 6-8.
    V1 = Vab + Jmod + C_plus
    V2 = vb + C_plus + C_minus
    V3 = va
    V4 = Vab - Jmod + C_minus
    V5 = vb + C_plus - C_minus
    V6 = Vab + Jmod - C_plus
    V7 = vb - C_plus + C_minus
    V8 = Vab - Jmod - C_minus
    V9 = vb - C_plus - C_minus
    I1 = (sqrt(2) * sintheta_plus - costheta_plus) ** 2
    I2 = (sqrt(2) * sin_dtheta + costheta_plus * costheta_minus) ** 2
    I3 = 1
    I4 = (sqrt(2) * sintheta_minus + costheta_minus) ** 2
    I5 = (sqrt(2) * cos_dtheta + costheta_plus * sintheta_minus) ** 2
    I6 = (sqrt(2) * costheta_plus + sintheta_plus) ** 2
    I7 = (sqrt(2) * cos_dtheta - sintheta_plus * costheta_minus) ** 2
    I8 = (sqrt(2) * costheta_minus - sintheta_minus) ** 2
    I9 = (sqrt(2) * sin_dtheta + sintheta_plus * sintheta_minus) ** 2
    vList = [V1, V2, V3, V4, V5, V6, V7, V8, V9]
    IList = [I1, I2, I3, I4, I5, I6, I7, I8, I9]
    # Three nuclei (A + B2) -> total intensity normalized to 3.
    normalize(IList, 3)
    return list(zip(vList, IList))
def ABX(Jab, Jbx, Jax, Vab, Vcentr, **kwargs):
    # Wa, RightHz, WdthHz not implemented yet
    """
    Reich-style inputs for an ABX spin system.
    Jab is the A-B coupling constant (Hz)
    Jbx is the B-X coupling constant (Hz)
    Jax is the A-X coupling constant (Hz)
    Vab is the difference in A and B nuclei frequencies in the absence of
    coupling (Hz)
    Vcentr is the frequency for the center of the AB part of the signal
    Wa is width of peak at half-height (not implemented yet)
    RightHz is the lower frequency limit for the window (not implemented yet)
    WdthHz is the width of the window in Hz (not implemented yet)
    return: peaklist of (frequency, intensity) tuples
    """
    # Another function where Reich vs. non-Reich variable names gets confusing
    # See comments in AB2 function
    # So, variables being placed by position in the def header--CAUTION
    # From main passed in order of: Jab, Jax, Jbx, Vab, Vcentr, ...
    # Here read in as: Jab, Jbx, Jax, dVab, Vab, ...
    # CHANGE: with switch to kwargs used in function calls, the following
    # code matches this Reich code to the current view dictionary
    Jbx, Jax = Jax, Jbx
    dVab = Vab
    Vab = Vcentr
    # dVab = va - vb # Reich: Vab
    # Vab = (va + vb) / 2 # Reich: ABOff
    # Reich's ABX: vx initialized as vb + 100
    vx = Vab - (dVab / 2) + 100
    dJx = Jax - Jbx  # GMS stepping-stone constant for readability
    # Retaining Reich names for next two constants
    cm = dJx / 2
    cp = Jax + Jbx
    # Reich re-defines constants m and l
    # (declaration/garbage-collection efficiency?)
    # GMS: using M and L for the first instance, m and n for second
    # (avoid lower-case l for variables)
    # Reich redefines m a third time for calculating X intensities
    # GMS: uses t
    M = dVab + cm
    L = dVab - cm
    D_plus = sqrt(M ** 2 + Jab ** 2) / 2
    D_minus = sqrt(L ** 2 + Jab ** 2) / 2
    sin2phi_plus = Jab / (2 * D_plus)  # Reich: sin2x
    sin2phi_minus = Jab / (2 * D_minus)  # Reich: sin2y
    cos2phi_plus = M / (2 * D_plus)  # Reich: cos2x
    cos2phi_minus = L / (2 * D_minus)  # Reich: cos2y
    m = (cp + 2 * Jab) / 4
    n = (cp - 2 * Jab) / 4  # Reich: l
    t = cos2phi_plus * cos2phi_minus + sin2phi_plus * sin2phi_minus
    # Calculate the frequencies and intensities.
    # V1-V4 are "Origin: B" (PSB Table 6-15);
    # V5-V8 are "Origin: A";
    # V9-V12 are "Origin: X" and V13-14 are "Origin: Comb. (X)"
    V1 = Vab - m - D_minus
    V2 = Vab + n - D_plus
    V3 = Vab - n - D_minus
    V4 = Vab + m - D_plus
    V5 = Vab - m + D_minus
    V6 = Vab + n + D_plus
    V7 = Vab - n + D_minus
    V8 = Vab + m + D_plus
    V9 = vx - cp / 2
    V10 = vx + D_plus - D_minus
    V11 = vx - D_plus + D_minus
    V12 = vx + cp / 2
    V13 = vx - D_plus - D_minus
    V14 = vx + D_plus + D_minus
    I1 = 1 - sin2phi_minus
    I2 = 1 - sin2phi_plus
    I3 = 1 + sin2phi_minus
    I4 = 1 + sin2phi_plus
    I5 = I3
    I6 = I4
    I7 = I1
    I8 = I2
    I9 = 1
    I10 = (1 + t) / 2
    I11 = I10
    I12 = 1
    I13 = (1 - t) / 2
    I14 = I13
    VList = [V1, V2, V3, V4, V5, V6, V7, V8, V9, V10, V11, V12, V13, V14]
    IList = [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13, I14]
    # Three nuclei (A, B, X) -> total intensity normalized to 3.
    normalize(IList, 3)
    return list(zip(VList, IList))
def ABX3(Jab, Jax, Jbx, Vab, Vcentr, **kwargs):
    # Wa, RightHz, WdthHz not implemented yet
    """
    Refactoring of Reich's code for simulating the ABX3 system.
    Treats the system as an AB quartet repeated for every line of the
    X3-coupled first-order quartets on A and B.
    """
    va = Vcentr - Vab / 2
    vb = Vcentr + Vab / 2
    a_lines = first_order((va, 1), [(Jax, 3)])
    b_lines = first_order((vb, 1), [(Jbx, 3)])
    peaklist = []
    for (freq_a, int_a), (freq_b, _) in zip(a_lines, b_lines):
        # One AB sub-quartet per pair of first-order lines, centered
        # between them; Wa, RightHz, WdthHz not implemented.
        sub_quartet = AB(Jab, freq_b - freq_a, (freq_a + freq_b) / 2)
        peaklist.extend((v, i * int_a) for v, i in sub_quartet)
    return peaklist
def AAXX(Jaa, Jxx, Jax, Jax_prime, Vcentr, **kwargs):
    # Wa, RightHz, WdthHz not implemented yet
    """
    Simulates an AA'XX' spin system. Frequencies and Js in Hz.
    Jaa is the JAA' coupling constant, Jxx the JXX', Jax the JAX,
    and Jax_prime the JAX'.
    Vcentr is the frequency for the center of the signal.
    Wa is width of peak at half-height (not implemented yet)
    RightHz is the lower frequency limit for the window
    WdthHz is the width of the window in Hz
    return: peaklist of (frequency, intensity) tuples
    """
    # Define the constants required to calculate frequencies and intensities
    # K, L, M, N are as defined in PSB
    K = Jaa + Jxx  # Reich: K
    M = Jaa - Jxx  # Reich: l
    L = Jax - Jax_prime  # Reich: m
    N = Jax + Jax_prime  # Reich: n
    # Retaining Reich names for next two constants
    # Suggested refactoring: don't divide by 2 here; can simplify later formulas
    p = sqrt((K ** 2 + L ** 2)) / 2
    r = sqrt((M ** 2 + L ** 2)) / 2
    sin2theta_s = (1 - K / (2 * p)) / 2
    sin2theta_a = (1 - M / (2 * r)) / 2
    cos2theta_s = (1 + K / (2 * p)) / 2
    cos2theta_a = (1 + M / (2 * r)) / 2
    # Calculate the frequencies and intensities.
    # See PSB Table 6-18. Transitions 1-4 are condensed into V1 and V2.
    V1 = Vcentr + N / 2
    V2 = Vcentr - N / 2
    V3 = Vcentr + K / 2 + p
    V4 = Vcentr - K / 2 + p
    V5 = Vcentr + K / 2 - p
    V6 = Vcentr - K / 2 - p
    V7 = Vcentr + M / 2 + r
    V8 = Vcentr - M / 2 + r
    V9 = Vcentr + M / 2 - r
    V10 = Vcentr - M / 2 - r
    # I1/I2 carry weight 2 because they each condense two transitions.
    I1 = 2
    I2 = I1
    I3 = sin2theta_s
    I4 = cos2theta_s
    I5 = I4
    I6 = I3
    I7 = sin2theta_a
    I8 = cos2theta_a
    I9 = I8
    I10 = I7
    VList = [V1, V2, V3, V4, V5, V6, V7, V8, V9, V10]
    IList = [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10]
    # Four nuclei (AA'XX') -> total intensity normalized to 4.
    normalize(IList, 4)
    return list(zip(VList, IList))
def AABB(Vab, Jaa, Jbb, Jab, Jab_prime, Vcentr, **kwargs):
    # Wa, RightHz, WdthHz not implemented yet
    """
    A wrapper for a second-order AA'BB' calculation, but using the
    values taken from the WINDNMR-style AA'BB' bar selected by the Multiplet
    menu.
    """
    va = Vcentr - Vab / 2
    vb = Vcentr + Vab / 2
    frequencies = [va, va, vb, vb]
    # Fill the upper triangle of the 4 x 4 coupling matrix, then symmetrize.
    couplings = np.zeros((4, 4))
    for (row, col), j in (((0, 1), Jaa), ((0, 2), Jab), ((0, 3), Jab_prime),
                          ((1, 2), Jab_prime), ((1, 3), Jab), ((2, 3), Jbb)):
        couplings[row, col] = j
    couplings = couplings + couplings.T
    return normalize_spectrum(nspinspec(frequencies, couplings), 4)
# TODO: doesn't seem to be used; schedule for deletion? (plus associated test)
def add_spectra(original, additional):
    """Append the peaks of *additional* onto *original*, mutating it in place."""
    for peak in additional:
        original.append(peak)
if __name__ == '__main__':
    # Ad-hoc demo/scratch code; most past experiments kept commented out.
    from nspin import reich_list
    from nmrplot import nmrplot as nmrplt

    test_freqs, test_couplings = reich_list()[8]
    # refactor reich_list to do this!
    # test_couplings = test_couplings.todense()
    # spectrum = nspinspec(test_freqs, test_couplings)
    # nmrplt(nspinspec(test_freqs, test_couplings), y=24)
    # ab2test = AB2(7.9, 26.5, 13.25, 0.5, 0, 300)
    # abxtest = ABX(12.0, 2.0, 8.0, 15.0, 7.5, 0.5, 0, 300)
    # nmrplt(abxtest)
    # print(abxtest)
    # v1 = (1200, 2)
    # v2 = (450, 2)
    # v3 = (300, 3)
    # J12 = 7
    # J23 = 7
    # m1 = first_order(v1, [(J12, 2)])
    # m2 = first_order(v2, [(J12, 2), (J23, 3)])
    # m3 = first_order(v3, [(J23, 2)])
    # testspec = reduce_peaks(sorted(m1 + m2 + m3))
    # print(testspec)
    # nmrplt(testspec)
    # nmrplt(m1)
    # # print(m2)
    # nmrplt(m2)
    # nmrplt(m3)
    # m1 = multiplet(v1, [(J12, 2)])
    # m2 = multiplet(v2, [(J12, 2), (J23, 3)])
    # m3 = multiplet(v3, [(J23, 2)])
    #
    # testspec = sorted(m1 + m2 + m3)
    # print(testspec)
    # nmrplt(testspec)
    # nmrplt(m1)
    # nmrplt(m2)
    # nmrplt(m3)
    # abx3spec = ABX3(-12.0, 7.0, 7.0, 14.0, 150.0, 0.5, 0.0, 300.0)
    # nmrplt(abx3spec)
    # aaxxspec = AAXX(15, -10, 40, 6, 150, 0.5, 0, 300)
    # print(sorted(aaxxspec))
    # nmrplt(aaxxspec)
    # BUG FIX: AABB takes six positional parameters (Vab, Jaa, Jbb, Jab,
    # Jab_prime, Vcentr); the old call passed nine positionals (trailing
    # Wa/RightHz/WdthHz values), which raises TypeError because the extras
    # are keyword-only (**kwargs) placeholders.
    aabbspec = AABB(40, 15, -10, 40, 6, 150)
    print(sorted(aabbspec))
    nmrplt(aabbspec)
| |
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import optparse
import re
def ParseTest(lines):
  r"""Parses section-based test.

  Args:
    lines: list of \n-terminated strings.
  Returns:
    List of string pairs (field name, field content) in order. Field content is
    concatenation of \n-terminated lines, so it's either empty or ends with \n.
  """
  fields = []
  field_data = {}
  current_field = None
  for line in lines:
    if line.startswith(' '):
      # Indented continuation line: strip the indent (line[2:]) and append
      # to the field announced by the most recent '@name:' header.
      assert current_field is not None, line
      field_data[current_field].append(line[2:])
    else:
      # BUG FIX: raw string; the old '@(\S+):$' relied on the invalid
      # escape '\S' being passed through, which warns/breaks on Python 3.
      match = re.match(r'@(\S+):$', line)
      if match is None:
        raise Exception('Bad line: %r' % line)
      current_field = match.group(1)
      # Duplicate field names are a test-file authoring error.
      assert current_field not in field_data, current_field
      field_data[current_field] = []
      fields.append(current_field)
  return [(field, ''.join(field_data[field])) for field in fields]
def SplitLines(lines, separator_regex):
  """Split sequence of lines into sequence of list of lines.

  Args:
    lines: sequence of strings.
    separator_regex: separator regex.
  Yields:
    Nonempty sequence of (possibly empty) lists of strings. Separator lines
    are not included.
  """
  current = []
  for line in lines:
    if re.match(separator_regex, line) is not None:
      # Separator ends the current chunk; the separator itself is dropped.
      yield current
      current = []
    else:
      current.append(line)
  # Trailing chunk is always emitted, even if empty.
  yield current
def LoadTestFile(filename):
  r"""Loads and parses .test file.

  Args:
    filename: filename.
  Returns:
    List of tests (see ParseTest).
  """
  with open(filename) as file_in:
    # BUG FIX: a list comprehension forces evaluation while the file is
    # still open.  The old map() call is lazy on Python 3, so the 'with'
    # block closed the file before the parser ever read it (identical
    # behavior on Python 2, where map() was already eager).
    return [ParseTest(part) for part in SplitLines(file_in, r'-{3,}\s*$')]
def UnparseTest(items_list):
  r"""Convert test to sequence of \n-terminated strings.

  Args:
    items_list: list of string pairs (see ParseTest).
  Yields:
    Sequence of \n-terminated strings.
  """
  for field, content in items_list:
    yield '@%s:\n' % field
    if not content:
      continue
    # Non-empty content must be newline-terminated; emit each of its lines
    # re-indented under the field header.
    assert content.endswith('\n')
    for body_line in content[:-1].split('\n'):
      yield ' %s\n' % body_line
def SaveTestFile(tests, filename):
  r"""Saves .test file.

  Args:
    tests: list of tests (see ParseTest).
    filename: filename.
  Returns:
    None.
  """
  separator = '-' * 70 + '\n'
  with open(filename, 'w') as file_out:
    for index, test in enumerate(tests):
      # A dashed separator precedes every test after the first.
      if index > 0:
        file_out.write(separator)
      for line in UnparseTest(test):
        file_out.write(line)
def ParseHex(hex_content):
  r"""Parse content of @hex section and return binary data.

  Args:
    hex_content: Content of @hex section as a string.
  Yields:
    Chunks of binary data corresponding to lines of given @hex section (as
    strings). If line ends with r'\\', chunk is continued on the following
    line.
  """
  pending = []
  for raw_line in hex_content.split('\n'):
    # Drop everything after a '#' comment marker, then trim whitespace.
    text = raw_line.partition('#')[0].strip()
    if not text:
      continue
    continued = text.endswith('\\\\')
    if continued:
      text = text[:-2]
    for byte in text.split():
      assert len(byte) == 2
      pending.append(chr(int(byte, 16)))
    if not continued:
      assert len(pending) > 0
      yield ''.join(pending)
      pending = []
  assert pending == [], r'r"\\" should not appear on the last line'
def AssertEquals(actual, expected):
  """Raise AssertionError with a readable two-block message unless equal."""
  if actual == expected:
    return
  raise AssertionError('\nEXPECTED:\n"""\n%s"""\n\nACTUAL:\n"""\n%s"""'
                       % (expected, actual))
class TestRunner(object):
  """Golden-file test driver (Python 2 — note the print statements).

  Subclasses set SECTION_NAME and implement GetSectionContent(); Run()
  walks the .test files named on the command line and either checks the
  named section against regenerated content, or rewrites it with --update.
  """
  # Name of the @section each concrete runner owns; set by subclasses.
  SECTION_NAME = None
  def CommandLineOptions(self, parser):
    # Hook for subclasses to register extra optparse options.
    pass
  def GetSectionContent(self, options, sections):
    # Subclasses regenerate the golden content for SECTION_NAME here.
    raise NotImplementedError()
  def Test(self, options, items_list):
    # Check (or with --update, regenerate) this runner's section of one test.
    info = dict(items_list)
    assert self.SECTION_NAME in info
    content = self.GetSectionContent(options, info)
    print ' Checking %s field...' % self.SECTION_NAME
    if options.update:
      if content != info[self.SECTION_NAME]:
        print ' Updating %s field...' % self.SECTION_NAME
        info[self.SECTION_NAME] = content
    else:
      AssertEquals(content, info[self.SECTION_NAME])
    # Update field values, but preserve their order.
    items_list = [(field, info[field]) for field, _ in items_list]
    return items_list
  def Run(self, argv):
    # Parse options, expand file globs, and run Test() over every test in
    # every matched file.
    parser = optparse.OptionParser()
    parser.add_option('--bits',
                      type=int,
                      help='The subarchitecture to run tests against: 32 or 64')
    parser.add_option('--update',
                      default=False,
                      action='store_true',
                      help='Regenerate golden fields instead of testing')
    self.CommandLineOptions(parser)
    options, args = parser.parse_args(argv)
    if options.bits not in [32, 64]:
      parser.error('specify --bits 32 or --bits 64')
    if len(args) == 0:
      parser.error('No test files specified')
    processed = 0
    for glob_expr in args:
      test_files = sorted(glob.glob(glob_expr))
      if len(test_files) == 0:
        raise AssertionError(
            '%r matched no files, which was probably not intended' % glob_expr)
      for test_file in test_files:
        print 'Testing %s...' % test_file
        tests = LoadTestFile(test_file)
        tests = [self.Test(options, test) for test in tests]
        if options.update:
          SaveTestFile(tests, test_file)
        processed += 1
    print '%s test files were processed.' % processed
| |
# Copyright (c) 2017 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from kmip import enums
from kmip.core import primitives
from kmip.core import utils
from kmip.core.messages.payloads import base
class ObtainLeaseRequestPayload(base.RequestPayload):
    """
    A request payload for the ObtainLease operation.

    Attributes:
        unique_identifier: The unique ID of the object to be leased.
    """
    def __init__(self, unique_identifier=None):
        """
        Construct an ObtainLease request payload struct.

        Args:
            unique_identifier (string): The ID of the managed object (e.g.,
                a public key) to obtain a lease for. Optional, defaults to
                None.
        """
        super(ObtainLeaseRequestPayload, self).__init__()
        # Backing TTLV primitive; exposed as a plain string via the property.
        self._unique_identifier = None
        self.unique_identifier = unique_identifier
    @property
    def unique_identifier(self):
        # Unwrap the TextString primitive to a plain string (or None).
        if self._unique_identifier:
            return self._unique_identifier.value
        else:
            return None
    @unique_identifier.setter
    def unique_identifier(self, value):
        # Accept None or a string; wrap strings in a tagged TextString.
        if value is None:
            self._unique_identifier = None
        elif isinstance(value, six.string_types):
            self._unique_identifier = primitives.TextString(
                value=value,
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
        else:
            raise TypeError("Unique identifier must be a string.")
    def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Read the data encoding the ObtainLease request payload and decode it
        into its constituent parts.

        Args:
            input_stream (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.0.
        Raises:
            ValueError: Raised if the data attribute is missing from the
                encoded payload.
        """
        # The superclass read() parses the header and sets self.length.
        super(ObtainLeaseRequestPayload, self).read(
            input_stream,
            kmip_version=kmip_version
        )
        # Restrict parsing to exactly this payload's bytes.
        local_stream = utils.BytearrayStream(input_stream.read(self.length))
        # The unique identifier field is optional in the encoding.
        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
            self._unique_identifier = primitives.TextString(
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
            self._unique_identifier.read(
                local_stream,
                kmip_version=kmip_version
            )
        # Raise if unparsed bytes remain within the declared payload length.
        self.is_oversized(local_stream)
    def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Write the data encoding the ObtainLease request payload to a stream.

        Args:
            output_stream (stream): A data stream in which to encode object
                data, supporting a write method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be encoded. Optional,
                defaults to KMIP 1.0.
        Raises:
            ValueError: Raised if the data attribute is not defined.
        """
        # Encode the (optional) field into a scratch buffer first so the
        # payload length is known before the header is written.
        local_stream = utils.BytearrayStream()
        if self._unique_identifier:
            self._unique_identifier.write(
                local_stream,
                kmip_version=kmip_version
            )
        self.length = local_stream.length()
        super(ObtainLeaseRequestPayload, self).write(
            output_stream,
            kmip_version=kmip_version
        )
        output_stream.write(local_stream.buffer)
    def __eq__(self, other):
        # Payloads compare by field value; other types are NotImplemented
        # so Python can try the reflected comparison.
        if isinstance(other, ObtainLeaseRequestPayload):
            if self.unique_identifier != other.unique_identifier:
                return False
            else:
                return True
        else:
            return NotImplemented
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it.
        if isinstance(other, ObtainLeaseRequestPayload):
            return not (self == other)
        else:
            return NotImplemented
    def __repr__(self):
        args = "unique_identifier='{0}'".format(self.unique_identifier)
        return "ObtainLeaseRequestPayload({0})".format(args)
    def __str__(self):
        return str({
            'unique_identifier': self.unique_identifier
        })
class ObtainLeaseResponsePayload(base.ResponsePayload):
    """
    A response payload for the ObtainLease operation.

    Attributes:
        unique_identifier: The unique ID of the object that was leased.
        lease_time: The amount of time, in seconds, that the object lease is
            in effect.
        last_change_date: The date, in seconds since the epoch, representing
            the last time a change was made to the object or one of its
            attributes.
    """
    def __init__(self,
                 unique_identifier=None,
                 lease_time=None,
                 last_change_date=None):
        """
        Construct an ObtainLease response payload struct.

        Args:
            unique_identifier (string): The ID of the managed object (e.g.,
                a public key) a lease was obtained for. Optional, defaults to
                None.
            lease_time (int): The amount of time, in seconds, that the object
                lease is in effect for. Optional, defaults to None.
            last_change_date (int): The date, in seconds since the epoch,
                when the last change was made to the object or one of its
                attributes. Optional, defaults to None.
        """
        super(ObtainLeaseResponsePayload, self).__init__()
        # Backing TTLV primitives; exposed as plain values via properties.
        self._unique_identifier = None
        self._lease_time = None
        self._last_change_date = None
        self.unique_identifier = unique_identifier
        self.lease_time = lease_time
        self.last_change_date = last_change_date
    @property
    def unique_identifier(self):
        # Unwrap the TextString primitive to a plain string (or None).
        if self._unique_identifier:
            return self._unique_identifier.value
        else:
            return None
    @unique_identifier.setter
    def unique_identifier(self, value):
        # Accept None or a string; wrap strings in a tagged TextString.
        if value is None:
            self._unique_identifier = None
        elif isinstance(value, six.string_types):
            self._unique_identifier = primitives.TextString(
                value=value,
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
        else:
            raise TypeError("Unique identifier must be a string.")
    @property
    def lease_time(self):
        # Unwrap the Interval primitive to a plain int (or None).
        if self._lease_time:
            return self._lease_time.value
        else:
            return None
    @lease_time.setter
    def lease_time(self, value):
        # Accept None or an integer; wrap integers in a tagged Interval.
        if value is None:
            self._lease_time = None
        elif isinstance(value, six.integer_types):
            self._lease_time = primitives.Interval(
                value=value,
                tag=enums.Tags.LEASE_TIME
            )
        else:
            raise TypeError("Lease time must be an integer.")
    @property
    def last_change_date(self):
        # Unwrap the DateTime primitive to a plain int (or None).
        if self._last_change_date:
            return self._last_change_date.value
        else:
            return None
    @last_change_date.setter
    def last_change_date(self, value):
        # Accept None or an integer; wrap integers in a tagged DateTime.
        if value is None:
            self._last_change_date = None
        elif isinstance(value, six.integer_types):
            self._last_change_date = primitives.DateTime(
                value=value,
                tag=enums.Tags.LAST_CHANGE_DATE
            )
        else:
            raise TypeError("Last change date must be an integer.")
    def read(self, input_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Read the data encoding the ObtainLease response payload and decode it
        into its constituent parts.

        Args:
            input_stream (stream): A data stream containing encoded object
                data, supporting a read method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be decoded. Optional,
                defaults to KMIP 1.0.
        Raises:
            ValueError: Raised if the data attribute is missing from the
                encoded payload.
        """
        # The superclass read() parses the header and sets self.length.
        super(ObtainLeaseResponsePayload, self).read(
            input_stream,
            kmip_version=kmip_version
        )
        # Restrict parsing to exactly this payload's bytes.
        local_stream = utils.BytearrayStream(input_stream.read(self.length))
        # All three fields are optional in the encoding.
        if self.is_tag_next(enums.Tags.UNIQUE_IDENTIFIER, local_stream):
            self._unique_identifier = primitives.TextString(
                tag=enums.Tags.UNIQUE_IDENTIFIER
            )
            self._unique_identifier.read(
                local_stream,
                kmip_version=kmip_version
            )
        if self.is_tag_next(enums.Tags.LEASE_TIME, local_stream):
            self._lease_time = primitives.Interval(
                tag=enums.Tags.LEASE_TIME
            )
            self._lease_time.read(local_stream, kmip_version=kmip_version)
        if self.is_tag_next(enums.Tags.LAST_CHANGE_DATE, local_stream):
            self._last_change_date = primitives.DateTime(
                tag=enums.Tags.LAST_CHANGE_DATE
            )
            self._last_change_date.read(
                local_stream,
                kmip_version=kmip_version
            )
        # Raise if unparsed bytes remain within the declared payload length.
        self.is_oversized(local_stream)
    def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
        """
        Write the data encoding the ObtainLease response payload to a stream.

        Args:
            output_stream (stream): A data stream in which to encode object
                data, supporting a write method; usually a BytearrayStream
                object.
            kmip_version (KMIPVersion): An enumeration defining the KMIP
                version with which the object will be encoded. Optional,
                defaults to KMIP 1.0.
        Raises:
            ValueError: Raised if the data attribute is not defined.
        """
        # Encode fields into a scratch buffer first so the payload length
        # is known before the header is written.
        local_stream = utils.BytearrayStream()
        if self._unique_identifier:
            self._unique_identifier.write(
                local_stream,
                kmip_version=kmip_version
            )
        if self._lease_time:
            self._lease_time.write(
                local_stream,
                kmip_version=kmip_version
            )
        if self._last_change_date:
            self._last_change_date.write(
                local_stream,
                kmip_version=kmip_version
            )
        self.length = local_stream.length()
        super(ObtainLeaseResponsePayload, self).write(
            output_stream,
            kmip_version=kmip_version
        )
        output_stream.write(local_stream.buffer)
    def __eq__(self, other):
        # Payloads compare by field value; other types are NotImplemented
        # so Python can try the reflected comparison.
        if isinstance(other, ObtainLeaseResponsePayload):
            if self.unique_identifier != other.unique_identifier:
                return False
            elif self.lease_time != other.lease_time:
                return False
            elif self.last_change_date != other.last_change_date:
                return False
            else:
                return True
        else:
            return NotImplemented
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it.
        if isinstance(other, ObtainLeaseResponsePayload):
            return not (self == other)
        else:
            return NotImplemented
    def __repr__(self):
        args = ", ".join([
            "unique_identifier='{0}'".format(self.unique_identifier),
            "lease_time={0}".format(self.lease_time),
            "last_change_date={0}".format(self.last_change_date)
        ])
        return "ObtainLeaseResponsePayload({0})".format(args)
    def __str__(self):
        return str({
            'unique_identifier': self.unique_identifier,
            'lease_time': self.lease_time,
            'last_change_date': self.last_change_date
        })
| |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with Stackdriver Logging via JSON-over-HTTP."""
import functools
from google.api.core import page_iterator
from google.cloud import _http
from google.cloud.logging import __version__
from google.cloud.logging._helpers import entry_from_resource
from google.cloud.logging.sink import Sink
from google.cloud.logging.metric import Metric
# Client-info string identifying this library version; sent with every
# request so the backend can attribute API traffic.
_CLIENT_INFO = _http.CLIENT_INFO_TEMPLATE.format(__version__)
class Connection(_http.JSONConnection):
    """A connection to Google Stackdriver Logging via the JSON REST API.

    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client that owns the current connection.
    """
    API_BASE_URL = 'https://logging.googleapis.com'
    """The base of the API call URL."""
    API_VERSION = 'v2'
    """The version of the API, used in building the API call's URL."""
    API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}'
    """A template for the URL of a particular API call."""
    # Extra headers attached to every request; carries the client-info
    # identifier for API traffic attribution.
    _EXTRA_HEADERS = {
        _http.CLIENT_INFO_HEADER: _CLIENT_INFO,
    }
class _LoggingAPI(object):
    """Helper mapping logging-related APIs.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs

    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Shortcut to the shared HTTP transport helper on the client.
        self.api_request = client._connection.api_request
    def list_entries(self, projects, filter_=None, order_by=None,
                     page_size=None, page_token=None):
        """Return a page of log entry resources.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/list

        :type projects: list of strings
        :param projects: project IDs to include. If not passed,
                         defaults to the project bound to the client.
        :type filter_: str
        :param filter_:
            a filter expression. See
            https://cloud.google.com/logging/docs/view/advanced_filters
        :type order_by: str
        :param order_by: One of :data:`~google.cloud.logging.ASCENDING`
                         or :data:`~google.cloud.logging.DESCENDING`.
        :type page_size: int
        :param page_size: maximum number of entries to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of entries. If not
                           passed, the API will return the first page of
                           entries.
        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
                  accessible to the current API.
        """
        # Only include optional request parameters the caller actually set.
        extra_params = {'projectIds': projects}
        if filter_ is not None:
            extra_params['filter'] = filter_
        if order_by is not None:
            extra_params['orderBy'] = order_by
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/entries:list'
        # We attach a mutable loggers dictionary so that as Logger
        # objects are created by entry_from_resource, they can be
        # re-used by other log entries from the same logger.
        loggers = {}
        item_to_value = functools.partial(
            _item_to_entry, loggers=loggers)
        iterator = page_iterator.HTTPIterator(
            client=self._client,
            api_request=self._client._connection.api_request,
            path=path,
            item_to_value=item_to_value,
            items_key='entries',
            page_token=page_token,
            extra_params=extra_params)
        # This method uses POST to make a read-only request.
        iterator._HTTP_METHOD = 'POST'
        return iterator
    def write_entries(self, entries, logger_name=None, resource=None,
                      labels=None):
        """API call: log an entry resource via a POST request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write

        :type entries: sequence of mapping
        :param entries: the log entry resources to log.
        :type logger_name: str
        :param logger_name: name of default logger to which to log the entries;
                            individual entries may override.
        :type resource: mapping
        :param resource: default resource to associate with entries;
                         individual entries may override.
        :type labels: mapping
        :param labels: default labels to associate with entries;
                       individual entries may override.
        """
        # Defaults are only sent if provided; per-entry values take
        # precedence server-side.
        data = {'entries': list(entries)}
        if logger_name is not None:
            data['logName'] = logger_name
        if resource is not None:
            data['resource'] = resource
        if labels is not None:
            data['labels'] = labels
        self.api_request(method='POST', path='/entries:write', data=data)
    def logger_delete(self, project, logger_name):
        """API call: delete all entries in a logger via a DELETE request

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.logs/delete

        :type project: str
        :param project: ID of project containing the log entries to delete
        :type logger_name: str
        :param logger_name: name of logger containing the log entries to delete
        """
        path = '/projects/%s/logs/%s' % (project, logger_name)
        self.api_request(method='DELETE', path=path)
class _SinksAPI(object):
    """Helper mapping sink-related APIs.

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks

    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """
    def __init__(self, client):
        self._client = client
        # Shortcut to the shared HTTP transport helper on the client.
        self.api_request = client._connection.api_request
    def list_sinks(self, project, page_size=None, page_token=None):
        """List sinks for the project associated with this client.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/list

        :type project: str
        :param project: ID of the project whose sinks are to be listed.
        :type page_size: int
        :param page_size: maximum number of sinks to return, If not passed,
                          defaults to a value set by the API.
        :type page_token: str
        :param page_token: opaque marker for the next "page" of sinks. If not
                           passed, the API will return the first page of
                           sinks.
        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.logging.sink.Sink`
                  accessible to the current API.
        """
        extra_params = {}
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/projects/%s/sinks' % (project,)
        return page_iterator.HTTPIterator(
            client=self._client,
            api_request=self._client._connection.api_request,
            path=path,
            item_to_value=_item_to_sink,
            items_key='sinks',
            page_token=page_token,
            extra_params=extra_params)
    def sink_create(self, project, sink_name, filter_, destination):
        """API call: create a sink resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create

        :type project: str
        :param project: ID of the project in which to create the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the sink.
        :type destination: str
        :param destination: destination URI for the entries exported by
                            the sink.
        """
        target = '/projects/%s/sinks' % (project,)
        data = {
            'name': sink_name,
            'filter': filter_,
            'destination': destination,
        }
        self.api_request(method='POST', path=target, data=data)
    def sink_get(self, project, sink_name):
        """API call: retrieve a sink resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/get

        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :rtype: dict
        :returns: The JSON sink object returned from the API.
        """
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        return self.api_request(method='GET', path=target)
    def sink_update(self, project, sink_name, filter_, destination):
        """API call: update a sink resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/update

        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the sink.
        :type destination: str
        :param destination: destination URI for the entries exported by
                            the sink.
        :rtype: dict
        :returns: The returned (updated) resource.
        """
        # PUT replaces the full sink resource, so all fields are sent.
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        data = {
            'name': sink_name,
            'filter': filter_,
            'destination': destination,
        }
        return self.api_request(method='PUT', path=target, data=data)
    def sink_delete(self, project, sink_name):
        """API call: delete a sink resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/delete

        :type project: str
        :param project: ID of the project containing the sink.
        :type sink_name: str
        :param sink_name: the name of the sink
        """
        target = '/projects/%s/sinks/%s' % (project, sink_name)
        self.api_request(method='DELETE', path=target)
class _MetricsAPI(object):
    """Helper mapping metric-related APIs.

    (The docstring previously said "sink-related", copied verbatim from
    the sinks helper; this class wraps the ``projects.metrics`` API.)

    See
    https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics

    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client used to make API requests.
    """

    def __init__(self, client):
        self._client = client
        # Cache the connection's request hook; every method below routes
        # its HTTP call through this single entry point.
        self.api_request = client._connection.api_request

    def list_metrics(self, project, page_size=None, page_token=None):
        """List metrics for the project associated with this client.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list

        :type project: str
        :param project: ID of the project whose metrics are to be listed.

        :type page_size: int
        :param page_size: maximum number of metrics to return. If not passed,
                          defaults to a value set by the API.

        :type page_token: str
        :param page_token: opaque marker for the next "page" of metrics. If not
                           passed, the API will return the first page of
                           metrics.

        :rtype: :class:`~google.api.core.page_iterator.Iterator`
        :returns: Iterator of
                  :class:`~google.cloud.logging.metric.Metric`
                  accessible to the current API.
        """
        extra_params = {}
        if page_size is not None:
            extra_params['pageSize'] = page_size
        path = '/projects/%s/metrics' % (project,)
        # Use the cached request hook for consistency with the other
        # methods (same bound method as _client._connection.api_request).
        return page_iterator.HTTPIterator(
            client=self._client,
            api_request=self.api_request,
            path=path,
            item_to_value=_item_to_metric,
            items_key='metrics',
            page_token=page_token,
            extra_params=extra_params)

    def metric_create(self, project, metric_name, filter_, description=None):
        """API call: create a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create

        :type project: str
        :param project: ID of the project in which to create the metric.

        :type metric_name: str
        :param metric_name: the name of the metric

        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the metric.

        :type description: str
        :param description: description of the metric.
        """
        target = '/projects/%s/metrics' % (project,)
        data = {
            'name': metric_name,
            'filter': filter_,
            'description': description,
        }
        self.api_request(method='POST', path=target, data=data)

    def metric_get(self, project, metric_name):
        """API call: retrieve a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/get

        :type project: str
        :param project: ID of the project containing the metric.

        :type metric_name: str
        :param metric_name: the name of the metric

        :rtype: dict
        :returns: The JSON metric object returned from the API.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        return self.api_request(method='GET', path=target)

    def metric_update(self, project, metric_name, filter_, description):
        """API call: update a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/update

        :type project: str
        :param project: ID of the project containing the metric.

        :type metric_name: str
        :param metric_name: the name of the metric

        :type filter_: str
        :param filter_: the advanced logs filter expression defining the
                        entries exported by the metric.

        :type description: str
        :param description: description of the metric.

        :rtype: dict
        :returns: The returned (updated) resource.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        data = {
            'name': metric_name,
            'filter': filter_,
            'description': description,
        }
        return self.api_request(method='PUT', path=target, data=data)

    def metric_delete(self, project, metric_name):
        """API call: delete a metric resource.

        See
        https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/delete

        :type project: str
        :param project: ID of the project containing the metric.

        :type metric_name: str
        :param metric_name: the name of the metric.
        """
        target = '/projects/%s/metrics/%s' % (project, metric_name)
        self.api_request(method='DELETE', path=target)
def _item_to_entry(iterator, resource, loggers):
    """Convert a log entry resource to the native object.

    .. note::

        This function does not have the signature required for the
        ``item_to_value`` argument of
        :class:`~google.api.core.page_iterator.Iterator`.  It is intended
        to be patched with a mutable ``loggers`` argument that persists
        across calls; see how it is used in
        :meth:`_LoggingAPI.list_entries`.

    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: Log entry JSON resource returned from the API.

    :type loggers: dict
    :param loggers: mapping of logger fullname -> logger; when the entry's
        logger is not present, a new one is created for it.

    :rtype: :class:`~google.cloud.logging.entries._BaseEntry`
    :returns: The next log entry in the page.
    """
    owning_client = iterator.client
    return entry_from_resource(resource, owning_client, loggers)
def _item_to_sink(iterator, resource):
    """Build a native ``Sink`` from its JSON API representation.

    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: Sink JSON resource returned from the API.

    :rtype: :class:`~google.cloud.logging.sink.Sink`
    :returns: The next sink in the page.
    """
    owning_client = iterator.client
    return Sink.from_api_repr(resource, owning_client)
def _item_to_metric(iterator, resource):
    """Build a native ``Metric`` from its JSON API representation.

    :type iterator: :class:`~google.api.core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type resource: dict
    :param resource: Metric JSON resource returned from the API.

    :rtype: :class:`~google.cloud.logging.metric.Metric`
    :returns: The next metric in the page.
    """
    owning_client = iterator.client
    return Metric.from_api_repr(resource, owning_client)
| |
import netaddr
def rdns_domain(network):
    """Transform :py:class:`netaddr.IPNetwork` object to rDNS zone name"""
    # A zero-length prefix maps to the top of the reverse tree.
    if network.prefixlen == 0:
        return "ip6.arpa" if network.version == 6 else "in-addr.arpa"
    if network.version == 4:
        # One label per full octet covered by the prefix, least
        # significant octet first.
        covered = network.ip.words[:network.prefixlen // 8]
        labels = [str(octet) for octet in covered]
        labels.reverse()
        return ".".join(labels) + ".in-addr.arpa"
    elif network.version == 6:
        # Expand to the full 32-nibble hex form, take one nibble per
        # label for the part covered by the prefix, reversed.
        expanded = "".join(
            hex(word)[2:].rjust(4, "0") for word in network.ip.words)
        labels = list(expanded[:network.prefixlen // 4])
        labels.reverse()
        return ".".join(labels) + ".ip6.arpa"
def rdns_subnets(network):
    """Yield ``(subnet, zone_name)`` pairs covering *network*, split at
    the next octet (IPv4) or nibble (IPv6) boundary."""
    step = 8 if network.version == 4 else 4
    # Round the prefix length up to the next multiple of the label size.
    aligned_prefixlen = step * ((network.prefixlen - 1) // step + 1)
    for piece in network.subnet(aligned_prefixlen):
        yield (piece, rdns_domain(piece))
def rdns_network(domain):
    """Inverse of :func:`rdns_domain`: map an rDNS zone name back to the
    :py:class:`netaddr.IPNetwork` it covers.

    Returns ``None`` when *domain* is not under ``.arpa`` (and, as
    before, implicitly for ``.arpa`` children other than
    ``in-addr``/``ip6``).
    """
    components = domain_split(domain)
    if len(components) < 2 or components[-1] != "arpa":
        return None
    if components[-2] == "ip6":
        # Each label is one hex nibble; pad to the full 32 nibbles of an
        # IPv6 address, least significant nibble listed first in the name.
        prefixlen = (len(components) - 2) * 4
        nibbles = components[:-2][::-1] + (32 - len(components[:-2])) * ["0"]
        # Group nibbles four at a time into hextets.  The previous
        # implementation did this with a manual iterator plus an
        # unreachable ``n1 is None`` sentinel check (``next()`` raises
        # StopIteration, it never returns None); any trailing partial
        # group is dropped, as before.
        hextets = ["".join(nibbles[i:i + 4])
                   for i in range(0, 4 * (len(nibbles) // 4), 4)]
        net = ":".join(hextets)
        return netaddr.IPNetwork(net + "/{}".format(prefixlen))
    elif components[-2] == "in-addr":
        # Each label is one decimal octet; pad to four octets.
        prefixlen = (len(components) - 2) * 8
        octets = components[:-2][::-1] + (4 - len(components[:-2])) * ["0"]
        net = ".".join(octets)
        return netaddr.IPNetwork(net + "/{}".format(prefixlen))
def canonicalize_name(name):
    """Strip a single trailing dot from *name*.

    Fixed: the previous ``name[-1]`` check raised ``IndexError`` for an
    empty string; ``""`` is now returned unchanged.
    """
    if name.endswith('.'):
        name = name[:-1]
    return name
def glue_record(domain, glue):
    """Return a glue address record (AAAA for IPv6, A otherwise) for
    *domain* pointing at address *glue*."""
    owner = canonicalize_name(domain)
    # TODO sanitize glue record
    if ":" in glue:
        template = "{domain}. IN AAAA {glue}"
    else:
        template = "{domain}. IN A {glue}"
    return template.format(domain=owner, glue=glue)
def ns_delegation(domain, nserver):
    """Return an NS delegation record for *domain* to *nserver*."""
    # TODO sanitize nserver
    return "{domain}. IN NS {nserver}.".format(
        domain=canonicalize_name(domain),
        nserver=canonicalize_name(nserver))
def ds_delegation(domain, rrdata):
    """Return a DS record for *domain* carrying the given rdata."""
    # TODO sanitize rrdata
    return "{domain}. IN DS {rrdata}".format(
        domain=canonicalize_name(domain),
        rrdata=rrdata)
def generate_delegation(
        domain,
        comments=False,
        include_glue=True,
        include_ds=True):
    """Yield zone-file lines delegating one domain object.

    :param domain: dict-like object with a ``"domain"`` key and list
        values under ``"nserver"``, ``"ds-rdata"``, ``"zone-c"``,
        ``"admin-c"`` and ``"tech-c"``.
    :param comments: emit a leading ``;`` comment with the contacts.
    :param include_glue: emit A/AAAA glue for in-zone name servers.
    :param include_ds: emit DS records.

    Fixed: ``.get`` calls now default to an empty list.  Previously a
    missing key yielded ``None``, so ``",".join(None)`` or iterating
    ``None`` raised ``TypeError``.  (Assumes ``domain.get`` accepts a
    default argument like ``dict.get`` — NOTE(review): confirm for the
    lglass object type used by callers.)
    """
    if comments:
        # TODO sanitize zone-c, admin-c, tech-c and domain name
        yield "; {domain} ZONE-C {zonec} ADMIN-C {adminc} TECH-C {techc}".format(
            domain=domain["domain"],
            zonec=",".join(domain.get("zone-c", [])) or "(unknown)",
            adminc=",".join(domain.get("admin-c", [])) or "(unknown)",
            techc=",".join(domain.get("tech-c", [])) or "(unknown)")
    for nserver_record in domain.get("nserver", []):
        # Each nserver value is "hostname [glue-address ...]".
        server, *glues = nserver_record.split()
        yield ns_delegation(domain["domain"], server)
        # Glue is only valid for name servers inside the delegated zone.
        if glues and server.endswith("." + domain["domain"]) and include_glue:
            for glue in glues:
                yield glue_record(server, glue)
    if include_ds:
        for ds_rrdata in domain.get("ds-rdata", []):
            yield ds_delegation(domain["domain"], ds_rrdata)
def generate_delegations(domains, **kwargs):
    """Chain the delegation lines of every domain in *domains*;
    keyword arguments are forwarded to :func:`generate_delegation`."""
    for entry in domains:
        for line in generate_delegation(entry, **kwargs):
            yield line
def domain_split(dom):
    """Split a domain name into a list of labels.

    Lists are passed through unchanged; a single trailing dot is
    ignored.  Fixed: an empty string no longer raises ``IndexError`` on
    ``dom[-1]`` — it returns ``[]``, matching the behavior of ``"."``.
    """
    if isinstance(dom, list):
        return dom
    if dom.endswith('.'):
        dom = dom[:-1]
    if not dom:
        return []
    return dom.split('.')
def domain_join(dom):
    """Join a sequence of labels into a dotted name, dropping a trailing
    empty label (the artifact of a trailing dot).

    Fixed: an empty sequence no longer raises ``IndexError`` on
    ``dom[-1]`` — it joins to ``""``.
    """
    if dom and not dom[-1]:
        dom = dom[:-1]
    return '.'.join(dom)
def is_subdomain(sub, dom):
    """Return whether *sub* lies within (or equals) the domain *dom*.

    Both arguments may be dotted strings or label lists.
    """
    dom_labels = domain_split(dom) if isinstance(dom, str) else dom
    sub_labels = domain_split(sub) if isinstance(sub, str) else sub
    if not dom_labels:
        # Every name is below the DNS root.
        return True
    suffix_len = len(dom_labels)
    return sub_labels[-suffix_len:] == dom_labels
def domain_equal(dom, dom1):
    """Compare two domain names label-wise (trailing dots ignored)."""
    left = domain_split(dom)
    right = domain_split(dom1)
    return left == right
def is_reverse_domain(dom):
    """Return whether *dom* is an rDNS name (under ``in-addr.arpa`` or
    ``ip6.arpa``)."""
    if is_subdomain(dom, ["in-addr", "arpa"]):
        return True
    return is_subdomain(dom, ["ip6", "arpa"])
if __name__ == "__main__":
    # Command-line entry point: print delegation RRs for every domain
    # object in the database that falls under the given base zone.
    import argparse
    argparser = argparse.ArgumentParser(
        description="Generator for NIC domain zones")
    argparser.add_argument(
        "--database",
        "-D",
        help="Path to database",
        default=".")
    # Each feature below comes as an on/off flag pair sharing one dest,
    # so the last flag given on the command line wins.
    argparser.add_argument(
        "--comments",
        help="Enable comments",
        dest="include_comments",
        default=False,
        action="store_true")
    argparser.add_argument(
        "--no-comments",
        help="Disable comments",
        dest="include_comments",
        action="store_false")
    argparser.add_argument(
        "--base",
        help="Enable base zone information",
        action='store_true',
        dest='include_base',
        default=True)
    argparser.add_argument(
        "--no-base",
        help="Disable base zone information",
        action='store_false',
        dest='include_base')
    argparser.add_argument(
        "--glue",
        help="Include glue records in generated RRs",
        action="store_true",
        dest="include_glue",
        default=True)
    argparser.add_argument(
        "--no-glue",
        help="Do not include glue records in generated RRs",
        action="store_false",
        dest="include_glue")
    argparser.add_argument(
        "--dnssec",
        help="Include DS records in generated RRs",
        action="store_true",
        dest="include_ds",
        default=True)
    argparser.add_argument(
        "--no-dnssec",
        help="Do not include DS records in generated RRs",
        action="store_false",
        dest="include_ds")
    argparser.add_argument(
        "--dn42",
        help="Enable DN42 mode",
        action="store_true",
        default=False)
    argparser.add_argument(
        "--no-dn42",
        help="Disable DN42 mode",
        action="store_false",
        dest="dn42")
    argparser.add_argument("zone", help="Base domain name")
    args = argparser.parse_args()
    # DN42 mode selects a different database backend; both are used below
    # only through fetch() and lookup().
    if args.dn42:
        import lglass.dn42
        db = lglass.dn42.DN42Database(args.database)
    else:
        import lglass.nic
        db = lglass.nic.FileDatabase(args.database)
    gendel_kwargs = dict(
        comments=args.include_comments,
        include_glue=args.include_glue,
        include_ds=args.include_ds)
    # Fetch primary domain object
    if args.include_base:
        try:
            domain = db.fetch("domain", args.zone)
            print("\n".join(generate_delegation(domain, **gendel_kwargs)))
        except KeyError:
            # The base zone has no domain object of its own; that is fine.
            pass
    domains = set(db.lookup(classes=("domain",)))
    for _, domain_name in domains:
        # Skip names outside the base zone (the args.zone test lets an
        # empty zone string match everything) and the base zone itself,
        # which was already handled above.
        if (not domain_name.endswith("." + args.zone) and
                args.zone) or domain_name == args.zone:
            continue
        try:
            domain = db.fetch("domain", domain_name)
            print("\n".join(generate_delegation(domain, **gendel_kwargs)))
        except KeyError:
            # Listed but not fetchable: leave a marker comment in the zone.
            print("; {} NOT FOUND".format(domain_name))
        except Exception as r:
            # Re-raise with the offending domain name attached for debugging.
            raise Exception(domain_name, r)
| |
import weakref
from AppKit import NSView, NSSegmentStyleSmallSquare, NSSmallSquareBezelStyle
import vanilla
from defconAppKit.controls.glyphCellView import DefconAppKitGlyphCellNSView, GlyphInformationPopUpWindow, GlyphCellItem
from defconAppKit.controls.fontInfoView import GradientButtonBar
class DefconAppKitGlyphCollectionView(NSView):

    def viewDidMoveToWindow(self):
        # Re-apply the vanilla wrapper geometry for this view, then for
        # every subview that carries a vanilla wrapper, once the view is
        # placed in (or removed from) a window.
        own_wrapper = self.vanillaWrapper()
        if own_wrapper is not None:
            own_wrapper.setPosSize(own_wrapper.getPosSize())
        for child in self.subviews():
            if hasattr(child, "vanillaWrapper"):
                child_wrapper = child.vanillaWrapper()
                if child_wrapper is not None:
                    child_wrapper.setPosSize(child_wrapper.getPosSize())
class GlyphCollectionView(vanilla.Group):

    """
    This object presents the user with a view showing a collection of glyphs.
    The object contains a small control that allows the user to toggle between
    two different viewing modes: the default is to show a collection of cells
    and the other is to show a standard list view.

    The object follows the API of vanilla.List with some special constructor
    arguments and some special methods. When you set objects into the view,
    you always pass glyph objects. The object will then extract the relevant
    data to display. Similarly, when you receive drop callbacks as a result
    of a drag and drop operation, the "data" in the drop info will be a
    list of glyphs.

    Constructor Arguments:

    initialMode
    The initial mode for the view. Either "cell" or "list".

    listColumnDescriptions
    This sets up the columns in the list mode. These follow the same format
    of the column descriptions in vanilla.List. The only exception is that
    you need to provide an "attribute" key/value pair. This is the glyph
    attribute that the list will extract display values from. For example:

    dict(title="Glyph Width", key="glyphWidth", attribute="width")

    If no listColumnDescriptions is provided, the glyph name will be
    shown in a single column.

    listShowColumnTitles
    Same as showColumnTitles in vanilla.List

    showModePlacard
    Flag to indicate if the mode switch placard should be shown. This can be
    useful if you only want to show the list or the cell view.

    placardActionItems
    An optional list of items (defined as defined in vanilla.ActionButton) that
    will be shown via a vanilla.ActionButton. The default is None.

    showPlacard
    Flag to indicate if the placard should be shown. If showModePlacard is True
    or placardActionItems is not None, showPlacard will automatically be True.

    cellRepresentationName
    The representation name used to fetch the cell representations.

    glyphDetailWindowClass
    A window class to use when the user control-clicks a cell. This must be a
    subclass of vanilla.Window and it must have the following methods:
    window.set(glyph)
    window.setPosition((x, y))

    selectionCallback, doubleClickCallback, deleteCallback, editCallback
    Sames as the arguments in vanilla.List

    enableDelete
    Flag to indicate if the delete key has any effect on the contents of the view.

    various drop settings:
    These follow the same format as vanilla.List. The biggest exception is that
    you do not provide a "type" key/value pair. That will be set by the
    dragAndDropType argument.

    allowDrag:
    Unlike vanilla.List, you don't provide any data about dragging. All you do
    is tell the view if you want dragging allowed or not.

    dragAndDropType
    The drag and drop type for the view. Only change this if you know what you are doing.
    """

    # Hook points for subclasses to swap in custom NSView/list/item classes.
    nsViewClass = DefconAppKitGlyphCollectionView
    glyphCellViewClass = DefconAppKitGlyphCellNSView
    glyphListViewVanillaClass = vanilla.List
    glyphCellItemClass = GlyphCellItem

    def __init__(self, posSize, font=None, initialMode="cell", listColumnDescriptions=None, listShowColumnTitles=False,
            showPlacard=True, showModePlacard=True, placardActionItems=None,
            cellRepresentationName="defconAppKit.GlyphCell", glyphDetailWindowClass=GlyphInformationPopUpWindow,
            selectionCallback=None, doubleClickCallback=None, deleteCallback=None, editCallback=None,
            enableDelete=False,
            selfDropSettings=None, selfWindowDropSettings=None, selfDocumentDropSettings=None, selfApplicationDropSettings=None,
            otherApplicationDropSettings=None, allowDrag=False, dragAndDropType="DefconAppKitSelectedGlyphIndexesPboardType"):
        # Suppress selection callbacks while the view is being assembled.
        self._holdCallbacks = True
        super(GlyphCollectionView, self).__init__(posSize)
        # Either sub-control forces the placard strip on.
        if showModePlacard or placardActionItems is not None:
            showPlacard = True
        # Reserve room at the bottom of the group for the placard.
        bottom = 0
        if showPlacard:
            bottom = -19
        self._selectionCallback = selectionCallback
        self._doubleClickCallback = doubleClickCallback
        self._deleteCallback = deleteCallback
        self._dragAndDropType = dragAndDropType
        self._enableDelete = enableDelete
        # set up the list
        self._listEditChangingAttribute = None
        self._listEditChangingGlyph = None
        # NOTE: this local overrides the enableDelete argument for the list
        # below — the list's delete support follows the presence of a delete
        # callback; the stored self._enableDelete gates _removeSelection.
        enableDelete = deleteCallback is not None
        # prep for drag and drop
        # Copy the caller's settings dicts before mutating them below.
        if selfDropSettings is not None:
            selfDropSettings = dict(selfDropSettings)
        if selfWindowDropSettings is not None:
            selfWindowDropSettings = dict(selfWindowDropSettings)
        if selfDocumentDropSettings is not None:
            selfDocumentDropSettings = dict(selfDocumentDropSettings)
        if selfApplicationDropSettings is not None:
            selfApplicationDropSettings = dict(selfApplicationDropSettings)
        if otherApplicationDropSettings is not None:
            otherApplicationDropSettings = dict(otherApplicationDropSettings)
        dropSettings = [
            (selfDropSettings, self._selfDropCallback),
            (selfWindowDropSettings, self._selfWindowDropCallback),
            (selfDocumentDropSettings, self._selfDocumentDropCallback),
            (selfApplicationDropSettings, self._selfApplicationDropCallback),
            (otherApplicationDropSettings, self._otherApplicationDropCallback)
        ]
        # Install the shared pasteboard type and interpose the internal
        # callback in front of the caller's ("final") callback.
        for d, internalCallback in dropSettings:
            if d is None:
                continue
            d["type"] = dragAndDropType
            d["finalCallback"] = d["callback"]
            d["callback"] = internalCallback
        dragSettings = None
        if allowDrag:
            dragSettings = dict(type=dragAndDropType, callback=self._packListRowsForDrag)
        if listColumnDescriptions is None:
            listColumnDescriptions = [dict(title="Name", attribute="name")]
        # Build the cell NSView and point it back at this wrapper.
        self._glyphCellView = self.glyphCellViewClass.alloc().initWithFont_cellRepresentationName_detailWindowClass_(
            font, cellRepresentationName, glyphDetailWindowClass)
        self._glyphCellView.vanillaWrapper = weakref.ref(self)
        self._glyphCellView.setAllowsDrag_(allowDrag)
        dropTypes = []
        for d in (selfDropSettings, selfWindowDropSettings, selfDocumentDropSettings, selfApplicationDropSettings, otherApplicationDropSettings):
            if d is not None:
                dropTypes.append(d["type"])
        self._glyphCellView.registerForDraggedTypes_(dropTypes)
        # The list backs both modes; its array controller owns the items.
        self._list = self.glyphListViewVanillaClass(
            (0, 0, 0, bottom), None,
            dataSource=self._arrayController,
            columnDescriptions=listColumnDescriptions,
            editCallback=editCallback,
            selectionCallback=self._listSelectionCallback,
            doubleClickCallback=doubleClickCallback,
            showColumnTitles=listShowColumnTitles,
            enableTypingSensitivity=True,
            enableDelete=enableDelete,
            autohidesScrollers=True,
            selfDropSettings=selfDropSettings,
            selfWindowDropSettings=selfWindowDropSettings,
            selfDocumentDropSettings=selfDocumentDropSettings,
            selfApplicationDropSettings=selfApplicationDropSettings,
            otherApplicationDropSettings=otherApplicationDropSettings,
            dragSettings=dragSettings
        )
        # set up the placard
        if showPlacard:
            self._placard = vanilla.Group((0, -21, 0, 21))
            self._placard.base = GradientButtonBar((0, 0, 0, 0))
            # Track the space taken by prebuilt controls so the extension
            # group fills whatever is left between them.
            extensionLeft = 0
            extensionWidth = 0
            # mode
            if showModePlacard:
                extensionLeft += 42
                modeButton = vanilla.SegmentedButton(
                    (0, 0, 43, 0),
                    [
                        dict(imageNamed="defconAppKitPlacardCellImage", width=20),
                        dict(imageNamed="defconAppKitPlacardListImage", width=20)
                    ],
                    callback=self._placardSelection
                )
                modeButton.frameAdjustments = dict(regular=(0, 0, 0, 0))
                modeButton.getNSSegmentedButton().setSegmentStyle_(NSSegmentStyleSmallSquare)
                modeButton.set(0)
                self._placard.button = modeButton
            # action button
            if placardActionItems is not None:
                extensionWidth -= 35
                actionButton = vanilla.ActionButton(
                    (-35, 0, 45, 21),
                    placardActionItems,
                    sizeStyle="small",
                    bordered=False
                )
                actionButton.frameAdjustments = dict(regular=(0, 0, 0, 0))
                button = actionButton.getNSPopUpButton()
                button.setBezelStyle_(NSSmallSquareBezelStyle)
                self._placard.actionButton = actionButton
            # extension
            self._placard.extension = vanilla.Group((extensionLeft, 0, extensionWidth, 0))
        else:
            self._placard = None
        # tweak the scroll view
        # NOTE(review): uses the cell view class's gridColor so both modes
        # share one background color — confirm this attribute is a class-level
        # color on DefconAppKitGlyphCellNSView.
        self._list.getNSScrollView().setBackgroundColor_(DefconAppKitGlyphCellNSView.gridColor)
        # set the mode
        self._mode = None
        self.setMode(initialMode)
        self._holdCallbacks = False

    def getArrayController(self):
        # The array controller lives on the cell view and backs both modes.
        return self._glyphCellView.arrayController

    _arrayController = property(getArrayController)

    def _breakCycles(self):
        # Drop references that would otherwise keep the NSView alive.
        self._placard = None
        self._glyphCellView = None
        super(GlyphCollectionView, self)._breakCycles()

    def _placardSelection(self, sender):
        # Segment 0 is cell mode, segment 1 is list mode.
        mode = ["cell", "list"][sender.get()]
        self.setMode(mode)

    # ---------------
    # mode management
    # ---------------

    def setMode(self, mode):
        """
        Set the view mode. The options are "cell" and "list".
        """
        if mode == self._mode:
            return
        placard = self._placard
        if mode == "list":
            documentView = self._list.getNSTableView()
            if placard is not None:
                placard.button.set(1)
            # the cell view needs to be told to stop paying attention to the window
            self._glyphCellView.unsubscribeFromWindow()
        elif mode == "cell":
            documentView = self._glyphCellView
            if placard is not None and hasattr(placard, "button"):
                placard.button.set(0)
        # Swap the scroll view's document view; both modes share one scroll view.
        self._list.getNSScrollView().setDocumentView_(documentView)
        self._mode = mode
        if mode == "cell":
            # cell view
            self._glyphCellView.recalculateFrame()
        elif mode == "list":
            self._list.getNSTableView().sizeToFit()

    def getMode(self):
        """
        Get the current mode.
        """
        return self._mode

    # standard API

    def set(self, glyphs):
        # Adopt the font of the first glyph, then store only glyph names;
        # an empty sequence clears the font.
        if not glyphs:
            self.setFont(None)
        else:
            self.setFont(glyphs[0].font)
        self.setGlyphNames([glyph.name for glyph in glyphs])

    def get(self):
        # Resolve the stored names back into glyph objects, skipping names
        # no longer present in the font.
        font = self._glyphCellView.getFont()
        return [font[glyphName] for glyphName in self._glyphCellView._glyphNames if glyphName in font]

    def setGlyphNames(self, glyphNames):
        # Replace the array controller's contents wholesale, suppressing
        # selection callbacks while doing so.
        self._holdCallbacks = True
        self._glyphCellView.unSubscribeGlyphs()
        items = [self._wrapItem(glyphName) for glyphName in glyphNames]
        self.getArrayController().setContent_(None)
        self.getArrayController().addObjects_(items)
        self._holdCallbacks = False
        self._glyphCellView.recalculateFrame()

    def getGlyphNames(self):
        return self._glyphCellView._glyphNames

    def setFont(self, font):
        self._glyphCellView.setFont_(font)

    def _wrapItem(self, glyphName, glyph=None):
        # Wrap a glyph name (and optionally an out-of-font glyph object)
        # in the item class the array controller stores.
        item = self.glyphCellItemClass(glyphName, self._glyphCellView.getFont())
        if glyph is not None:
            item.setGlyphExternally_(glyph)
        return item

    def _removeSelection(self):
        # Delete-key handler; honors the enableDelete constructor argument.
        if not self._enableDelete:
            return
        selection = self.getSelection()
        # list
        # Delete from the highest index down so indexes stay valid.
        for index in reversed(sorted(selection)):
            del self[index]
        # call the callback
        if self._deleteCallback is not None:
            self._deleteCallback(self)

    def __contains__(self, glyph):
        return glyph.name in self.getGlyphNames()

    def __getitem__(self, index):
        return self._arrayController.arrangedObjects()[index].glyph()

    def __setitem__(self, index, glyph):
        # list
        # Unsubscribe the outgoing glyph before swapping in the new one.
        existing = self._arrayController.arrangedObjects()[index]
        self._glyphCellView.unSubscribeGlyph(existing.glyph())
        existing.setGlyphExternally_(glyph)

    def __delitem__(self, index):
        # list
        existing = self._arrayController.arrangedObjects()[index]
        self._glyphCellView.unSubscribeGlyph(existing.glyph())
        self._arrayController.removeObject_(existing)

    def __len__(self):
        return self.getArrayController().arrangedObjects().count()

    def append(self, glyph):
        item = self._wrapItem(glyph.name, glyph=glyph)
        self.getArrayController().addObject_(item)
        self.getArrayController().rearrangeObjects()

    def remove(self, glyph):
        index = self.index(glyph)
        del self[index]

    def index(self, glyph):
        # Glyphs are matched by name.
        return self.getGlyphNames().index(glyph.name)

    def insert(self, index, glyph):
        item = self._wrapItem(glyph.name, glyph=glyph)
        self.getArrayController().insertObject_atArrangedObjectIndex_(item, index)
        self.getArrayController().rearrangeObjects()

    def extend(self, glyphs):
        items = [self._wrapItem(glyph.name, glyph=glyph) for glyph in glyphs]
        self.getArrayController().addObjects_(items)
        self.getArrayController().rearrangeObjects()

    # -----------------
    # placard retrieval
    # -----------------

    def getPlacardGroup(self):
        """
        If a placard has been defined, this returns
        a vanilla.Group between the prebuilt controls.
        """
        if self._placard is None:
            return None
        return self._placard.extension

    # --------------------
    # selection management
    # --------------------

    def getSelection(self):
        """
        Get the selection in the view as a list of indexes.
        """
        selection = self._arrayController.selectionIndexes()
        # if nothing is selected return an empty list
        if not selection:
            return []
        return list(self._list._iterIndexSet(selection))

    def setSelection(self, selection):
        """
        Sets the selection in the view. The passed value
        should be a list of indexes.
        """
        self._list.setSelection(selection)
        if self.getMode() == "cell":
            # Keep the cell view's notion of selection in sync and repaint.
            self._glyphCellView.setLastFoundSelection(selection)
            self._glyphCellView.setNeedsDisplay_(True)

    def scrollToSelection(self):
        """
        Scroll the view so that the current selection is visible.
        """
        self._list.scrollToSelection()
        selection = self.getSelection()
        if selection:
            self._glyphCellView.scrollToCell_(selection[0])

    def _listSelectionCallback(self, sender):
        # Ignore callbacks fired while the view is mutating itself.
        if self._holdCallbacks:
            return
        if self.getMode() == "list":
            self._glyphCellView.setLastFoundSelection(self.getSelection())
        if self._selectionCallback is not None:
            self._selectionCallback(self)

    # -------------
    # drag and drop
    # -------------

    # The drop settings live on the backing list; expose them read-only here.

    def _get_selfDropSettings(self):
        return self._list._selfDropSettings

    _selfDropSettings = property(_get_selfDropSettings)

    def _get_selfWindowDropSettings(self):
        return self._list._selfWindowDropSettings

    _selfWindowDropSettings = property(_get_selfWindowDropSettings)

    def _get_selfDocumentDropSettings(self):
        return self._list._selfDocumentDropSettings

    _selfDocumentDropSettings = property(_get_selfDocumentDropSettings)

    def _get_selfApplicationDropSettings(self):
        return self._list._selfApplicationDropSettings

    _selfApplicationDropSettings = property(_get_selfApplicationDropSettings)

    def _get_otherApplicationDropSettings(self):
        return self._list._otherApplicationDropSettings

    _otherApplicationDropSettings = property(_get_otherApplicationDropSettings)

    def _packListRowsForDrag(self, sender, indexes):
        # Dragged data is just the row indexes; _convertDropInfo resolves
        # them back into glyphs on the receiving side.
        return indexes

    def _convertDropInfo(self, dropInfo):
        # Replace the dragged index payload with the actual glyph objects.
        source = dropInfo["source"]
        indexes = [int(i) for i in dropInfo["data"]]
        glyphs = [source[i] for i in indexes]
        if isinstance(source, vanilla.List):
            glyphs = [item.glyph() for item in glyphs]
        dropInfo["data"] = glyphs
        return dropInfo

    # Each internal drop callback converts the payload and forwards to the
    # caller's original ("final") callback captured in __init__.

    def _selfDropCallback(self, sender, dropInfo):
        dropInfo = self._convertDropInfo(dropInfo)
        return self._list._selfDropSettings["finalCallback"](self, dropInfo)

    def _selfWindowDropCallback(self, sender, dropInfo):
        dropInfo = self._convertDropInfo(dropInfo)
        return self._list._selfWindowDropSettings["finalCallback"](self, dropInfo)

    def _selfDocumentDropCallback(self, sender, dropInfo):
        dropInfo = self._convertDropInfo(dropInfo)
        return self._list._selfDocumentDropSettings["finalCallback"](self, dropInfo)

    def _selfApplicationDropCallback(self, sender, dropInfo):
        dropInfo = self._convertDropInfo(dropInfo)
        return self._list._selfApplicationDropSettings["finalCallback"](self, dropInfo)

    def _otherApplicationDropCallback(self, sender, dropInfo):
        dropInfo = self._convertDropInfo(dropInfo)
        return self._list._otherApplicationDropSettings["finalCallback"](self, dropInfo)

    # ------------------
    # cell view behavior
    # ------------------

    def getGlyphCellView(self):
        """
        Get the cell NSView.
        """
        return self._glyphCellView

    def setCellSize(self, wh):
        """
        Set the size of the cells.
        """
        self._glyphCellView.setCellSize_(wh)

    def getCellSize(self):
        """
        Get the size of the cells.
        """
        return self._glyphCellView.getCellSize()

    def setCellRepresentationArguments(self, **kwargs):
        """
        Set the arguments that should be passed to the cell representation factory.
        """
        self._glyphCellView.setCellRepresentationArguments_(kwargs)

    def getCellRepresentationArguments(self):
        """
        Get the arguments passed to the cell representation factory.
        """
        return self._glyphCellView.getCellRepresentationArguments()

    def preloadGlyphCellImages(self):
        """
        Preload the images to be used in the cells. This is useful
        when lots of cells need to be displayed.
        """
        self._glyphCellView.preloadGlyphCellImages()
| |
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Extracts registration forms from the corresponding HTML files.
Used for extracting forms within HTML files. This script is used in
conjunction with the webforms_aggregator.py script, which aggregates web pages
with fillable forms (i.e registration forms).
The purpose of this script is to extract out all non-form elements that may be
causing parsing errors and timeout issues when running browser_tests.
This script extracts all forms from a HTML file.
If there are multiple forms per downloaded site, multiple files are created
for each form.
Used as a standalone script but assumes that it is run from the directory in
which it is checked into.
Usage: forms_extractor.py [options]
Options:
-l LOG_LEVEL, --log_level=LOG_LEVEL,
LOG_LEVEL: debug, info, warning or error [default: error]
-j, --js extracts javascript elements from web form.
-h, --help show this help message and exit
"""
import glob
import logging
from optparse import OptionParser
import os
import re
import sys
class FormsExtractor(object):
  """Extracts HTML files, leaving only registration forms from the HTML file."""
  # Glob pattern matching the downloaded pages to process.
  _HTML_FILES_PATTERN = r'*.html'
  # Input files are named |grabber-<site>.html|; stripped output files are
  # named |grabber-stripped-<site>[N].html|.
  _HTML_FILE_PREFIX = r'grabber-'
  _FORM_FILE_PREFIX = r'grabber-stripped-'

  _REGISTRATION_PAGES_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
                                         'heuristics', 'input')
  _EXTRACTED_FORMS_DIR = os.path.join(os.pardir, 'test', 'data', 'autofill',
                                      'heuristics', 'input')

  logger = logging.getLogger(__name__)
  # Class-level handler registry so constructing several FormsExtractor
  # objects does not attach duplicate stream handlers to the shared logger.
  log_handlers = {'StreamHandler': None}

  # This pattern is used for retrieving the form location comment located at the
  # top of each downloaded HTML file indicating where the form originated from.
  _RE_FORM_LOCATION_PATTERN = re.compile(
      ur"""
      <!--Form\s{1}Location:  # Starting of form location comment.
      .*?                     # Any characters (non-greedy).
      -->                     # Ending of the form comment.
      """, re.U | re.S | re.I | re.X)

  # This pattern is used for removing all script code.
  _RE_SCRIPT_PATTERN = re.compile(
      ur"""
      <script       # A new opening '<script' tag.
      \b            # The end of the word 'script'.
      .*?           # Any characters (non-greedy).
      >             # Ending of the (opening) tag: '>'.
      .*?           # Any characters (non-greedy) between the tags.
      </script\s*>  # The '</script>' closing tag.
      """, re.U | re.S | re.I | re.X)

  # This pattern is used for removing all href js code.
  _RE_HREF_JS_PATTERN = re.compile(
      ur"""
      \bhref             # The word href and its beginning.
      \s*=\s*            # The '=' with all whitespace before and after it.
      (?P<quote>[\'\"])  # A single or double quote which is captured.
      \s*javascript\s*:  # The word 'javascript:' with any whitespace possible.
      .*?                # Any characters (non-greedy) between the quotes.
      \1                 # The previously captured single or double quote.
      """, re.U | re.S | re.I | re.X)

  # Re-usable fragment matching a single inline JS event attribute; it is
  # spliced into the two compiled patterns below.
  _RE_EVENT_EXPR = (
      ur"""
      \b                 # The beginning of a new word.
      on\w+?             # All words starting with 'on' (non-greedy)
                         # example: |onmouseover|.
      \s*=\s*            # The '=' with all whitespace before and after it.
      (?P<quote>[\'\"])  # A captured single or double quote.
      .*?                # Any characters (non-greedy) between the quotes.
      \1                 # The previously captured single or double quote.
      """)

  # This pattern is used for removing code with js events, such as |onload|.
  # By adding the leading |ur'<[^<>]*?'| and the trailing |'ur'[^<>]*?>'| the
  # pattern matches to strings such as '<tr class="nav"
  # onmouseover="mOvr1(this);" onmouseout="mOut1(this);">'
  _RE_TAG_WITH_EVENTS_PATTERN = re.compile(
      ur"""
      <        # Matches character '<'.
      [^<>]*?  # Matches any characters except '<' and '>' (non-greedy).""" +
      _RE_EVENT_EXPR +
      ur"""
      [^<>]*?  # Matches any characters except '<' and '>' (non-greedy).
      >        # Matches character '>'.
      """, re.U | re.S | re.I | re.X)

  # Adds whitespace chars at the end of the matched event. Also match trailing
  # whitespaces for JS events. Do not match leading whitespace.
  # For example: |< /form>| is invalid HTML and does not exist but |</form >| is
  # considered valid HTML.
  _RE_EVENT_PATTERN = re.compile(
      _RE_EVENT_EXPR + ur'\s*', re.U | re.S | re.I | re.X)

  # This pattern is used for finding form elements.
  _RE_FORM_PATTERN = re.compile(
      ur"""
      <form       # A new opening '<form' tag.
      \b          # The end of the word 'form'.
      .*?         # Any characters (non-greedy).
      >           # Ending of the (opening) tag: '>'.
      .*?         # Any characters (non-greedy) between the tags.
      </form\s*>  # The '</form>' closing tag.
      """, re.U | re.S | re.I | re.X)

  def __init__(self, input_dir=_REGISTRATION_PAGES_DIR,
               output_dir=_EXTRACTED_FORMS_DIR, logging_level=None):
    """Creates a FormsExtractor object.

    Args:
      input_dir: the directory of HTML files.
      output_dir: the directory where the registration form files will be
                  saved.
      logging_level: verbosity level, default is None.

    Raises:
      IOError exception if input directory doesn't exist.
    """
    if logging_level:
      # Install a single shared console handler on first use.
      if not self.log_handlers['StreamHandler']:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        self.log_handlers['StreamHandler'] = console
        self.logger.addHandler(console)
      self.logger.setLevel(logging_level)
    else:
      # No verbosity requested: detach any previously installed handler.
      if self.log_handlers['StreamHandler']:
        self.logger.removeHandler(self.log_handlers['StreamHandler'])
        self.log_handlers['StreamHandler'] = None
    self._input_dir = input_dir
    self._output_dir = output_dir
    if not os.path.isdir(self._input_dir):
      error_msg = 'Directory "%s" doesn\'t exist.' % self._input_dir
      self.logger.error('Error: %s', error_msg)
      raise IOError(error_msg)
    if not os.path.isdir(output_dir):
      os.makedirs(output_dir)
    self._form_location_comment = ''

  def _SubstituteAllEvents(self, matchobj):
    """Remove all js events that are present as attributes within a tag.

    Args:
      matchobj: A regexp |re.MatchObject| containing text that has at least one
                event. Example: |<tr class="nav" onmouseover="mOvr1(this);"
                onmouseout="mOut1(this);">|.

    Returns:
      The text containing the tag with all the attributes except for the tags
      with events. Example: |<tr class="nav">|.
    """
    tag_with_all_attrs = matchobj.group(0)
    return self._RE_EVENT_PATTERN.sub('', tag_with_all_attrs)

  def Extract(self, strip_js_only):
    """Extracts and saves the extracted registration forms.

    Iterates through all the HTML files.

    Args:
      strip_js_only: If True, only Javascript is stripped from the HTML content.
                     Otherwise, all non-form elements are stripped.
    """
    pathname_pattern = os.path.join(self._input_dir, self._HTML_FILES_PATTERN)
    html_files = [f for f in glob.glob(pathname_pattern) if os.path.isfile(f)]
    for filename in html_files:
      self.logger.info('Stripping file "%s" ...', filename)
      with open(filename, 'U') as f:
        # Strip in three passes: <script> blocks first, then
        # href="javascript:..." attributes, then inline event handlers.
        html_content = self._RE_TAG_WITH_EVENTS_PATTERN.sub(
            self._SubstituteAllEvents,
            self._RE_HREF_JS_PATTERN.sub(
                '', self._RE_SCRIPT_PATTERN.sub('', f.read())))
        form_filename = os.path.split(filename)[1]  # Path dropped.
        form_filename = form_filename.replace(self._HTML_FILE_PREFIX, '', 1)
        (form_filename, extension) = os.path.splitext(form_filename)
        # The literal '%s' is a placeholder for a per-form number, filled in
        # below when several forms are extracted from one page.
        form_filename = (self._FORM_FILE_PREFIX + form_filename +
                         '%s' + extension)
        form_filename = os.path.join(self._output_dir, form_filename)
        if strip_js_only:
          form_filename = form_filename % ''
          try:
            with open(form_filename, 'w') as f:
              f.write(html_content)
          except IOError as e:
            self.logger.error('Error: %s', e)
            continue
        else:  # Remove all non form elements.
          match = self._RE_FORM_LOCATION_PATTERN.search(html_content)
          if match:
            form_location_comment = match.group() + os.linesep
          else:
            form_location_comment = ''
          forms_iterator = self._RE_FORM_PATTERN.finditer(html_content)
          for form_number, form_match in enumerate(forms_iterator, start=1):
            form_content = form_match.group()
            numbered_form_filename = form_filename % form_number
            try:
              with open(numbered_form_filename, 'w') as f:
                f.write(form_location_comment)
                f.write(form_content)
            except IOError as e:
              self.logger.error('Error: %s', e)
              continue
      self.logger.info('\tFile "%s" extracted SUCCESSFULLY!', filename)
def main():
parser = OptionParser()
parser.add_option(
'-l', '--log_level', metavar='LOG_LEVEL', default='error',
help='LOG_LEVEL: debug, info, warning or error [default: %default]')
parser.add_option(
'-j', '--js', dest='js', action='store_true', default=False,
help='Removes all javascript elements [default: %default]')
(options, args) = parser.parse_args()
options.log_level = options.log_level.upper()
if options.log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR']:
print 'Wrong log_level argument.'
parser.print_help()
return 1
options.log_level = getattr(logging, options.log_level)
extractor = FormsExtractor(logging_level=options.log_level)
extractor.Extract(options.js)
return 0
# Standalone-script entry point; the process exit status mirrors main().
if __name__ == '__main__':
  sys.exit(main())
| |
"""
Developed by niphlod@gmail.com
"""
import redis
from redis.exceptions import ConnectionError
from gluon import current
from gluon.storage import Storage
import cPickle as pickle
import time
import re
import logging
import thread
logger = logging.getLogger("web2py.session.redis")
locker = thread.allocate_lock()
def RedisSession(*args, **vars):
    """
    Factory returning one RedisClient singleton per web2py application.

    Usage example: put in models
    from gluon.contrib.redis_session import RedisSession
    sessiondb = RedisSession('localhost:6379',db=0, session_expiry=False, password=None)
    session.connect(request, response, db = sessiondb)
    Simple slip-in storage for session
    """
    # Serialize instance creation so concurrent requests share one client.
    with locker:
        instance_name = 'redis_instance_' + current.request.application
        if not hasattr(RedisSession, instance_name):
            setattr(RedisSession, instance_name, RedisClient(*args, **vars))
        return getattr(RedisSession, instance_name)
class RedisClient(object):
    """DAL-compatible facade over a redis connection for session storage."""

    # Shared table-metadata cache expected by the DAL-style interface.
    meta_storage = {}
    MAX_RETRIES = 5
    RETRIES = 0
    # Server-side Lua script for atomic lock release; registered in
    # __init__ when with_lock=True and used by release_lock() below.
    _release_script = None

    def __init__(self, server='localhost:6379', db=None, debug=False,
                 session_expiry=False, with_lock=False, password=None):
        """session_expiry can be an integer, in seconds, to set the default expiration
        of sessions. The corresponding record will be deleted from the redis instance,
        and there's virtually no need to run sessions2trash.py
        """
        self.server = server
        self.password = password
        self.db = db or 0
        # Accept "host" or "host:port"; default port is 6379.
        host, port = (self.server.split(':') + ['6379'])[:2]
        port = int(port)
        self.debug = debug
        if current and current.request:
            self.app = current.request.application
        else:
            self.app = ''
        self.r_server = redis.Redis(host=host, port=port, db=self.db, password=self.password)
        if with_lock:
            RedisClient._release_script = \
                self.r_server.register_script(_LUA_RELEASE_LOCK)
        self.tablename = None
        self.session_expiry = session_expiry
        self.with_lock = with_lock

    def get(self, what, default):
        # DAL-compatibility shim: always hands back the single mock table
        # (the arguments are ignored).
        return self.tablename

    def Field(self, fieldname, type='string', length=None, default=None,
              required=False, requires=None):
        # DAL-compatibility no-op: fields are not modelled for redis sessions.
        return None

    def define_table(self, tablename, *fields, **args):
        # Lazily create the MockTable standing in for the session table.
        if not self.tablename:
            self.tablename = MockTable(
                self, self.r_server, tablename, self.session_expiry,
                self.with_lock)
        return self.tablename

    def __getitem__(self, key):
        # db[tablename] -> the one mock table, whatever the key.
        return self.tablename

    def __call__(self, where=''):
        # db(query) -> the table's last-built MockQuery.
        q = self.tablename.query
        return q

    def commit(self):
        #this is only called by session2trash.py
        pass
class MockTable(object):
    """Stand-in for a DAL table, storing session rows as redis hashes."""

    def __init__(self, db, r_server, tablename, session_expiry, with_lock=False):
        self.db = db
        self.r_server = r_server
        self.tablename = tablename
        #set the namespace for sessions of this app
        self.keyprefix = 'w2p:sess:%s' % tablename.replace(
            'web2py_session_', '')
        #fast auto-increment id (needed for session handling)
        self.serial = "%s:serial" % self.keyprefix
        #index of all the session keys of this app
        self.id_idx = "%s:id_idx" % self.keyprefix
        #remember the session_expiry setting
        self.session_expiry = session_expiry
        self.with_lock = with_lock

    def __call__(self, record_id, unique_key=None):
        # Support DAL shortcut query: table(record_id)
        q = self.id  # This will call the __getattr__ below
        # returning a MockQuery
        # Instructs MockQuery, to behave as db(table.id == record_id)
        q.op = 'eq'
        q.value = record_id
        q.unique_key = unique_key
        row = q.select()
        # Empty Storage (not None) when the record is missing, mirroring DAL.
        return row[0] if row else Storage()

    def __getattr__(self, key):
        if key == 'id':
            #return a fake query. We need to query it just by id for normal operations
            self.query = MockQuery(field='id', db=self.r_server,
                                   prefix=self.keyprefix, session_expiry=self.session_expiry,
                                   with_lock=self.with_lock, unique_key=self.unique_key)
            return self.query
        elif key == '_db':
            #needed because of the calls in sessions2trash.py and globals.py
            return self.db
        # NOTE(review): any other missing attribute — including 'unique_key',
        # which __init__ never sets — falls through and resolves to None here;
        # the code above appears to rely on that. Verify before refactoring.

    def insert(self, **kwargs):
        #usually kwargs would be a Storage with several keys:
        #'locked', 'client_ip','created_datetime','modified_datetime'
        #'unique_key', 'session_data'
        #retrieve a new key
        newid = str(self.r_server.incr(self.serial))
        key = self.keyprefix + ':' + newid
        if self.with_lock:
            key_lock = key + ':lock'
            acquire_lock(self.r_server, key_lock, newid)
        with self.r_server.pipeline() as pipe:
            #add it to the index
            pipe.sadd(self.id_idx, key)
            #set a hash key with the Storage
            pipe.hmset(key, kwargs)
            if self.session_expiry:
                pipe.expire(key, self.session_expiry)
            pipe.execute()
        if self.with_lock:
            release_lock(self.r_server, key_lock, newid)
        return newid
class MockQuery(object):
    """a fake Query object that supports querying by id
    and listing all keys. No other operation is supported
    """

    def __init__(self, field=None, db=None, prefix=None, session_expiry=False,
                 with_lock=False, unique_key=None):
        self.field = field
        self.value = None
        self.db = db
        self.keyprefix = prefix
        self.op = None
        self.session_expiry = session_expiry
        self.with_lock = with_lock
        self.unique_key = unique_key

    def __eq__(self, value, op='eq'):
        # Query-builder overload: |table.id == x| records the comparison on
        # the query object instead of returning a boolean.
        self.value = value
        self.op = op

    def __gt__(self, value, op='ge'):
        # Query-builder overload for |table.id > x| (used only as "> 0",
        # meaning "list everything"); note the recorded op is 'ge'.
        self.value = value
        self.op = op

    def select(self):
        if self.op == 'eq' and self.field == 'id' and self.value:
            #means that someone wants to retrieve the key self.value
            key = self.keyprefix + ':' + str(self.value)
            if self.with_lock:
                acquire_lock(self.db, key + ':lock', self.value)
            rtn = self.db.hgetall(key)
            if rtn:
                if self.unique_key:
                    #make sure the id and unique_key are correct
                    if rtn['unique_key'] == self.unique_key:
                        rtn['update_record'] = self.update  # update record support
                    else:
                        rtn = None
            return [Storage(rtn)] if rtn else []
        elif self.op == 'ge' and self.field == 'id' and self.value == 0:
            #means that someone wants the complete list
            rtn = []
            id_idx = "%s:id_idx" % self.keyprefix
            #find all session keys of this app
            allkeys = self.db.smembers(id_idx)
            for sess in allkeys:
                val = self.db.hgetall(sess)
                if not val:
                    if self.session_expiry:
                        #clean up the idx, because the key expired
                        self.db.srem(id_idx, sess)
                    continue
                val = Storage(val)
                #add a delete_record method (necessary for sessions2trash.py)
                val.delete_record = RecordDeleter(
                    self.db, sess, self.keyprefix)
                rtn.append(val)
            return rtn
        else:
            raise Exception("Operation not supported")

    def update(self, **kwargs):
        #means that the session has been found and needs an update
        if self.op == 'eq' and self.field == 'id' and self.value:
            key = self.keyprefix + ':' + str(self.value)
            with self.db.pipeline() as pipe:
                pipe.hmset(key, kwargs)
                if self.session_expiry:
                    pipe.expire(key, self.session_expiry)
                rtn = pipe.execute()[0]
            # Release only after the write is pipelined/executed.
            if self.with_lock:
                release_lock(self.db, key + ':lock', self.value)
            return rtn
class RecordDeleter(object):
    """Callable that deletes one session record (for sessions2trash.py).

    Deletion means removing the key from the app-wide index set and then
    deleting the session hash itself.
    """

    def __init__(self, db, key, keyprefix):
        self.db = db
        self.key = key
        self.keyprefix = keyprefix

    def __call__(self):
        index_key = "%s:id_idx" % self.keyprefix
        # Drop the session from the index first, then delete its data key.
        self.db.srem(index_key, self.key)
        self.db.delete(self.key)
def acquire_lock(conn, lockname, identifier, ltime=10):
    """Busy-wait until the redis lock `lockname` is acquired.

    Uses SET with nx=True (succeeds only when the key is absent) and an
    ex=ltime expiry so a crashed holder cannot wedge the lock forever.
    Returns the identifier once the lock is held.
    """
    while not conn.set(lockname, identifier, ex=ltime, nx=True):
        time.sleep(.01)
    return identifier
# Lua script executed atomically inside redis: delete the lock key only if it
# still holds our identifier, so we never release a lock that another worker
# has re-acquired after our expiry.
_LUA_RELEASE_LOCK = """
if redis.call("get", KEYS[1]) == ARGV[1]
then
    return redis.call("del", KEYS[1])
else
    return 0
end
"""


def release_lock(conn, lockname, identifier):
    # Runs the script registered in RedisClient.__init__ (with_lock=True);
    # returns 1 if the lock was deleted, 0 if it was no longer ours.
    return RedisClient._release_script(keys=[lockname], args=[identifier],
                                       client=conn)
| |
import unittest
try:
from unittest.mock import patch, MagicMock # Python 3.4 and later
getattr(MagicMock, 'assert_called_once') # Python 3.6 and later
except (ImportError, AttributeError):
from mock import patch, MagicMock
from ncclient import manager
from ncclient.devices.junos import JunosDeviceHandler
import logging
class TestManager(unittest.TestCase):
    """Tests for ncclient.manager: connection dispatch (SSH / ioproc /
    outbound / call-home), capability and session property passthrough,
    and RPC option plumbing.

    Note: @patch decorators apply bottom-up, so the innermost decorator
    corresponds to the first mock parameter after self.
    """

    @patch('ncclient.transport.SSHSession')
    def test_ssh(self, mock_ssh):
        m = MagicMock()
        mock_ssh.return_value = m
        conn = self._mock_manager()
        m.connect.assert_called_once_with(host='10.10.10.10',
                                          port=22,
                                          username='user',
                                          password='password',
                                          hostkey_verify=False, allow_agent=False,
                                          timeout=3)
        self.assertEqual(conn._session, m)
        self.assertEqual(conn._timeout, 10)

    @patch('ncclient.manager.connect_ssh')
    def test_connect_ssh(self, mock_ssh):
        manager.connect(host='host')
        mock_ssh.assert_called_once_with(host='host')

    @patch('ncclient.transport.SSHSession.load_known_hosts')
    @patch('ncclient.transport.SSHSession.connect')
    def test_connect_ssh1(self, mock_ssh, mock_load_known_hosts):
        manager.connect(host='host')
        mock_ssh.assert_called_once_with(host='host')
        mock_load_known_hosts.assert_called_once_with()

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_connect_ssh2(self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = manager.connect_ssh(host='10.10.10.10',
                                   port=22,
                                   username='user',
                                   password='password',
                                   timeout=3,
                                   hostkey_verify=False,
                                   allow_agent=False,
                                   keepalive=10)
        self.assertEqual(mock_trans.called, 1)

    @patch('ncclient.transport.SSHSession.connect')
    @patch('ncclient.transport.SSHSession.transport')
    @patch('ncclient.transport.SSHSession.close')
    def test_connect_exception(self, mock_close, mock_transport, mock_ssh):
        mock_ssh.side_effect = Exception
        try:
            manager.connect(host='host')
        except Exception:
            # NOTE(review): this Exception instance is created but never
            # raised or asserted — likely intended self.fail(...) or a bare
            # pass; verify the original intent before changing.
            Exception("connect occured exception")
        mock_ssh.assert_called_once_with(host='host')

    @patch('ncclient.transport.SSHSession.connect')
    @patch('ncclient.transport.SSHSession.take_notification')
    def test_manager_take_notification(self, mock_take_notification, mock_ssh):
        mock_take_notification.return_value = "test_take_notification"
        conn = self._mock_manager()
        ret = conn.take_notification()
        mock_take_notification.assert_called_once_with(True, None)
        self.assertEqual(ret, "test_take_notification")

    @patch('ncclient.transport.SSHSession.connect')
    @patch('ncclient.operations.retrieve.GetConfig._request')
    def test_manager_getattr(self, mock_request, mock_ssh):
        conn = self._mock_manager()
        conn.get_config("running")
        mock_ssh.assert_called_once_with(host='10.10.10.10',
                                         port=22,
                                         username='user',
                                         password='password',
                                         timeout=3,
                                         hostkey_verify=False,
                                         allow_agent=False)

    @patch('ncclient.transport.SSHSession.connect')
    @patch('ncclient.transport.Session.send')
    @patch('ncclient.operations.rpc.RPC._request')
    def test_manager_getattr2(self, mock_request, mock_send, mock_ssh):
        conn = self._mock_manager()
        # get_edit is resolved dynamically (vendor operation dispatch).
        conn.get_edit('config')
        mock_ssh.assert_called_once_with(host='10.10.10.10',
                                         port=22,
                                         username='user',
                                         password='password',
                                         timeout=3,
                                         hostkey_verify=False,
                                         allow_agent=False)

    @patch('ncclient.manager.connect_ssh')
    def test_connect_ssh_with_hostkey_ed25519(self, mock_ssh):
        hostkey = 'AAAAC3NzaC1lZDI1NTE5AAAAIIiHpGSf8fla6tCwLpwshvMGmUK+B/0v5CsRu+5v4uT7'
        manager.connect(host='host', hostkey=hostkey)
        mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)

    @patch('ncclient.manager.connect_ssh')
    def test_connect_ssh_with_hostkey_ecdsa(self, mock_ssh):
        hostkey = 'AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFJV9xLkuntH3Ry0GmK4FjYlW+01Ik4j/gbW+i3yIx+YEkF0B3iM7kiyDPqvmOPuVGfW+gq5oQzzdvHKspNkw70='
        manager.connect(host='host', hostkey=hostkey)
        mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)

    @patch('ncclient.manager.connect_ssh')
    def test_connect_ssh_with_hostkey_rsa(self, mock_ssh):
        hostkey = 'AAAAB3NzaC1yc2EAAAADAQABAAABAQDfEAdDrz3l8+PF510ivzWyX/pjpn3Cp6UgjJOinXz82e1LTURZhKwm8blcP8aWe8Uri65Roe6Q/H1WMaR3jFJj4UW2EZY5N+M4esPhoP/APOnDu2XNKy9AK9yD/Bu64TYgkIPQ/6FHdotcQdYTAJ+ac+YfJMp5mhVPnRIh4rlF08a0/tDHzLJVMEoXzp5nfVHcA4W3+5RRhklbct10U0jxHmG8Db9XbKiEbhWs/UMy59UpJ+zr7zLUYPRntgqqkpCyyfeHFNK1P6m3FmyT06QekOioCFmY05y65dkjAwBlaO1RKj1X1lgCirRWu4vxYBo9ewIGPZtuzeyp7jnl7kGV'
        manager.connect(host='host', hostkey=hostkey)
        mock_ssh.assert_called_once_with(host='host', hostkey=hostkey)

    @patch('ncclient.manager.connect_ssh')
    def test_connect_outbound_ssh(self, mock_ssh):
        # host=None + sock_fd selects the outbound (call-home style) path.
        manager.connect(host=None, sock_fd=6)
        mock_ssh.assert_called_once_with(host=None, sock_fd=6)

    @patch('ncclient.manager.connect_ioproc')
    def test_connect_ioproc(self, mock_ssh):
        # device_params['local']=True routes to the ioproc transport.
        manager.connect(host='localhost', device_params={'name': 'junos',
                                                         'local': True})
        mock_ssh.assert_called_once_with(host='localhost',
                                         device_params={'local': True, 'name': 'junos'})

    @patch('paramiko.proxy.ProxyCommand')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_connect_with_ssh_config(self, mock_session, mock_hex, mock_trans, mock_proxy):
        log = logging.getLogger('TestManager.test_connect_with_ssh_config')
        ssh_config_path = 'test/unit/ssh_config'
        conn = manager.connect(host='fake_host',
                               port=830,
                               username='user',
                               password='password',
                               hostkey_verify=False,
                               allow_agent=False,
                               ssh_config=ssh_config_path)
        log.debug(mock_proxy.call_args[0][0])
        self.assertEqual(mock_proxy.called, 1)
        # The ProxyCommand must come from the ssh_config's ProxyJump/Command.
        mock_proxy.assert_called_with('ssh -W 10.0.0.1:830 jumphost.domain.com')

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_ssh2(self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(mock_trans.called, 1)
        self.assertEqual(conn._timeout, 10)
        self.assertEqual(conn._device_handler.device_params, {'name': 'junos'})
        self.assertEqual(
            conn._device_handler.__class__.__name__,
            "JunosDeviceHandler")

    @patch('ncclient.transport.ssh.Session._post_connect')
    @patch('ncclient.transport.third_party.junos.ioproc.IOProc.connect')
    def test_ioproc(self, mock_connect, mock_ioproc):
        conn = manager.connect(host='localhost',
                               port=22,
                               username='user',
                               password='password',
                               timeout=3,
                               hostkey_verify=False,
                               device_params={'local': True, 'name': 'junos'},
                               manager_params={'timeout': 10})
        self.assertEqual(mock_connect.called, 1)
        self.assertEqual(conn._timeout, 10)
        self.assertEqual(conn._device_handler.device_params, {'local': True, 'name': 'junos'})
        self.assertEqual(
            conn._device_handler.__class__.__name__,
            "JunosDeviceHandler")

    def test_make_device_handler(self):
        device_handler = manager.make_device_handler({'name': 'junos'})
        self.assertEqual(
            device_handler.__class__.__name__,
            "JunosDeviceHandler")

    def test_make_device_handler_provided_handler(self):
        # A handler class can be injected directly via 'handler'.
        device_handler = manager.make_device_handler(
            {'handler': JunosDeviceHandler})
        self.assertEqual(
            device_handler.__class__.__name__,
            "JunosDeviceHandler")

    @patch('ncclient.operations.LockContext')
    def test_manager_locked(self, mock_lock):
        conn = manager.Manager(None, None, timeout=20)
        conn.locked(None)
        mock_lock.assert_called_once_with(None, None, None)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_client_capability(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(
            conn.client_capabilities,
            conn._session.client_capabilities)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_custom_client_capability(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        custom_capabilities = [
            'urn:custom:capability:1.0',
            'urn:custom:capability:2.0'
        ]
        conn = self._mock_manager(nc_params={'capabilities': custom_capabilities})
        self.assertEqual(
            conn.client_capabilities,
            conn._session.client_capabilities)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_server_capability(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(
            conn.server_capabilities,
            conn._session.server_capabilities)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_channel_id(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(conn.channel_id, conn._session._channel_id)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_channel_name(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(conn.channel_name, conn._session._channel_name)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_channel_session_id(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(conn.session_id, conn._session.id)

    @patch('socket.socket')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_manager_connected(
            self, mock_session, mock_hex, mock_trans, mock_socket):
        conn = self._mock_manager()
        self.assertEqual(conn.connected, True)

    @patch('ncclient.manager.Manager.HUGE_TREE_DEFAULT')
    @patch('ncclient.transport.SSHSession')
    @patch('ncclient.operations.rpc.RPC')
    def test_manager_huge_node(self, mock_rpc, mock_session, default_value):
        # Set default value to True only in this test through the default_value mock
        # NOTE(review): this assignment only rebinds the local name; it does
        # not configure the HUGE_TREE_DEFAULT mock (the patch itself already
        # replaces the attribute with a truthy MagicMock). Verify intent.
        default_value = True
        # true should propagate all the way to the RPC
        conn = self._mock_manager()
        self.assertTrue(conn.huge_tree)
        conn.execute(mock_rpc)
        mock_rpc.assert_called_once()
        self.assertTrue(mock_rpc.call_args[1]['huge_tree'])
        # false should propagate all the way to the RPC
        conn.huge_tree = False
        self.assertFalse(conn.huge_tree)
        mock_rpc.reset_mock()
        conn.execute(mock_rpc)
        mock_rpc.assert_called_once()
        self.assertFalse(mock_rpc.call_args[1]['huge_tree'])

    def _mock_manager(self, nc_params={}):
        # Helper building the standard test connection. The mutable default
        # {} is safe here because nc_params is never mutated.
        conn = manager.connect(host='10.10.10.10',
                               port=22,
                               username='user',
                               password='password',
                               timeout=3,
                               hostkey_verify=False, allow_agent=False,
                               device_params={'name': 'junos'},
                               manager_params={'timeout': 10},
                               nc_params=nc_params)
        return conn

    @patch('socket.fromfd')
    @patch('paramiko.Transport')
    @patch('ncclient.transport.ssh.hexlify')
    @patch('ncclient.transport.ssh.Session._post_connect')
    def test_outbound_manager_connected(
            self, mock_session, mock_hex, mock_trans, mock_fromfd):
        conn = self._mock_outbound_manager()
        self.assertEqual(conn.connected, True)

    def _mock_outbound_manager(self):
        # Helper building a connection over an already-open socket fd.
        conn = manager.connect(host=None,
                               sock_fd=6,
                               username='user',
                               password='password',
                               device_params={'name': 'junos'},
                               hostkey_verify=False, allow_agent=False)
        return conn

    @patch('socket.socket')
    @patch('ncclient.manager.connect_ssh')
    def test_call_home(self, mock_ssh, mock_socket_open):
        mock_connected_socket = MagicMock()
        mock_server_socket = MagicMock()
        mock_socket_open.return_value = mock_server_socket
        mock_server_socket.accept.return_value = (mock_connected_socket,
                                                  'remote.host')
        # call_home listens, accepts, then hands the accepted socket to SSH.
        with manager.call_home(host='0.0.0.0', port=1234) as chm:
            mock_ssh.assert_called_once_with(host='0.0.0.0',
                                            port=1234,
                                            sock=mock_connected_socket)
if __name__ == "__main__":
    # Run the TestManager suite verbosely when invoked as a script.
    test_suite = unittest.TestLoader().loadTestsFromTestCase(TestManager)
    unittest.TextTestRunner(verbosity=2).run(test_suite)
| |
"""
Display upcoming Google Calendar events.
This module will display information about upcoming Google Calendar events
in one of two formats which can be toggled with a button press. The event
URL may also be opened in a web browser with a button press.
Some event details can be retrieved from the Google Calendar API documentation.
https://developers.google.com/calendar/v3/reference/events
Configuration parameters:
auth_token: The path to where the access/refresh token will be saved
after successful credential authorization.
(default '~/.config/py3status/google_calendar.auth_token')
blacklist_events: Event names in this list will not be shown in the module
(case insensitive).
(default [])
browser_invocation: Command to run to open browser. Curly braces stands for URL opened.
(default "xdg-open {}")
button_open: Opens the event URL in the default web browser.
(default 3)
button_refresh: Refreshes the module and updates the list of events.
(default 2)
button_toggle: Toggles a boolean to hide/show the data for each event.
(default 1)
cache_timeout: How often the module is refreshed in seconds
(default 60)
client_secret: the path to your client_secret file which
contains your OAuth 2.0 credentials.
(default '~/.config/py3status/google_calendar.client_secret')
events_within_hours: Select events within the next given hours.
(default 12)
force_lowercase: Sets whether to force all event output to lower case.
(default False)
format: The format for module output.
(default '{events}|\\?color=event \u2687')
format_date: The format for date related format placeholders.
May be any Python strftime directives for dates.
(default '%a %d-%m')
format_event: The format for each event. The information can be toggled
with 'button_toggle' based on the value of 'is_toggled'.
*(default '[\\?color=event {summary}][\\?if=is_toggled ({start_time}'
' - {end_time}, {start_date})|[ ({location})][ {format_timer}]]')*
format_notification: The format for event warning notifications.
(default '{summary} {start_time} - {end_time}')
format_separator: The string used to separate individual events.
(default ' \\| ')
format_time: The format for time-related placeholders except `{format_timer}`.
May use any Python strftime directives for times.
(default '%I:%M %p')
format_timer: The format used for the {format_timer} placeholder to display
time until an event starts or time until an event in progress is over.
*(default '\\?color=time ([\\?if=days {days}d ][\\?if=hours {hours}h ]'
'[\\?if=minutes {minutes}m])[\\?if=is_current left]')*
ignore_all_day_events: Sets whether to display all day events or not.
(default False)
num_events: The maximum number of events to display.
(default 3)
preferred_event_link: link to open in the browser.
accepted values :
hangoutLink (open the VC room associated with the event),
htmlLink (open the event's details in Google Calendar)
falls back to htmlLink if the preferred_event_link does not exist in the event.
(default "htmlLink")
response: Only display events for which the response status is
on the list.
Available values in the Google Calendar API's documentation,
look for the attendees[].responseStatus.
(default ['accepted'])
thresholds: Thresholds for events. The first entry is the color for event 1,
the second for event 2, and so on.
(default [])
time_to_max: Threshold (in minutes) for when to display the `{format_timer}`
string; e.g. if time_to_max is 60, `{format_timer}` will only be
displayed for events starting in 60 minutes or less.
(default 180)
warn_threshold: The number of minutes until an event starts before a
warning is displayed to notify the user; e.g. if warn_threshold is 30
and an event is starting in 30 minutes or less, a notification will be
displayed. disabled by default.
(default 0)
warn_timeout: The number of seconds before a warning should be issued again.
(default 300)
Control placeholders:
{is_toggled} a boolean toggled by button_toggle
Format placeholders:
{events} All the events to display.
format_event and format_notification placeholders:
{description} The description for the calendar event.
{end_date} The end date for the event.
{end_time} The end time for the event.
{location} The location for the event.
{start_date} The start date for the event.
{start_time} The start time for the event.
{summary} The summary (i.e. title) for the event.
{format_timer} The time until the event starts (or until it is over
if already in progress).
format_timer placeholders:
{days} The number of days until the event.
{hours} The number of hours until the event.
{minutes} The number of minutes until the event.
Color options:
color_event: Color for a single event.
color_time: Color for the time associated with each event.
Requires:
1. Python library google-api-python-client.
2. Python library python-dateutil.
3. OAuth 2.0 credentials for the Google Calendar api.
Follow Step 1 of the guide here to obtain your OAuth 2.0 credentials:
https://developers.google.com/google-apps/calendar/quickstart/python
Download the client_secret.json file which contains your client ID and
client secret. In your config file, set configuration parameter
client_secret to the path to your client_secret.json file.
The first time you run the module, a browser window will open asking you
to authorize access to your calendar. After authorization is complete,
an access/refresh token will be saved to the path configured in
auth_token, and i3status will be restarted. This restart will
occur only once after the first time you successfully authorize.
Examples:
```
# add color gradients for events and dates/times
google_calendar {
thresholds = {
'event': [(1, '#d0e6ff'), (2, '#bbdaff'), (3, '#99c7ff'),
(4, '#86bcff'), (5, '#62a9ff'), (6, '#8c8cff'), (7, '#7979ff')],
'time': [(1, '#ffcece'), (2, '#ffbfbf'), (3, '#ff9f9f'),
(4, '#ff7f7f'), (5, '#ff5f5f'), (6, '#ff3f3f'), (7, '#ff1f1f')]
}
}
```
@author Igor Grebenkov
@license BSD
SAMPLE OUTPUT
[
{'full_text': "Homer's Birthday (742 Evergreen Terrace) (1h 23m) | "},
{'full_text': "Doctor's Appointment | Lunch with John"},
]
"""
import httplib2
import datetime
import time
from pathlib import Path
try:
from googleapiclient import discovery
except ImportError:
from apiclient import discovery
from oauth2client import client
from oauth2client import clientsecrets
from oauth2client import tools
from oauth2client.file import Storage
from httplib2 import ServerNotFoundError
from dateutil import parser
from dateutil.tz import tzlocal
# OAuth scope: read-only access to the user's Google Calendar data.
SCOPES = "https://www.googleapis.com/auth/calendar.readonly"
# User agent reported to Google during the OAuth flow.
APPLICATION_NAME = "py3status google_calendar module"
class Py3status:
    """
    Display upcoming Google Calendar events.
    """

    # available configuration parameters
    auth_token = "~/.config/py3status/google_calendar.auth_token"
    blacklist_events = []
    browser_invocation = "xdg-open {}"
    button_open = 3
    button_refresh = 2
    button_toggle = 1
    cache_timeout = 60
    client_secret = "~/.config/py3status/google_calendar.client_secret"
    events_within_hours = 12
    force_lowercase = False
    format = "{events}|\\?color=event \u2687"
    format_date = "%a %d-%m"
    format_event = (
        r"[\?color=event {summary}][\?if=is_toggled ({start_time}"
        " - {end_time}, {start_date})|[ ({location})][ {format_timer}]]"
    )
    format_notification = "{summary} {start_time} - {end_time}"
    format_separator = r" \| "
    format_time = "%I:%M %p"
    format_timer = (
        r"\?color=time ([\?if=days {days}d ][\?if=hours {hours}h ]"
        r"[\?if=minutes {minutes}m])[\?if=is_current left]"
    )
    ignore_all_day_events = False
    num_events = 3
    preferred_event_link = "htmlLink"
    response = ["accepted"]
    thresholds = []
    time_to_max = 180
    warn_threshold = 0
    warn_timeout = 300

    def post_config_hook(self):
        # one toggle state per displayed event slot
        self.button_states = [False] * self.num_events
        self.events = None
        self.no_update = False

        self.client_secret = Path(self.client_secret).expanduser()
        self.auth_token = Path(self.auth_token).expanduser()

        self.credentials = self._get_credentials()
        self.is_authorized = False

    def _get_credentials(self):
        """
        Get valid user credentials from storage.

        If nothing has been stored, or if the stored credentials are invalid,
        the OAuth2 flow is completed to obtain the new credentials.

        Returns: Credentials, the obtained credential.
        """
        client_secret_path = self.client_secret.parent
        auth_token_path = self.auth_token.parent

        auth_token_path.mkdir(parents=True, exist_ok=True)
        client_secret_path.mkdir(parents=True, exist_ok=True)

        flags = tools.argparser.parse_args(args=[])
        store = Storage(self.auth_token)
        credentials = store.get()

        if not credentials or credentials.invalid:
            try:
                flow = client.flow_from_clientsecrets(self.client_secret, SCOPES)
                flow.user_agent = APPLICATION_NAME
                if flags:
                    credentials = tools.run_flow(flow, store, flags)
                else:  # Needed only for compatibility with Python 2.6
                    credentials = tools.run(flow, store)
            except clientsecrets.InvalidClientSecretsError:
                raise Exception("missing client_secret")
            # Have to restart the window manager after getting credentials to
            # prevent bad output. This only has to be done once, on the first
            # run of the module.
            # (This note was previously a bare triple-quoted string, i.e. an
            # evaluated-and-discarded expression statement; it is a comment now.)
            self.py3.command_run(f"{self.py3.get_wm_msg()} restart")

        return credentials

    def _authorize_credentials(self):
        """
        Fetch an access/refresh token by authorizing OAuth 2.0 credentials.

        Returns: True, if the authorization was successful.
                 False, if a ServerNotFoundError is thrown.
        """
        try:
            http = self.credentials.authorize(httplib2.Http())
            self.service = discovery.build("calendar", "v3", http=http)
            return True
        except ServerNotFoundError:
            return False

    def _get_events(self):
        """
        Fetch events from the calendar into a list.

        Returns: The list of events (at most num_events entries).
        """
        self.last_update = time.monotonic()
        time_min = datetime.datetime.utcnow()
        time_max = time_min + datetime.timedelta(hours=self.events_within_hours)
        events = []

        try:
            eventsResult = (
                self.service.events()
                .list(
                    calendarId="primary",
                    timeMax=time_max.isoformat() + "Z",  # 'Z' indicates UTC time
                    timeMin=time_min.isoformat() + "Z",  # 'Z' indicates UTC time
                    singleEvents=True,
                    orderBy="startTime",
                )
                .execute(num_retries=5)
            )
        except Exception:
            # on a transient API failure keep showing the previous events
            return self.events or events
        else:
            for event in eventsResult.get("items", []):
                # filter out events that we did not accept (default)
                # unless we organized them with no attendees
                i_organized = event.get("organizer", {}).get("self", False)
                has_attendees = event.get("attendees", [])
                for attendee in event.get("attendees", []):
                    if attendee.get("self") is True:
                        if attendee["responseStatus"] in self.response:
                            break
                else:
                    # we did not organize the event or we did not accept it
                    if not i_organized or has_attendees:
                        continue

                # strip and lower case output if needed
                for key in ["description", "location", "summary"]:
                    event[key] = event.get(key, "").strip()
                    if self.force_lowercase is True:
                        event[key] = event[key].lower()

                # ignore all day events if configured
                if event["start"].get("date") is not None:
                    if self.ignore_all_day_events:
                        continue

                # filter out blacklisted event names (case-insensitive)
                if event["summary"] is not None:
                    if event["summary"].lower() in (
                        e.lower() for e in self.blacklist_events
                    ):
                        continue

                events.append(event)

        return events[: self.num_events]

    def _check_warn_threshold(self, time_to, event_dict):
        """
        Check if the time until an event starts is less than or equal to the
        warn_threshold. If True, issue a warning with self.py3.notify_user.
        """
        if time_to["total_minutes"] <= self.warn_threshold:
            warn_message = self.py3.safe_format(self.format_notification, event_dict)
            self.py3.notify_user(warn_message, "warning", self.warn_timeout)

    def _gstr_to_date(self, date_str):
        """Return a datetime object from a calendar date string."""
        return parser.parse(date_str).replace(tzinfo=tzlocal())

    def _gstr_to_datetime(self, date_time_str):
        """Return a datetime object from a calendar date/time string."""
        return parser.parse(date_time_str)

    def _datetime_to_str(self, date_time, dt_format):
        """Return a strftime-formatted string from a datetime object."""
        return date_time.strftime(dt_format)

    def _delta_time(self, date_time):
        """
        Return in a dict the number of days/hours/minutes and total minutes
        until date_time.
        """
        now = datetime.datetime.now(tzlocal())
        diff = date_time - now

        days = int(diff.days)
        hours = int(diff.seconds / 3600)
        # the trailing +1 rounds up to the next whole minute
        minutes = int((diff.seconds / 60) - (hours * 60)) + 1
        total_minutes = int((diff.seconds / 60) + (days * 24 * 60)) + 1

        return {
            "days": days,
            "hours": hours,
            "minutes": minutes,
            "total_minutes": total_minutes,
        }

    def _format_timedelta(self, index, time_delta, is_current):
        """
        Format the dict time_delta containing days/hours/minutes until an
        event starts into a composite according to self.format_timer.

        Returns: A formatted composite (empty string when the event is
        further away than time_to_max minutes).
        """
        time_delta_formatted = ""

        if time_delta["total_minutes"] <= self.time_to_max:
            time_delta_formatted = self.py3.safe_format(
                self.format_timer,
                {
                    "days": time_delta["days"],
                    "hours": time_delta["hours"],
                    "minutes": time_delta["minutes"],
                    "is_current": is_current,
                },
            )

        return time_delta_formatted

    def _build_response(self):
        """
        Build the composite response to be output by the module by looping
        through all events and formatting the necessary strings.

        Returns: A composite containing the individual response for each event.
        """
        responses = []
        self.event_urls = []

        for index, event in enumerate(self.events):
            # pre-register threshold colors for this event slot
            self.py3.threshold_get_color(index + 1, "event")
            self.py3.threshold_get_color(index + 1, "time")

            event_dict = {}
            event_dict["summary"] = event.get("summary")
            event_dict["location"] = event.get("location")
            event_dict["description"] = event.get("description")
            self.event_urls.append(
                event.get(self.preferred_event_link, event.get("htmlLink"))
            )

            if event["start"].get("date") is not None:
                # all-day events carry a 'date' instead of a 'dateTime'
                start_dt = self._gstr_to_date(event["start"].get("date"))
                end_dt = self._gstr_to_date(event["end"].get("date"))
            else:
                start_dt = self._gstr_to_datetime(event["start"].get("dateTime"))
                end_dt = self._gstr_to_datetime(event["end"].get("dateTime"))

            # skip events that have already finished
            if end_dt < datetime.datetime.now(tzlocal()):
                continue

            event_dict["start_time"] = self._datetime_to_str(start_dt, self.format_time)
            event_dict["end_time"] = self._datetime_to_str(end_dt, self.format_time)
            event_dict["start_date"] = self._datetime_to_str(start_dt, self.format_date)
            event_dict["end_date"] = self._datetime_to_str(end_dt, self.format_date)

            time_delta = self._delta_time(start_dt)
            if time_delta["days"] < 0:
                # the event is already in progress; count down to its end
                time_delta = self._delta_time(end_dt)
                is_current = True
            else:
                is_current = False

            event_dict["format_timer"] = self._format_timedelta(
                index, time_delta, is_current
            )

            if self.warn_threshold > 0:
                self._check_warn_threshold(time_delta, event_dict)

            event_formatted = self.py3.safe_format(
                self.format_event,
                {
                    "is_toggled": self.button_states[index],
                    "summary": event_dict["summary"],
                    "location": event_dict["location"],
                    "description": event_dict["description"],
                    "start_time": event_dict["start_time"],
                    "end_time": event_dict["end_time"],
                    "start_date": event_dict["start_date"],
                    "end_date": event_dict["end_date"],
                    "format_timer": event_dict["format_timer"],
                },
            )
            self.py3.composite_update(event_formatted, {"index": index})
            responses.append(event_formatted)

            self.no_update = False

        format_separator = self.py3.safe_format(self.format_separator)
        self.py3.composite_update(format_separator, {"index": "sep"})
        responses = self.py3.composite_join(format_separator, responses)

        return {"events": responses}

    def google_calendar(self):
        """
        The method that outputs the response.

        First, we check credential authorization. If no authorization, we
        display an error message, and try authorizing again in 5 seconds.

        Otherwise, we fetch the events, build the response, and output
        the resulting composite.
        """
        composite = {}

        if not self.is_authorized:
            cached_until = 0
            self.is_authorized = self._authorize_credentials()
        else:
            if not self.no_update:
                self.events = self._get_events()
            composite = self._build_response()
            cached_until = self.cache_timeout

        return {
            "cached_until": self.py3.time_in(cached_until),
            "composite": self.py3.safe_format(self.format, composite),
        }

    def on_click(self, event):
        if self.is_authorized and self.events is not None:
            # If button_refresh is clicked, we allow the events to be updated
            # if the last event update occurred at least 1 second ago. This
            # prevents a bug that can crash py3status since refreshing the
            # module too fast results in incomplete event information being
            # fetched as _get_events() is called repeatedly.
            # Otherwise, we disable event updates.
            # (This note was previously a bare triple-quoted string, i.e. an
            # evaluated-and-discarded expression statement; it is a comment now.)
            self.no_update = True
            button = event["button"]
            button_index = event["index"]

            if button_index == "sep":
                self.py3.prevent_refresh()
            elif button == self.button_refresh:
                # wait before the next refresh
                if time.monotonic() - self.last_update > 1:
                    self.no_update = False
            elif button == self.button_toggle:
                self.button_states[button_index] = not self.button_states[button_index]
            elif button == self.button_open:
                if self.event_urls:
                    self.py3.command_run(
                        self.browser_invocation.format(self.event_urls[button_index])
                    )
                self.py3.prevent_refresh()
            else:
                self.py3.prevent_refresh()
# Manual test entry point: runs the module standalone with py3status's
# module test harness (not used when loaded by a running py3status).
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
| |
from decimal import Decimal
from django.contrib.auth.models import AnonymousUser
from django.test import TestCase, RequestFactory
import waffle
from waffle.models import Switch, Flag, Sample
from waffle.testutils import override_switch, override_flag, override_sample
class OverrideSwitchTests(TestCase):
    """Tests for ``override_switch`` as a context manager and as a decorator.

    The invariant under test: the override controls ``switch_is_active``
    while active, and the pre-existing database state (or absence of the
    Switch row) is restored afterwards — even when an exception escapes.
    """

    def test_switch_existed_and_was_active(self):
        Switch.objects.create(name='foo', active=True)

        with override_switch('foo', active=True):
            assert waffle.switch_is_active('foo')

        with override_switch('foo', active=False):
            assert not waffle.switch_is_active('foo')

        # make sure it didn't change 'active' value
        assert Switch.objects.get(name='foo').active

    def test_switch_existed_and_was_NOT_active(self):
        Switch.objects.create(name='foo', active=False)

        with override_switch('foo', active=True):
            assert waffle.switch_is_active('foo')

        with override_switch('foo', active=False):
            assert not waffle.switch_is_active('foo')

        # make sure it didn't change 'active' value
        assert not Switch.objects.get(name='foo').active

    def test_new_switch(self):
        # no Switch row exists beforehand...
        assert not Switch.objects.filter(name='foo').exists()

        with override_switch('foo', active=True):
            assert waffle.switch_is_active('foo')

        with override_switch('foo', active=False):
            assert not waffle.switch_is_active('foo')

        # ...and the temporary row created by the override is removed again
        assert not Switch.objects.filter(name='foo').exists()

    def test_as_decorator(self):
        assert not Switch.objects.filter(name='foo').exists()

        @override_switch('foo', active=True)
        def test_enabled():
            assert waffle.switch_is_active('foo')

        test_enabled()

        @override_switch('foo', active=False)
        def test_disabled():
            assert not waffle.switch_is_active('foo')

        test_disabled()

        assert not Switch.objects.filter(name='foo').exists()

    def test_restores_after_exception(self):
        # the original 'active' value must survive an exception raised
        # inside the context manager
        Switch.objects.create(name='foo', active=True)

        def inner():
            with override_switch('foo', active=False):
                raise RuntimeError("Trying to break")

        with self.assertRaises(RuntimeError):
            inner()

        assert Switch.objects.get(name='foo').active

    def test_restores_after_exception_in_decorator(self):
        # same restoration guarantee for the decorator form
        Switch.objects.create(name='foo', active=True)

        @override_switch('foo', active=False)
        def inner():
            raise RuntimeError("Trying to break")

        with self.assertRaises(RuntimeError):
            inner()

        assert Switch.objects.get(name='foo').active
def req():
    """Build a GET request to '/' issued by an anonymous user."""
    request = RequestFactory().get('/')
    request.user = AnonymousUser()
    return request
class OverrideFlagTests(TestCase):
    """Tests for ``override_flag``.

    A Flag's ``everyone`` field is tri-state (True / False / None); each
    test checks that the override wins while active and that the original
    ``everyone`` value (or the row's absence) is restored afterwards.
    """

    def test_flag_existed_and_was_active(self):
        Flag.objects.create(name='foo', everyone=True)

        with override_flag('foo', active=True):
            assert waffle.flag_is_active(req(), 'foo')

        with override_flag('foo', active=False):
            assert not waffle.flag_is_active(req(), 'foo')

        # original everyone=True must be restored
        assert Flag.objects.get(name='foo').everyone

    def test_flag_existed_and_was_inactive(self):
        Flag.objects.create(name='foo', everyone=False)

        with override_flag('foo', active=True):
            assert waffle.flag_is_active(req(), 'foo')

        with override_flag('foo', active=False):
            assert not waffle.flag_is_active(req(), 'foo')

        # original everyone=False must be restored
        assert Flag.objects.get(name='foo').everyone is False

    def test_flag_existed_and_was_null(self):
        Flag.objects.create(name='foo', everyone=None)

        with override_flag('foo', active=True):
            assert waffle.flag_is_active(req(), 'foo')

        with override_flag('foo', active=False):
            assert not waffle.flag_is_active(req(), 'foo')

        # original everyone=None must be restored
        assert Flag.objects.get(name='foo').everyone is None

    def test_flag_did_not_exist(self):
        assert not Flag.objects.filter(name='foo').exists()

        with override_flag('foo', active=True):
            assert waffle.flag_is_active(req(), 'foo')

        with override_flag('foo', active=False):
            assert not waffle.flag_is_active(req(), 'foo')

        # the temporary Flag row must be removed again
        assert not Flag.objects.filter(name='foo').exists()
class OverrideSampleTests(TestCase):
    """Tests for ``override_sample``.

    Each test checks that the override wins while active and that the
    Sample's original ``percent`` value (or the row's absence) is restored
    afterwards.
    """

    def test_sample_existed_and_was_100(self):
        Sample.objects.create(name='foo', percent='100.0')

        with override_sample('foo', active=True):
            assert waffle.sample_is_active('foo')

        with override_sample('foo', active=False):
            assert not waffle.sample_is_active('foo')

        # assertEqual: 'assertEquals' is a deprecated alias that was
        # removed in Python 3.12
        self.assertEqual(Decimal('100.0'),
                         Sample.objects.get(name='foo').percent)

    def test_sample_existed_and_was_0(self):
        Sample.objects.create(name='foo', percent='0.0')

        with override_sample('foo', active=True):
            assert waffle.sample_is_active('foo')

        with override_sample('foo', active=False):
            assert not waffle.sample_is_active('foo')

        self.assertEqual(Decimal('0.0'),
                         Sample.objects.get(name='foo').percent)

    def test_sample_existed_and_was_50(self):
        Sample.objects.create(name='foo', percent='50.0')

        with override_sample('foo', active=True):
            assert waffle.sample_is_active('foo')

        with override_sample('foo', active=False):
            assert not waffle.sample_is_active('foo')

        self.assertEqual(Decimal('50.0'),
                         Sample.objects.get(name='foo').percent)

    def test_sample_did_not_exist(self):
        assert not Sample.objects.filter(name='foo').exists()

        with override_sample('foo', active=True):
            assert waffle.sample_is_active('foo')

        with override_sample('foo', active=False):
            assert not waffle.sample_is_active('foo')

        # the temporary Sample row must be removed again
        assert not Sample.objects.filter(name='foo').exists()
@override_switch('foo', active=False)
class OverrideSwitchOnClassTests(TestCase):
    """Applying ``override_switch`` to a whole TestCase class.

    setUp() creates the switch as active=True, yet the test still sees the
    overridden False value — the class-level override covers undecorated
    test methods.
    """

    def setUp(self):
        # no pre-existing row; create one that contradicts the override
        assert not Switch.objects.filter(name='foo').exists()
        Switch.objects.create(name='foo', active=True)

    def test_undecorated_method_is_set_properly_for_switch(self):
        self.assertFalse(waffle.switch_is_active('foo'))
@override_flag('foo', active=False)
class OverrideFlagOnClassTests(TestCase):
    """Applying ``override_flag`` to a whole TestCase class.

    setUp() creates the flag with everyone=True, yet the test still sees
    the overridden False value — the class-level override covers
    undecorated test methods.
    """

    def setUp(self):
        # no pre-existing row; create one that contradicts the override
        assert not Flag.objects.filter(name='foo').exists()
        Flag.objects.create(name='foo', everyone=True)

    def test_undecorated_method_is_set_properly_for_flag(self):
        self.assertFalse(waffle.flag_is_active(req(), 'foo'))
@override_sample('foo', active=False)
class OverrideSampleOnClassTests(TestCase):
    """Applying ``override_sample`` to a whole TestCase class.

    setUp() creates the sample at 100 percent, yet the test still sees the
    overridden False value — the class-level override covers undecorated
    test methods.
    """

    def setUp(self):
        # no pre-existing row; create one that contradicts the override
        assert not Sample.objects.filter(name='foo').exists()
        Sample.objects.create(name='foo', percent='100.0')

    def test_undecorated_method_is_set_properly_for_sample(self):
        self.assertFalse(waffle.sample_is_active('foo'))
| |
#!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright 2014 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#==============================================================================
#Name: StartStopServices.py
#
#Purpose: Starts or stops ArcGIS Server services.
#
#==============================================================================
import sys, os, time, traceback
from datetime import datetime

# Overall exit status; flipped to False if anything below raises.
totalSuccess = True

# Add "Root folder"\SupportFiles to sys path in order to import
# modules in subfolder
sys.path.append(os.path.join(os.path.dirname(
    os.path.dirname(os.path.dirname(sys.argv[0]))), "SupportFiles"))

from AGSRestFunctions import getServiceList
from AGSRestFunctions import stopStartServices
from AGSRestFunctions import getServiceStatus

# Service types that can be started/stopped through the admin API.
validTypes = ["MapServer", "ImageServer", "GeometryServer", "GeocodeServer",
              "GPServer", "FeatureServer", "GlobeServer", "GeoDataServer"]

userServiceStr = None
serviceList = None
scriptName = os.path.basename(sys.argv[0])
# ---------------------------------------------------------------------
# Check arguments: print usage help and exit when too few are supplied.
# (Parenthesized single-argument prints behave identically on Python 2
# and are valid Python 3.)
# ---------------------------------------------------------------------
if len(sys.argv) < 7:
    print('\n' + scriptName + ' <Server_Name> <Server_Port> <User_Name> <Password> <Use_SSL: Yes|No> <Start|Stop> {{folder/}service.type,...| Service_List_File}')
    print('\nWhere:')
    print('\n\t<Server_Name> (required) server name.')
    print('\n\t<Server_Port> (required) server port; if not using server port enter #')
    print('\n\t<User_Name> (required) user with admin or publisher permission.')
    print('\n\t<Password> (required) user password.')
    print('\n\t<Use_SSL: Yes|No> (required) Flag indicating if ArcGIS Server requires HTTPS.')
    print('\n\t<Start|Stop> (required) action to perform.')
    print('\n\t{{folder/}service.type,...| Service_List_File} (optional) to Start|Stop specific services, specify')
    print('\t\tcomma delimited list of services or specify a path to a file containing {{folder/}service.type entries.')
    print('\t\tIf specifying file, each {{folder/}service.type entry must be on a separate line.')
    print('\n\t\tWhere:')
    print('\t\t\t{:<8}{}'.format('folder', '- (optional) is name of folder service resides'))
    print('\t\t\t{:<8}{}'.format('service', '- (required) is name of service'))
    # typo fix: "type of of service" -> "type of service"
    print('\t\t\t{:<8}{}'.format('type', '- (required) is the type of service; valid values are:'))
    print('\t\t\t\t' + str(validTypes))
    print('\n\t\t\tNOTE: To include spaces in this list, surround with double-quotes.')
    print('\n\t\tExamples:')
    print('\t\t\tMyServices.MapServer')
    print('\t\t\tUtilities/Geometry.GeometryServer')
    print('\t\t\tMyServices.MapServer,Utilities/Geometry.GeometryServer')
    print('\t\t\t"MyServices.MapServer, Utilities/Geometry.GeometryServer"\n')
    sys.exit(1)
# Positional arguments; the usage check above guarantees at least six.
serverName, serverPort, userName, passWord, useSSL, serviceAction = sys.argv[1:7]

# Optional seventh argument: comma-delimited service list or a list file.
if len(sys.argv) == 8:
    userServiceStr = sys.argv[7]

# Interpret the SSL flag leniently ('y', 'ye', 'yes' all mean yes);
# anything else means no.
useSSL = useSSL.strip().lower() in ['yes', 'ye', 'y']
# Perform some checks on the user specified service list
if userServiceStr is not None:
    serviceList = []

    # Read in the user specified services
    if os.path.exists(userServiceStr):
        # User has specified a path; make sure it's a file
        if not os.path.isfile(userServiceStr):
            print("Error: The specified Service_List_File " + userServiceStr + " is not a file.\n")
            sys.exit(1)
        # 'with' guarantees the file handle is closed even if reading fails
        with open(userServiceStr, 'r') as f:
            for service in f:
                serviceList.append(service.strip())
        if len(serviceList) == 0:
            # typo fix: "specfied" -> "specified"
            print("Error: The specified Service_List_File " + userServiceStr + " is empty.\n")
            sys.exit(1)
    else:
        serviceList = userServiceStr.replace(" ", ",").split(",")
        # if userServiceStr had more than one space between service values then there
        # will be 0-length string elements, so remove any elements with
        # 0-length strings.
        serviceList = [x for x in serviceList if len(x) > 0]

    # Make sure each service element has "." separator
    # ('!=' replaces the '<>' operator, which was removed in Python 3)
    # NOTE(review): this counts '.' characters in the list's repr, so a
    # service or folder name containing extra dots would trip this check.
    if len(serviceList) != str(serviceList).count("."):
        print("Error: There are missing '.' delimiters between service name and type.\n")
        sys.exit(1)

    # Make sure each service element has a valid service "type"
    notValidTypes = []
    for x in [x.split(".")[1].lower() for x in serviceList]:
        if x not in [y.lower() for y in validTypes]:
            notValidTypes.append(x)
    if len(notValidTypes) > 0:
        print("Error: You have specified invalid 'type' values: " + str(notValidTypes))
        print("Valid values are: " + str(validTypes) + "\n")
        sys.exit(1)
# '#' is the sentinel for "no port"
if serverPort.strip() == '#':
    serverPort = None

validActionList = ["stop", "start"]
isActionValid = False

# Check if user specified a valid action (case-insensitive)
for validAction in validActionList:
    if validAction.lower() == serviceAction.lower():
        isActionValid = True
        break

if not isActionValid:
    # bug fix: the closing quote around the action name was missing
    print("User specified action '" + serviceAction + "' is not valid.")
    print("Valid actions are:" + str(validActionList) + "\n")
    sys.exit(1)
try:
    startTime = datetime.now()

    # ---------------------------------------------------------------------
    # Get list of all services/or user specified list
    # ---------------------------------------------------------------------
    if not serviceList:
        serviceList = getServiceList(serverName, serverPort, userName, passWord, useSSL)

    # Remove hosted services from list since these can't be started/stopped
    print('\nRemoving "Hosted" services from service list; these services can not be started/stopped.')
    serviceList = [x for x in serviceList if x.find('Hosted/') == -1]

    if len(serviceList) == 0:
        print("\t*ERROR: No services to " + serviceAction.title() + ".")

    # ---------------------------------------------------------------------
    # Filter the service list and remove services that are already at the requested state
    # ---------------------------------------------------------------------
    if len(serviceList) > 0:
        actionStatusMap = {"STOP": "STOPPED", "START": "STARTED"}
        modServiceList = []

        print("\n{}".format("-" * 110))
        print("- Check status of specified services...\n")

        for service in serviceList:
            folder = None
            serviceNameAndType = service
            # normalize accidental double slashes before splitting
            service = service.replace("//", "/")
            if service.find("/") > 0:
                folder = service.split("/")[0]
                serviceNameAndType = service.split("/")[1]

            serviceStatus = getServiceStatus(serverName, serverPort, userName, passWord, folder, serviceNameAndType, useSSL)
            realTimeState = serviceStatus.get("realTimeState")
            if realTimeState:
                if realTimeState.upper() == actionStatusMap[serviceAction.upper()]:
                    print("{:.<70}already at requested state '{}'.".format(service, realTimeState))
                else:
                    print("{:.<70}will {} the service.".format(service, serviceAction.lower()))
                    modServiceList.append(service)
            else:
                # no state returned; show the raw status response
                print("{:.<70}{}".format(service, serviceStatus))

        # ---------------------------------------------------------------------
        # Start/Stop all services
        # ---------------------------------------------------------------------
        if len(modServiceList) > 0:
            print("\n{}".format("-" * 110))
            print("- Will attempt to " + serviceAction.lower() + " the specified services...\n")
            stopStartServices(serverName, serverPort, userName, passWord,
                              serviceAction.title(), modServiceList, useSSL)

# NOTE(review): intentionally a bare except — the finally block below always
# converts the outcome into an explicit exit code, so every failure (of any
# kind) must be recorded in totalSuccess first.
except:
    totalSuccess = False
    # Get the traceback object
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    # Concatenate information together concerning the error into a message string
    pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])

    # Print Python error messages for use in Python / Python Window
    print('')
    print("***** ERROR ENCOUNTERED *****")
    print(pymsg + "\n")

finally:
    endTime = datetime.now()
    print('')
    print("Done.")
    print('')
    print("Start time: " + str(startTime))
    print("End time: " + str(endTime))

    if totalSuccess:
        sys.exit(0)
    else:
        sys.exit(1)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a Salesforce Hook which allows you to connect to your Salesforce instance,
retrieve data from it, and write that data to a file for other uses.
.. note:: this hook also relies on the simple_salesforce package:
https://github.com/simple-salesforce/simple-salesforce
"""
import logging
import time
from typing import Iterable, List, Optional
import pandas as pd
from simple_salesforce import Salesforce, api
from airflow.hooks.base import BaseHook
log = logging.getLogger(__name__)
class SalesforceHook(BaseHook):
"""
Create new connection to Salesforce and allows you to pull data out of SFDC and save it to a file.
You can then use that file with other Airflow operators to move the data into another data source.
:param conn_id: the name of the connection that has the parameters we need to connect to Salesforce.
The connection should be type `http` and include a user's security token in the `Extras` field.
:type conn_id: str
.. note::
For the HTTP connection type, you can include a
JSON structure in the `Extras` field.
We need a user's security token to connect to Salesforce.
So we define it in the `Extras` field as `{"security_token":"YOUR_SECURITY_TOKEN"}`
For sandbox mode, add `{"domain":"test"}` in the `Extras` field
"""
    def __init__(self, conn_id: str) -> None:
        """Store the connection id; actual sign-in happens lazily in get_conn()."""
        super().__init__()
        self.conn_id = conn_id  # name of the Airflow connection to use
        self.conn = None  # cached Salesforce client, created on first get_conn()
    def get_conn(self) -> api.Salesforce:
        """Sign into Salesforce, only if we are not already signed in."""
        if not self.conn:
            connection = self.get_connection(self.conn_id)
            extras = connection.extra_dejson
            # 'security_token' is required and must come from the connection's
            # Extras JSON (KeyError if missing); 'domain' is optional — e.g.
            # "test" for sandbox instances.
            self.conn = Salesforce(
                username=connection.login,
                password=connection.password,
                security_token=extras['security_token'],
                instance_url=connection.host,
                domain=extras.get('domain'),
            )
        return self.conn
def make_query(
self, query: str, include_deleted: bool = False, query_params: Optional[dict] = None
) -> dict:
"""
Make a query to Salesforce.
:param query: The query to make to Salesforce.
:type query: str
:param include_deleted: True if the query should include deleted records.
:type include_deleted: bool
:param query_params: Additional optional arguments
:type query_params: dict
:return: The query result.
:rtype: dict
"""
conn = self.get_conn()
self.log.info("Querying for all objects")
query_params = query_params or {}
query_results = conn.query_all(query, include_deleted=include_deleted, **query_params)
self.log.info(
"Received results: Total size: %s; Done: %s", query_results['totalSize'], query_results['done']
)
return query_results
def describe_object(self, obj: str) -> dict:
"""
Get the description of an object from Salesforce.
This description is the object's schema and
some extra metadata that Salesforce stores for each object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the description of the Salesforce object.
:rtype: dict
"""
conn = self.get_conn()
return conn.__getattr__(obj).describe()
def get_available_fields(self, obj: str) -> List[str]:
"""
Get a list of all available fields for an object.
:param obj: The name of the Salesforce object that we are getting a description of.
:type obj: str
:return: the names of the fields.
:rtype: list(str)
"""
self.get_conn()
obj_description = self.describe_object(obj)
return [field['name'] for field in obj_description['fields']]
def get_object_from_salesforce(self, obj: str, fields: Iterable[str]) -> dict:
"""
Get all instances of the `object` from Salesforce.
For each model, only get the fields specified in fields.
All we really do underneath the hood is run:
SELECT <fields> FROM <obj>;
:param obj: The object name to get from Salesforce.
:type obj: str
:param fields: The fields to get from the object.
:type fields: iterable
:return: all instances of the object from Salesforce.
:rtype: dict
"""
query = f"SELECT {','.join(fields)} FROM {obj}"
self.log.info(
"Making query to Salesforce: %s",
query if len(query) < 30 else " ... ".join([query[:15], query[-15:]]),
)
return self.make_query(query)
@classmethod
def _to_timestamp(cls, column: pd.Series) -> pd.Series:
"""
Convert a column of a dataframe to UNIX timestamps if applicable
:param column: A Series object representing a column of a dataframe.
:type column: pandas.Series
:return: a new series that maintains the same index as the original
:rtype: pandas.Series
"""
# try and convert the column to datetimes
# the column MUST have a four digit year somewhere in the string
# there should be a better way to do this,
# but just letting pandas try and convert every column without a format
# caused it to convert floats as well
# For example, a column of integers
# between 0 and 10 are turned into timestamps
# if the column cannot be converted,
# just return the original column untouched
try:
column = pd.to_datetime(column)
except ValueError:
log.error("Could not convert field to timestamps: %s", column.name)
return column
# now convert the newly created datetimes into timestamps
# we have to be careful here
# because NaT cannot be converted to a timestamp
# so we have to return NaN
converted = []
for value in column:
try:
converted.append(value.timestamp())
except (ValueError, AttributeError):
converted.append(pd.np.NaN)
return pd.Series(converted, index=column.index)
def write_object_to_file(
    self,
    query_results: List[dict],
    filename: str,
    fmt: str = "csv",
    coerce_to_timestamp: bool = False,
    record_time_added: bool = False,
) -> pd.DataFrame:
    """
    Write query results to file.

    Acceptable formats are:
        - csv:
            comma-separated-values file. This is the default format.
        - json:
            JSON array. Each element in the array is a different row.
        - ndjson:
            JSON array but each element is new-line delimited instead of comma delimited like in `json`

    This requires a significant amount of cleanup.
    Pandas doesn't handle output to CSV and json in a uniform way.
    This is especially painful for datetime types.
    Pandas wants to write them as strings in CSV, but as millisecond Unix timestamps.

    By default, this function will try and leave all values as they are represented in Salesforce.
    You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
    This can be greatly beneficial as it will make all of your datetime fields look the same,
    and makes it easier to work with in other database environments

    :param query_results: the results from a SQL query
    :type query_results: list of dict
    :param filename: the name of the file where the data should be dumped to
    :type filename: str
    :param fmt: the format you want the output in. Default: 'csv'
    :type fmt: str
    :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
        False if you want them to be left in the same format as they were in Salesforce.
        Leaving the value as False will result in datetimes being strings. Default: False
    :type coerce_to_timestamp: bool
    :param record_time_added: True if you want to add a Unix timestamp field
        to the resulting data that marks when the data was fetched from Salesforce. Default: False
    :type record_time_added: bool
    :return: the dataframe that gets written to the file.
    :rtype: pandas.Dataframe
    :raises ValueError: if `fmt` is not one of 'csv', 'json' or 'ndjson'.
    """
    fmt = fmt.lower()
    if fmt not in ['csv', 'json', 'ndjson']:
        raise ValueError(f"Format value is not recognized: {fmt}")

    # Build the dataframe (handles the optional timestamp coercion and the
    # optional fetch-time column).
    df = self.object_to_df(
        query_results=query_results,
        coerce_to_timestamp=coerce_to_timestamp,
        record_time_added=record_time_added,
    )

    # write the CSV or JSON file depending on the option
    # NOTE:
    #   datetimes here are an issue.
    #   There is no good way to manage the difference
    #   for to_json, the options are an epoch or a ISO string
    #   but for to_csv, it will be a string output by datetime
    #   For JSON we decided to output the epoch timestamp in seconds
    #   (as is fairly standard for JavaScript)
    #   And for csv, we do a string
    if fmt == "csv":
        # there are also a ton of newline objects that mess up our ability to write to csv
        # we remove these newlines so that the output is a valid CSV format
        self.log.info("Cleaning data and writing to CSV")
        # object-dtype columns are the only ones that can hold strings
        possible_strings = df.columns[df.dtypes == "object"]
        df[possible_strings] = (
            df[possible_strings]
            .astype(str)
            .apply(lambda x: x.str.replace("\r\n", "").str.replace("\n", ""))
        )
        # write the dataframe
        df.to_csv(filename, index=False)
    elif fmt == "json":
        # second positional argument is `orient`; epoch seconds for datetimes
        df.to_json(filename, "records", date_unit="s")
    elif fmt == "ndjson":
        # newline-delimited records, one JSON object per line
        df.to_json(filename, "records", lines=True, date_unit="s")

    return df
def object_to_df(
    self, query_results: List[dict], coerce_to_timestamp: bool = False, record_time_added: bool = False
) -> pd.DataFrame:
    """
    Export query results to dataframe.

    By default, this function will try and leave all values as they are represented in Salesforce.
    You use the `coerce_to_timestamp` flag to force all datetimes to become Unix timestamps (UTC).
    This can be greatly beneficial as it will make all of your datetime fields look the same,
    and makes it easier to work with in other database environments

    :param query_results: the results from a SQL query
    :type query_results: list of dict
    :param coerce_to_timestamp: True if you want all datetime fields to be converted into Unix timestamps.
        False if you want them to be left in the same format as they were in Salesforce.
        Leaving the value as False will result in datetimes being strings. Default: False
    :type coerce_to_timestamp: bool
    :param record_time_added: True if you want to add a Unix timestamp field
        to the resulting data that marks when the data was fetched from Salesforce. Default: False
    :type record_time_added: bool
    :return: the dataframe.
    :rtype: pandas.Dataframe
    """
    # NOTE: from_records converts integer columns to floats whenever they
    # contain None/np.nan, because an integer column cannot hold missing
    # values — so timestamps should be written as FLOATS in the final schema.
    frame = pd.DataFrame.from_records(query_results, exclude=["attributes"])
    frame.columns = [name.lower() for name in frame.columns]

    # Convert columns holding datetime strings into real datetimes.  We use
    # the object's own schema and only consider DATE/DATETIME fields;
    # strings are too general and we risk unintentional conversion.
    if coerce_to_timestamp and frame.shape[0] > 0:
        # Each returned record carries the object name in its
        # "attributes" dictionary.
        object_name = query_results[0]['attributes']['type']
        self.log.info("Coercing timestamps for: %s", object_name)
        schema = self.describe_object(object_name)

        timestamp_cols = []
        for field in schema['fields']:
            lowered = field['name'].lower()
            if field['type'] in ["date", "datetime"] and lowered in frame.columns:
                timestamp_cols.append(lowered)
        frame[timestamp_cols] = frame[timestamp_cols].apply(self._to_timestamp)

    if record_time_added:
        fetched_time = time.time()
        frame["time_fetched_from_salesforce"] = fetched_time

    return frame
| |
#-*- coding: utf-8 -*-
"""
PyShop Pyramid configuration helpers.
"""
from pyramid.interfaces import IBeforeRender
from pyramid.url import static_path, route_path
from pyramid.httpexceptions import HTTPNotFound
from pyramid_jinja2 import renderer_factory
from pyramid_rpc.xmlrpc import XMLRPCRenderer
from pyshop.helpers import pypi
from pyshop.helpers.restxt import parse_rest
from pyshop.helpers.download import renderer_factory as dl_renderer_factory
def notfound(request):
    """Catch-all view: answer any unmatched request with a 404 response."""
    response = HTTPNotFound('Not found.')
    return response
def add_urlhelpers(event):
    """
    Add helpers to the template engine.

    Registers ``static_url``, ``route_url``, ``parse_rest`` and
    ``has_permission`` on the before-render event so templates can use them.
    """
    # Closures read event['request'] at call time, like the original lambdas.
    def _static_url(path):
        return static_path(path, event['request'])

    def _route_url(name, *args, **kwargs):
        return route_path(name, event['request'], *args, **kwargs)

    event['static_url'] = _static_url
    event['route_url'] = _route_url
    event['parse_rest'] = parse_rest
    event['has_permission'] = event['request'].has_permission
def includeme(config):
    """
    Pyramid includeme file for the :class:`pyramid.config.Configurator`.

    Registers renderers, routes, views, i18n, static assets and the XML-RPC
    endpoint for the whole application.  NOTE: registration order matters
    for route matching in Pyramid, so do not reorder the calls below.
    """
    settings = config.registry.settings

    # config.add_renderer('json', JSONP())

    # release file download
    config.add_renderer('repository', dl_renderer_factory)

    # Jinja configuration
    # We don't use jinja2 filename, .html instead
    config.add_renderer('.html', renderer_factory)

    # helpers
    config.add_subscriber(add_urlhelpers, IBeforeRender)

    # i18n
    config.add_translation_dirs('locale/')

    pypi_url = settings.get('pyshop.pypi.url', 'https://pypi.python.org/pypi')

    # PyPI url for XML RPC service consume
    pypi.set_proxy(pypi_url, settings.get('pyshop.pypi.transport_proxy'))

    # Javascript + Media
    config.add_static_view('static', 'static', cache_max_age=3600)
    # config.add_static_view('repository', 'repository', cache_max_age=3600)

    # Authentication views
    config.add_route(u'login', u'/login',)
    config.add_view(u'pyshop.views.credentials.Login',
                    route_name=u'login',
                    renderer=u'shared/login.html')

    config.add_route(u'logout', u'/logout')
    config.add_view(u'pyshop.views.credentials.Logout',
                    route_name=u'logout',
                    permission=u'user_view')

    # Home page
    config.add_route(u'index', u'/')
    config.add_view(u'pyshop.views.Index',
                    route_name=u'index',
                    permission=u'user_view')

    # Archive downloads
    config.add_route(u'show_external_release_file',
                     u'/repository/ext/{release_id}/{filename:.*}',
                     request_method=u'GET')
    config.add_view(u'pyshop.views.repository.show_external_release_file',
                    route_name=u'show_external_release_file',
                    renderer=u'repository',
                    permission=u'download_releasefile')

    config.add_route(u'show_release_file',
                     u'/repository/{file_id}/{filename:.*}',
                     request_method=u'GET')
    config.add_view(u'pyshop.views.repository.show_release_file',
                    route_name=u'show_release_file',
                    renderer=u'repository',
                    permission=u'download_releasefile')

    # Simple views used by pip
    config.add_route(u'list_simple', u'/simple/', request_method=u'GET')
    config.add_view(u'pyshop.views.simple.List',
                    route_name=u'list_simple',
                    renderer=u'pyshop/simple/list.html',
                    permission=u'download_releasefile')

    config.add_route(u'show_simple', u'/simple/{package_name}/')
    config.add_view(u'pyshop.views.simple.Show',
                    route_name=u'show_simple',
                    renderer=u'pyshop/simple/show.html',
                    permission=u'download_releasefile')

    # add_notfound_view(append_slash=True) redirects /simple/foo to
    # /simple/foo/ instead of 404ing; only available from Pyramid 1.4 on.
    try:
        config.add_notfound_view(notfound, append_slash=True)
    except AttributeError:
        # Pyramid < 1.4
        pass

    # Used by setup.py sdist upload
    config.add_route(u'upload_releasefile', u'/simple/',
                     request_method=u'POST')
    config.add_view(u'pyshop.views.simple.UploadReleaseFile',
                    renderer=u'pyshop/simple/create.html',
                    route_name=u'upload_releasefile',
                    permission=u'upload_releasefile')

    # Web Services
    config.add_renderer('pyshopxmlrpc', XMLRPCRenderer(allow_none=True))
    config.add_xmlrpc_endpoint(
        'api', '/pypi/xmlrpc', default_renderer='pyshopxmlrpc')
    config.scan('pyshop.views.xmlrpc')

    # Backoffice Views
    config.add_route(u'list_package', u'/pyshop/package')
    config.add_view(u'pyshop.views.package.List',
                    route_name='list_package',
                    renderer=u'pyshop/package/list.html',
                    permission=u'user_view')

    config.add_route(u'list_package_page', u'/pyshop/package/p/{page_no}')
    config.add_view(u'pyshop.views.package.List',
                    route_name='list_package_page',
                    renderer=u'pyshop/package/list.html',
                    permission=u'user_view')

    config.add_route(u'show_package',
                     u'/pyshop/package/{package_name}')

    config.add_route(u'show_package_version',
                     u'/pyshop/package/{package_name}/{release_version}')

    # Same view class serves both the latest and a specific release version.
    config.add_view(u'pyshop.views.package.Show',
                    route_name=u'show_package',
                    renderer=u'pyshop/package/show.html',
                    permission=u'user_view')

    config.add_view(u'pyshop.views.package.Show',
                    route_name=u'show_package_version',
                    renderer=u'pyshop/package/show.html',
                    permission=u'user_view')

    # Admin view
    config.add_route(u'list_account', u'/pyshop/account')
    config.add_view(u'pyshop.views.account.List',
                    route_name=u'list_account',
                    renderer=u'pyshop/account/list.html',
                    permission=u'admin_view')

    config.add_route(u'create_account', u'/pyshop/account/new')
    config.add_view(u'pyshop.views.account.Create',
                    route_name=u'create_account',
                    renderer=u'pyshop/account/create.html',
                    permission=u'admin_view')

    config.add_route(u'edit_account', u'/pyshop/account/{user_id}')
    config.add_view(u'pyshop.views.account.Edit',
                    route_name=u'edit_account',
                    renderer=u'pyshop/account/edit.html',
                    permission=u'admin_view')

    config.add_route(u'delete_account', u'/pyshop/delete/account/{user_id}')
    config.add_view(u'pyshop.views.account.Delete',
                    route_name=u'delete_account',
                    renderer=u'pyshop/account/delete.html',
                    permission=u'admin_view')

    config.add_route(u'purge_package', u'/pyshop/purge/package/{package_id}')
    config.add_view(u'pyshop.views.package.Purge',
                    route_name=u'purge_package',
                    renderer=u'pyshop/package/purge.html',
                    permission=u'admin_view')

    # Current user can update its own information
    config.add_route(u'edit_user', u'/pyshop/user')
    config.add_view(u'pyshop.views.user.Edit',
                    route_name=u'edit_user',
                    renderer=u'pyshop/user/edit.html',
                    permission=u'user_view')

    config.add_route(u'change_password', u'/pyshop/user/password')
    config.add_view(u'pyshop.views.user.ChangePassword',
                    route_name=u'change_password',
                    renderer=u'pyshop/user/change_password.html',
                    permission=u'user_view')

    # Credentials: pip-facing routes fall back to HTTP basic auth on 403,
    # everything else falls back to the HTML login form.
    for route in ('list_simple', 'show_simple',
                  'show_release_file', 'show_external_release_file',
                  'upload_releasefile'):
        config.add_view('pyshop.views.credentials.authbasic',
                        route_name=route,
                        context='pyramid.exceptions.Forbidden'
                        )

    config.add_view('pyshop.views.credentials.Login',
                    renderer=u'shared/login.html',
                    context=u'pyramid.exceptions.Forbidden')
| |
import math
import Sensors.mpu6050.i2cutils as I2CUtils
class MPU6050(object):
    '''
    Simple MPU-6050 (gyroscope + accelerometer + temperature) implementation.

    The device starts in sleep mode; the constructor wakes it and programs
    the requested gyro and accelerometer full-scale ranges.  All angles and
    rotation rates are reported in radians / radians per second.
    '''

    # --- Register addresses and per-register constants -------------------
    PWR_MGMT_1 = 0x6b

    FS_SEL = 0x1b        # gyro full-scale selection register
    FS_250 = 0
    FS_500 = 1
    FS_1000 = 2
    FS_2000 = 3

    AFS_SEL = 0x1c       # accelerometer full-scale selection register
    AFS_2g = 0
    AFS_4g = 1
    AFS_8g = 2
    AFS_16g = 3

    ACCEL_START_BLOCK = 0x3b
    ACCEL_XOUT_H = 0
    ACCEL_XOUT_L = 1
    ACCEL_YOUT_H = 2
    ACCEL_YOUT_L = 3
    ACCEL_ZOUT_H = 4
    ACCEL_ZOUT_L = 5
    # {setting: [full-scale range, divisor applied to raw readings]}
    ACCEL_SCALE = { AFS_2g : [ 2, 16384.0], AFS_4g : [ 4, 8192.0], AFS_8g : [ 8, 4096.0], AFS_16g : [16, 2048.0] }

    TEMP_START_BLOCK = 0x41
    TEMP_OUT_H = 0
    TEMP_OUT_L = 1

    GYRO_START_BLOCK = 0x43
    GYRO_XOUT_H = 0
    GYRO_XOUT_L = 1
    GYRO_YOUT_H = 2
    GYRO_YOUT_L = 3
    GYRO_ZOUT_H = 4
    GYRO_ZOUT_L = 5
    # {setting: [full-scale range, divisor applied to raw readings]}
    GYRO_SCALE = { FS_250 : [ 250, 131.0], FS_500 : [ 500, 65.5], FS_1000 : [1000, 32.8], FS_2000 : [2000, 16.4] }

    # Complementary-filter weights (kept for users of this class;
    # not referenced inside it).
    K = 0.98
    K1 = 1 - K

    def __init__(self, bus, address, name, fs_scale=FS_250, afs_scale=AFS_2g):
        '''
        Constructor.

        :param bus: I2C bus object passed to the I2CUtils helpers.
        :param address: I2C address of the MPU-6050.
        :param name: friendly name for this sensor instance.
        :param fs_scale: gyro full-scale setting (one of FS_250..FS_2000).
        :param afs_scale: accelerometer full-scale setting (one of AFS_2g..AFS_16g).
        '''
        self.bus = bus
        self.address = address
        self.name = name
        self.fs_scale = fs_scale
        self.afs_scale = afs_scale

        # Raw register blocks and decoded values, populated by read_raw_data().
        self.raw_gyro_data = [0, 0, 0, 0, 0, 0]
        self.raw_accel_data = [0, 0, 0, 0, 0, 0]
        self.raw_temp_data = [0, 0]

        self.gyro_raw_x = 0
        self.gyro_raw_y = 0
        self.gyro_raw_z = 0
        self.gyro_scaled_x = 0
        self.gyro_scaled_y = 0
        self.gyro_scaled_z = 0

        self.raw_temp = 0
        self.scaled_temp = 0

        self.accel_raw_x = 0
        self.accel_raw_y = 0
        self.accel_raw_z = 0
        self.accel_scaled_x = 0
        self.accel_scaled_y = 0
        self.accel_scaled_z = 0

        self.pitch = 0.0
        self.roll = 0.0

        # We need to wake up the module as it starts in sleep mode
        I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.PWR_MGMT_1, 0)
        # Set the gyro resolution
        I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.FS_SEL, self.fs_scale << 3)
        # Set the accelerometer resolution
        I2CUtils.i2c_write_byte(self.bus, self.address, MPU6050.AFS_SEL, self.afs_scale << 3)

    def read_raw_data(self):
        '''
        Read the raw data from the sensor, scale it appropriately and store for later use
        '''
        self.raw_gyro_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.GYRO_START_BLOCK, 6)
        self.raw_accel_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.ACCEL_START_BLOCK, 6)
        self.raw_temp_data = I2CUtils.i2c_read_block(self.bus, self.address, MPU6050.TEMP_START_BLOCK, 2)

        # Each 16-bit reading arrives as a high/low byte pair.
        self.gyro_raw_x = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_XOUT_H], self.raw_gyro_data[MPU6050.GYRO_XOUT_L])
        self.gyro_raw_y = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_YOUT_H], self.raw_gyro_data[MPU6050.GYRO_YOUT_L])
        self.gyro_raw_z = I2CUtils.twos_compliment(self.raw_gyro_data[MPU6050.GYRO_ZOUT_H], self.raw_gyro_data[MPU6050.GYRO_ZOUT_L])

        self.accel_raw_x = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_XOUT_H], self.raw_accel_data[MPU6050.ACCEL_XOUT_L])
        self.accel_raw_y = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_YOUT_H], self.raw_accel_data[MPU6050.ACCEL_YOUT_L])
        self.accel_raw_z = I2CUtils.twos_compliment(self.raw_accel_data[MPU6050.ACCEL_ZOUT_H], self.raw_accel_data[MPU6050.ACCEL_ZOUT_L])

        self.raw_temp = I2CUtils.twos_compliment(self.raw_temp_data[MPU6050.TEMP_OUT_H], self.raw_temp_data[MPU6050.TEMP_OUT_L])

        # We convert these to radians for consistency and so we can easily combine later in the filter
        self.gyro_scaled_x = math.radians(self.gyro_raw_x / MPU6050.GYRO_SCALE[self.fs_scale][1])
        self.gyro_scaled_y = math.radians(self.gyro_raw_y / MPU6050.GYRO_SCALE[self.fs_scale][1])
        self.gyro_scaled_z = math.radians(self.gyro_raw_z / MPU6050.GYRO_SCALE[self.fs_scale][1])

        # Temperature conversion formula — presumably from the MPU-6050
        # datasheet; TODO confirm against the register map.
        self.scaled_temp = self.raw_temp / 340 + 36.53

        self.accel_scaled_x = self.accel_raw_x / MPU6050.ACCEL_SCALE[self.afs_scale][1]
        self.accel_scaled_y = self.accel_raw_y / MPU6050.ACCEL_SCALE[self.afs_scale][1]
        self.accel_scaled_z = self.accel_raw_z / MPU6050.ACCEL_SCALE[self.afs_scale][1]

        self.pitch = self.read_x_rotation(self.read_scaled_accel_x(), self.read_scaled_accel_y(), self.read_scaled_accel_z())
        self.roll = self.read_y_rotation(self.read_scaled_accel_x(), self.read_scaled_accel_y(), self.read_scaled_accel_z())

    def distance(self, x, y):
        '''Returns the distance between two points in 2d space'''
        # math.hypot is the standard-library (and numerically safer)
        # equivalent of sqrt(x*x + y*y).
        return math.hypot(x, y)

    def read_x_rotation(self, x, y, z):
        '''Returns the rotation around the X axis in radians'''
        return math.atan2(y, self.distance(x, z))

    def read_y_rotation(self, x, y, z):
        '''Returns the rotation around the Y axis in radians'''
        return -math.atan2(x, self.distance(y, z))

    def read_raw_accel_x(self):
        '''Return the RAW X accelerometer value'''
        return self.accel_raw_x

    def read_raw_accel_y(self):
        '''Return the RAW Y accelerometer value'''
        return self.accel_raw_y

    def read_raw_accel_z(self):
        '''Return the RAW Z accelerometer value'''
        return self.accel_raw_z

    def read_scaled_accel_x(self):
        '''Return the SCALED X accelerometer value'''
        return self.accel_scaled_x

    def read_scaled_accel_y(self):
        '''Return the SCALED Y accelerometer value'''
        return self.accel_scaled_y

    def read_scaled_accel_z(self):
        '''Return the SCALED Z accelerometer value'''
        return self.accel_scaled_z

    def read_raw_gyro_x(self):
        '''Return the RAW X gyro value'''
        return self.gyro_raw_x

    def read_raw_gyro_y(self):
        '''Return the RAW Y gyro value'''
        return self.gyro_raw_y

    def read_raw_gyro_z(self):
        '''Return the RAW Z gyro value'''
        return self.gyro_raw_z

    def read_scaled_gyro_x(self):
        '''Return the SCALED X gyro value in radians/second'''
        return self.gyro_scaled_x

    def read_scaled_gyro_y(self):
        '''Return the SCALED Y gyro value in radians/second'''
        return self.gyro_scaled_y

    def read_scaled_gyro_z(self):
        '''Return the SCALED Z gyro value in radians/second'''
        return self.gyro_scaled_z

    def read_temp(self):
        '''Return the temperature'''
        return self.scaled_temp

    def read_pitch(self):
        '''Return the current pitch value in radians'''
        return self.pitch

    def read_roll(self):
        '''Return the current roll value in radians'''
        return self.roll

    def read_all(self):
        '''Return pitch and roll in radians and the scaled x, y & z values from the gyroscope and accelerometer'''
        self.read_raw_data()
        return (self.pitch, self.roll, self.gyro_scaled_x, self.gyro_scaled_y, self.gyro_scaled_z, self.accel_scaled_x, self.accel_scaled_y, self.accel_scaled_z)
| |
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
# Minimum change worth returning; smaller change is left as fee.
BASE_FEE = Decimal("0.001")


def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    satoshis = int(round_tripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dot-directory.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 module name; this whole script targets Python 2.
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        # bitcoin.conf is a plain key=value file with no [section] headers,
        # which ConfigParser cannot parse.  This file-like wrapper injects a
        # synthetic "[all]" header before the real contents.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'

        def readline(self):
            if self.sechead:
                # Emit the fake header exactly once.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                # Strip '#' comments but keep the trailing newline so the
                # parser still sees complete lines.
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    # Flatten the single fake section into a plain dict.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server.

    Exits the process with an error message if the connection fails or the
    server's testnet setting does not match the configuration.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:  # idiomatic `not in` instead of `not ... in`
        config['rpcport'] = 135894 if testnet else 22541
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed the SystemExit
        # raised by sys.exit(1) above and misreported a testnet mismatch as
        # a connection error.  SystemExit/KeyboardInterrupt now propagate.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase if needed.

    Returns True when the wallet is unencrypted or successfully unlocked,
    False when the supplied passphrase was wrong.
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock for 5 seconds, just long enough to sign the transaction.
            bitcoind.walletpassphrase(passphrase, 5)
        except Exception:
            # BUGFIX: was a bare `except:`, which also caught the
            # KeyboardInterrupt a user sends to abort the passphrase prompt.
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Group spendable outputs by address.

    Returns {address: {"total": Decimal, "outputs": [unspent...],
    "account": str}} for every standard unspent output the node knows about.
    """
    account_by_address = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        account_by_address[info["address"]] = info["account"]

    summary = dict()
    for output in bitcoind.listunspent(0):
        # listunspent doesn't give addresses, so look them up via the raw tx:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] not in ("pubkeyhash", "scripthash"):
            continue

        address = pk["addresses"][0]
        entry = summary.get(address)
        if entry is None:
            summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : account_by_address.get(address, "")
                }
        else:
            entry["total"] += vout["value"]
            entry["outputs"].append(output)

    return summary
def select_coins(needed, inputs):
    """Greedily pick inputs until `needed` is covered.

    Returns (selected_outpoints, have - needed); the second element is
    negative when the inputs cannot cover the requested amount.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    for coin in inputs:
        if have >= needed:
            break
        outputs.append({ "txid":coin["txid"], "vout":coin["vout"]})
        have += coin["amount"]
    return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction spending from `fromaddresses`.

    Change above BASE_FEE goes back to the last from-address; exits the
    process if the available funds cannot cover amount+fee or signing fails.
    Returns the signed transaction as a hex string.
    """
    all_coins = list_available(bitcoind)

    needed = amount+fee
    potential_inputs = []
    total_available = Decimal("0.0")
    for addr in fromaddresses:
        coins = all_coins.get(addr)
        if coins is None:
            continue
        potential_inputs.extend(coins["outputs"])
        total_available += coins["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)

    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)

    return signed_rawtx["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of all inputs referenced by the decoded tx `txinfo`."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        # Each input's value lives on the transaction that created it.
        in_info = bitcoind.getrawtransaction(vin['txid'], 1)
        total += in_info['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of the decoded tx `txinfo`."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Exit the process if the transaction's fee looks unreasonable.

    Rejects a fee above `max_fee`, and rejects fee-less transactions that
    are either larger than ~1000 bytes or spend a tiny total amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUGFIX: `fee` was used below but never defined (NameError on any
        # no-fee path).  The fee is what goes to the miners: in minus out.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))

        tx_size = len(txdata_hex) // 2  # two hex characters per byte
        kb = tx_size // 1000  # explicit floor division (same on Python 2 and 3)
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list spendable funds, or build, sanity-check
    and (unless --dry_run) broadcast a transaction."""
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)

    if options.amount is None:
        # No amount given: just list what is spendable, per address.
        address_summary = list_available(bitcoind)
        # BUGFIX: iteritems() is Python-2-only; items() works on 2 and 3.
        for address, info in address_summary.items():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
| |
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    self.short_name = short_name              # e.g. '2013', '2013e' (Express)
    self.description = description            # human-readable name
    self.solution_version = solution_version  # version stamped into .sln files
    self.project_version = project_version    # version stamped into project files
    self.flat_sln = flat_sln
    self.uses_vcxproj = uses_vcxproj          # True -> .vcxproj, False -> .vcproj
    self.path = path                          # Visual Studio install root
    self.sdk_based = sdk_based                # True when driven by a standalone SDK
    self.default_toolset = default_toolset    # msbuild toolset, e.g. 'v140'

  def ShortName(self):
    """Get the short name of the version, e.g. '2013'."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    # Conditional expression instead of the legacy `and/or` trick, which
    # silently misbehaves whenever the "true" branch is a falsy value.
    return '.vcxproj' if self.uses_vcxproj else '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool. """
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    # If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the
    # depot_tools build tools and should run SetEnv.Cmd to set up the
    # environment. The check for WindowsSDKDir alone is not sufficient because
    # this is set by running vcvarsall.bat.
    assert target_arch in ('x86', 'x64')
    sdk_dir = os.environ.get('WindowsSDKDir')
    if sdk_dir:
      setup_path = os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd'))
    # NOTE: `setup_path` is only defined when sdk_dir is truthy; the `and`
    # chain below short-circuits before reading it otherwise.
    if self.sdk_based and sdk_dir and os.path.exists(setup_path):
      return [setup_path, '/' + target_arch]
    else:
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      if target_arch == 'x86':
        if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
            os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          # VS2013 and later, non-Express have a x64-x86 cross that we want
          # to prefer.
          return [os.path.normpath(
              os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
        # Otherwise, the standard x86 compiler.
        return [os.path.normpath(
            os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
      else:
        assert target_arch == 'x64'
        arg = 'x86_amd64'
        # Use the 64-on-64 compiler if we're not using an express
        # edition and we're running on a 64bit OS.
        if self.short_name[-1] != 'e' and (
            os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          arg = 'amd64'
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
  r"""Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32. Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  text = None
  try:
    text = _RegistryQueryBase('Sysnative', key, value)
  except OSError as e:
    # BUGFIX: was `except OSError, e` — Python-2-only syntax that is a
    # SyntaxError on Python 3.  The `as` form works on 2.6+ and 3.x.
    if e.errno == errno.ENOENT:
      # Sysnative doesn't exist on this system; retry via System32.
      text = _RegistryQueryBase('System32', key, value)
    else:
      raise
  return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
    """Use winreg or reg.exe to obtain the value of a registry key.

    Using the winreg module is preferable because it solves an issue on some
    corporate environments where access to reg.exe is locked down. However, we
    still need to fall back to reg.exe for the case where the winreg module is
    not available (for example in cygwin python).

    Args:
      key: The registry key.
      value: The particular registry value to read.
    Return:
      contents of the registry key's value, or None on failure.
    """
    try:
        return _RegistryGetValueUsingWinReg(key, value)
    except ImportError:
        # No winreg module available: fall back to spawning reg.exe below.
        pass
    output = _RegistryQuery(key, value)
    if output:
        # Pull the value out of reg.exe's "<name> REG_<type> <data>" line.
        match = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
        if match:
            return match.group(1)
    return None
def _CreateVersion(name, path, sdk_based=False):
    """Sets up MSVS project generation.

    Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
    autodetected if GYP_MSVS_VERSION is not explicitly specified. Passing a
    version that does not match any known entry raises a KeyError.
    """
    if path:
        path = os.path.normpath(path)
    # (short name, description, solution version, project version,
    #  flat solution?, uses vcxproj?, default toolset or None)
    table = [
        ('2015', 'Visual Studio 2015', '12.00', '14.0', False, True, 'v140'),
        ('2013', 'Visual Studio 2013', '13.00', '12.0', False, True, 'v120'),
        ('2013e', 'Visual Studio 2013', '13.00', '12.0', True, True, 'v120'),
        ('2012', 'Visual Studio 2012', '12.00', '4.0', False, True, 'v110'),
        ('2012e', 'Visual Studio 2012', '12.00', '4.0', True, True, 'v110'),
        ('2010', 'Visual Studio 2010', '11.00', '4.0', False, True, None),
        ('2010e', 'Visual C++ Express 2010', '11.00', '4.0', True, True, None),
        ('2008', 'Visual Studio 2008', '10.00', '9.00', False, False, None),
        ('2008e', 'Visual Studio 2008', '10.00', '9.00', True, False, None),
        ('2005', 'Visual Studio 2005', '9.00', '8.00', False, False, None),
        ('2005e', 'Visual Studio 2005', '9.00', '8.00', True, False, None),
    ]
    versions = {}
    for (short_name, description, sln_version, proj_version,
         flat_sln, uses_vcxproj, toolset) in table:
        kwargs = dict(solution_version=sln_version,
                      project_version=proj_version,
                      flat_sln=flat_sln,
                      uses_vcxproj=uses_vcxproj,
                      path=path,
                      sdk_based=sdk_based)
        # 2010 and older entries rely on VisualStudioVersion's own default
        # toolset, so the keyword is only supplied when a toolset is known.
        if toolset is not None:
            kwargs['default_toolset'] = toolset
        versions[short_name] = VisualStudioVersion(short_name, description,
                                                   **kwargs)
    return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
    """Collect the list of installed visual studio versions.

    Arguments:
      versions_to_check: registry version numbers (e.g. '8.0'..'14.0') to
          probe, in descending order of usage preference.
      force_express: when True, only express editions are considered even when
          a full devenv.exe installation exists.
    Returns:
      A list of visual studio versions installed in descending order of
      usage preference.
    Base this on the registry and a quick check if devenv.exe exists.
    Only versions 8-10 are considered.
    Possibilities are:
      2005(e) - Visual Studio 2005 (8)
      2008(e) - Visual Studio 2008 (9)
      2010(e) - Visual Studio 2010 (10)
      2012(e) - Visual Studio 2012 (11)
      2013(e) - Visual Studio 2013 (12)
      2015    - Visual Studio 2015 (14)
    Where (e) is e for express editions of MSVS and blank otherwise.
    """
    # Maps registry version numbers to the marketing year used by gyp.
    version_to_year = {
        '8.0': '2005',
        '9.0': '2008',
        '10.0': '2010',
        '11.0': '2012',
        '12.0': '2013',
        '14.0': '2015',
    }
    versions = []
    for version in versions_to_check:
        # Old method of searching for which VS version is installed
        # We don't use the 2010-encouraged-way because we also want to get the
        # path to the binaries, which it doesn't offer.
        # Both native and Wow6432Node (32-bit view on 64-bit Windows) hives
        # are probed, for full VS as well as VC Express.
        keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
                r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
                r'HKLM\Software\Microsoft\VCExpress\%s' % version,
                r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
        for index in range(len(keys)):
            path = _RegistryGetValue(keys[index], 'InstallDir')
            if not path:
                continue
            path = _ConvertToCygpath(path)
            # Check for full.
            full_path = os.path.join(path, 'devenv.exe')
            express_path = os.path.join(path, '*express.exe')
            if not force_express and os.path.exists(full_path):
                # Add this one.
                # InstallDir points at Common7/IDE; go two levels up for the
                # VS root directory.
                versions.append(_CreateVersion(version_to_year[version],
                                               os.path.join(path, '..', '..')))
            # Check for express.
            elif glob.glob(express_path):
                # Add this one.
                versions.append(_CreateVersion(version_to_year[version] + 'e',
                                               os.path.join(path, '..', '..')))
        # The old method above does not work when only SDK is installed.
        keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
                r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
        for index in range(len(keys)):
            path = _RegistryGetValue(keys[index], version)
            if not path:
                continue
            path = _ConvertToCygpath(path)
            if version != '14.0':  # There is no Express edition for 2015.
                versions.append(_CreateVersion(version_to_year[version] + 'e',
                                               os.path.join(path, '..'),
                                               sdk_based=True))
    return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
    """Select which version of Visual Studio projects to generate.

    Arguments:
      version: Hook to allow caller to force a particular version (vs auto).
      allow_fallback: when False, raise ValueError instead of falling back to
          a default version if no installation can be detected.
    Returns:
      An object representing a visual studio project format version.
    """
    # In auto mode, check environment variable for override.
    if version == 'auto':
        version = os.environ.get('GYP_MSVS_VERSION', 'auto')
    # Registry version numbers to probe for each requested year; 'auto' lists
    # them in decreasing order of preference.
    version_map = {
        'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
        '2005': ('8.0',),
        '2005e': ('8.0',),
        '2008': ('9.0',),
        '2008e': ('9.0',),
        '2010': ('10.0',),
        '2010e': ('10.0',),
        '2012': ('11.0',),
        '2012e': ('11.0',),
        '2013': ('12.0',),
        '2013e': ('12.0',),
        '2015': ('14.0',),
    }
    # An explicit override path bypasses detection entirely, but requires the
    # version to be pinned as well.
    override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
    if override_path:
        msvs_version = os.environ.get('GYP_MSVS_VERSION')
        if not msvs_version:
            raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                             'set to a particular version (e.g. 2010e).')
        return _CreateVersion(msvs_version, override_path, sdk_based=True)
    version = str(version)
    # A trailing 'e' in the year string means an express edition was requested.
    versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
    if not versions:
        if not allow_fallback:
            raise ValueError('Could not locate Visual Studio installation.')
        if version == 'auto':
            # Default to 2005 if we couldn't find anything
            return _CreateVersion('2005', None)
        else:
            return _CreateVersion(version, None)
    return versions[0]
| |
# -*- coding: utf-8 -*-
"""
Module :mod:`config` holds the whole configuration mechanism.
Configuration can be conducted either from a dictionary :func:`dict_config` or
from a file :func:`file_config` holding directly the dictionary.
Configuration may be done several times but overlaps will be overwritten,
keeping the most recent one. This is also true with the preconfigured
formatter, callbacks and rules, so beware !
The dictionary has the following form:
{
"version": <#num>,
"callbacks": {
<$aaa>: <mod.function_name>,
...
},
"rules": {
<$bbb>: <mod.function_name>,
...
},
"formatters": {
<$ccc>: <mod.function_name>,
...
},
"generator_monitors": {
<gen_name>: {
<args>: <value>,
...
},
},
"function_monitors": {
<func_name>: {
<args>: <value>,
...
},
},
"code_monitors": {
<code_name>: {
<args>: <value>,
...
},
},
}
Version section (mandatory)
<#num> => the version number
callback section (optional)
<$aaa> => a string starting with '$' : the shortcut name for the callback
factory
rules section (optional)
<$bbb> => a string starting with '$' : the shortcut name for the rule
factory
formatters section (optional)
<$ccc> => a string starting with '$' : the shortcut name for the formatter
factory
generator_monitors section (optional)
<gen_name> => the name of the monitor
<args> => the name of the argument
<value> => the value for the arguments
function_monitors section (optional)
<func_name> => the name of the monitor
<args> => the name of the argument
<value> => the value for the arguments
code_monitors section (optional)
<code_name> => the name of the monitor
<args> => the name of the argument
<value> => the value for the arguments
<mod.function_name> => the full path to the function (will be loaded
dynamically)
Monitor names have a hierarchical structure similar to the logging module's logger names.
"""
# Module metadata.
__author__ = "Begon Jean-Michel <jm.begon@gmail.com>"
__copyright__ = "3-clause BSD License"
__version__ = '1.0'
__date__ = "08 January 2015"
import re
from ast import literal_eval
import logging
from .factory import monitor_generator_factory
from .factory import (monitor_function_factory, formated_code_monitoring)
from .formatter import __formatter_factories__
from .rule import __rule_factories__
from .callback import __callback_factories__
from .util import IdProxy
# ============================ MANAGER ============================ #
class Manager(object):
    """Singleton registry mapping monitor names to (conf, type) entries.

    Monitor names are hierarchical, dotted like logging logger names; the
    effective configuration of a name is the union of the configurations of
    all its ancestors, the most specific entry winning on overlap.
    """

    # Monitor type tags stored alongside each configuration.
    UNKNOWN_MONITOR = 0
    GENERATOR_MONITOR = 1
    FUNCTION_MONITOR = 2
    CODE_MONITOR = 3

    _singleton = None

    def __new__(cls, *args, **kwargs):
        # Classic singleton: build the instance (and its metadata store)
        # only once. BUGFIX: extra arguments must not be forwarded to
        # object.__new__ — doing so raises TypeError on Python 3 (and was
        # already deprecated on Python 2).
        if cls._singleton is None:
            cls._singleton = super(Manager, cls).__new__(cls)
            cls._singleton._meta = dict()
        return cls._singleton

    def add_config(self, monitor_name, conf, monitor_type):
        """Register the configuration dict and type tag for `monitor_name`."""
        self._meta[monitor_name] = (conf, monitor_type)

    def _get_ancestors_conf(self, monitor_name):
        """Merge the configurations of `monitor_name` and all its ancestors.

        Returns:
            (conf, monitor_type): the merged configuration dict and the most
            specific non-unknown type tag found along the ancestor chain.
        """
        unknown = Manager.UNKNOWN_MONITOR
        monitor_type = unknown
        conf = dict()
        # 'a.b.c' --> ['a', 'a.b', 'a.b.c'], least specific first so the
        # most specific configuration overwrites inherited values.
        # BUGFIX: raw string for the regex ('\.' is an invalid escape
        # warning on Python 3.6+).
        prefixes = [monitor_name[:m.start()]
                    for m in re.finditer(r'\.', monitor_name)]
        prefixes.append(monitor_name)
        for prefix in prefixes:
            anc_conf, type_ = self._meta.get(prefix, (dict(), unknown))
            if type_ != Manager.UNKNOWN_MONITOR:
                monitor_type = type_
            conf.update(anc_conf)
        return conf, monitor_type

    def get_config(self, monitor_name, **kwargs):
        """Return (conf, type) for `monitor_name`; kwargs override the conf."""
        conf, monitor_type = self._get_ancestors_conf(monitor_name)
        if len(kwargs) > 0:
            conf.update(kwargs)
        return conf, monitor_type
# ============================ DICT PARSING ============================ #
class Const(object):
    """String keys of the sections in the configuration dictionary."""
    VERSION = "version"                  # mandatory: schema version number
    MONITORS = "generator_monitors"      # monitors wrapping generators
    FUNC_MONITORS = "function_monitors"  # monitors wrapping functions
    CODE_MONITORS = "code_monitors"      # monitors wrapping code blocks
    CALLBACK_SEC = "callbacks"           # '$name' -> callback factory path
    RULE_SEC = "rules"                   # '$name' -> rule factory path
    FORMATTER_SEC = "formatters"         # '$name' -> formatter factory path
def _external_load(string):
mod_str, obj_str = string.rsplit(".", 1)
mod = __import__(mod_str, fromlist=[obj_str])
obj = getattr(mod, obj_str)
return obj
def _substitute(struct, substit_dict):
if hasattr(struct, "startswith"):
if struct.startswith("$"):
return substit_dict[struct]
elif hasattr(struct, "iteritems"):
# dict --> inspect
for k, v in struct.iteritems():
struct[k] = _substitute(v, substit_dict)
else:
try:
# List -> inspect
for i, elem in enumerate(struct):
struct[i] = _substitute(elem, substit_dict)
except TypeError:
pass
return struct
def _dict_config_v1(config_dict):
    """Apply a version-1 configuration dictionary.

    Loads the user-supplied '$name' factories (rules, formatters, callbacks),
    substitutes every '$name' shortcut throughout the dictionary, then
    registers each monitor section with the singleton Manager.
    """
    manager = Manager()
    # ---- Predefined replacement rules ---- #
    substit_dict = dict()
    substit_dict.update(__rule_factories__)
    substit_dict.update(__formatter_factories__)
    substit_dict.update(__callback_factories__)
    # ---- Adding the substitutions ---- #
    # BUGFIX throughout: dict.iteritems() does not exist on Python 3;
    # .items() behaves identically here on both Python 2 and 3.
    # rules
    if Const.RULE_SEC in config_dict:
        for k, v in config_dict[Const.RULE_SEC].items():
            if k.startswith("$"):
                loaded = _external_load(v)
                substit_dict[k] = loaded
                __rule_factories__[k] = loaded
    # formatters
    if Const.FORMATTER_SEC in config_dict:
        for k, v in config_dict[Const.FORMATTER_SEC].items():
            if k.startswith("$"):
                loaded = _external_load(v)
                substit_dict[k] = loaded
                __formatter_factories__[k] = loaded
    # callbacks
    if Const.CALLBACK_SEC in config_dict:
        for k, v in config_dict[Const.CALLBACK_SEC].items():
            if k.startswith("$"):
                loaded = _external_load(v)
                substit_dict[k] = loaded
                __callback_factories__[k] = loaded
    # ---- Performing the substitutions ---- #
    config_dict = _substitute(config_dict, substit_dict)
    # ---- Getting the monitors for generators ---- #
    if Const.MONITORS in config_dict:
        for name, conf in config_dict[Const.MONITORS].items():
            # Adding to the manager
            manager.add_config(name, conf, Manager.GENERATOR_MONITOR)
    # ---- Getting the monitors for functions ---- #
    if Const.FUNC_MONITORS in config_dict:
        for name, conf in config_dict[Const.FUNC_MONITORS].items():
            # Adding to the manager
            manager.add_config(name, conf, Manager.FUNCTION_MONITOR)
    # ---- Getting the monitors for code blocks ---- #
    if Const.CODE_MONITORS in config_dict:
        for name, conf in config_dict[Const.CODE_MONITORS].items():
            # Adding to the manager
            manager.add_config(name, conf, Manager.CODE_MONITOR)
# ============================ PUBLIC EXPOSURE ============================ #
def get_config(monitor_name, **kwargs):
    """Return the merged configuration dict for `monitor_name`.

    Keyword arguments override values inherited from the configuration.
    """
    configuration, _ = Manager().get_config(monitor_name, **kwargs)
    return configuration
def get_monitor(monitor_name, **kwargs):
    """Build the monitor registered under `monitor_name`.

    Dispatches on the registered monitor type; an unknown name yields a
    no-op IdProxy (with a warning) so caller code keeps running unmonitored.
    """
    conf, monitor_type = Manager().get_config(monitor_name, **kwargs)
    factories = {
        Manager.GENERATOR_MONITOR: monitor_generator_factory,
        Manager.FUNCTION_MONITOR: monitor_function_factory,
        Manager.CODE_MONITOR: formated_code_monitoring,
    }
    factory = factories.get(monitor_type)
    if factory is None:
        # If unknown, do not crash caller code
        logger = logging.getLogger('progressmonitor.config')
        msg = "Unknown monitor name '%s'. Skipping monitoring." % monitor_name
        logger.warning(msg)
        return IdProxy()
    return factory(**conf)
def get_generator_monitor(monitor_name, **kwargs):
    """Build a generator monitor from the configuration of `monitor_name`."""
    return monitor_generator_factory(**get_config(monitor_name, **kwargs))
def get_function_monitor(monitor_name, **kwargs):
    """Build a function monitor from the configuration of `monitor_name`."""
    return monitor_function_factory(**get_config(monitor_name, **kwargs))
def get_code_monitor(monitor_name, **kwargs):
    """Build a code monitor from the configuration of `monitor_name`."""
    return formated_code_monitoring(**get_config(monitor_name, **kwargs))
def parse_dict_config(config_dict):
    """Apply a configuration dictionary (see the module docstring for schema).

    Raises AttributeError when the declared version is unsupported; a missing
    version defaults to 1.
    """
    version = config_dict.get(Const.VERSION, 1)
    if version != 1:
        raise AttributeError("Version " + str(version) + " is not supported")
    _dict_config_v1(config_dict)
def parse_file_config(config_file):
    """Apply a configuration stored in a file.

    The file must contain a Python literal for the configuration dictionary;
    it is read safely with ast.literal_eval (no arbitrary code execution).
    """
    with open(config_file) as file_handle:
        content = file_handle.read()
    parse_dict_config(literal_eval(content))
| |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells Scheduler
"""
import copy
import time
from oslo.config import cfg
from nova.cells import filters
from nova.cells import weights
from nova import compute
from nova.compute import instance_actions
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.db import base
from nova import exception
from nova.openstack.common import log as logging
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova.scheduler import utils as scheduler_utils
# Tunables for the cells scheduler; registered under the [cells] config
# group below.
cell_scheduler_opts = [
        cfg.ListOpt('scheduler_filter_classes',
                default=['nova.cells.filters.all_filters'],
                help='Filter classes the cells scheduler should use. '
                     'An entry of "nova.cells.filters.all_filters"'
                     'maps to all cells filters included with nova.'),
        cfg.ListOpt('scheduler_weight_classes',
                default=['nova.cells.weights.all_weighers'],
                help='Weigher classes the cells scheduler should use. '
                     'An entry of "nova.cells.weights.all_weighers"'
                     'maps to all cell weighers included with nova.'),
        cfg.IntOpt('scheduler_retries',
                default=10,
                help='How many retries when no cells are available.'),
        cfg.IntOpt('scheduler_retry_delay',
                default=2,
                help='How often to retry in seconds when no cells are '
                     'available.')
]

# Module-level logger and option registration.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(cell_scheduler_opts, group='cells')
class CellsScheduler(base.Base):
    """The cells scheduler.

    Picks a target cell (possibly this one) for new instances by running the
    configured cell filters and weighers, then either builds locally or
    forwards the request to a child cell via the message runner.
    """

    def __init__(self, msg_runner):
        """Wire up API handles and load the configured filter/weigher classes.

        :param msg_runner: the cells MessageRunner owning this scheduler;
            also provides the cells state manager.
        """
        super(CellsScheduler, self).__init__()
        self.msg_runner = msg_runner
        self.state_manager = msg_runner.state_manager
        self.compute_api = compute.API()
        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
        self.compute_task_api = conductor.ComputeTaskAPI()
        self.filter_handler = filters.CellFilterHandler()
        self.filter_classes = self.filter_handler.get_matching_classes(
            CONF.cells.scheduler_filter_classes)
        self.weight_handler = weights.CellWeightHandler()
        self.weigher_classes = self.weight_handler.get_matching_classes(
            CONF.cells.scheduler_weight_classes)

    def _create_instances_here(self, ctxt, instance_uuids,
                               instance_properties, instance_type, image,
                               security_groups, block_device_mapping):
        """Create the instance DB entries in this cell and push them upward.

        One DB entry is created per uuid; each created instance is then
        broadcast to the top of the cell tree via the message runner.
        """
        instance_values = copy.copy(instance_properties)
        num_instances = len(instance_uuids)
        for i, instance_uuid in enumerate(instance_uuids):
            instance_values['uuid'] = instance_uuid
            instance = self.compute_api.create_db_entry_for_new_instance(
                ctxt,
                instance_type,
                image,
                instance_values,
                security_groups,
                block_device_mapping,
                num_instances, i)
            self.msg_runner.instance_update_at_top(ctxt, instance)

    def _create_action_here(self, ctxt, instance_uuids):
        """Record a 'create' instance action in this cell's DB per uuid."""
        for instance_uuid in instance_uuids:
            action = compute_utils.pack_action_start(ctxt, instance_uuid,
                                                     instance_actions.CREATE)
            self.db.action_start(ctxt, action)

    def _get_possible_cells(self):
        """Return candidate cells: children plus, conditionally, ourselves."""
        cells = self.state_manager.get_child_cells()
        our_cell = self.state_manager.get_my_state()
        # Include our cell in the list, if we have any capacity info
        if not cells or our_cell.capacities:
            cells.append(our_cell)
        return cells

    def _grab_target_cells(self, filter_properties):
        """Filter and weigh candidate cells.

        Returns the weighed cells best-first, or None when a filter fully
        handled scheduling itself. Raises NoCellsAvailable when every
        candidate was filtered out.
        """
        cells = self._get_possible_cells()
        cells = self.filter_handler.get_filtered_objects(self.filter_classes,
                                                         cells,
                                                         filter_properties)
        # NOTE(comstud): I know this reads weird, but the 'if's are nested
        # this way to optimize for the common case where 'cells' is a list
        # containing at least 1 entry.
        if not cells:
            if cells is None:
                # None means to bypass further scheduling as a filter
                # took care of everything.
                return
            raise exception.NoCellsAvailable()
        weighted_cells = self.weight_handler.get_weighed_objects(
            self.weigher_classes, cells, filter_properties)
        LOG.debug(_("Weighted cells: %(weighted_cells)s"),
                  {'weighted_cells': weighted_cells})
        target_cells = [cell.obj for cell in weighted_cells]
        return target_cells

    def _run_instance(self, message, target_cells, instance_uuids,
                      host_sched_kwargs):
        """Attempt to schedule instance(s).

        Tries each target cell in preference order; scheduling locally when
        the cell is this one, otherwise forwarding to the child cell. Raises
        NoCellsAvailable when every cell fails.
        """
        ctxt = message.ctxt
        request_spec = host_sched_kwargs['request_spec']
        instance_properties = request_spec['instance_properties']
        instance_type = request_spec['instance_type']
        image = request_spec['image']
        security_groups = request_spec['security_group']
        block_device_mapping = request_spec['block_device_mapping']
        LOG.debug(_("Scheduling with routing_path=%(routing_path)s"),
                  {'routing_path': message.routing_path})
        for target_cell in target_cells:
            try:
                if target_cell.is_me:
                    # Need to create instance DB entries as the host scheduler
                    # expects that the instance(s) already exists.
                    self._create_instances_here(ctxt, instance_uuids,
                            instance_properties, instance_type, image,
                            security_groups, block_device_mapping)
                    # Need to record the create action in the db as the
                    # scheduler expects it to already exist.
                    self._create_action_here(ctxt, instance_uuids)
                    self.scheduler_rpcapi.run_instance(ctxt,
                            **host_sched_kwargs)
                    return
                self.msg_runner.schedule_run_instance(ctxt, target_cell,
                                                      host_sched_kwargs)
                return
            except Exception:
                LOG.exception(_("Couldn't communicate with cell '%s'") %
                        target_cell.name)
        # FIXME(comstud): Would be nice to kick this back up so that
        # the parent cell could retry, if we had a parent.
        msg = _("Couldn't communicate with any cells")
        LOG.error(msg)
        raise exception.NoCellsAvailable()

    def _build_instances(self, message, target_cells, instance_uuids,
                         build_inst_kwargs):
        """Attempt to build instance(s) or send msg to child cell.

        Same cell-by-cell strategy as _run_instance, but goes through the
        conductor's build_instances path. Raises NoCellsAvailable when every
        cell fails.
        """
        ctxt = message.ctxt
        instance_properties = build_inst_kwargs['instances'][0]
        filter_properties = build_inst_kwargs['filter_properties']
        instance_type = filter_properties['instance_type']
        image = build_inst_kwargs['image']
        security_groups = build_inst_kwargs['security_groups']
        block_device_mapping = build_inst_kwargs['block_device_mapping']
        LOG.debug(_("Building instances with routing_path=%(routing_path)s"),
                  {'routing_path': message.routing_path})
        for target_cell in target_cells:
            try:
                if target_cell.is_me:
                    # Need to create instance DB entries as the conductor
                    # expects that the instance(s) already exists.
                    self._create_instances_here(ctxt, instance_uuids,
                            instance_properties, instance_type, image,
                            security_groups, block_device_mapping)
                    # Need to record the create action in the db as the
                    # conductor expects it to already exist.
                    self._create_action_here(ctxt, instance_uuids)
                    self.compute_task_api.build_instances(ctxt,
                            **build_inst_kwargs)
                    return
                self.msg_runner.build_instances(ctxt, target_cell,
                                                build_inst_kwargs)
                return
            except Exception:
                LOG.exception(_("Couldn't communicate with cell '%s'") %
                        target_cell.name)
        # FIXME(comstud): Would be nice to kick this back up so that
        # the parent cell could retry, if we had a parent.
        msg = _("Couldn't communicate with any cells")
        LOG.error(msg)
        raise exception.NoCellsAvailable()

    def build_instances(self, message, build_inst_kwargs):
        """Entry point: pick a cell and build instances (conductor path)."""
        image = build_inst_kwargs['image']
        instance_uuids = [inst['uuid'] for inst in
                          build_inst_kwargs['instances']]
        instances = build_inst_kwargs['instances']
        request_spec = scheduler_utils.build_request_spec(message.ctxt,
                                                          image, instances)
        filter_properties = copy.copy(build_inst_kwargs['filter_properties'])
        filter_properties.update({'context': message.ctxt,
                                  'scheduler': self,
                                  'routing_path': message.routing_path,
                                  'host_sched_kwargs': build_inst_kwargs,
                                  'request_spec': request_spec})
        self._schedule_build_to_cells(message, instance_uuids,
                filter_properties, self._build_instances, build_inst_kwargs)

    def run_instance(self, message, host_sched_kwargs):
        """Entry point: pick a cell and run instances (scheduler RPC path)."""
        request_spec = host_sched_kwargs['request_spec']
        instance_uuids = request_spec['instance_uuids']
        filter_properties = copy.copy(host_sched_kwargs['filter_properties'])
        filter_properties.update({'context': message.ctxt,
                                  'scheduler': self,
                                  'routing_path': message.routing_path,
                                  'host_sched_kwargs': host_sched_kwargs,
                                  'request_spec': request_spec})
        self._schedule_build_to_cells(message, instance_uuids,
                filter_properties, self._run_instance, host_sched_kwargs)

    def _schedule_build_to_cells(self, message, instance_uuids,
                                 filter_properties, method, method_kwargs):
        """Pick a cell where we should create a new instance(s).

        Retries up to CONF.cells.scheduler_retries times (with
        scheduler_retry_delay seconds between attempts) when no cell is
        available; on any other failure the instances are marked ERROR both
        at the top of the cell tree and, best-effort, in the local DB.
        """
        try:
            for i in xrange(max(0, CONF.cells.scheduler_retries) + 1):
                try:
                    target_cells = self._grab_target_cells(filter_properties)
                    return method(message, target_cells, instance_uuids,
                                  method_kwargs)
                except exception.NoCellsAvailable:
                    if i == max(0, CONF.cells.scheduler_retries):
                        raise
                    sleep_time = max(1, CONF.cells.scheduler_retry_delay)
                    LOG.info(_("No cells available when scheduling.  Will "
                               "retry in %(sleep_time)s second(s)"),
                             {'sleep_time': sleep_time})
                    time.sleep(sleep_time)
                    continue
        except Exception:
            LOG.exception(_("Error scheduling instances %(instance_uuids)s"),
                          {'instance_uuids': instance_uuids})
            ctxt = message.ctxt
            for instance_uuid in instance_uuids:
                self.msg_runner.instance_update_at_top(ctxt,
                        {'uuid': instance_uuid,
                         'vm_state': vm_states.ERROR})
                try:
                    # Best-effort local DB update; the broadcast above is the
                    # authoritative state change.
                    self.db.instance_update(ctxt,
                                            instance_uuid,
                                            {'vm_state': vm_states.ERROR})
                except Exception:
                    pass
| |
"""
TOXCAST dataset loader.
"""
import os
import deepchem as dc
from deepchem.molnet.load_function.molnet_loader import TransformerGenerator, _MolnetLoader
from deepchem.data import Dataset
from typing import List, Optional, Tuple, Union
# Location of the gzipped ToxCast assay CSV on DeepChem's public S3 bucket.
TOXCAST_URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/toxcast_data.csv.gz"

# Names of the bioassay target columns in the ToxCast CSV.  These are passed
# to the CSVLoader as the task list, so the order here must match the column
# order expected by downstream code.
TOXCAST_TASKS = [
    'ACEA_T47D_80hr_Negative', 'ACEA_T47D_80hr_Positive',
    'APR_HepG2_CellCycleArrest_24h_dn', 'APR_HepG2_CellCycleArrest_24h_up',
    'APR_HepG2_CellCycleArrest_72h_dn', 'APR_HepG2_CellLoss_24h_dn',
    'APR_HepG2_CellLoss_72h_dn', 'APR_HepG2_MicrotubuleCSK_24h_dn',
    'APR_HepG2_MicrotubuleCSK_24h_up', 'APR_HepG2_MicrotubuleCSK_72h_dn',
    'APR_HepG2_MicrotubuleCSK_72h_up', 'APR_HepG2_MitoMass_24h_dn',
    'APR_HepG2_MitoMass_24h_up', 'APR_HepG2_MitoMass_72h_dn',
    'APR_HepG2_MitoMass_72h_up', 'APR_HepG2_MitoMembPot_1h_dn',
    'APR_HepG2_MitoMembPot_24h_dn', 'APR_HepG2_MitoMembPot_72h_dn',
    'APR_HepG2_MitoticArrest_24h_up', 'APR_HepG2_MitoticArrest_72h_up',
    'APR_HepG2_NuclearSize_24h_dn', 'APR_HepG2_NuclearSize_72h_dn',
    'APR_HepG2_NuclearSize_72h_up', 'APR_HepG2_OxidativeStress_24h_up',
    'APR_HepG2_OxidativeStress_72h_up', 'APR_HepG2_StressKinase_1h_up',
    'APR_HepG2_StressKinase_24h_up', 'APR_HepG2_StressKinase_72h_up',
    'APR_HepG2_p53Act_24h_up', 'APR_HepG2_p53Act_72h_up',
    'APR_Hepat_Apoptosis_24hr_up', 'APR_Hepat_Apoptosis_48hr_up',
    'APR_Hepat_CellLoss_24hr_dn', 'APR_Hepat_CellLoss_48hr_dn',
    'APR_Hepat_DNADamage_24hr_up', 'APR_Hepat_DNADamage_48hr_up',
    'APR_Hepat_DNATexture_24hr_up', 'APR_Hepat_DNATexture_48hr_up',
    'APR_Hepat_MitoFxnI_1hr_dn', 'APR_Hepat_MitoFxnI_24hr_dn',
    'APR_Hepat_MitoFxnI_48hr_dn', 'APR_Hepat_NuclearSize_24hr_dn',
    'APR_Hepat_NuclearSize_48hr_dn', 'APR_Hepat_Steatosis_24hr_up',
    'APR_Hepat_Steatosis_48hr_up', 'ATG_AP_1_CIS_dn', 'ATG_AP_1_CIS_up',
    'ATG_AP_2_CIS_dn', 'ATG_AP_2_CIS_up', 'ATG_AR_TRANS_dn', 'ATG_AR_TRANS_up',
    'ATG_Ahr_CIS_dn', 'ATG_Ahr_CIS_up', 'ATG_BRE_CIS_dn', 'ATG_BRE_CIS_up',
    'ATG_CAR_TRANS_dn', 'ATG_CAR_TRANS_up', 'ATG_CMV_CIS_dn', 'ATG_CMV_CIS_up',
    'ATG_CRE_CIS_dn', 'ATG_CRE_CIS_up', 'ATG_C_EBP_CIS_dn', 'ATG_C_EBP_CIS_up',
    'ATG_DR4_LXR_CIS_dn', 'ATG_DR4_LXR_CIS_up', 'ATG_DR5_CIS_dn',
    'ATG_DR5_CIS_up', 'ATG_E2F_CIS_dn', 'ATG_E2F_CIS_up', 'ATG_EGR_CIS_up',
    'ATG_ERE_CIS_dn', 'ATG_ERE_CIS_up', 'ATG_ERRa_TRANS_dn',
    'ATG_ERRg_TRANS_dn', 'ATG_ERRg_TRANS_up', 'ATG_ERa_TRANS_up',
    'ATG_E_Box_CIS_dn', 'ATG_E_Box_CIS_up', 'ATG_Ets_CIS_dn', 'ATG_Ets_CIS_up',
    'ATG_FXR_TRANS_up', 'ATG_FoxA2_CIS_dn', 'ATG_FoxA2_CIS_up',
    'ATG_FoxO_CIS_dn', 'ATG_FoxO_CIS_up', 'ATG_GAL4_TRANS_dn',
    'ATG_GATA_CIS_dn', 'ATG_GATA_CIS_up', 'ATG_GLI_CIS_dn', 'ATG_GLI_CIS_up',
    'ATG_GRE_CIS_dn', 'ATG_GRE_CIS_up', 'ATG_GR_TRANS_dn', 'ATG_GR_TRANS_up',
    'ATG_HIF1a_CIS_dn', 'ATG_HIF1a_CIS_up', 'ATG_HNF4a_TRANS_dn',
    'ATG_HNF4a_TRANS_up', 'ATG_HNF6_CIS_dn', 'ATG_HNF6_CIS_up',
    'ATG_HSE_CIS_dn', 'ATG_HSE_CIS_up', 'ATG_IR1_CIS_dn', 'ATG_IR1_CIS_up',
    'ATG_ISRE_CIS_dn', 'ATG_ISRE_CIS_up', 'ATG_LXRa_TRANS_dn',
    'ATG_LXRa_TRANS_up', 'ATG_LXRb_TRANS_dn', 'ATG_LXRb_TRANS_up',
    'ATG_MRE_CIS_up', 'ATG_M_06_TRANS_up', 'ATG_M_19_CIS_dn',
    'ATG_M_19_TRANS_dn', 'ATG_M_19_TRANS_up', 'ATG_M_32_CIS_dn',
    'ATG_M_32_CIS_up', 'ATG_M_32_TRANS_dn', 'ATG_M_32_TRANS_up',
    'ATG_M_61_TRANS_up', 'ATG_Myb_CIS_dn', 'ATG_Myb_CIS_up', 'ATG_Myc_CIS_dn',
    'ATG_Myc_CIS_up', 'ATG_NFI_CIS_dn', 'ATG_NFI_CIS_up', 'ATG_NF_kB_CIS_dn',
    'ATG_NF_kB_CIS_up', 'ATG_NRF1_CIS_dn', 'ATG_NRF1_CIS_up',
    'ATG_NRF2_ARE_CIS_dn', 'ATG_NRF2_ARE_CIS_up', 'ATG_NURR1_TRANS_dn',
    'ATG_NURR1_TRANS_up', 'ATG_Oct_MLP_CIS_dn', 'ATG_Oct_MLP_CIS_up',
    'ATG_PBREM_CIS_dn', 'ATG_PBREM_CIS_up', 'ATG_PPARa_TRANS_dn',
    'ATG_PPARa_TRANS_up', 'ATG_PPARd_TRANS_up', 'ATG_PPARg_TRANS_up',
    'ATG_PPRE_CIS_dn', 'ATG_PPRE_CIS_up', 'ATG_PXRE_CIS_dn', 'ATG_PXRE_CIS_up',
    'ATG_PXR_TRANS_dn', 'ATG_PXR_TRANS_up', 'ATG_Pax6_CIS_up',
    'ATG_RARa_TRANS_dn', 'ATG_RARa_TRANS_up', 'ATG_RARb_TRANS_dn',
    'ATG_RARb_TRANS_up', 'ATG_RARg_TRANS_dn', 'ATG_RARg_TRANS_up',
    'ATG_RORE_CIS_dn', 'ATG_RORE_CIS_up', 'ATG_RORb_TRANS_dn',
    'ATG_RORg_TRANS_dn', 'ATG_RORg_TRANS_up', 'ATG_RXRa_TRANS_dn',
    'ATG_RXRa_TRANS_up', 'ATG_RXRb_TRANS_dn', 'ATG_RXRb_TRANS_up',
    'ATG_SREBP_CIS_dn', 'ATG_SREBP_CIS_up', 'ATG_STAT3_CIS_dn',
    'ATG_STAT3_CIS_up', 'ATG_Sox_CIS_dn', 'ATG_Sox_CIS_up', 'ATG_Sp1_CIS_dn',
    'ATG_Sp1_CIS_up', 'ATG_TAL_CIS_dn', 'ATG_TAL_CIS_up', 'ATG_TA_CIS_dn',
    'ATG_TA_CIS_up', 'ATG_TCF_b_cat_CIS_dn', 'ATG_TCF_b_cat_CIS_up',
    'ATG_TGFb_CIS_dn', 'ATG_TGFb_CIS_up', 'ATG_THRa1_TRANS_dn',
    'ATG_THRa1_TRANS_up', 'ATG_VDRE_CIS_dn', 'ATG_VDRE_CIS_up',
    'ATG_VDR_TRANS_dn', 'ATG_VDR_TRANS_up', 'ATG_XTT_Cytotoxicity_up',
    'ATG_Xbp1_CIS_dn', 'ATG_Xbp1_CIS_up', 'ATG_p53_CIS_dn', 'ATG_p53_CIS_up',
    'BSK_3C_Eselectin_down', 'BSK_3C_HLADR_down', 'BSK_3C_ICAM1_down',
    'BSK_3C_IL8_down', 'BSK_3C_MCP1_down', 'BSK_3C_MIG_down',
    'BSK_3C_Proliferation_down', 'BSK_3C_SRB_down',
    'BSK_3C_Thrombomodulin_down', 'BSK_3C_Thrombomodulin_up',
    'BSK_3C_TissueFactor_down', 'BSK_3C_TissueFactor_up', 'BSK_3C_VCAM1_down',
    'BSK_3C_Vis_down', 'BSK_3C_uPAR_down', 'BSK_4H_Eotaxin3_down',
    'BSK_4H_MCP1_down', 'BSK_4H_Pselectin_down', 'BSK_4H_Pselectin_up',
    'BSK_4H_SRB_down', 'BSK_4H_VCAM1_down', 'BSK_4H_VEGFRII_down',
    'BSK_4H_uPAR_down', 'BSK_4H_uPAR_up', 'BSK_BE3C_HLADR_down',
    'BSK_BE3C_IL1a_down', 'BSK_BE3C_IP10_down', 'BSK_BE3C_MIG_down',
    'BSK_BE3C_MMP1_down', 'BSK_BE3C_MMP1_up', 'BSK_BE3C_PAI1_down',
    'BSK_BE3C_SRB_down', 'BSK_BE3C_TGFb1_down', 'BSK_BE3C_tPA_down',
    'BSK_BE3C_uPAR_down', 'BSK_BE3C_uPAR_up', 'BSK_BE3C_uPA_down',
    'BSK_CASM3C_HLADR_down', 'BSK_CASM3C_IL6_down', 'BSK_CASM3C_IL6_up',
    'BSK_CASM3C_IL8_down', 'BSK_CASM3C_LDLR_down', 'BSK_CASM3C_LDLR_up',
    'BSK_CASM3C_MCP1_down', 'BSK_CASM3C_MCP1_up', 'BSK_CASM3C_MCSF_down',
    'BSK_CASM3C_MCSF_up', 'BSK_CASM3C_MIG_down',
    'BSK_CASM3C_Proliferation_down', 'BSK_CASM3C_Proliferation_up',
    'BSK_CASM3C_SAA_down', 'BSK_CASM3C_SAA_up', 'BSK_CASM3C_SRB_down',
    'BSK_CASM3C_Thrombomodulin_down', 'BSK_CASM3C_Thrombomodulin_up',
    'BSK_CASM3C_TissueFactor_down', 'BSK_CASM3C_VCAM1_down',
    'BSK_CASM3C_VCAM1_up', 'BSK_CASM3C_uPAR_down', 'BSK_CASM3C_uPAR_up',
    'BSK_KF3CT_ICAM1_down', 'BSK_KF3CT_IL1a_down', 'BSK_KF3CT_IP10_down',
    'BSK_KF3CT_IP10_up', 'BSK_KF3CT_MCP1_down', 'BSK_KF3CT_MCP1_up',
    'BSK_KF3CT_MMP9_down', 'BSK_KF3CT_SRB_down', 'BSK_KF3CT_TGFb1_down',
    'BSK_KF3CT_TIMP2_down', 'BSK_KF3CT_uPA_down', 'BSK_LPS_CD40_down',
    'BSK_LPS_Eselectin_down', 'BSK_LPS_Eselectin_up', 'BSK_LPS_IL1a_down',
    'BSK_LPS_IL1a_up', 'BSK_LPS_IL8_down', 'BSK_LPS_IL8_up',
    'BSK_LPS_MCP1_down', 'BSK_LPS_MCSF_down', 'BSK_LPS_PGE2_down',
    'BSK_LPS_PGE2_up', 'BSK_LPS_SRB_down', 'BSK_LPS_TNFa_down',
    'BSK_LPS_TNFa_up', 'BSK_LPS_TissueFactor_down', 'BSK_LPS_TissueFactor_up',
    'BSK_LPS_VCAM1_down', 'BSK_SAg_CD38_down', 'BSK_SAg_CD40_down',
    'BSK_SAg_CD69_down', 'BSK_SAg_Eselectin_down', 'BSK_SAg_Eselectin_up',
    'BSK_SAg_IL8_down', 'BSK_SAg_IL8_up', 'BSK_SAg_MCP1_down',
    'BSK_SAg_MIG_down', 'BSK_SAg_PBMCCytotoxicity_down',
    'BSK_SAg_PBMCCytotoxicity_up', 'BSK_SAg_Proliferation_down',
    'BSK_SAg_SRB_down', 'BSK_hDFCGF_CollagenIII_down', 'BSK_hDFCGF_EGFR_down',
    'BSK_hDFCGF_EGFR_up', 'BSK_hDFCGF_IL8_down', 'BSK_hDFCGF_IP10_down',
    'BSK_hDFCGF_MCSF_down', 'BSK_hDFCGF_MIG_down', 'BSK_hDFCGF_MMP1_down',
    'BSK_hDFCGF_MMP1_up', 'BSK_hDFCGF_PAI1_down',
    'BSK_hDFCGF_Proliferation_down', 'BSK_hDFCGF_SRB_down',
    'BSK_hDFCGF_TIMP1_down', 'BSK_hDFCGF_VCAM1_down', 'CEETOX_H295R_11DCORT_dn',
    'CEETOX_H295R_ANDR_dn', 'CEETOX_H295R_CORTISOL_dn', 'CEETOX_H295R_DOC_dn',
    'CEETOX_H295R_DOC_up', 'CEETOX_H295R_ESTRADIOL_dn',
    'CEETOX_H295R_ESTRADIOL_up', 'CEETOX_H295R_ESTRONE_dn',
    'CEETOX_H295R_ESTRONE_up', 'CEETOX_H295R_OHPREG_up',
    'CEETOX_H295R_OHPROG_dn', 'CEETOX_H295R_OHPROG_up', 'CEETOX_H295R_PROG_up',
    'CEETOX_H295R_TESTO_dn', 'CLD_ABCB1_48hr', 'CLD_ABCG2_48hr',
    'CLD_CYP1A1_24hr', 'CLD_CYP1A1_48hr', 'CLD_CYP1A1_6hr', 'CLD_CYP1A2_24hr',
    'CLD_CYP1A2_48hr', 'CLD_CYP1A2_6hr', 'CLD_CYP2B6_24hr', 'CLD_CYP2B6_48hr',
    'CLD_CYP2B6_6hr', 'CLD_CYP3A4_24hr', 'CLD_CYP3A4_48hr', 'CLD_CYP3A4_6hr',
    'CLD_GSTA2_48hr', 'CLD_SULT2A_24hr', 'CLD_SULT2A_48hr', 'CLD_UGT1A1_24hr',
    'CLD_UGT1A1_48hr', 'NCCT_HEK293T_CellTiterGLO', 'NCCT_QuantiLum_inhib_2_dn',
    'NCCT_QuantiLum_inhib_dn', 'NCCT_TPO_AUR_dn', 'NCCT_TPO_GUA_dn',
    'NHEERL_ZF_144hpf_TERATOSCORE_up', 'NVS_ADME_hCYP19A1', 'NVS_ADME_hCYP1A1',
    'NVS_ADME_hCYP1A2', 'NVS_ADME_hCYP2A6', 'NVS_ADME_hCYP2B6',
    'NVS_ADME_hCYP2C19', 'NVS_ADME_hCYP2C9', 'NVS_ADME_hCYP2D6',
    'NVS_ADME_hCYP3A4', 'NVS_ADME_hCYP4F12', 'NVS_ADME_rCYP2C12',
    'NVS_ENZ_hAChE', 'NVS_ENZ_hAMPKa1', 'NVS_ENZ_hAurA', 'NVS_ENZ_hBACE',
    'NVS_ENZ_hCASP5', 'NVS_ENZ_hCK1D', 'NVS_ENZ_hDUSP3', 'NVS_ENZ_hES',
    'NVS_ENZ_hElastase', 'NVS_ENZ_hFGFR1', 'NVS_ENZ_hGSK3b', 'NVS_ENZ_hMMP1',
    'NVS_ENZ_hMMP13', 'NVS_ENZ_hMMP2', 'NVS_ENZ_hMMP3', 'NVS_ENZ_hMMP7',
    'NVS_ENZ_hMMP9', 'NVS_ENZ_hPDE10', 'NVS_ENZ_hPDE4A1', 'NVS_ENZ_hPDE5',
    'NVS_ENZ_hPI3Ka', 'NVS_ENZ_hPTEN', 'NVS_ENZ_hPTPN11', 'NVS_ENZ_hPTPN12',
    'NVS_ENZ_hPTPN13', 'NVS_ENZ_hPTPN9', 'NVS_ENZ_hPTPRC', 'NVS_ENZ_hSIRT1',
    'NVS_ENZ_hSIRT2', 'NVS_ENZ_hTrkA', 'NVS_ENZ_hVEGFR2', 'NVS_ENZ_oCOX1',
    'NVS_ENZ_oCOX2', 'NVS_ENZ_rAChE', 'NVS_ENZ_rCNOS', 'NVS_ENZ_rMAOAC',
    'NVS_ENZ_rMAOAP', 'NVS_ENZ_rMAOBC', 'NVS_ENZ_rMAOBP', 'NVS_ENZ_rabI2C',
    'NVS_GPCR_bAdoR_NonSelective', 'NVS_GPCR_bDR_NonSelective',
    'NVS_GPCR_g5HT4', 'NVS_GPCR_gH2', 'NVS_GPCR_gLTB4', 'NVS_GPCR_gLTD4',
    'NVS_GPCR_gMPeripheral_NonSelective', 'NVS_GPCR_gOpiateK',
    'NVS_GPCR_h5HT2A', 'NVS_GPCR_h5HT5A', 'NVS_GPCR_h5HT6', 'NVS_GPCR_h5HT7',
    'NVS_GPCR_hAT1', 'NVS_GPCR_hAdoRA1', 'NVS_GPCR_hAdoRA2a',
    'NVS_GPCR_hAdra2A', 'NVS_GPCR_hAdra2C', 'NVS_GPCR_hAdrb1',
    'NVS_GPCR_hAdrb2', 'NVS_GPCR_hAdrb3', 'NVS_GPCR_hDRD1', 'NVS_GPCR_hDRD2s',
    'NVS_GPCR_hDRD4.4', 'NVS_GPCR_hH1', 'NVS_GPCR_hLTB4_BLT1', 'NVS_GPCR_hM1',
    'NVS_GPCR_hM2', 'NVS_GPCR_hM3', 'NVS_GPCR_hM4', 'NVS_GPCR_hNK2',
    'NVS_GPCR_hOpiate_D1', 'NVS_GPCR_hOpiate_mu', 'NVS_GPCR_hTXA2',
    'NVS_GPCR_p5HT2C', 'NVS_GPCR_r5HT1_NonSelective',
    'NVS_GPCR_r5HT_NonSelective', 'NVS_GPCR_rAdra1B',
    'NVS_GPCR_rAdra1_NonSelective', 'NVS_GPCR_rAdra2_NonSelective',
    'NVS_GPCR_rAdrb_NonSelective', 'NVS_GPCR_rNK1', 'NVS_GPCR_rNK3',
    'NVS_GPCR_rOpiate_NonSelective', 'NVS_GPCR_rOpiate_NonSelectiveNa',
    'NVS_GPCR_rSST', 'NVS_GPCR_rTRH', 'NVS_GPCR_rV1', 'NVS_GPCR_rabPAF',
    'NVS_GPCR_rmAdra2B', 'NVS_IC_hKhERGCh', 'NVS_IC_rCaBTZCHL',
    'NVS_IC_rCaDHPRCh_L', 'NVS_IC_rNaCh_site2', 'NVS_LGIC_bGABARa1',
    'NVS_LGIC_h5HT3', 'NVS_LGIC_hNNR_NBungSens', 'NVS_LGIC_rGABAR_NonSelective',
    'NVS_LGIC_rNNR_BungSens', 'NVS_MP_hPBR', 'NVS_MP_rPBR', 'NVS_NR_bER',
    'NVS_NR_bPR', 'NVS_NR_cAR', 'NVS_NR_hAR', 'NVS_NR_hCAR_Antagonist',
    'NVS_NR_hER', 'NVS_NR_hFXR_Agonist', 'NVS_NR_hFXR_Antagonist', 'NVS_NR_hGR',
    'NVS_NR_hPPARa', 'NVS_NR_hPPARg', 'NVS_NR_hPR', 'NVS_NR_hPXR',
    'NVS_NR_hRAR_Antagonist', 'NVS_NR_hRARa_Agonist', 'NVS_NR_hTRa_Antagonist',
    'NVS_NR_mERa', 'NVS_NR_rAR', 'NVS_NR_rMR', 'NVS_OR_gSIGMA_NonSelective',
    'NVS_TR_gDAT', 'NVS_TR_hAdoT', 'NVS_TR_hDAT', 'NVS_TR_hNET', 'NVS_TR_hSERT',
    'NVS_TR_rNET', 'NVS_TR_rSERT', 'NVS_TR_rVMAT2', 'OT_AR_ARELUC_AG_1440',
    'OT_AR_ARSRC1_0480', 'OT_AR_ARSRC1_0960', 'OT_ER_ERaERa_0480',
    'OT_ER_ERaERa_1440', 'OT_ER_ERaERb_0480', 'OT_ER_ERaERb_1440',
    'OT_ER_ERbERb_0480', 'OT_ER_ERbERb_1440', 'OT_ERa_EREGFP_0120',
    'OT_ERa_EREGFP_0480', 'OT_FXR_FXRSRC1_0480', 'OT_FXR_FXRSRC1_1440',
    'OT_NURR1_NURR1RXRa_0480', 'OT_NURR1_NURR1RXRa_1440',
    'TOX21_ARE_BLA_Agonist_ch1', 'TOX21_ARE_BLA_Agonist_ch2',
    'TOX21_ARE_BLA_agonist_ratio', 'TOX21_ARE_BLA_agonist_viability',
    'TOX21_AR_BLA_Agonist_ch1', 'TOX21_AR_BLA_Agonist_ch2',
    'TOX21_AR_BLA_Agonist_ratio', 'TOX21_AR_BLA_Antagonist_ch1',
    'TOX21_AR_BLA_Antagonist_ch2', 'TOX21_AR_BLA_Antagonist_ratio',
    'TOX21_AR_BLA_Antagonist_viability', 'TOX21_AR_LUC_MDAKB2_Agonist',
    'TOX21_AR_LUC_MDAKB2_Antagonist', 'TOX21_AR_LUC_MDAKB2_Antagonist2',
    'TOX21_AhR_LUC_Agonist', 'TOX21_Aromatase_Inhibition',
    'TOX21_AutoFluor_HEK293_Cell_blue', 'TOX21_AutoFluor_HEK293_Media_blue',
    'TOX21_AutoFluor_HEPG2_Cell_blue', 'TOX21_AutoFluor_HEPG2_Cell_green',
    'TOX21_AutoFluor_HEPG2_Media_blue', 'TOX21_AutoFluor_HEPG2_Media_green',
    'TOX21_ELG1_LUC_Agonist', 'TOX21_ERa_BLA_Agonist_ch1',
    'TOX21_ERa_BLA_Agonist_ch2', 'TOX21_ERa_BLA_Agonist_ratio',
    'TOX21_ERa_BLA_Antagonist_ch1', 'TOX21_ERa_BLA_Antagonist_ch2',
    'TOX21_ERa_BLA_Antagonist_ratio', 'TOX21_ERa_BLA_Antagonist_viability',
    'TOX21_ERa_LUC_BG1_Agonist', 'TOX21_ERa_LUC_BG1_Antagonist',
    'TOX21_ESRE_BLA_ch1', 'TOX21_ESRE_BLA_ch2', 'TOX21_ESRE_BLA_ratio',
    'TOX21_ESRE_BLA_viability', 'TOX21_FXR_BLA_Antagonist_ch1',
    'TOX21_FXR_BLA_Antagonist_ch2', 'TOX21_FXR_BLA_agonist_ch2',
    'TOX21_FXR_BLA_agonist_ratio', 'TOX21_FXR_BLA_antagonist_ratio',
    'TOX21_FXR_BLA_antagonist_viability', 'TOX21_GR_BLA_Agonist_ch1',
    'TOX21_GR_BLA_Agonist_ch2', 'TOX21_GR_BLA_Agonist_ratio',
    'TOX21_GR_BLA_Antagonist_ch2', 'TOX21_GR_BLA_Antagonist_ratio',
    'TOX21_GR_BLA_Antagonist_viability', 'TOX21_HSE_BLA_agonist_ch1',
    'TOX21_HSE_BLA_agonist_ch2', 'TOX21_HSE_BLA_agonist_ratio',
    'TOX21_HSE_BLA_agonist_viability', 'TOX21_MMP_ratio_down',
    'TOX21_MMP_ratio_up', 'TOX21_MMP_viability', 'TOX21_NFkB_BLA_agonist_ch1',
    'TOX21_NFkB_BLA_agonist_ch2', 'TOX21_NFkB_BLA_agonist_ratio',
    'TOX21_NFkB_BLA_agonist_viability', 'TOX21_PPARd_BLA_Agonist_viability',
    'TOX21_PPARd_BLA_Antagonist_ch1', 'TOX21_PPARd_BLA_agonist_ch1',
    'TOX21_PPARd_BLA_agonist_ch2', 'TOX21_PPARd_BLA_agonist_ratio',
    'TOX21_PPARd_BLA_antagonist_ratio', 'TOX21_PPARd_BLA_antagonist_viability',
    'TOX21_PPARg_BLA_Agonist_ch1', 'TOX21_PPARg_BLA_Agonist_ch2',
    'TOX21_PPARg_BLA_Agonist_ratio', 'TOX21_PPARg_BLA_Antagonist_ch1',
    'TOX21_PPARg_BLA_antagonist_ratio', 'TOX21_PPARg_BLA_antagonist_viability',
    'TOX21_TR_LUC_GH3_Agonist', 'TOX21_TR_LUC_GH3_Antagonist',
    'TOX21_VDR_BLA_Agonist_viability', 'TOX21_VDR_BLA_Antagonist_ch1',
    'TOX21_VDR_BLA_agonist_ch2', 'TOX21_VDR_BLA_agonist_ratio',
    'TOX21_VDR_BLA_antagonist_ratio', 'TOX21_VDR_BLA_antagonist_viability',
    'TOX21_p53_BLA_p1_ch1', 'TOX21_p53_BLA_p1_ch2', 'TOX21_p53_BLA_p1_ratio',
    'TOX21_p53_BLA_p1_viability', 'TOX21_p53_BLA_p2_ch1', 'TOX21_p53_BLA_p2_ch2',
    'TOX21_p53_BLA_p2_ratio', 'TOX21_p53_BLA_p2_viability',
    'TOX21_p53_BLA_p3_ch1', 'TOX21_p53_BLA_p3_ch2', 'TOX21_p53_BLA_p3_ratio',
    'TOX21_p53_BLA_p3_viability', 'TOX21_p53_BLA_p4_ch1', 'TOX21_p53_BLA_p4_ch2',
    'TOX21_p53_BLA_p4_ratio', 'TOX21_p53_BLA_p4_viability',
    'TOX21_p53_BLA_p5_ch1', 'TOX21_p53_BLA_p5_ch2', 'TOX21_p53_BLA_p5_ratio',
    'TOX21_p53_BLA_p5_viability', 'Tanguay_ZF_120hpf_AXIS_up',
    'Tanguay_ZF_120hpf_ActivityScore', 'Tanguay_ZF_120hpf_BRAI_up',
    'Tanguay_ZF_120hpf_CFIN_up', 'Tanguay_ZF_120hpf_CIRC_up',
    'Tanguay_ZF_120hpf_EYE_up', 'Tanguay_ZF_120hpf_JAW_up',
    'Tanguay_ZF_120hpf_MORT_up', 'Tanguay_ZF_120hpf_OTIC_up',
    'Tanguay_ZF_120hpf_PE_up', 'Tanguay_ZF_120hpf_PFIN_up',
    'Tanguay_ZF_120hpf_PIG_up', 'Tanguay_ZF_120hpf_SNOU_up',
    'Tanguay_ZF_120hpf_SOMI_up', 'Tanguay_ZF_120hpf_SWIM_up',
    'Tanguay_ZF_120hpf_TRUN_up', 'Tanguay_ZF_120hpf_TR_up',
    'Tanguay_ZF_120hpf_YSE_up'
]
class _ToxcastLoader(_MolnetLoader):
  """MoleculeNet loader for the ToxCast collection."""

  def create_dataset(self) -> Dataset:
    """Download the ToxCast CSV if missing and featurize it into a Dataset."""
    csv_path = os.path.join(self.data_dir, "toxcast_data.csv.gz")
    if not os.path.exists(csv_path):
      dc.utils.data_utils.download_url(url=TOXCAST_URL, dest_dir=self.data_dir)
    csv_loader = dc.data.CSVLoader(
        tasks=self.tasks, feature_field="smiles", featurizer=self.featurizer)
    return csv_loader.create_dataset(csv_path, shard_size=8192)
def load_toxcast(
    featurizer: Union[dc.feat.Featurizer, str] = 'ECFP',
    splitter: Union[dc.splits.Splitter, str, None] = 'scaffold',
    transformers: List[Union[TransformerGenerator, str]] = ['balancing'],
    reload: bool = True,
    data_dir: Optional[str] = None,
    save_dir: Optional[str] = None,
    **kwargs
) -> Tuple[List[str], Tuple[Dataset, ...], List[dc.trans.Transformer]]:
  """Load the ToxCast dataset.

  ToxCast is an extended data collection from the same initiative as
  Tox21, providing toxicology data for a large library of compounds based
  on in vitro high-throughput screening.  The processed collection
  includes qualitative results of over 600 experiments on 8k compounds.

  Random splitting is recommended for this dataset.

  The raw data csv file contains a "smiles" column (SMILES representation
  of the molecular structure) followed by the bioassay result columns
  "ACEA_T47D_80hr_Negative" through "Tanguay_ZF_120hpf_YSE_up".  Please
  refer to the section "high-throughput assay information" at
  https://www.epa.gov/chemical-research/toxicity-forecaster-toxcasttm-data
  for details.

  Parameters
  ----------
  featurizer: Featurizer or str
    the featurizer to use for processing the data. Alternatively you can
    pass one of the names from dc.molnet.featurizers as a shortcut.
  splitter: Splitter or str
    the splitter to use for splitting the data into training, validation,
    and test sets. Alternatively you can pass one of the names from
    dc.molnet.splitters as a shortcut. If this is None, all the data will
    be included in a single dataset.
  transformers: list of TransformerGenerators or strings
    the Transformers to apply to the data. Each one is specified by a
    TransformerGenerator or, as a shortcut, one of the names from
    dc.molnet.transformers.
  reload: bool
    if True, the first call for a particular featurizer and splitter will
    cache the datasets to disk, and subsequent calls will reload the
    cached datasets.
  data_dir: str
    a directory to save the raw data in
  save_dir: str
    a directory to save the dataset in

  Returns
  -------
  a tuple of (task names, (train/valid/test or single dataset), transformers)

  References
  ----------
  .. [1] Richard, Ann M., et al. "ToxCast chemical landscape: paving the road
     to 21st century toxicology." Chemical research in toxicology 29.8 (2016):
     1225-1251.
  """
  toxcast_loader = _ToxcastLoader(featurizer, splitter, transformers,
                                  TOXCAST_TASKS, data_dir, save_dir, **kwargs)
  return toxcast_loader.load_dataset('toxcast', reload)
| |
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from resource_management.core.logger import Logger
from resource_management.libraries.functions import conf_select
from resource_management.libraries.script import Script
@patch("os.path.exists", new = MagicMock(return_value=True))
@patch("os.path.isfile", new = MagicMock(return_value=False))
class TestHookAfterInstall(RMFTestCase):
  """Tests for the HDP after-INSTALL hook (Python 2, mock-based).

  Each test executes the hook script and then asserts, in order, the
  exact Resource calls the hook is expected to make.  The class-level
  patches force every path to "exist" and no path to be a file, so the
  hook's filesystem probes are deterministic.
  """

  # Run the hook as if it were installing the Hive Server component.
  CONFIG_OVERRIDES = {"serviceName":"HIVE", "role":"HIVE_SERVER"}

  def setUp(self):
    # Seed a minimal Script.config so the hook's params can resolve
    # stack_packages and stack_name without a full command JSON.
    Logger.initialize_logger()
    Script.config = dict()
    Script.config.update( { "configurations" : { "cluster-env" : {} }, "hostLevelParams": {} } )
    Script.config["configurations"]["cluster-env"]["stack_packages"] = RMFTestCase.get_stack_packages()
    Script.config["hostLevelParams"] = { "stack_name" : "HDP" }

  def test_hook_default(self):
    """Default config (no stack version): hook only writes core-site and the logfeeder conf dir."""
    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_file="default.json",
                       config_overrides = self.CONFIG_OVERRIDES
    )
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = '/etc/hadoop/conf',
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              only_if="ls /etc/hadoop/conf")
    self.assertResourceCalled('Directory',
                              '/etc/ambari-logsearch-logfeeder/conf',
                              mode = 0755,
                              cd_access = 'a',
                              create_parents = True)
    self.assertNoMoreResources()

  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_conf_select(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
    """With a stack version set: hdp-select is invoked and every package's
    conf dir is backed up, deleted and re-linked to the versioned dir."""
    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      # Fake conf_select.create: return the versioned conf dir path.
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES)

    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.3.0.0-1234'),
                              sudo = True)
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = "/usr/hdp/current/hadoop-client/conf",
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              only_if="ls /usr/hdp/current/hadoop-client/conf")
    self.assertResourceCalled('Directory',
                              '/etc/ambari-logsearch-logfeeder/conf',
                              mode = 0755,
                              cd_access = 'a',
                              create_parents = True)

    # Verify the backup/delete/link dance for every known package conf dir.
    package_dirs = conf_select.get_package_dirs();
    for package, dir_defs in package_dirs.iteritems():
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        conf_backup_dir = conf_dir + ".backup"
        self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
                                  not_if = 'test -e ' + conf_backup_dir,
                                  sudo = True,)
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        current_dir = dir_def['current_dir']
        self.assertResourceCalled('Directory', conf_dir,
                                  action = ['delete'],)
        self.assertResourceCalled('Link', conf_dir,
                                  to = current_dir,)
      #HACK for Atlas
      if package in ["atlas", ]:
        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
                                  only_if = 'test -e ' + "/etc/atlas/conf")

    self.assertNoMoreResources()

  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_conf_select_with_error(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
    """Same as test_hook_default_conf_select, but conf_select raises for the
    "pig" package unless errors are ignored — the hook must still finish."""
    def mocked_conf_select(arg1, arg2, arg3, dry_run = False, ignore_errors = False):
      # Simulate a conf_select failure for pig on the real (non-dry) run.
      if arg2 == "pig" and not dry_run:
        if not ignore_errors:
          raise Exception("whoops")
        else:
          return None
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select
    conf_select_select_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES)

    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.3.0.0-1234'),
                              sudo = True)
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = "/usr/hdp/current/hadoop-client/conf",
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              only_if="ls /usr/hdp/current/hadoop-client/conf")
    self.assertResourceCalled('Directory',
                              '/etc/ambari-logsearch-logfeeder/conf',
                              mode = 0755,
                              cd_access = 'a',
                              create_parents = True)

    package_dirs = conf_select.get_package_dirs();
    for package, dir_defs in package_dirs.iteritems():
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        conf_backup_dir = conf_dir + ".backup"
        self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
                                  not_if = 'test -e ' + conf_backup_dir,
                                  sudo = True,)
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        current_dir = dir_def['current_dir']
        self.assertResourceCalled('Directory', conf_dir,
                                  action = ['delete'],)
        self.assertResourceCalled('Link', conf_dir,
                                  to = current_dir,)
      #HACK for Atlas
      if package in ["atlas", ]:
        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
                                  only_if = 'test -e ' + "/etc/atlas/conf")

    self.assertNoMoreResources()

  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_stack_select_specific_version(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
    """
    Tests that <stack-selector-tool> set all on a specific version, not a 2.3* wildcard is used when
    installing a component when the cluster version is already set.
    :param rmtree_mock:
    :param symlink_mock:
    :param conf_select_select_mock:
    :param conf_select_create_mock:
    :return:
    """
    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES)

    # Only the first resource is asserted here: the exact version string
    # must be passed to hdp-select, never a wildcard.
    self.assertResourceCalled('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hive-server2', '2.3.0.0-1234'),
                              sudo = True)

  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_default_conf_select_suspended(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock):
    """When an upgrade is suspended, the hook must skip hdp-select but still
    perform the conf-select backup/link work."""
    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"
    json_content['roleParams']['upgrade_suspended'] = "true"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES)

    # same assertions as test_hook_default_conf_select, but skip hdp-select set all
    self.assertResourceCalled('XmlConfig', 'core-site.xml',
                              owner = 'hdfs',
                              group = 'hadoop',
                              conf_dir = "/usr/hdp/current/hadoop-client/conf",
                              configurations = self.getConfig()['configurations']['core-site'],
                              configuration_attributes = self.getConfig()['configuration_attributes']['core-site'],
                              only_if="ls /usr/hdp/current/hadoop-client/conf")
    self.assertResourceCalled('Directory',
                              '/etc/ambari-logsearch-logfeeder/conf',
                              mode = 0755,
                              cd_access = 'a',
                              create_parents = True)

    package_dirs = conf_select.get_package_dirs();
    for package, dir_defs in package_dirs.iteritems():
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        conf_backup_dir = conf_dir + ".backup"
        self.assertResourceCalled('Execute', ('cp', '-R', '-p', conf_dir, conf_backup_dir),
                                  not_if = 'test -e ' + conf_backup_dir,
                                  sudo = True,)
      for dir_def in dir_defs:
        conf_dir = dir_def['conf_dir']
        current_dir = dir_def['current_dir']
        self.assertResourceCalled('Directory', conf_dir,
                                  action = ['delete'],)
        self.assertResourceCalled('Link', conf_dir,
                                  to = current_dir,)
      #HACK for Atlas
      if package in ["atlas", ]:
        self.assertResourceCalled('Execute', 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cp -R --no-clobber /etc/atlas/conf.backup/* /etc/atlas/conf',
                                  only_if = 'test -e ' + "/etc/atlas/conf")

    self.assertNoMoreResources()

  @patch("resource_management.core.Logger.warning")
  @patch("shared_initialization.load_version", new = MagicMock(return_value="2.3.0.0-1234"))
  @patch("resource_management.libraries.functions.conf_select.create")
  @patch("resource_management.libraries.functions.conf_select.select")
  @patch("os.symlink")
  @patch("shutil.rmtree")
  def test_hook_setup_stack_symlinks_skipped(self, rmtree_mock, symlink_mock, conf_select_select_mock, conf_select_create_mock, logger_warning_mock):
    """
    Tests that <stack-selector-tool> set all is not called on sys_prepped hosts
    :return:
    """
    def mocked_conf_select(arg1, arg2, arg3, dry_run = False):
      return "/etc/{0}/{1}/0".format(arg2, arg3)

    conf_select_create_mock.side_effect = mocked_conf_select

    config_file = self.get_src_folder() + "/test/python/stacks/2.0.6/configs/default.json"
    with open(config_file, "r") as f:
      json_content = json.load(f)

    version = '2.3.0.0-1234'
    json_content['commandParams']['version'] = version
    json_content['hostLevelParams']['stack_version'] = "2.3"
    json_content['hostLevelParams']['host_sys_prepped'] = "true"

    self.executeScript("2.0.6/hooks/after-INSTALL/scripts/hook.py",
                       classname="AfterInstallHook",
                       command="hook",
                       config_dict = json_content,
                       config_overrides = self.CONFIG_OVERRIDES)

    # The hook should only warn (exact message asserted) instead of running
    # the stack selector tool on a sys_prepped host.
    logger_warning_mock.assert_any_call('Skipping running stack-selector-tool becase this is a sys_prepped host. This may cause symlink pointers not to be created for HDP componets installed later on top of an already sys_prepped host.')
| |
from teafacto.blocks.basic import Linear as Lin, Softmax
from teafacto.blocks.basic import VectorEmbed, IdxToOneHot, MatDot
from teafacto.blocks.memory import MemoryStack, MemoryBlock, DotMemAddr
from teafacto.blocks.seq.rnn import MakeRNU
from teafacto.blocks.seq.rnn import SeqDecoder, BiRNU, SeqEncoder, MaskSetMode, MaskMode, RecStack
from teafacto.blocks.seq.rnu import GRU
from teafacto.core.base import Block, tensorops as T, Val, asblock
from teafacto.core.stack import stack
from teafacto.util import issequence
from teafacto.use.recsearch import SeqTransDecWrapper
"""
class SeqEncDec(Block):
    """Sequence encoder-decoder block.

    Wires an encoder and a decoder together, optionally passing the
    encoder's final state through ``statetrans`` to initialize the
    decoder.  NOTE(review): a bare triple-quote appears just above this
    class at module level; if it opens an unterminated string, this
    class may effectively be commented out — confirm against the full
    file.
    """
    def __init__(self, enc, dec, statetrans=None, **kw):
        # statetrans may be: a Block (applied to the final encoding),
        # True (pass the final encoding through unchanged), or None.
        super(SeqEncDec, self).__init__(**kw)
        self.enc = enc
        self.dec = dec
        if isinstance(statetrans, Block):
            # wrap so it always takes (final_enc, all_enc) and ignores the latter
            self.statetrans = asblock(lambda x, y: statetrans(x))
        elif statetrans is True:
            self.statetrans = asblock(lambda x, y: x)
        else:
            self.statetrans = statetrans

    def apply(self, inpseq, outseq, maskseq=None):
        # Encode, then decode conditioned on the encoding; when a state
        # transform is configured, seed the decoder's initial state with it.
        if maskseq is None:
            mask = "auto"
        else:
            mask = maskseq
        # assumes enc returns (final encoding, per-step encodings, mask) — TODO confirm
        enco, allenco, encmask = self.enc(inpseq, mask=mask)
        mask = None
        if self.statetrans is not None:
            topstate = self.statetrans(enco, allenco)
            deco = self.dec(allenco, outseq, initstates=[topstate], mask=mask, encmask=encmask)
        else:
            deco = self.dec(allenco, outseq, mask=mask, encmask=encmask)      # no state transfer
        return deco

    def get_init_info(self, inpseq, batsize, maskseq=None):     # TODO: must evaluate enc here, in place, without any side effects
        # Used by beam/greedy search wrappers: runs the encoder eagerly
        # (predict) and builds the decoder's initial recurrence info.
        enco, allenco, encmask = self.enc.predict(inpseq, mask=maskseq)
        if self.statetrans is not None:
            topstate = self.statetrans.predict(enco, allenco)   # this gives unused input warning in theano - it's normal
            initstates = [topstate]
        else:
            # no explicit init states: pass batch size so the decoder
            # can allocate zero states itself
            initstates = batsize
        return self.dec.get_init_info(Val(allenco),
                                      None,
                                      [Val(x) for x in initstates]
                                      if issequence(initstates)
                                      else initstates,
                                      encmask=Val(encmask))

    def rec(self, x_t, *states):
        # Single-step recurrence: delegate straight to the decoder.
        return self.dec.rec(x_t, *states)
class SeqEncDecAtt(SeqEncDec):
    """Attention-based sequence encoder-decoder.

    Builds a SeqEncoder (exposing per-step outputs and mask) and a
    SeqDecoder with an attention mechanism, then defers to SeqEncDec for
    the encode/decode wiring.  NOTE(review): ``Attention`` is not
    imported in the visible portion of this file — presumably imported
    elsewhere; verify.
    """
    def __init__(self, enclayers, declayers, attgen, attcon,
                 decinnerdim, inconcat, outconcat,
                 statetrans=None, vecout=False, **kw):
        # Encoder must return all step outputs and the mask for attention.
        enc = SeqEncoder(*enclayers)\
            .with_outputs\
            .with_mask\
            .maskoptions(-1, MaskMode.AUTO, MaskSetMode.ZERO)
        # vecout=True disables the softmax output block (raw vectors out).
        smo = False if vecout else None
        dec = SeqDecoder(
            declayers,
            attention=Attention(attgen, attcon),
            innerdim=decinnerdim,
            outconcat=outconcat,
            inconcat=inconcat,
            softmaxoutblock=smo,
        )
        super(SeqEncDecAtt, self).__init__(enc, dec, statetrans=statetrans, **kw)
class SimpleSeqEncDecAtt(SeqEncDecAtt):
def __init__(self,
inpvocsize=400,
inpembdim=None,
outvocsize=100,
outembdim=None,
encdim=100,
decdim=100,
attdim=100,
bidir=False,
rnu=GRU,
outconcat=True,
inconcat=False,
statetrans=None,
vecout=False,
**kw):
encinnerdim = [encdim] if not issequence(encdim) else encdim
decinnerdim = [decdim] if not issequence(decdim) else decdim
self.enclayers, lastencinnerdim = self.getenclayers(inpembdim, inpvocsize, encinnerdim, bidir, rnu)
# attention
lastdecinnerdim = decinnerdim[-1]
attgen = LinearGateAttentionGenerator(indim=lastencinnerdim + lastdecinnerdim, attdim=attdim)
attcon = WeightedSumAttCon()
self.declayers = self.getdeclayers(outembdim, outvocsize, encinnerdim, decinnerdim, inconcat, rnu)
argdecinnerdim = lastdecinnerdim if outconcat is False else lastencinnerdim + lastdecinnerdim
if statetrans is True:
if lastencinnerdim != lastdecinnerdim: # state shape mismatch
statetrans = MatDot(lastencinnerdim, lastdecinnerdim)
super(SimpleSeqEncDecAtt, self).__init__(self.enclayers, self.declayers,
attgen, attcon, argdecinnerdim, inconcat, outconcat,
statetrans=statetrans, vecout=vecout, **kw)
def getenclayers(self, inpembdim, inpvocsize, encinnerdim, bidir, rnu):
if inpembdim is None:
inpemb = IdxToOneHot(inpvocsize)
inpembdim = inpvocsize
elif isinstance(inpembdim, Block):
inpemb = inpembdim
inpembdim = inpemb.outdim
else:
inpemb = VectorEmbed(indim=inpvocsize, dim=inpembdim)
encrnus = []
dims = [inpembdim] + encinnerdim
#print dims
i = 1
lastencinnerdim = dims[-1] if not bidir else dims[-1] * 2
while i < len(dims):
if bidir:
newrnu = BiRNU.fromrnu(rnu, dim=dims[i - 1], innerdim=dims[i])
else:
newrnu = rnu(dim=dims[i - 1], innerdim=dims[i])
encrnus.append(newrnu)
i += 1
enclayers = [inpemb] + encrnus
return enclayers, lastencinnerdim
def getdeclayers(self, outembdim, outvocsize, encinnerdim,
decinnerdim, inconcat, rnu):
if outembdim is None:
outemb = IdxToOneHot(outvocsize)
outembdim = outvocsize
elif isinstance(outembdim, Block):
outemb = outembdim
outembdim = outemb.outdim
else:
outemb = VectorEmbed(indim=outvocsize, dim=outembdim)
decrnus = []
firstdecdim = outembdim if inconcat is False else outembdim + encinnerdim
dims = [firstdecdim] + decinnerdim
i = 1
while i < len(dims):
decrnus.append(rnu(dim=dims[i - 1], innerdim=dims[i]))
i += 1
declayers = [outemb] + decrnus
return declayers
"""
class SeqTransducer(Block):
    """Sequence transducer: embeds an input index sequence and runs it
    through a recurrent stack topped by a Lin+Softmax output layer."""
    def __init__(self, embedder, *layers, **kw):
        """``embedder`` maps input indices to vectors; ``layers`` are the
        recurrent layers. A final Lin(smodim -> outdim) + Softmax is appended
        automatically, so ``smodim`` and ``outdim`` keyword args are required."""
        assert("smodim" in kw and "outdim" in kw)
        self.embedder = embedder
        softmax_indim = kw.pop("smodim")
        out_dim = kw.pop("outdim")
        super(SeqTransducer, self).__init__(**kw)
        all_layers = layers + (Lin(indim=softmax_indim, dim=out_dim), Softmax())
        self.block = RecStack(*all_layers)
    def apply(self, inpseq, maskseq=None):      # inpseq: idx^(batsize, seqlen), maskseq: f32^(batsize, seqlen)
        """Embed ``inpseq``, run the stack, then overwrite masked positions."""
        embedded = self.embedder(inpseq)
        scores = self.block(embedded, mask=maskseq)     # f32^(batsize, seqlen, outdim)
        return self.applymask(scores, maskseq=maskseq)
    @classmethod
    def applymask(cls, xseq, maskseq=None):
        """Where ``maskseq`` is 0, replace the model output with a one-hot
        distribution on output symbol 0; where it is 1, keep ``xseq``."""
        if maskseq is None:
            return xseq
        # broadcast the (batsize, seqlen) mask over the output dimension
        expanded_mask = T.tensordot(maskseq, T.ones((xseq.shape[2],)), 0)
        # distribution assigning 100% probability to output symbol 0
        zero_onehot = T.concatenate(
            [T.ones((xseq.shape[0], xseq.shape[1], 1)),
             T.zeros((xseq.shape[0], xseq.shape[1], xseq.shape[2] - 1))],
            axis=2)
        return xseq * expanded_mask + zero_onehot * (1.0 - expanded_mask)
class SimpleSeqTransducer(SeqTransducer):
    """Convenience SeqTransducer built purely from dimension arguments."""
    def __init__(self, indim=400, embdim=50, inpemb=None, innerdim=100, outdim=50, rnu=GRU, **kw):
        # Either take a caller-supplied embedder or create one from dims.
        if inpemb is not None:
            self.emb = inpemb
            embdim = self.emb.outdim
        else:
            self.emb = VectorEmbed(indim=indim, dim=embdim)
        hidden_dims = innerdim if issequence(innerdim) else [innerdim]
        dims = [embdim] + hidden_dims
        self.rnn = MakeRNU.fromdims(dims, rnu=rnu)[0]
        super(SimpleSeqTransducer, self).__init__(
            self.emb, *self.rnn, smodim=dims[-1], outdim=outdim, **kw)
class SeqTransDec(Block):
    """Transducer-style decoder that consumes an input token and the previous
    output token at each step."""
    searchwrapper = SeqTransDecWrapper
    def __init__(self, *layers, **kw):
        """First two ``layers`` must be the input and output embedders; the
        rest are recurrent layers. A final Lin+Softmax is appended
        automatically, so ``smodim`` and ``outdim`` keyword args are required."""
        assert("smodim" in kw and "outdim" in kw)
        softmax_indim = kw.pop("smodim")
        out_dim = kw.pop("outdim")
        super(SeqTransDec, self).__init__(**kw)
        self.inpemb, self.outemb = layers[0], layers[1]
        recurrent = layers[2:] + (Lin(indim=softmax_indim, dim=out_dim), Softmax())
        self.block = RecStack(*recurrent)
    def apply(self, inpseq, outseq, maskseq=None):
        """Embed both sequences, run the stack, and apply the output mask."""
        joint = self._get_emb(inpseq, outseq)
        scores = self.block(joint)
        return SeqTransducer.applymask(scores, maskseq=maskseq)
    def _get_emb(self, inpseq, outseq):
        """Concatenate input and output embeddings along the feature axis."""
        left = self.inpemb(inpseq)      # (batsize, seqlen, inpembdim)
        right = self.outemb(outseq)     # (batsize, seqlen, outembdim)
        return T.concatenate([left, right], axis=left.ndim - 1)    # (batsize, seqlen, inpembdim+outembdim)
    def rec(self, inpa, inpb, *states):
        """Single recurrent step (used during decoding/search)."""
        return self.block.rec(self._get_emb(inpa, inpb), *states)
    def get_init_info(self, initstates):
        return self.block.get_init_info(initstates)
    def get_inits(self, initstates):
        # alias kept for API compatibility
        return self.get_init_info(initstates)
class SimpleSeqTransDec(SeqTransDec):
    """Convenience SeqTransDec built purely from dimension arguments.

    Builds separate input/output embedders and a recurrent stack whose first
    layer sees the concatenated input+output embeddings.
    """
    def __init__(self, indim=400, outdim=50, inpembdim=50, outembdim=50, innerdim=100, **kw):
        self.inpemb = VectorEmbed(indim=indim, dim=inpembdim)
        self.outemb = VectorEmbed(indim=outdim, dim=outembdim)
        # (removed a dead `self.rnn = []` assignment that was immediately
        # overwritten below)
        if not issequence(innerdim):
            innerdim = [innerdim]
        # first recurrent layer consumes the concatenated embeddings
        innerdim = [inpembdim+outembdim] + innerdim
        self.rnn = MakeRNU.fromdims(innerdim)[0]
        super(SimpleSeqTransDec, self).__init__(self.inpemb, self.outemb, *self.rnn, smodim=innerdim[-1], outdim=outdim, **kw)
# BASIC SEQ TO IDX
# specify by enc and out
# specify by layers
# specify by dims
# components:
# seq2vec
# specify by layers
# specify by dims
# vec2idx:
# specify by layers
class Vec2Idx(Block):
    """Maps a vector to a distribution over indices via a stack of output
    layers (or a ready-made MemoryStack)."""
    def __init__(self, outlayers, **kw):
        super(Vec2Idx, self).__init__(**kw)
        if isinstance(outlayers, MemoryStack):
            # a MemoryStack is already a complete scoring block
            self.out = outlayers
        else:
            layer_list = outlayers if issequence(outlayers) else [outlayers]
            # guarantee the stack ends in a Softmax
            if type(layer_list[-1]) is not Softmax:
                layer_list.append(Softmax())
            self.out = stack(*layer_list)
    def apply(self, x, *args):
        """Apply the output stack to vector ``x``."""
        return self.out(x, *args)
# specify by dims
class SimpleVec2Idx(Vec2Idx):
    """Vec2Idx with a single linear projection before the softmax."""
    def __init__(self, indim=100, outdim=100, **kw):
        projection = MatDot(indim=indim, dim=outdim)
        super(SimpleVec2Idx, self).__init__(projection, **kw)
class MemVec2Idx(Vec2Idx):
    """Vec2Idx that scores the input vector against an encoded, addressable
    memory built from ``memdata``."""
    def __init__(self, memenc, memdata, memaddr=DotMemAddr, memdim=None, memattdim=100, **kw):
        assert(memenc is not None)
        encoded_memory = MemoryBlock(memenc, memdata, indim=memdata.shape[0], outdim=memdim)
        addressed = MemoryStack(encoded_memory, memaddr, memattdim=memattdim)
        super(MemVec2Idx, self).__init__(addressed, **kw)
class DynMemVec2Idx(MemVec2Idx):
    """MemVec2Idx variant without static memory data (``memdata`` is None,
    i.e. the memory content is supplied dynamically)."""
    def __init__(self, memenc, memaddr=DotMemAddr, memdim=None, memattdim=100, **kw):
        # BUG FIX: the super() arguments were reversed
        # (`super(self, DynMemVec2Idx)`), which raises
        # "TypeError: super() argument 1 must be type" on every instantiation.
        super(DynMemVec2Idx, self).__init__(memenc, None, memaddr=memaddr,
                                            memdim=memdim, memattdim=memattdim, **kw)
| |
import io
import textwrap
import pytest
from .. import validate_docstrings
class BadDocstrings:
    """Everything here has a bad docstring"""
    # NOTE(review): every docstring below is intentionally defective -- the
    # text is fixture data asserted on (verbatim) by
    # TestValidator.test_bad_docstrings, so the "bad" content must NOT be
    # fixed or reformatted.
    # Mentions the private class NDFrame -> "Private classes" error.
    def private_classes(self):
        """
        This mentions NDFrame, which is not correct.
        """
    # Uses a `pandas.` prefix in See Also -> prefix error.
    def prefix_pandas(self):
        """
        Have `pandas` prefix in See Also section.
        See Also
        --------
        pandas.Series.rename : Alter Series index labels or name.
        DataFrame.head : The first `n` rows of the caller object.
        """
        pass
    # Imports numpy and pandas inside Examples -> redundant-import errors.
    def redundant_import(self, foo=None, bar=None):
        """
        A sample DataFrame method.
        Should not import numpy and pandas.
        Examples
        --------
        >>> import numpy as np
        >>> import pandas as pd
        >>> df = pd.DataFrame(np.ones((3, 3)),
        ... columns=('a', 'b', 'c'))
        >>> df.all(1)
        0 True
        1 True
        2 True
        dtype: bool
        >>> df.all(bool_only=True)
        Series([], dtype: bool)
        """
        pass
    # Imports pandas under an unused alias -> flake8 F401.
    def unused_import(self):
        """
        Examples
        --------
        >>> import pandas as pdf
        >>> df = pd.DataFrame(np.ones((3, 3)), columns=('a', 'b', 'c'))
        """
        pass
    # `2+5` without spaces -> flake8 E226.
    def missing_whitespace_around_arithmetic_operator(self):
        """
        Examples
        --------
        >>> 2+5
        7
        """
        pass
    # Body not indented by a multiple of four -> flake8 E111.
    def indentation_is_not_a_multiple_of_four(self):
        """
        Examples
        --------
        >>> if 2 + 5:
        ... pass
        """
        pass
    # Missing spaces after commas -> flake8 E231.
    def missing_whitespace_after_comma(self):
        """
        Examples
        --------
        >>> df = pd.DataFrame(np.ones((3,3)),columns=('a','b', 'c'))
        """
        pass
class TestValidator:
    """Tests for pandas_validate/validate_all against the BadDocstrings
    fixtures defined above."""
    def _import_path(self, klass=None, func=None):
        """
        Build the required import path for tests in this module.
        Parameters
        ----------
        klass : str
            Class name of object in module.
        func : str
            Function name of object in module.
        Returns
        -------
        str
            Import path of specified object in this module
        """
        base_path = "scripts.tests.test_validate_docstrings"
        if klass:
            base_path = ".".join([base_path, klass])
        if func:
            base_path = ".".join([base_path, func])
        return base_path
    # A class full of bad docstrings must produce a non-empty error list.
    def test_bad_class(self, capsys):
        errors = validate_docstrings.pandas_validate(
            self._import_path(klass="BadDocstrings")
        )["errors"]
        assert isinstance(errors, list)
        assert errors
    # Each fixture method must report (at least) the listed error messages.
    # NOTE(review): the expected `msgs` strings must match the validator
    # output exactly -- do not reword them.
    @pytest.mark.parametrize(
        "klass,func,msgs",
        [
            (
                "BadDocstrings",
                "private_classes",
                (
                    "Private classes (NDFrame) should not be mentioned in public "
                    "docstrings",
                ),
            ),
            (
                "BadDocstrings",
                "prefix_pandas",
                (
                    "pandas.Series.rename in `See Also` section "
                    "does not need `pandas` prefix",
                ),
            ),
            # Examples tests
            (
                "BadDocstrings",
                "redundant_import",
                ("Do not import numpy, as it is imported automatically",),
            ),
            (
                "BadDocstrings",
                "redundant_import",
                ("Do not import pandas, as it is imported automatically",),
            ),
            (
                "BadDocstrings",
                "unused_import",
                ("flake8 error: F401 'pandas as pdf' imported but unused",),
            ),
            (
                "BadDocstrings",
                "missing_whitespace_around_arithmetic_operator",
                (
                    "flake8 error: "
                    "E226 missing whitespace around arithmetic operator",
                ),
            ),
            (
                "BadDocstrings",
                "indentation_is_not_a_multiple_of_four",
                # with flake8 3.9.0, the message ends with four spaces,
                # whereas in earlier versions, it ended with "four"
                ("flake8 error: E111 indentation is not a multiple of 4",),
            ),
            (
                "BadDocstrings",
                "missing_whitespace_after_comma",
                ("flake8 error: E231 missing whitespace after ',' (3 times)",),
            ),
        ],
    )
    def test_bad_docstrings(self, capsys, klass, func, msgs):
        result = validate_docstrings.pandas_validate(
            self._import_path(klass=klass, func=func)
        )
        for msg in msgs:
            assert msg in " ".join(err[1] for err in result["errors"])
    # Deprecated objects must be dropped entirely when ignore_deprecated=True.
    def test_validate_all_ignore_deprecated(self, monkeypatch):
        monkeypatch.setattr(
            validate_docstrings,
            "pandas_validate",
            lambda func_name: {
                "docstring": "docstring1",
                "errors": [
                    ("ER01", "err desc"),
                    ("ER02", "err desc"),
                    ("ER03", "err desc"),
                ],
                "warnings": [],
                "examples_errors": "",
                "deprecated": True,
            },
        )
        result = validate_docstrings.validate_all(prefix=None, ignore_deprecated=True)
        assert len(result) == 0
class TestApiItems:
    """Tests for get_api_items, driven by a small fake RST API doc."""
    @property
    def api_doc(self):
        # Fixture: a dedented RST document with two modules (itertools,
        # random), sections, subsections and autosummary entries. The item
        # order here defines the indices used in the parametrized tests below.
        return io.StringIO(
            textwrap.dedent(
                """
                .. currentmodule:: itertools
                Itertools
                ---------
                Infinite
                ~~~~~~~~
                .. autosummary::
                    cycle
                    count
                Finite
                ~~~~~~
                .. autosummary::
                    chain
                .. currentmodule:: random
                Random
                ------
                All
                ~~~
                .. autosummary::
                    seed
                    randint
                """
            )
        )
    # Item tuple layout: (name, func, section, subsection).
    @pytest.mark.parametrize(
        "idx,name",
        [
            (0, "itertools.cycle"),
            (1, "itertools.count"),
            (2, "itertools.chain"),
            (3, "random.seed"),
            (4, "random.randint"),
        ],
    )
    def test_item_name(self, idx, name):
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][0] == name
    @pytest.mark.parametrize(
        "idx,func",
        [(0, "cycle"), (1, "count"), (2, "chain"), (3, "seed"), (4, "randint")],
    )
    def test_item_function(self, idx, func):
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert callable(result[idx][1])
        assert result[idx][1].__name__ == func
    @pytest.mark.parametrize(
        "idx,section",
        [
            (0, "Itertools"),
            (1, "Itertools"),
            (2, "Itertools"),
            (3, "Random"),
            (4, "Random"),
        ],
    )
    def test_item_section(self, idx, section):
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][2] == section
    @pytest.mark.parametrize(
        "idx,subsection",
        [(0, "Infinite"), (1, "Infinite"), (2, "Finite"), (3, "All"), (4, "All")],
    )
    def test_item_subsection(self, idx, subsection):
        result = list(validate_docstrings.get_api_items(self.api_doc))
        assert result[idx][3] == subsection
class TestPandasDocstringClass:
    """PEP8 validation of real pandas docstrings must yield no errors."""
    @pytest.mark.parametrize(
        "name", ["pandas.Series.str.isdecimal", "pandas.Series.str.islower"]
    )
    def test_encode_content_write_to_file(self, name):
        # GH25466: validate_pep8 on these docstrings must produce an empty
        # error stream.
        pep8_errors = list(validate_docstrings.PandasDocstring(name).validate_pep8())
        assert not pep8_errors
class TestMainFunction:
    """Tests for validate_docstrings.main: the exit status must equal the
    number of reported errors (after any --errors filtering)."""
    # Single-function mode with errors: exit status stays 0.
    def test_exit_status_for_main(self, monkeypatch):
        monkeypatch.setattr(
            validate_docstrings,
            "pandas_validate",
            lambda func_name: {
                "docstring": "docstring1",
                "errors": [
                    ("ER01", "err desc"),
                    ("ER02", "err desc"),
                    ("ER03", "err desc"),
                ],
                "examples_errs": "",
            },
        )
        exit_status = validate_docstrings.main(
            func_name="docstring1",
            prefix=None,
            errors=[],
            output_format="default",
            ignore_deprecated=False,
        )
        assert exit_status == 0
    # validate_all mode: exit status is the total error count (3 + 2 = 5).
    def test_exit_status_errors_for_validate_all(self, monkeypatch):
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False: {
                "docstring1": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ],
                    "file": "module1.py",
                    "file_line": 23,
                },
                "docstring2": {
                    "errors": [("ER04", "err desc"), ("ER05", "err desc")],
                    "file": "module2.py",
                    "file_line": 925,
                },
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            errors=[],
            output_format="default",
            ignore_deprecated=False,
        )
        assert exit_status == 5
    # No errors (warnings only) -> exit status 0.
    def test_no_exit_status_noerrors_for_validate_all(self, monkeypatch):
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False: {
                "docstring1": {"errors": [], "warnings": [("WN01", "warn desc")]},
                "docstring2": {"errors": []},
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            errors=[],
            output_format="default",
            ignore_deprecated=False,
        )
        assert exit_status == 0
    # JSON output mode always exits 0 regardless of errors.
    def test_exit_status_for_validate_all_json(self, monkeypatch):
        print("EXECUTED")
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False: {
                "docstring1": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ]
                },
                "docstring2": {"errors": [("ER04", "err desc"), ("ER05", "err desc")]},
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            errors=[],
            output_format="json",
            ignore_deprecated=False,
        )
        assert exit_status == 0
    # The --errors filter: only occurrences of the requested codes count
    # (ER01 appears 3 times, ER03 appears once).
    def test_errors_param_filters_errors(self, monkeypatch):
        monkeypatch.setattr(
            validate_docstrings,
            "validate_all",
            lambda prefix, ignore_deprecated=False: {
                "Series.foo": {
                    "errors": [
                        ("ER01", "err desc"),
                        ("ER02", "err desc"),
                        ("ER03", "err desc"),
                    ],
                    "file": "series.py",
                    "file_line": 142,
                },
                "DataFrame.bar": {
                    "errors": [("ER01", "err desc"), ("ER02", "err desc")],
                    "file": "frame.py",
                    "file_line": 598,
                },
                "Series.foobar": {
                    "errors": [("ER01", "err desc")],
                    "file": "series.py",
                    "file_line": 279,
                },
            },
        )
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            errors=["ER01"],
            output_format="default",
            ignore_deprecated=False,
        )
        assert exit_status == 3
        exit_status = validate_docstrings.main(
            func_name=None,
            prefix=None,
            errors=["ER03"],
            output_format="default",
            ignore_deprecated=False,
        )
        assert exit_status == 1
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2019, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os
import tempfile
import unittest
import uuid
import pathlib
import pandas as pd
import qiime2.plugin
import qiime2.core.type
from qiime2 import Metadata
from qiime2.sdk import Artifact
from qiime2.sdk.result import ResultMetadata
from qiime2.plugin.model import ValidationError
import qiime2.core.archive as archive
from qiime2.core.testing.type import IntSequence1, FourInts, Mapping, SingleInt
from qiime2.core.testing.util import get_dummy_plugin, ArchiveTestingMixin
class TestArtifact(unittest.TestCase, ArchiveTestingMixin):
    """End-to-end tests for qiime2.sdk.Artifact: construction, save/load,
    extract/peek, import validation, and equality semantics.

    NOTE(review): the error-message regexes and archive member sets below are
    exact fixtures tied to the dummy plugin's formats -- keep them verbatim.
    """
    def setUp(self):
        # Ignore the returned dummy plugin object, just run this to verify the
        # plugin exists as the tests rely on it being loaded.
        get_dummy_plugin()
        # TODO standardize temporary directories created by QIIME 2
        self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-')
        self.provenance_capture = archive.ImportProvenanceCapture()
    def tearDown(self):
        self.test_dir.cleanup()
    # Direct construction is forbidden; users must go through load/import.
    def test_private_constructor(self):
        with self.assertRaisesRegex(
                NotImplementedError,
                'Artifact constructor.*private.*Artifact.load'):
            Artifact()
    # Note on testing strategy below: many of the tests for `_from_view` and
    # `load` are similar, with the exception that when `load`ing, the
    # artifact's UUID is known so more specific assertions can be performed.
    # While these tests appear somewhat redundant, they are important because
    # they exercise the same operations on Artifact objects constructed from
    # different sources, whose codepaths have very different internal behavior.
    # This internal behavior could be tested explicitly but it is safer to test
    # the public API behavior (e.g. as a user would interact with the object)
    # in case the internals change.
    def test_from_view(self):
        artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
                                       self.provenance_capture)
        self.assertEqual(artifact.type, FourInts)
        # We don't know what the UUID is because it's generated within
        # Artifact._from_view.
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
        # Can produce same view if called again.
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
    def test_from_view_different_type_with_multiple_view_types(self):
        artifact = Artifact._from_view(IntSequence1, [42, 42, 43, -999, 42],
                                       list, self.provenance_capture)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
    # Saving must produce an archive with exactly the expected members.
    def test_from_view_and_save(self):
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        # Using four-ints data layout because it has multiple files, some of
        # which are in a nested directory.
        artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
                                       self.provenance_capture)
        artifact.save(fp)
        root_dir = str(artifact.uuid)
        expected = {
            'VERSION',
            'checksums.md5',
            'metadata.yaml',
            'data/file1.txt',
            'data/file2.txt',
            'data/nested/file3.txt',
            'data/nested/file4.txt',
            'provenance/metadata.yaml',
            'provenance/VERSION',
            'provenance/citations.bib',
            'provenance/action/action.yaml'
        }
        self.assertArchiveMembers(fp, root_dir, expected)
    # Round-trip: load must preserve type, UUID and data views.
    def test_load(self):
        saved_artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        saved_artifact.save(fp)
        artifact = Artifact.load(fp)
        self.assertEqual(artifact.type, FourInts)
        self.assertEqual(artifact.uuid, saved_artifact.uuid)
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
        self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
    def test_load_different_type_with_multiple_view_types(self):
        saved_artifact = Artifact.import_data(IntSequence1,
                                              [42, 42, 43, -999, 42])
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        saved_artifact.save(fp)
        artifact = Artifact.load(fp)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertEqual(artifact.uuid, saved_artifact.uuid)
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(list),
                         [42, 42, 43, -999, 42])
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
        self.assertEqual(artifact.view(collections.Counter),
                         collections.Counter({42: 3, 43: 1, -999: 1}))
    # A loaded artifact can be re-saved over its source and to a new path.
    def test_load_and_save(self):
        fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
        fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact.save(fp1)
        artifact = Artifact.load(fp1)
        # Overwriting its source file works.
        artifact.save(fp1)
        # Saving to a new file works.
        artifact.save(fp2)
        root_dir = str(artifact.uuid)
        expected = {
            'VERSION',
            'checksums.md5',
            'metadata.yaml',
            'data/file1.txt',
            'data/file2.txt',
            'data/nested/file3.txt',
            'data/nested/file4.txt',
            'provenance/metadata.yaml',
            'provenance/VERSION',
            'provenance/citations.bib',
            'provenance/action/action.yaml'
        }
        self.assertArchiveMembers(fp1, root_dir, expected)
        root_dir = str(artifact.uuid)
        expected = {
            'VERSION',
            'checksums.md5',
            'metadata.yaml',
            'data/file1.txt',
            'data/file2.txt',
            'data/nested/file3.txt',
            'data/nested/file4.txt',
            'provenance/metadata.yaml',
            'provenance/VERSION',
            'provenance/citations.bib',
            'provenance/action/action.yaml'
        }
        self.assertArchiveMembers(fp2, root_dir, expected)
    # save -> load -> save -> load preserves type, format, UUID and data.
    def test_roundtrip(self):
        fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
        fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact.save(fp1)
        artifact1 = Artifact.load(fp1)
        artifact1.save(fp2)
        artifact2 = Artifact.load(fp2)
        self.assertEqual(artifact1.type, artifact2.type)
        self.assertEqual(artifact1.format, artifact2.format)
        self.assertEqual(artifact1.uuid, artifact2.uuid)
        self.assertEqual(artifact1.view(list),
                         artifact2.view(list))
        # double view to make sure multiple views can be taken
        self.assertEqual(artifact1.view(list),
                         artifact2.view(list))
    def test_load_with_archive_filepath_modified(self):
        # Save an artifact for use in the following test case.
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        Artifact.import_data(FourInts, [-1, 42, 0, 43]).save(fp)
        # Load the artifact from a filepath then save a different artifact to
        # the same filepath. Assert that both artifacts produce the correct
        # views of their data.
        #
        # `load` used to be lazy, only extracting data when it needed to (e.g.
        # when `save` or `view` was called). This was buggy as the filepath
        # could have been deleted, or worse, modified to contain a different
        # .qza file. Thus, the wrong archive could be extracted on demand, or
        # the archive could be missing altogether. There isn't an easy
        # cross-platform compatible way to solve this problem, so Artifact.load
        # is no longer lazy and always extracts its data immediately. The real
        # motivation for lazy loading was for quick inspection of archives
        # without extracting/copying data, so that API is now provided through
        # Artifact.peek.
        artifact1 = Artifact.load(fp)
        Artifact.import_data(FourInts, [10, 11, 12, 13]).save(fp)
        artifact2 = Artifact.load(fp)
        self.assertEqual(artifact1.view(list), [-1, 42, 0, 43])
        self.assertEqual(artifact2.view(list), [10, 11, 12, 13])
    # extract() unpacks the archive under output_dir/<uuid>/.
    def test_extract(self):
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact.save(fp)
        root_dir = str(artifact.uuid)
        # pathlib normalizes away the `.`, it doesn't matter, but this is the
        # implementation we're using, so let's test against that assumption.
        output_dir = pathlib.Path(self.test_dir.name) / 'artifact-extract-test'
        result_dir = Artifact.extract(fp, output_dir=output_dir)
        self.assertEqual(result_dir, str(output_dir / root_dir))
        expected = {
            'VERSION',
            'checksums.md5',
            'metadata.yaml',
            'data/file1.txt',
            'data/file2.txt',
            'data/nested/file3.txt',
            'data/nested/file4.txt',
            'provenance/metadata.yaml',
            'provenance/VERSION',
            'provenance/citations.bib',
            'provenance/action/action.yaml'
        }
        self.assertExtractedArchiveMembers(output_dir, root_dir, expected)
    # peek() reads type/uuid/format metadata without extracting data.
    def test_peek(self):
        artifact = Artifact.import_data(FourInts, [0, 0, 42, 1000])
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        artifact.save(fp)
        metadata = Artifact.peek(fp)
        self.assertIsInstance(metadata, ResultMetadata)
        self.assertEqual(metadata.type, 'FourInts')
        self.assertEqual(metadata.uuid, str(artifact.uuid))
        self.assertEqual(metadata.format, 'FourIntsDirectoryFormat')
    # The import-validation tests below pin the user-facing error messages.
    def test_import_data_invalid_type(self):
        with self.assertRaisesRegex(TypeError,
                                    'concrete semantic type.*Visualization'):
            Artifact.import_data(qiime2.core.type.Visualization, self.test_dir)
        with self.assertRaisesRegex(TypeError,
                                    'concrete semantic type.*Visualization'):
            Artifact.import_data('Visualization', self.test_dir)
    def test_import_data_with_filepath_multi_file_data_layout(self):
        fp = os.path.join(self.test_dir.name, 'test.txt')
        with open(fp, 'w') as fh:
            fh.write('42\n')
        with self.assertRaisesRegex(qiime2.plugin.ValidationError,
                                    "FourIntsDirectoryFormat.*directory"):
            Artifact.import_data(FourInts, fp)
    def test_import_data_with_wrong_number_of_files(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        error_regex = ("Missing.*MappingDirectoryFormat.*mapping.tsv")
        with self.assertRaisesRegex(ValidationError, error_regex):
            Artifact.import_data(Mapping, data_dir)
    def test_import_data_with_unrecognized_files(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
            fh.write('42\n')
        with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
            fh.write('43\n')
        nested = os.path.join(data_dir, 'nested')
        os.mkdir(nested)
        with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
            fh.write('44\n')
        with open(os.path.join(nested, 'foo.txt'), 'w') as fh:
            fh.write('45\n')
        error_regex = ("Unrecognized.*foo.txt.*FourIntsDirectoryFormat")
        with self.assertRaisesRegex(ValidationError, error_regex):
            Artifact.import_data(FourInts, data_dir)
    def test_import_data_with_unreachable_path(self):
        with self.assertRaisesRegex(qiime2.plugin.ValidationError,
                                    "does not exist"):
            Artifact.import_data(IntSequence1,
                                 os.path.join(self.test_dir.name, 'foo.txt'))
        with self.assertRaisesRegex(qiime2.plugin.ValidationError,
                                    "does not exist"):
            Artifact.import_data(FourInts,
                                 os.path.join(self.test_dir.name, 'bar', ''))
    def test_import_data_with_invalid_format_single_file(self):
        fp = os.path.join(self.test_dir.name, 'foo.txt')
        with open(fp, 'w') as fh:
            fh.write('42\n')
            fh.write('43\n')
            fh.write('abc\n')
            fh.write('123\n')
        error_regex = "foo.txt.*IntSequenceFormat.*\n\n.*Line 3"
        with self.assertRaisesRegex(ValidationError, error_regex):
            Artifact.import_data(IntSequence1, fp)
    def test_import_data_with_invalid_format_multi_file(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
            fh.write('42\n')
        with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
            fh.write('43\n')
        nested = os.path.join(data_dir, 'nested')
        os.mkdir(nested)
        with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
            fh.write('44\n')
        with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
            fh.write('foo\n')
        error_regex = "file4.txt.*SingleIntFormat.*\n\n.*integer"
        with self.assertRaisesRegex(ValidationError, error_regex):
            Artifact.import_data(FourInts, data_dir)
    # SingleInt's redundant directory format: both files must agree.
    def test_import_data_with_good_validation_multi_files(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
            fh.write('1\n')
        with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
            fh.write('1\n')
        a = Artifact.import_data(SingleInt, data_dir)
        self.assertEqual(1, a.view(int))
    def test_import_data_with_bad_validation_multi_files(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
            fh.write('1\n')
        with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
            fh.write('2\n')
        error_regex = ("test.*RedundantSingleIntDirectoryFormat.*\n\n"
                       ".*does not match")
        with self.assertRaisesRegex(ValidationError, error_regex):
            Artifact.import_data(SingleInt, data_dir)
    def test_import_data_with_filepath(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        # Filename shouldn't matter for single-file case.
        fp = os.path.join(data_dir, 'foo.txt')
        with open(fp, 'w') as fh:
            fh.write('42\n')
            fh.write('43\n')
            fh.write('42\n')
            fh.write('0\n')
        artifact = Artifact.import_data(IntSequence1, fp)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list), [42, 43, 42, 0])
    def test_import_data_with_directory_single_file(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        fp = os.path.join(data_dir, 'ints.txt')
        with open(fp, 'w') as fh:
            fh.write('-1\n')
            fh.write('-2\n')
            fh.write('10\n')
            fh.write('100\n')
        artifact = Artifact.import_data(IntSequence1, data_dir)
        self.assertEqual(artifact.type, IntSequence1)
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list), [-1, -2, 10, 100])
    def test_import_data_with_directory_multi_file(self):
        data_dir = os.path.join(self.test_dir.name, 'test')
        os.mkdir(data_dir)
        with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
            fh.write('42\n')
        with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
            fh.write('41\n')
        nested = os.path.join(data_dir, 'nested')
        os.mkdir(nested)
        with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
            fh.write('43\n')
        with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
            fh.write('40\n')
        artifact = Artifact.import_data(FourInts, data_dir)
        self.assertEqual(artifact.type, FourInts)
        self.assertIsInstance(artifact.uuid, uuid.UUID)
        self.assertEqual(artifact.view(list), [42, 41, 43, 40])
    # Equality semantics: identity and UUID-based equality.
    def test_eq_identity(self):
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        self.assertEqual(artifact, artifact)
    def test_eq_same_uuid(self):
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact1.save(fp)
        artifact2 = Artifact.load(fp)
        self.assertEqual(artifact1, artifact2)
    def test_ne_same_data_different_uuid(self):
        artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact2 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        self.assertNotEqual(artifact1, artifact2)
    def test_ne_different_data_different_uuid(self):
        artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        artifact2 = Artifact.import_data(FourInts, [1, 2, 3, 4])
        self.assertNotEqual(artifact1, artifact2)
    # Equality must be symmetric and reject subclasses / unrelated types,
    # even when the uuid matches.
    def test_ne_subclass_same_uuid(self):
        class ArtifactSubclass(Artifact):
            pass
        fp = os.path.join(self.test_dir.name, 'artifact.qza')
        artifact1 = ArtifactSubclass.import_data(FourInts, [-1, 42, 0, 43])
        artifact1.save(fp)
        artifact2 = Artifact.load(fp)
        self.assertNotEqual(artifact1, artifact2)
        self.assertNotEqual(artifact2, artifact1)
    def test_ne_different_type_same_uuid(self):
        artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
        class Faker:
            @property
            def uuid(self):
                return artifact.uuid
        faker = Faker()
        self.assertNotEqual(artifact, faker)
    # validate(level=...) behavior: 'max' runs the expensive checks.
    def test_artifact_validate_max(self):
        A = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
        A.validate()
        self.assertTrue(True)  # Checkpoint assertion
        A.validate(level='max')
        self.assertTrue(True)  # Checkpoint assertion
        A = Artifact.import_data('IntSequence1', [1, 2, 3, 4, 5, 6, 7, 10])
        with self.assertRaisesRegex(ValidationError, '3 more'):
            A.validate('max')
    def test_artifact_validate_min(self):
        A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
        A.validate(level='min')
        self.assertTrue(True)  # Checkpoint assertion
        A = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
        A.validate(level='min')
        self.assertTrue(True)  # Checkpoint assertion
    def test_artifact_validate_invalid_level(self):
        A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
        with self.assertRaisesRegex(ValueError, 'peanut'):
            A.validate(level='peanut')
    # Viewing a Mapping artifact as Metadata tracks the source artifact.
    def test_view_as_metadata(self):
        A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
        obs_md = A.view(Metadata)
        exp_df = pd.DataFrame({'a': '1', 'b': '3'},
                              index=pd.Index(['0'], name='id', dtype=object),
                              dtype=object)
        exp_md = Metadata(exp_df)
        exp_md._add_artifacts([A])
        self.assertEqual(obs_md, exp_md)
        # This check is redundant because `Metadata.__eq__` being used above
        # takes source artifacts into account. Doesn't hurt to have an explicit
        # check though, since this API didn't always track source artifacts
        # (this check also future-proofs the test in case `Metadata.__eq__`
        # changes in the future).
        self.assertEqual(obs_md.artifacts, (A,))
    def test_cannot_be_viewed_as_metadata(self):
        A = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
        with self.assertRaisesRegex(TypeError,
                                    'Artifact.*IntSequence1.*cannot be viewed '
                                    'as QIIME 2 Metadata'):
            A.view(Metadata)
# Allow running this test module directly (e.g. ``python <file>``).
if __name__ == '__main__':
    unittest.main()
| |
#***********************************************************
#* Software License Agreement (BSD License)
#*
#* Copyright (c) 2010, CSIRO Autonomous Systems Laboratory
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions
#* are met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#* * Redistributions in binary form must reproduce the above
#* copyright notice, this list of conditions and the following
#* disclaimer in the documentation and/or other materials provided
#* with the distribution.
#* * Neither the name of the CSIRO nor the names of its
#* contributors may be used to endorse or promote products derived
#* from this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#***********************************************************
# Author: Brett Grandbois
#$Id$
import socket
import errno
from exceptions import *
class XPort(object):
'''Class representation of a socket transport thread.
Simplification wrapper around socket.socket and related objects.
The send and recv methods will wait for all requested data and/or
provide a filtered exception (something good, bad, or ugly happened).
'''
def __init__ (self, host, port):
''' XPort constructor, connect to requested peer.
Connect to requested network peer or raise reason why not.
@param host: hostname to connect to. can be name or IPv4 dot notation.
@type host: str
@param port: network port to connect to
@type port: int
@raise FatalExcetpion: Host/port does not exist or is unreachable.
@raise TimeoutException: Network is reachable but timeout on connect.
Likely host is not yet ready.
@raise InfoRestartException: Non-fatal network problem, able to re-try.
@raise ErrorRestartException: More serious network problem, possibly can
re-try.
'''
try:
self.__shutdown = False
self.__sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# don't want small messages lingering in the buffers
self.__sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# needed for just calling connect()? doesn't hurt to set it...
self.__sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# 5 second timeout should be more than sufficient
# in any case of this timeout happening we want the exception to
# force a restart
self.__sock.settimeout(5.0)
host_ip = socket.gethostbyname(host)
self.__sock.connect((host_ip, port))
# addressing error is fatal, it means a supplied host does not exist
# or can not be found
except (socket.herror, socket.gaierror), e:
raise FatalException('address exception in XPort init: %s' % e)
# in init a timeout means restart the system, most likely the LD-MRS
# isn't ready yet
except socket.timeout, e:
raise TimeoutException('timeout exceeded in host connect: %s' % e)
except socket.error, e:
fatals = [errno.ENETUNREACH, errno.EACCES, errno.EPERM, errno.EFAULT]
not_found = [errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.EHOSTDOWN,
errno.EALREADY]
if e.errno in fatals:
raise FatalException('fatal error in socket init sequence: %s'
% e)
elif e.errno in not_found:
raise InfoRestartException('no host found at %s:%d...continuing to try'
% (host, port))
else:
raise ErrorRestartException('error in socket init sequence: %s'
% e)
def recv(self, length):
''' Wait for data from the network peer.
Wait until all requested data from the network peer has arrived.
@param length: how much data is requested
@type length: int
@return: data from network peer
@rtype: str
@raise TimeoutException: no data has arrived in the timeout period
@raise InfoRestartException: remote peer has closed the socket, the
data request can not be fulfilled but able to reconnect to peer.
@raise NextMsgException: signal interruption, current data request
can not be fulfilled but new request can be issued
@raise ErrorRestartException: network problem reported and data request
can not be fulfilled. should reconnect to peer.
'''
data_recv = 0
return_string = ''
next_msg = [errno.EINTR, ]
info = [errno.ECONNRESET, errno.EPIPE]
while data_recv < length:
try:
data = self.__sock.recv(length - data_recv, socket.MSG_WAITALL)
except socket.timeout:
raise TimeoutException('timeout on socket.recv')
except socket.error, e:
if e.errno in next_msg:
raise NextMsgException()
elif e.errno in info:
raise InfoRestartException('socket.recv restarting xport: %s'
% e)
else:
raise ErrorRestartException('error in socket recv: %s' % e)
if not data:
raise InfoRestartException('socket.recv zero-length, likely a shutdown signal')
else:
return_string += data
data_recv += len(data)
return return_string
def send(self, cmd):
'''Send a data message to the network peer.
Send all requested message data.
@param cmd: data to send
@type cmd: str
@raise TimeoutException: data could not be sent during the timeout
period, likely the peer has gone down
@raise InfoRestartException: peer has closed connection or local shutdown
signal caught. either way current send can not be fulfilled.
@raise ErrorRestartException: network problem reported and data send
can not be fulfilled. should reconnect to peer.
'''
info = [errno.EPIPE, errno.ECONNRESET, errno.EINTR]
try:
self.__sock.sendall(cmd)
except socket.timeout:
raise TimeoutException('timeout on socket.sendall')
except socket.error, e:
if e.errno in info:
# EPIPE should be a shutdown signal
raise InfoRestartException('socket.sendall: %s' % e)
else:
raise ErrorRestartExcetpion('error in socket.sendall: %s' % e)
def shutdown(self):
''' Call socket.shutdown to pop out of any blocking send/recv '''
# this can be called from external callbacks as well
# as the main loop exit
if not self.__shutdown:
# call shutdown first to immediately terminate comms
# close will linger trying to flush any pending data
self.__shutdown = True
try:
self.__sock.shutdown(socket.SHUT_RDWR)
except socket.error:
# we're shutting down so don't care about socket level errors
pass
def close(self):
'''close the xport socket'''
try:
self.__sock.close()
except socket.error:
# we're shutting down so don't care about socket-level errors
pass
| |
import siconos.kernel as SK
import siconos.numerics as SN
import numpy as np
import matplotlib.pyplot as plt
## \brief Constructor
#
# \param is a (optional)
h = 1e-3         # time step of the simulation
withPlot = True  # generate matplotlib figures after the run
class ZI(object):
    """Zhuravlev-Ivanov style time-stepping system exposed as a Mixed
    Complementarity Problem (MCP) for siconos.numerics.

    The 4-vector z packs the complementarity variables: z[0] and z[2] encode
    the two sign multipliers (lambda = 2*z - 1), z[1] and z[3] their slacks.
    """
    def __init__(self, h, xk, theta, gamma, kappa, g):
        self.xk = xk          # current state vector (2 components)
        self.h = h            # time step
        self.theta = theta    # theta-method weight for the velocity update
        self.gamma = gamma    # gamma weight used inside computeg
        self._kappa = kappa   # coupling coefficient
        self._g = g           # gravity constant
        self.Jacg = np.zeros((2, 4))  # reusable Jacobian buffer for computeJacg
        self.r = np.zeros((2,))       # reusable buffer for the latest g value
        self.f_eval = 0       # count of F evaluations (statistics)
        self.nabla_eval = 0   # count of Jacobian evaluations (statistics)
    def compute_Fmcp(self, n, z, F):
        """Fill F(z) in place for the MCP solver; n is the size (unused)."""
        # recover lambda multipliers from complementarity variables
        l0 = 2.0*z[0] - 1.0
        l1 = 2.0*z[2] - 1.0
        r = self.computeg(l0, l1)  # side effect: refreshes self.r
        v_theta = ((1.0 - self.theta)*self.xk[1] + self.theta*(self.xk[1] + self.h*self.r[1]))
        F[0] = self.xk[0] + self.h*v_theta + self.h*self.r[0] + z[1]
        F[2] = self.xk[1] + self.h*self.r[1] + z[3]
        F[1] = 1.0 - z[0]
        F[3] = 1.0 - z[2]
        self.f_eval += 1
        pass
    def compute_nabla_Fmcp(self, n, z, nabla_Fmcp):
        """Fill the 4x4 Jacobian of F in place for the MCP solver."""
        l0 = 2.0*z[0] - 1.0
        l1 = 2.0*z[2] - 1.0
        Jacg = self.h*self.computeJacg(l0, l1)
        nabla_Fmcp[0, :] = Jacg[0, :] + self.theta*self.h*Jacg[1, :]
        nabla_Fmcp[0, 1] += 1.0
        nabla_Fmcp[1, 0] = -1.0
        nabla_Fmcp[1, 1:] = 0.0
        nabla_Fmcp[2, :] = Jacg[1, :]
        nabla_Fmcp[2, 3] += 1.0
        nabla_Fmcp[3, :] = 0.0
        nabla_Fmcp[3, 2] = -1.0
        self.nabla_eval += 1
        pass
    def computeg(self, l0, l1):
        """Evaluate g(lambda) into self.r (returned for convenience)."""
        self.r[1] = self._g*l0/(1.0 - self._kappa*l0*l1)
        v_gamma = ((1.0 - self.gamma)*self.xk[1] + self.gamma*(self.xk[1] + self.h*self.r[1]))
        self.r[0] = -self._kappa*l0*l1*(v_gamma)
        #print(R)
        #print('computeg done')
        return self.r
    def computeJacg(self, l0, l1):
        """Evaluate the 2x4 Jacobian of g wrt z into self.Jacg and return it."""
        #print('call computeJacglambda')
        #print(B)
        rr = self.computeg(l0, l1)  # refresh g first; Jacobian reuses its terms
        v_gamma = ((1.0 - self.gamma)*self.xk[1] + self.gamma*(self.xk[1] + self.h*rr[1]))
        B = self.Jacg
        # factor 2.0 comes from d(lambda)/dz = 2
        B[1, 0] = 2.0*self._g/(1-self._kappa*l0*l1)**2
        B[1, 2] = 2.0*(self._g*self._kappa*l0**2)/(1.0-self._kappa*l0*l1)**2
        B[0, 0] = -2.0*self._kappa*l1*(v_gamma) - self.gamma*self.h*self._kappa*l0*l1*B[1, 0]
        B[0, 2] = -2.0*self._kappa*l0*(v_gamma) - self.gamma*self.h*self._kappa*l0*l1*B[1, 2]
        return B
# Driver: integrate the ZI system over [0, T] with step h, solving one MCP
# per step and recording state, sign multipliers and derived quantities.
if __name__ == '__main__':
    xk = np.array((1., 10.))
    T = 4.0
    t = 0.0
    z = np.zeros((4,))
    w = np.empty((4,))
    kappa = 0.41
    g = 9.81
    theta = 1.0
    gamma = 1.0
    zi_syst = ZI(h, xk, theta, gamma, kappa, g)
    mcp = SN.MixedComplementarityProblem2(0, 4, zi_syst)
    SO=SN.SolverOptions(mcp, SN.SICONOS_MCP_NEWTON_FBLSA)
    SO.dparam[0] = 1e-24
    SO.iparam[0] = 150
    SO.iparam[3] = 2
    SO.iparam[4] = 10
    N = int(T/h + 10)
    print(N)
    lambdaPM = np.empty((N, 4))
    signs = np.empty((N, 2))
    sol = np.empty((N, 2))
    sol[0, :] = xk
    k = 0
    while t <= T:
        k += 1
        info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
        #info = SN.mcp_newton_FBLSA(mcp, z, w, SO)
        #print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
        if info > 0:
            # solver failed: nudge z towards a consistent guess and retry once
            zi_syst.compute_Fmcp(0, 4, z, w)
            sol[k, 0] = w[0] - z[1]
            sol[k, 1] = w[2] - z[3]
            if sol[k, 0] < -1e-7 and np.abs(z[1]) < 1e-10:
                z[1] = -sol[k, 0]
                z[0] = 1.0
            if xk[1] < -1e-7 and np.abs(z[3]) < 1e-10:
                z[3] = -sol[k, 1]
                z[2] = 1.0
            if z[1] < -1e-7:
                z[1] = 0.0
                z[0] = 0.0
            if z[3] < -1e-7:
                z[3] = 0.0
                z[2] = 0.0
            if z[1] > 1e-7 and z[0] < 1.0 - 1e-7:
                z[0] = 1.0
            if z[3] > 1e-7 and z[2] < 1.0 - 1e-7:
                z[2] = 1.0
            info = SN.mcp_newton_minFBLSA(mcp, z, w, SO)
            print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
            if info >0:
                print('MCP solver failed ! info = {:}'.format(info))
                print(xk)
                print(z)
                print(w)
#        else:
#            print('iter {:} ; solver iter = {:} ; prec = {:}'.format(k, SO.iparam[1], SO.dparam[1]))
        # advance the state from the converged complementarity solution
        zi_syst.compute_Fmcp(0 ,4, z, w)
        sol[k, 0] = w[0] - z[1]
        sol[k, 1] = w[2] - z[3]
        xk[:] = sol[k, :]
        signs[k, 0] = z[0] - w[1]
        signs[k, 1] = z[2] - w[3]
        t = k*h
        #z[:] = 0.0
    print('f_eval', zi_syst.f_eval, 'nabla_eval', zi_syst.nabla_eval)
    # reconstruct physical position/velocity from the ZI change of variables
    pos = np.abs(sol[:, 0])
    velocity = (1 - kappa*np.sign(sol[:, 0]*sol[:, 1]))*sol[:, 1]*np.sign(sol[:, 0])
    if True:
        np.savetxt("dataZIsol.txt", sol)
        np.savetxt("dataZIlambdaPM.txt", lambdaPM)
        np.savetxt("dataZIsign.txt", signs)
        np.savetxt("dataZIpos.txt", pos)
        np.savetxt("dataZIvel.txt", velocity)
    if withPlot:
        plt.figure()
        plt.plot(sol[:, 0], sol[:, 1], 'b-*')
        plt.figure()
        plt.plot(sol[:, 0])
        plt.plot(sol[:, 1])
        plt.figure()
        plt.plot(signs[:, 0])
        plt.plot(signs[:, 1])
        plt.show()
        plt.subplot(311)
        plt.title('position')
        plt.plot(pos)
        plt.grid()
        plt.subplot(312)
        plt.title('velocity')
        plt.plot(velocity)
        plt.grid()
#        plt.subplot(313)
#        plt.title('control input')
#        plt.plot(dataPlot[:,0], control)
#        plt.grid()
        plt.show()
#    indx = np.nonzero(dataPlot[:, 0]>30)
#    ttt = dataPlot[indx, 0].flatten()
#
#    plt.subplot(311)
#    plt.title('position')
#    plt.plot(ttt, pos[indx])
#    plt.grid()
#    plt.subplot(312)
#    plt.title('velocity')
#    plt.plot(ttt, velocity[indx])
#    plt.grid()
##    plt.subplot(313)
##    plt.title('control input')
##    plt.plot(ttt, control[indx])
#    plt.grid()
#    plt.show()
| |
"""Provide estimates of sample purity and subclonal copy number using THetA.
Identifying cellularity and subclonal populations within somatic calling using
tumor normal pairs.
https://github.com/raphael-group/THetA
"""
import os
import sys
import subprocess
import pybedtools
import pysam
import toolz as tz
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.variation import vcfutils
from bcbio.provenance import do
from bcbio.structural import cnvkit, ensemble
def run(vrn_info, cnvs_by_name, somatic_info):
    """Run THetA analysis given output from CNV caller on a tumor/normal pair.

    Returns None (implicitly) when the THetA scripts are not on the PATH;
    otherwise returns the dictionary produced by _run_theta.
    """
    # Consistent spelling with _get_cmd/_safe_run_theta ("RunTHetA.py");
    # only the truthiness of the result is used here as an availability check.
    cmd = _get_cmd("RunTHetA.py")
    if not cmd:
        logger.info("THetA scripts not found in current PATH. Skipping.")
    else:
        work_dir = _sv_workdir(somatic_info.tumor_data)
        assert "cnvkit" in cnvs_by_name, "THetA requires CNVkit calls"
        cnv_info = cnvkit.export_theta(cnvs_by_name["cnvkit"], somatic_info.tumor_data)
        # restrict CNVkit intervals to those supported by another caller
        cnv_info["theta_input"] = subset_by_supported(cnv_info["theta_input"], _theta_to_coords,
                                                      cnvs_by_name, work_dir, somatic_info.tumor_data)
        return _run_theta(cnv_info, somatic_info.tumor_data, work_dir, run_n3=False)
def _theta_to_coords(line):
_, chrom, start, end = line.split()[:4]
return (chrom, start, end)
def subset_by_supported(input_file, get_coords, calls_by_name, work_dir, data,
                        headers=("#",)):
    """Limit CNVkit input to calls with support from another caller.
    get_coords is a function that return chrom, start, end from a line of the
    input_file, allowing handling of multiple input file types.

    Returns input_file unchanged when no usable support VCFs exist; otherwise
    writes (once) and returns a filtered copy keeping header lines and lines
    whose coordinates overlap a support call by >= 50% reciprocally.
    """
    # pair each configured support caller with its VCF, dropping empty ones
    support_files = [(c, tz.get_in([c, "vrn_file"], calls_by_name))
                     for c in ensemble.SUBSET_BY_SUPPORT["cnvkit"]]
    support_files = [(c, f) for (c, f) in support_files if f and vcfutils.vcf_has_variants(f)]
    if len(support_files) == 0:
        return input_file
    else:
        out_file = os.path.join(work_dir, "%s-havesupport%s" %
                                utils.splitext_plus(os.path.basename(input_file)))
        if not utils.file_uptodate(out_file, input_file):
            input_bed = _input_to_bed(input_file, work_dir, get_coords, headers)
            pass_coords = set([])
            with file_transaction(data, out_file) as tx_out_file:
                support_beds = " ".join([_sv_vcf_to_bed(f, c, out_file) for c, f in support_files])
                tmp_cmp_bed = "%s-intersectwith.bed" % utils.splitext_plus(tx_out_file)[0]
                # -f 0.5 -r: require 50% reciprocal overlap with a support call
                cmd = "bedtools intersect -wa -f 0.5 -r -a {input_bed} -b {support_beds} > {tmp_cmp_bed}"
                do.run(cmd.format(**locals()), "Intersect CNVs with support files")
                for r in pybedtools.BedTool(tmp_cmp_bed):
                    pass_coords.add((str(r.chrom), str(r.start), str(r.stop)))
                with open(input_file) as in_handle:
                    with open(tx_out_file, "w") as out_handle:
                        for line in in_handle:
                            passes = True
                            if not line.startswith(headers):
                                passes = get_coords(line) in pass_coords
                            if passes:
                                out_handle.write(line)
        return out_file
def _sv_vcf_to_bed(orig_vcf, caller, base_out):
    """Convert unfiltered/PASS records of a structural variant VCF into BED.

    The output name is derived from base_out plus the caller name; an
    existing file is reused without regeneration.
    """
    out_file = "%s-inputcmp-%s.bed" % (utils.splitext_plus(base_out)[0], caller)
    if not utils.file_exists(out_file):
        with pysam.VariantFile(orig_vcf) as bcf_in:
            with open(out_file, "w") as out_handle:
                for rec in bcf_in:
                    # keep records with no filter, or whose first filter is PASS/'.'
                    # NOTE(review): `rec.filter.keys()[0]` relies on Python 2
                    # list semantics -- revisit if porting to Python 3.
                    if len(rec.filter.keys()) == 0 or rec.filter.keys()[0] in ["PASS", "."]:
                        out_handle.write("%s\t%s\t%s\n" % (rec.chrom, rec.start, rec.info.get("END", rec.start)))
    return out_file
def _input_to_bed(theta_input, work_dir, get_coords, headers):
"""Convert input file to a BED file for comparisons
"""
theta_bed = os.path.join(work_dir, "%s.bed" % os.path.splitext(os.path.basename(theta_input))[0])
with open(theta_input) as in_handle:
with open(theta_bed, "w") as out_handle:
for line in in_handle:
if not line.startswith(headers):
chrom, start, end = get_coords(line)
out_handle.write("\t".join([chrom, start, end]) + "\n")
return theta_bed
def _run_theta(cnv_info, data, work_dir, run_n3=True):
    """Run theta, calculating subpopulations and normal contamination.

    Always attempts the n=2 (tumor + normal) model; when run_n3 is True and
    n=2 succeeded, also fits n=3 and model-selects the best of the two.
    Returns a dict with 'caller', and 'estimate'/'cnvs' keys when successful.
    """
    out = {"caller": "theta"}
    max_normal = "0.9"   # upper bound on normal contamination passed via -m
    opts = ["-m", max_normal]
    n2_result = _safe_run_theta(cnv_info["theta_input"], os.path.join(work_dir, "n2"), ".n2.results",
                                ["-n", "2"] + opts, data)
    if n2_result:
        out["estimate"] = n2_result
        if run_n3:
            # n=3 run is seeded from the n=2 bounds/results files
            n2_bounds = "%s.withBounds" % os.path.splitext(n2_result)[0]
            n3_result = _safe_run_theta(n2_bounds, os.path.join(work_dir, "n3"), ".n3.results",
                                        ["-n", "3", "--RESULTS", n2_result] + opts,
                                        data)
            if n3_result:
                best_result = _select_model(n2_bounds, n2_result, n3_result,
                                            os.path.join(work_dir, "n3"), data)
                out["estimate"] = best_result
                out["cnvs"] = _merge_theta_calls(n2_bounds, best_result, cnv_info["vrn_file"], data)
    return out
def _update_with_calls(result_file, cnv_file):
"""Update bounds with calls from CNVkit, inferred copy numbers and p-values from THetA.
"""
results = {}
with open(result_file) as in_handle:
in_handle.readline() # header
_, _, cs, ps = in_handle.readline().strip().split()
for i, (c, p) in enumerate(zip(cs.split(":"), ps.split(","))):
results[i] = (c, p)
cnvs = {}
with open(cnv_file) as in_handle:
for line in in_handle:
chrom, start, end, _, count = line.rstrip().split()[:5]
cnvs[(chrom, start, end)] = count
def update(i, line):
parts = line.rstrip().split("\t")
chrom, start, end = parts[1:4]
parts += cnvs.get((chrom, start, end), ".")
parts += list(results[i])
return "\t".join(parts) + "\n"
return update
def _merge_theta_calls(bounds_file, result_file, cnv_file, data):
    """Create a final output file with merged CNVkit and THetA copy and population estimates.

    Header lines get three extra column names; data lines are extended by
    the closure from _update_with_calls, indexed by their position.
    """
    out_file = "%s-merged.txt" % (result_file.replace(".BEST.results", ""))
    if not utils.file_uptodate(out_file, result_file):
        with file_transaction(data, out_file) as tx_out_file:
            updater = _update_with_calls(result_file, cnv_file)
            with open(bounds_file) as in_handle:
                with open(tx_out_file, "w") as out_handle:
                    i = 0  # data-line index; only incremented for non-header lines
                    for line in in_handle:
                        if line.startswith("#"):
                            parts = line.rstrip().split("\t")
                            parts += ["cnv", "pop_cnvs", "pop_pvals"]
                            out_handle.write("\t".join(parts) + "\n")
                        else:
                            out_handle.write(updater(i, line))
                            i += 1
    return out_file
def _select_model(n2_bounds, n2_result, n3_result, out_dir, data):
    """Run final model selection from n=2 and n=3 options.

    THetA's ModelSelection.py writes a .BEST.results file next to whichever
    model wins; this returns the winner's path.
    NOTE(review): out_dir and data are currently unused -- kept for
    interface compatibility with callers.
    """
    n2_out_file = n2_result.replace(".n2.results", ".BEST.results")
    n3_out_file = n3_result.replace(".n3.results", ".BEST.results")
    # only run selection when neither output already exists
    if not utils.file_exists(n2_out_file) and not utils.file_exists(n3_out_file):
        cmd = _get_cmd("ModelSelection.py") + [n2_bounds, n2_result, n3_result]
        do.run(cmd, "Select best THetA model")
    if utils.file_exists(n2_out_file):
        return n2_out_file
    else:
        assert utils.file_exists(n3_out_file)
        return n3_out_file
def _safe_run_theta(input_file, out_dir, output_ext, args, data):
    """Run THetA, catching and continuing on any errors.

    Returns the expected result file path, or None when a previous run marked
    this sample as skipped (via a sentinel '.skipped' file) or when THetA
    reports a known, expected failure mode.
    """
    out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext)
    skip_file = out_file + ".skipped"
    if utils.file_exists(skip_file):
        return None
    if not utils.file_exists(out_file):
        with file_transaction(data, out_dir) as tx_out_dir:
            utils.safe_makedir(tx_out_dir)
            cmd = _get_cmd("RunTHetA.py") + args + \
                  [input_file, "--NUM_PROCESSES", dd.get_cores(data),
                   "--FORCE", "-d", tx_out_dir]
            try:
                do.run(cmd, "Run THetA to calculate purity", log_error=False)
            except subprocess.CalledProcessError, msg:
                # known benign failures: record a skip marker instead of raising
                if ("Number of intervals must be greater than 1" in str(msg) or
                      "This sample isn't a good candidate for THetA analysis" in str(msg)):
                    with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle:
                        out_handle.write("Expected TheTA failure, skipping")
                    return None
                else:
                    raise
    return out_file
def _split_theta_ext(fname):
base = os.path.splitext(os.path.basename(fname))[0]
if base.endswith((".n2", ".n3")):
base = os.path.splitext(base)[0]
return base
def _sv_workdir(data):
    """Return (creating if needed) the per-sample THetA heterogeneity work dir."""
    out_dir = os.path.join(data["dirs"]["work"], "heterogeneity",
                           dd.get_sample_name(data), "theta")
    return utils.safe_makedir(out_dir)
def _get_cmd(cmd):
"""Retrieve required commands for running THetA with our local bcbio python.
"""
check_cmd = "RunTHetA.py"
try:
local_cmd = subprocess.check_output(["which", check_cmd]).strip()
except subprocess.CalledProcessError:
return None
return [sys.executable, "%s/%s" % (os.path.dirname(os.path.realpath(local_cmd)), cmd)]
| |
#!/usr/bin/python
# __*__ coding: utf8 __*__
#import string
#import copy
#import math
#import random
from model_ngbr import model_ngbr
#--- some functions --------------------------------------------------
# periodic boundary conditions
def bound(x, y):
    """Map displacement x into the minimum-image interval for box length y."""
    half = y / 2.
    if x > half:
        return x - y
    if x < -half:
        return x + y
    return x
def bound_v(a, b, c):
    """Component-wise minimum-image difference a - b for box lengths c."""
    out = []
    for ai, bi, ci in zip(a, b, c):
        out.append(((ai - bi) + 0.5 * ci) % ci - 0.5 * ci)
    return out
#-------------------------------------------------------------
# vector product of two vectors
def vec_prod(a, b):
    """Cross product of two 3-vectors, returned as a new list."""
    return [a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0]]
#------------------------------------------------------------
# dot product of two vectors (3)
def dot_prod(a, b):
    """Dot product of the first three components of a and b.

    Intentionally fixed at 3 components: callers pass longer vectors whose
    trailing entries (e.g. cached squared lengths) must be ignored.
    """
    return sum(a[i] * b[i] for i in range(3))
#------------------------------------------------------------
def sistema(a,b,c):
    '''Solve a 3x3 linear system by Cramer's rule, halving each component.

    a, b, c are 4-vectors: components 0-2 are row coefficients, component 3
    the right-hand side. Returns the solution (each entry divided by 2), or
    None (bare return) when the determinant is numerically zero -- callers
    must check for that.
    '''
    # 2x2 sub-determinants (components of the cross product a x b)
    d1=a[1]*b[2]-a[2]*b[1]
    d2=a[2]*b[0]-a[0]*b[2]
    d3=a[0]*b[1]-a[1]*b[0]
    dt1=-a[3]*b[0]+a[0]*b[3]
    dt2= a[3]*b[1]-a[1]*b[3]
    dt3= a[3]*b[2]-a[2]*b[3]
    det=d1*c[0]+d2*c[1]+d3*c[2]
    if (abs(det) < 1e-12):
        # nearly singular: no reliable solution
#        print 'det=',det
        return
    v=[]
    v.append( (c[3]*d1-c[1]*dt3+c[2]*dt2)/det /2. )
    v.append( (c[0]*dt3-c[3]*(-d2)+c[2]*dt1)/det /2. )
    v.append( (-c[0]*dt2-c[1]*dt1+c[3]*d3)/det /2.)
    return v
#--------------------------------------------------------
def sistema_del(r,a,b,c):
    """Solve for spheres tangent to four atoms (Delaunay empty-sphere problem).

    r holds the radii [r0, r1, r2, r3]; a, b, c are vectors from the central
    atom to the three neighbours (with squared length in component 3).
    Returns a list of 0-2 tuples (RAD, center) with positive tangent-sphere
    radius RAD, or [] when the discriminant is non-positive.
    """
    u=[(i*i-dot_prod(ii,ii)-r[0]*r[0])/2. for i,ii in zip(r[1:],[a,b,c])] +[0]
    dr=[ ii-r[0] for ii in r[1:] ] +[0]
    # rows of the (negated) coefficient matrix
    x=[-a[0],-b[0],-c[0]]
    y=[-a[1],-b[1],-c[1]]
    z=[-a[2],-b[2],-c[2]]
    q=[ dot_prod(dr,vec_prod(i,j)) for i,j in [(y,z),(z,x),(x,y)] ]
    w=[ dot_prod(u,vec_prod(i,j)) for i,j in [(y,z),(z,x),(x,y)] ]
    det= dot_prod(x,vec_prod(y,z))
    det2=det*det
    # quadratic in RAD: E*RAD^2 + 2*G*RAD + F = 0
    E=(dot_prod(q,q)-det2)
    G=(dot_prod(q,w)-r[0]*det2)
    F=(dot_prod(w,w)-(r[0]*r[0])*det2)
    DIS=G*G-E*F
    if DIS<=0: return []
    DIS=pow(DIS,0.5)
    v=[]
    # keep only roots with positive radius
    RAD=(-G-DIS)/E
    if RAD >0 :
        v.append( (RAD,[(x*RAD+y)/det for x,y in zip(q,w)] ) )
    RAD=(-G+DIS)/E
    if RAD >0 :
        v.append( (RAD,[(x*RAD+y)/det for x,y in zip(q,w)] ) )
    return v
#---------------------------------------------------
def fnd_del(r, a, b=None, c=None):
    """Delaunay candidate search step: return [metric, radius, center].

    With only a: gap between two spheres along the bond. With a and b: c is
    derived as their cross product. Otherwise delegates to sistema_del and
    returns the first solution found; a large sentinel triple is returned
    when there is no solution. (Idiom fix: compare to None with ``is``.)
    """
    if b is None:
        return [a[3]**0.5 - r[0] - r[1], 0, [0, 0, 0]]
    if c is None:
        c = vec_prod(a, b)
    # only the first tangent-sphere solution (if any) is used
    for rrr, v in sistema_del(r, a, b, c):
        v2 = dot_prod(v, v)
        return [v2, rrr, v]
    return [3e20, 3e10, [1e10, 1e10, 1e10]]
def fnd_vor(a, b=None, c=None):
    """Voronoi candidate search step: return [dist^2, dist, vertex].

    With only a: half squared bond length. Otherwise solves the plane
    intersection via sistema; unsolvable systems map to a large sentinel
    vertex. (Idiom fix: compare to None with ``is``.)
    """
    if b is None:
        return [a[3], 0, [0, 0, 0]]
    if c is None:
        c = vec_prod(a, b)
    # NOTE(review): this append runs even when c was supplied by the caller,
    # growing the caller's list in place -- preserved as-is, confirm intent.
    c.append( dot_prod(a,c))
    v = sistema(a, b, c)
    if v is None:
        v = [1e10, 1e10, 1e10]
    v2 = dot_prod(v, v)
    return [v2, v2**0.5, v]
#===================================================================
class model_voronoi(model_ngbr):
# --------------------------------------------------------------------
def __init__(self,d={}):
model_ngbr.__init__(self,d)
#=========== Make radii =========================================
    def make_radii(self):
        """Assign a radius to every atom from mutual nearest-neighbour pairs.

        Iterates until all radii are non-zero: atoms that are each other's
        nearest neighbour split their bond length in half (via f_rad).
        Returns the list of radii, indexed like self.atoms.
        """
        print "RAD go"
        def f_rad(v2,r):
            # v2 is a squared distance; split evenly when the partner radius
            # is still unknown, otherwise take the remaining gap
            if r==0: return pow(v2,0.5)/2.
            return pow(v2,0.5)-r
        rad=[0 for x in self.atoms]
        sos=[-1 for x in self.atoms]   # index of current nearest neighbour
        while not all(tuple(rad)):
            for i in range(len(self.atoms)):
                if rad[i]==0:
                    # NOTE(review): the generator's loop variable i shadows
                    # the outer i (Python 2 genexp scoping); rad[i] inside
                    # refers to the *neighbour's* radius -- confirm intended.
                    rm,jat=min( (f_rad(vec[3],rad[i]),i) for i,vec in self.ngbr_it(i))
                    sos[i]=jat
                    if rad[jat]<>0:
                        rad[i]=rm
                    elif sos[jat]==i:
                        # mutual nearest neighbours: fix both radii at once
                        rad[i]=rm
                        rad[jat]=rm
## check
#        sos=[0 for x in self.atoms]
#        for i in range(len(self.atoms)):
#            ng=self.ngbr.index[i]
#            for ke in ng.keys():
#                d=pow(ng[ke][3],0.5)-rad[ke]-rad[i]
#                if abs(d)<1e-7: sos[i] = 1
##                if d<0: print 'bad radii',i,ke,rad[i],rad[ke],ng[ke][3],d
#        x=reduce(lambda x,y: x*y, sos)
#        if x<>1: print 'bad x',x
#
#        print "RAD done",x
        return rad
#------------------------------------------------------------------
    def del123(self,iat):
        """Find the initial Delaunay simplex around atom iat.

        Greedily picks the three neighbours (jat, kat, mat) minimizing the
        fnd_del metric, extending the radius list r0 at each step.
        Returns ([iat, jat, kat, mat], vertex_vector, radius).
        """
        ir = self.legend.index('rad')
        r0=[self.atoms[iat][ir]]
        q,rrr,q1,vec0,jat = min(( fnd_del(r0+[self.atoms[i][ir]],vec)+[vec,i] for i,vec in self.ngbr_it(iat) ))
        r0+=[self.atoms[jat][ir]]
        q,rrr,q1,vec1,kat=min(( fnd_del(r0+[self.atoms[i][ir]],vec0,vec)+[vec,i] for i,vec in self.ngbr_it(iat) )) # second
        r0+=[self.atoms[kat][ir]]
        q,rrr,vec2,mat=min(( fnd_del(r0+[self.atoms[i][ir]],vec0,vec1,vec)+[i] for i,vec in self.ngbr_it(iat) )) # second
        return [iat,jat,kat,mat],vec2,rrr
    def vor123(self,iat):
        """Find the initial Voronoi vertex around atom iat.

        Same greedy scheme as del123 but without radii, using fnd_vor.
        Returns ([iat, jat, kat, mat], vertex_vector, distance).
        """
        q,rrr,q1,vec0,jat = min(( fnd_vor(vec)+[vec,i] for i,vec in self.ngbr_it(iat) ))
        q,rrr,q1,vec1,kat=min(( fnd_vor(vec0,vec)+[vec,i] for i,vec in self.ngbr_it(iat) )) # second
        q,rrr,vec2,mat=min(( fnd_vor(vec0,vec1,vec)+[i] for i,vec in self.ngbr_it(iat) )) # second
        return [iat,jat,kat,mat],vec2,rrr
#=========================================================================
    def vor_next(self,at,vert):
        """Walk one step along a Voronoi edge: given a face (three atoms in
        ``at``) and a known vertex ``vert``, find the fourth atom ``mat``
        closing the next vertex.

        Returns (mat, vertex_absolute_coords, distance_to_vertex).
        """
        vc=self.vc
        ix=self.legend.index('x')
        crd_abs=self.atoms[at[0]][ix:ix+3]
        # known vertex in relative coordinates
        point= [ ((x-y)+0.5*z)%z-0.5*z for x,y,z in zip(vert[4:7],crd_abs,vc) ]
        ng=self.ngbr.index[at[0]]
        a=ng[at[1]]
        b=ng[at[2]]
        # inlined Cramer sub-determinants (same as sistema) for speed
        d1=a[1]*b[2]-a[2]*b[1]
        d2=a[2]*b[0]-a[0]*b[2]
        d3=a[0]*b[1]-a[1]*b[0]
        dt1=-a[3]*b[0]+a[0]*b[3]
        dt2= a[3]*b[1]-a[1]*b[3]
        dt3= a[3]*b[2]-a[2]*b[3]
        old=[ i for i in vert[0:4] if i not in at][0] # old, opposit atom
        vo=ng[old]
        # NOTE: `min` here is a local float shadowing the builtin (legacy style)
        min=1e9
        for ik,c in self.ngbr.index[at[0]].iteritems():
            if ik in vert[0:4]: continue
            det=d1*c[0]+d2*c[1]+d3*c[2]
            if (abs(det) < 1e-12):
                continue
            # candidate vertex for the plane triple (a, b, c)
            v=[ (c[3]*d1-c[1]*dt3+c[2]*dt2)/det /2. ]
            v.append( (c[0]*dt3-c[3]*(-d2)+c[2]*dt1)/det /2. )
            v.append( (-c[0]*dt2-c[1]*dt1+c[3]*d3)/det /2.)
#            print v,vo
            if 2*dot_prod(v,vo) > vo[3]: continue # atom opposit to old one
            dv=sum((x-y)*(x-y) for x,y in zip(v,point))
            if dv<min:
                min=dv # nearest to known vertex
                mat=ik
                vec=v
        rrr=dot_prod(vec,vec)**0.5
        vec_abs= [ (x+y)%z for x,y,z in zip(vec,crd_abs,vc) ] # vertex in absolute coordinates
        return mat,vec_abs,rrr
#===================================================================
    def del_next(self,at,vert):
        """Walk one step of the Delaunay triangulation: given a face (three
        atoms in ``at``) and a known vertex ``vert``, find the fourth atom
        ``mat`` whose tangent sphere closes the next simplex.

        Returns (mat, vertex_absolute_coords_plus_radius, radius).
        """
        att=self.atoms
        vc=self.vc
        ix = self.legend.index('x')
        crd_abs=att[at[0]][ix:ix+3]
        # known vertex in relative coordinates
        point= [ ((x-y)+0.5*z)%z-0.5*z for x,y,z in zip(vert[4:7],crd_abs,vc) ]
        ng=self.ngbr.index[at[0]]
        a=ng[at[1]]
        b=ng[at[2]]
        ng1=self.ngbr.index[at[1]].keys()
        ng2=self.ngbr.index[at[2]].keys()
        old=[ i for i in vert[0:4] if i not in at][0] # old, opposit atom
        vo=ng[old]
        ir = self.legend.index('rad')
        r0=[ att[at[0]][ir],att[at[1]][ir],att[at[2]][ir] ]
#        print 'as1'
        # NOTE: `min` is a local float shadowing the builtin (legacy style)
        min=1e9
        mat=0
        for ik,c in self.ngbr_it(at[0]):
            if ik in vert[0:4]: continue
            # candidate must neighbour all three face atoms
            if ik not in ng1: continue
            if ik not in ng2: continue
            v1=sistema_del(r0+[att[ik][ir]],a,b,c)
            if v1== None: continue
            for rrr,v in v1:
                # accept only spheres not penetrating the old opposite atom
                if sum((x-y)**2 for x,y in zip(v,vo))**0.5-rrr-att[old][ir] >=0:
                    dv=sum((x-y)*(x-y) for x,y, in zip(v,point) )
                    if dv<min:
                        min=dv # nearest to known vertex
                        mat=ik
                        vec=v+[rrr]
        vec_abs= [ (x+y)%z for x,y,z in zip(vec,crd_abs,vc) ] # vertex in absolute coordinates
        return mat,vec_abs,vec[3]
#======= Make Voronoi =====================================================
# construct Voronoi mesh
    def make_voronoi(self,iat=1):
        """ constructs Voronoi mesh

        Builds the full Voronoi/Delaunay structure starting from atom iat,
        choosing Delaunay mode when atoms carry a 'rad' column. Populates
        self.index, self.index_back, self.vertexes and self.edges.
        """
        #---------------------------------------------------------
        vc=self.vc
        ngbr=self.ngbr.index
        ix=self.legend.index('x')
        print "Voronoi mesh go"
        try:
            self.legend.index('rad')
            fnd123=self.del123
            fnd_next=self.del_next
            print 'Delone triangulation'
        except:
            fnd123=self.vor123
            fnd_next=self.vor_next
            print 'Voronoi polihedra'
        at,vec2,rrr=fnd123(iat)
#        print at
        # index: atom id -> local index; index_back: local index -> atom id
        index_back=at
        index = [-1 for x in self.atoms]
        index[at[0]]=0
        index[at[1]]=1
        index[at[2]]=2
        index[at[3]]=3
        # convert to real coordinates
        vec_abs= [ (x+y)%v for x,y,v in zip(vec2,self.atoms[iat][ix:ix+3],vc) ]
        vertexes=[at+vec_abs+[dot_prod(vec2,vec2)]]
        self.edges = [ [0,1,2,0],[0,1,3,0],[0,2,3,0],[1,2,3,0] ]
        ne=0
        # breadth-first expansion: each open edge (len < 5) spawns a vertex
        while ne <> len(self.edges):
#            print ne,len(self.edges)
            ed=self.edges[ne]
            if len(ed) < 5:
                vert=vertexes[ed[3]]
                at=[index_back[ed[x]] for x in range(3)]
                mat,cr_abs,rrr = fnd_next(at,vert)
                #atom, coord[0:3], edge as vector[0:3]
                if index[mat] == -1: # new atom
                    index[mat]=len(index_back)
                    index_back.append(mat)
                # add vertex, add edges
                nver=len(vertexes)
                vertexes += [at+[mat]+cr_abs+[rrr]]
                self.edges[ne] += [nver] # edge -- 2 vertexes
                self.add_edge([ed[0],ed[2],index[mat]],nver,ne)
                self.add_edge([ed[1],ed[2],index[mat]],nver,ne)
                self.add_edge([ed[0],ed[1],index[mat]],nver,ne)
            ne += 1
        #check fxor n22 and n3
        # post-pass: split degenerate edges that collected 4 vertexes
        for i in xrange(len(self.edges)):
            ed=self.edges[i]
            if len(ed)==5: continue
            if len(ed)==7:
                at=[index_back[x] for x in ed[0:3]]
                crd_abs=self.atoms[at[0]][ix:ix+3]
                ng=ngbr[at[0]]
                c=vec_prod(ng[at[1]],ng[at[2]])
                # order the 4 vertexes along the face normal, then pair them
                v = ( vertexes[x][4:7] for x in ed[3:7] )
                v1= ( dot_prod(c,bound_v(x,crd_abs,vc)) for x in v )
                s=sorted([(j,i) for i,j in enumerate(v1)])
                ed1=ed[0:3]+[ ed[3+s[0][1]],ed[3+s[1][1]] ]
                ed2=ed[0:3]+[ ed[3+s[2][1]],ed[3+s[3][1]] ]
                self.edges[i]=ed1
                self.edges.insert(i,ed2)
                ne +=1
                continue
            print 'bad delone!!!!'
        self.index=index
        self.index_back=index_back
        self.vertexes=vertexes
        print 'Voronoi done',ne
        return
#<<<<<<<<<<<<<< construction is done, find parameters >>>>>>>>>>>>>>>>>>
# calculate V, S distribution
    def get_voronoi_param(self):
        """Compute per-atom Voronoi surface areas, volumes, sphericity and
        per-pair plate statistics from the edge/vertex structure built by
        make_voronoi.

        Returns (sq, vol, k_sph, pl_mv): areas, volumes, sphericity factors
        per atom, and per-pair dicts [count, area, volume, distance].
        """
        index=self.index
        index_back=self.index_back
        ix = self.legend.index('x')
        sq=[0. for x in self.atoms]
        vol=[0. for x in self.atoms]
        pl_mv=[{} for x in self.atoms]
        for ed in self.edges:
#            if len(ed) <> 5: print ed,'bad!!!'
            # make plates for each MV
            at=[ index_back[x] for x in ed[0:3]]
            crd_abs=self.atoms[at[0]][ix:ix+3]
            # both edge endpoints, minimum-image relative to the first atom
            w= [bound(x-y,z) for x,y,z in zip(self.vertexes[ed[3]][4:7],crd_abs,self.vc) ]
            w1= [bound(x-y,z) for x,y,z in zip(self.vertexes[ed[4]][4:7],crd_abs,self.vc) ]
            nv=[x-y for x,y in zip(w1,w)] #ed[5:9]
            nv+=[dot_prod(nv,nv)]
            ng=self.ngbr.index[at[0]]
            a=ng[at[1]] # to 1-st ngbr
            b=ng[at[2]] # to 2-nd ngbr
            ab=vec_prod(a,b)
#            print ab,w,nv
            alf = - dot_prod(ab,w) / dot_prod(ab,nv)
            # center of triangle - alf*nv+w
            # r2 - square of radius of external circle
            r2=alf*alf*nv[3]+dot_prod(w,w)+2*alf*dot_prod(nv,w)
            # squares of length
            l=[a[3]/4.,b[3]/4.,(a[3] + b[3]-2*dot_prod(a,b))/4.]
            # perpendiculars from sides of triangle to the center of circle
            try:
                h=[pow(abs(r2-x),0.5) for x in l]
            except (ValueError,):
                print r2,l
                raise
            # if angle >90, should substract the value
            if l[0]>l[1]+l[2]: h[0] *= -1
            if l[1]>l[0]+l[2]: h[1] *= -1
            if l[2]>l[0]+l[1]: h[2] *= -1
#            print 'h',h
            # squares of plates
            s= [x*pow(nv[3],0.5)/2. for x in h]
            # volumes
            v= [x*pow(y,0.5)/3. for x,y in zip(s,l)]
            # accumulate per-pair stats [count, area, volume], shared both ways
            pl_mv[at[0]][at[1]] = pl_mv[at[0]].get(at[1],[0,0.,0.])
            pl_mv[at[0]][at[2]] = pl_mv[at[0]].get(at[2],[0,0.,0.])
            pl_mv[at[1]][at[2]] = pl_mv[at[1]].get(at[2],[0,0.,0.])
            pl_mv[at[0]][at[1]][0] +=1
            pl_mv[at[0]][at[2]][0] +=1
            pl_mv[at[1]][at[2]][0] +=1
            pl_mv[at[0]][at[1]][1] += s[0]
            pl_mv[at[0]][at[2]][1] += s[1]
            pl_mv[at[1]][at[2]][1] += s[2]
            pl_mv[at[0]][at[1]][2] += v[0]
            pl_mv[at[0]][at[2]][2] += v[1]
            pl_mv[at[1]][at[2]][2] += v[2]
            pl_mv[at[1]][at[0]] = pl_mv[at[0]][at[1]]
            pl_mv[at[2]][at[0]] = pl_mv[at[0]][at[2]]
            pl_mv[at[2]][at[1]] = pl_mv[at[1]][at[2]]
            sq[at[0]] += s[0]+s[1]
            sq[at[1]] += s[0]+s[2]
            sq[at[2]] += s[1]+s[2]
            vol[at[0]] += v[0]+v[1]
            vol[at[1]] += v[0]+v[2]
            vol[at[2]] += v[1]+v[2]
        # append the inter-atomic distance to each pair record
        for i in range(len(pl_mv)):
            for j in pl_mv[i]:
                pl_mv[i][j].append(pow(self.ngbr.index[i][j][3], 0.5))
        # sphericity: 36*pi*V^2/S^3 (1 for a perfect sphere)
        k_sph = [36.*3.1416*x*x/(y*y*y) for x,y in zip(vol,sq)]
        print "Voronoi done"
        return sq,vol,k_sph,pl_mv
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# calculate T, O, S, N parameters
    def get_delone_param(self):
        """Compute Delaunay-simplex shape statistics.

        Returns (n_par, n_test, tet, oct, sov):
          n_par/n_test -- per-vertex counters of overlapping / tested
                          circumsphere pairs (one entry per vertex),
          tet          -- tetrahedricity T per simplex,
          oct          -- octahedricity O per simplex,
          sov          -- min(T/0.018, O/0.030) per simplex.

        NOTE(review): several points look broken and need confirming against
        the intended algorithm:
          * `aat` is not defined in this scope (should it be a neighbour
            table such as self.ngbr.index?);
          * zip(l,l) pairs each length with itself, so `t` is always 0
            (pairwise differences were probably intended);
          * `l` has 6 entries, so l[6] raises IndexError;
          * `for i in l[0:5]` iterates the float lengths but uses them as
            indices -- range(...) was probably intended.
        """
        index=self.index
        index_back=self.index_back
        vert=self.vertexes
        tet=[]
        oct=[]  # NOTE: shadows the builtin oct()
        sov=[]
        n_par=[0 for x in vert]
        n_test=[0 for x in vert]
        # NOTE(review): iterates self.vert (simplices?), distinct from the
        # local `vert` = self.vertexes -- confirm the attribute exists.
        for ver in self.vert:
            # six edge lengths of the simplex (sqrt of stored squares)
            l=[aat[ver[i]][ver[j]][3]**0.5 for i,j in [(0,1),(0,2),(0,3),(1,2),(1,3),(2,3)] ]
            l0=sum(l)/6
            t = sum((x-y)**2 for x,y in zip(l,l))/15./l0/l0
            l.sort()
            o = sum((x-l[6]/pow(2,0.5))**2 for x in l[0:6])/5/l0/l0
            o1 = sum((l[i]-l[5])**2 for i in l[0:5])
            o1 += sum((l[i]-l[4])**2 for i in l[0:4])
            o1 += sum((l[i]-l[3])**2 for i in l[0:3])
            o1 += sum((l[i]-l[2])**2 for i in l[0:2])
            o1 += (l[0]-l[1])**2
            o = o1/10/l0/l0 +o
            tet +=[t]
            oct +=[o]
            sov += [min(t/0.018,o/0.030)]
        # Count, per vertex, how many vertex-sphere pairs overlap (r < 0).
        for ed in self.edges:
            v1=ed[3]
            v2=ed[4]
            v=map(to_center, vert[v1][4:7],vert[v2][4:7],self.vc)
            r=pow(dot_prod(v,v),0.5)-vert[v1][7]-vert[v2][7]
            n_test[v1] +=1
            n_test[v2] +=1
            if r <0:
                n_par[v1] +=1
                n_par[v2] +=1
        print "Delone parameter done"
        return n_par,n_test,tet,oct,sov
def ngbr_it(self,iat,r=None,part=''):
filt={}
filt['gt']=lambda x,y: x>y
filt['ge']=lambda x,y: x>=y
filt['lt']=lambda x,y: x<y
filt['le']=lambda x,y: x<=y
filt['ne']=lambda x,y: x<>y
filt['']=lambda x,y: 1==1
ff=filt[part]
if hasattr(self,'ngbr'):
for k,vec in self.ngbr.index[iat].iteritems():
if ff(k,iat):
yield k,vec
else:
if not hasattr(self,'ngbr_short'): self.make_ngbr_short(r)
for k in self.ngbr_short.index[iat]:
if ff(k,iat):
yield k,[None,None,None,None]
#<<<<<<<<<<<<<< >>>>>>>>>>>>>>>>>>
def get_voronoi_ngbr(self):
index=self.index
index_back=self.index_back
ix = self.legend.index('x')
pl_mv=[[] for x in self.atoms]
for ed in self.edges:
at=[ index_back[x] for x in ed[0:3]]
if pl_mv[at[0]].count(at[1])==0:
pl_mv[at[0]].append(at[1])
pl_mv[at[1]].append(at[0])
if pl_mv[at[0]].count(at[2])==0:
pl_mv[at[0]].append(at[2])
pl_mv[at[2]].append(at[0])
if pl_mv[at[1]].count(at[2])==0:
pl_mv[at[1]].append(at[2])
pl_mv[at[2]].append(at[1])
return pl_mv
#<<<<<<<<<<<<<< >>>>>>>>>>>>>>>>>>
    def get_voronoi_ngbr1(self,iat=1):
        """Extract the Voronoi polyhedron of atom *iat* as a small graph.

        Returns (ind0, ed0, vec0):
            ind0 -- vertex ids of the polyhedron around iat,
            ed0  -- edges as pairs of positions into ind0,
            vec0 -- vertex coordinates transposed into (xs, ys, zs).

        NOTE(review): the second loop also builds an atom-level graph
        (ind1, ed1, vec1) which is never returned -- dead code or an
        unfinished feature; confirm before removing.
        """
        index=self.index
        index_back=self.index_back
        ix = self.legend.index('x')
        jat=self.index[iat]
        # print jat,iat
        ind0=[]
        ed0=[]
        for ed in self.edges:
            # keep only edges whose face involves atom jat
            if not jat in ed[0:3]: continue
            v1=ed[3]
            v2=ed[4]
            if not v1 in ind0: ind0.append(v1)
            n1=ind0.index(v1)
            if not v2 in ind0: ind0.append(v2)
            n2=ind0.index(v2)
            ed0.append([n1,n2])
        vec0=zip(*[self.vertexes[i][4:7] for i in ind0])
        ind1=[iat]
        ed1=[]
        for ed in self.edges:
            if not jat in ed[0:3]: continue
            a=[index_back[i] for i in ed[0:3]]
            for i in a:
                if not i in ind1: ind1.append(i)
            nn=sorted(ind1.index(i) for i in a)
            # print nn
            if not [nn[0],nn[1]] in ed1:
                ed1.append([nn[0],nn[1]])
            if not [nn[0],nn[2]] in ed1:
                ed1.append([nn[0],nn[2]])
            if not [nn[1],nn[2]] in ed1:
                ed1.append([nn[1],nn[2]])
        vec1=zip(*[self.atoms[i][ix:ix+3] for i in ind1])
        return ind0,ed0,vec0
# --------------------------------------------------------------------
    # Fast insert: the first `ne` entries of self.edges are already sorted
def add_edge(self,ed,ver,ne):
#----------------------
def bigger(a,b):
if a[0] <> b[0]:
return a[0] - b[0]
if a[1] <> b[1]:
return a[1] - b[1]
if a[2] <> b[2]:
return a[2] - b[2]
return 0
#---------------------
ed.sort()
nmin=ne # already sorted, don't touch
nmax=len(self.edges)-1
if bigger(self.edges[nmax],ed) <0 : # insert at left side
self.edges.insert(nmax+1,ed+[ver])
return
while nmax-nmin > 1: # bisection method
ind=(nmax+nmin)/2
b=bigger(self.edges[ind],ed)
if b ==0:
self.edges[ind].append(ver) # known edge, add 2-nd vertex
return
if b > 0:
nmax=ind
else:
nmin=ind
if bigger(self.edges[nmax],ed) ==0 : # known edge, add 2-nd vertex
self.edges[nmax].append(ver)
return
if bigger(self.edges[nmin],ed) ==0 : # known edge, add 2-nd vertex
self.edges[nmin].append(ver)
return
# insert in nmin+1
self.edges.insert(nmax,ed+[ver]) # add new edge
#************************************************************************
if __name__=='__main__': # run as a program
    # Demo driver: load a LAMMPS dump file, build the Verlet/neighbour
    # tables, then run the Voronoi decomposition and its statistics.
    from model_io import mod_lammps_dump
    print 'go'
    mod=model_voronoi(mod_lammps_dump('dump.lmp'))
    mod.make_verlet(5)
    mod.make_ngbr(5,'ne')
#  rad=mod.make_radii()
#  mod.add_prop(rad,leg_st='rad',for_st='f')
    mod.make_voronoi()
    mod.get_voronoi_param()
| |
import os
from os.path import realpath, dirname
import struct
import itertools
import functools
import ctypes
from ctypes import byref, WINFUNCTYPE, HRESULT, WinError
from simple_com import COMInterface, IDebugOutputCallbacksVtable
import resource_emulation
import driver_upgrade
from driver_upgrade import DU_MEMALLOC_IOCTL, DU_KCALL_IOCTL, DU_OUT_IOCTL, DU_IN_IOCTL
import windows
import windows.hooks
import windows.winproxy as winproxy
from windows.generated_def.winstructs import *
from dbgdef import *
from dbgtype import DbgEngType
# Based on the trick used in PRAW
# http://stackoverflow.com/a/22023805
# True only while Sphinx builds the docs (conf.py sets SPHINX_BUILD).
# Fix: bool(os.environ.get('SPHINX_BUILD', '0')) was True even when the
# variable was unset, because bool('0') is True (non-empty string).  Treat
# unset, '' and '0' as "not a sphinx build".
IS_SPHINX_BUILD = os.environ.get('SPHINX_BUILD', '0') not in ('0', '')
# The COM Interfaces we need for the LocalKernelDebugger
class IDebugClient(COMInterface):
    """IDebugClient vtable description.  Only the slots needed to attach to
    the local kernel, end the session and register output callbacks are
    wrapped; the integer in each entry is the vtable index."""
    _functions_ = {
        "QueryInterface": ctypes.WINFUNCTYPE(HRESULT, PVOID, PVOID)(0, "QueryInterface"),
        "AddRef": ctypes.WINFUNCTYPE(HRESULT)(1, "AddRef"),
        "Release": ctypes.WINFUNCTYPE(HRESULT)(2, "Release"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff538145%28v=vs.85%29.aspx
        "AttachKernel": ctypes.WINFUNCTYPE(HRESULT, ULONG, c_char_p)(3, "AttachKernel"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff541851%28v=vs.85%29.aspx
        "DetachProcesses": WINFUNCTYPE(HRESULT)(25, "DetachProcesses"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff543004%28v=vs.85%29.aspx
        "EndSession": WINFUNCTYPE(HRESULT, c_ulong)(26, "EndSession"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff556751%28v=vs.85%29.aspx
        "SetOutputCallbacks": ctypes.WINFUNCTYPE(HRESULT, c_void_p)(34, "SetOutputCallbacks"),
    }
class IDebugDataSpaces(COMInterface):
    """IDebugDataSpaces vtable description: virtual/physical memory, I/O
    port, MSR and bus data access slots (the integer is the vtable index)."""
    _functions_ = {
        "QueryInterface": ctypes.WINFUNCTYPE(HRESULT, PVOID, PVOID)(0, "QueryInterface"),
        "AddRef": ctypes.WINFUNCTYPE(HRESULT)(1, "AddRef"),
        "Release": ctypes.WINFUNCTYPE(HRESULT)(2, "Release"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff554359%28v=vs.85%29.aspx
        "ReadVirtual": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG)(3, "ReadVirtual"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561468%28v=vs.85%29.aspx
        "WriteVirtual": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG)(4, "WriteVirtual"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff554310%28v=vs.85%29.aspx
        "ReadPhysical": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG)(10, "ReadPhysical"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561432%28v=vs.85%29.aspx
        "WritePhysical": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG)(11, "WritePhysical"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff553573%28v=vs.85%29.aspx
        "ReadIo": WINFUNCTYPE(HRESULT, ULONG, ULONG, ULONG, ULONG64, PVOID, ULONG, PULONG)(14, "ReadIo"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561402%28v=vs.85%29.aspx
        "WriteIo": WINFUNCTYPE(HRESULT, ULONG, ULONG, ULONG, ULONG64, PVOID, ULONG, PULONG)(15, "WriteIo"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff554289%28v=vs.85%29.aspx
        "ReadMsr": WINFUNCTYPE(HRESULT, ULONG, PULONG64)(16, "ReadMsr"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561424%28v=vs.85%29.aspx
        "WriteMsr": WINFUNCTYPE(HRESULT, ULONG, ULONG64)(17, "WriteMsr"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff553519%28v=vs.85%29.aspx
        "ReadBusData": WINFUNCTYPE(HRESULT, ULONG, ULONG, ULONG, ULONG, PVOID, ULONG, PULONG)(18, "ReadBusData"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561371%28v=vs.85%29.aspx
        "WriteBusData": WINFUNCTYPE(HRESULT, ULONG, ULONG, ULONG, ULONG, PVOID, ULONG, PULONG)(19, "WriteBusData"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff554326%28v=vs.85%29.aspx
        "ReadProcessorSystemData": WINFUNCTYPE(HRESULT, ULONG, ULONG, PVOID, ULONG, PULONG)(22, "ReadProcessorSystemData"),
    }
class IDebugDataSpaces2(COMInterface):
    """IDebugDataSpaces2 vtable: everything from IDebugDataSpaces plus the
    VirtualToPhysical translation slot."""
    # Copy the parent's slot table and extend it with the new entry.
    # https://msdn.microsoft.com/en-us/library/windows/hardware/ff560335%28v=vs.85%29.aspx
    _functions_ = dict(
        IDebugDataSpaces._functions_,
        VirtualToPhysical=WINFUNCTYPE(HRESULT, ULONG64, PULONG64)(23, "VirtualToPhysical"),
    )
# https://msdn.microsoft.com/en-us/library/windows/hardware/ff550856%28v=vs.85%29.aspx
class IDebugSymbols(COMInterface):
    """IDebugSymbols vtable description: symbol path/options, name/offset
    resolution, module enumeration and type queries (the integer in each
    entry is the vtable index)."""
    _functions_ = {
        "QueryInterface": ctypes.WINFUNCTYPE(HRESULT, PVOID, PVOID)(0, "QueryInterface"),
        "AddRef": ctypes.WINFUNCTYPE(HRESULT)(1, "AddRef"),
        "Release": ctypes.WINFUNCTYPE(HRESULT)(2, "Release"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff556798%28v=vs.85%29.aspx
        "SetSymbolOption": WINFUNCTYPE(HRESULT, ctypes.c_ulong)(6, "SetSymbolOption"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff547183%28v=vs.85%29.aspx
        "GetNameByOffset": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG, PULONG64)(7, "GetNameByOffset"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff548035%28v=vs.85%29.aspx
        "GetOffsetByName": WINFUNCTYPE(HRESULT, c_char_p, POINTER(ctypes.c_uint64))(8, "GetOffsetByName"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff547927%28v=vs.85%29.aspx
        "GetNumberModules": WINFUNCTYPE(HRESULT, LPDWORD, LPDWORD)(12, "GetNumberModules"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff547080%28v=vs.85%29.aspx
        "GetModuleByIndex": WINFUNCTYPE(HRESULT, DWORD, PULONG64)(13, "GetModuleByIndex"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff547146%28v=vs.85%29.aspx
        "GetModuleNames": WINFUNCTYPE(HRESULT, DWORD, c_uint64,
            PVOID, DWORD, LPDWORD, PVOID, DWORD, LPDWORD, PVOID, DWORD, LPDWORD)(16, "GetModuleNames"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff549408%28v=vs.85%29.aspx
        "GetTypeName": WINFUNCTYPE(HRESULT, ULONG64, ULONG, PVOID, ULONG, PULONG)(19, "GetTypeName"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff549376%28v=vs.85%29.aspx
        "GetTypeId": WINFUNCTYPE(HRESULT, ULONG64, c_char_p, PULONG)(20, "GetTypeId"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff549457%28v=vs.85%29.aspx
        "GetTypeSize": WINFUNCTYPE(HRESULT, ULONG64, ULONG, PULONG)(21, "GetTypeSize"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff546763%28v=vs.85%29.aspx
        "GetFieldOffset": WINFUNCTYPE(HRESULT, ULONG64, ULONG, c_char_p, PULONG)(22, "GetFieldOffset"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff549173%28v=vs.85%29.aspx
        "GetSymbolTypeId": WINFUNCTYPE(HRESULT, c_char_p, PULONG, PULONG64)(23, "GetSymbolTypeId"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff558815%28v=vs.85%29.aspx
        "StartSymbolMatch": WINFUNCTYPE(HRESULT, c_char_p, PULONG64)(36, "StartSymbolMatch"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff547856%28v=vs.85%29.aspx
        "GetNextSymbolMatch": WINFUNCTYPE(HRESULT, ULONG64, PVOID, ULONG, PULONG, PULONG64)(37, "GetNextSymbolMatch"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff543008%28v=vs.85%29.aspx
        "EndSymbolMatch": WINFUNCTYPE(HRESULT, ULONG64)(38, "EndSymbolMatch"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff554379%28v=vs.85%29.aspx
        "Reload": WINFUNCTYPE(HRESULT, c_char_p)(39, "Reload"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff556802%28v=vs.85%29.aspx
        "SetSymbolPath": WINFUNCTYPE(HRESULT, c_char_p)(41, "SetSymbolPath"),
    }
class IDebugSymbols2(COMInterface):
    """IDebugSymbols2 vtable: IDebugSymbols plus field-introspection slots."""
    # Copy the parent's slot table and extend it with the two extra methods.
    # GetFieldName: https://msdn.microsoft.com/en-us/library/windows/hardware/ff546747%28v=vs.85%29.aspx
    # GetFieldTypeAndOffset: https://msdn.microsoft.com/en-us/library/windows/hardware/ff546771%28v=vs.85%29.aspx
    _functions_ = dict(
        IDebugSymbols._functions_,
        GetFieldName=WINFUNCTYPE(HRESULT, ULONG64, ULONG, ULONG, PVOID, ULONG, PULONG)(55, "GetFieldName"),
        GetFieldTypeAndOffset=WINFUNCTYPE(HRESULT, ULONG64, ULONG, c_char_p, PULONG, PULONG)(105, "GetFieldTypeAndOffset"),
    )
class IDebugSymbols3(COMInterface):
    """IDebugSymbols3 vtable: currently identical to IDebugSymbols2 (no
    additional slots are wrapped yet)."""
    _functions_ = dict(IDebugSymbols2._functions_)
class IDebugControl(COMInterface):
    """IDebugControl vtable description: interrupt handling, execution
    status, command execution and event waiting (the integer is the vtable
    index)."""
    _functions_ = {
        "QueryInterface": ctypes.WINFUNCTYPE(HRESULT, PVOID, PVOID)(0, "QueryInterface"),
        "AddRef": ctypes.WINFUNCTYPE(HRESULT)(1, "AddRef"),
        "Release": ctypes.WINFUNCTYPE(HRESULT)(2, "Release"),
        "GetInterrupt": ctypes.WINFUNCTYPE(HRESULT)(3, "GetInterrupt"),
        "SetInterrupt": ctypes.WINFUNCTYPE(HRESULT, ULONG)(4, "SetInterrupt"),
        "GetExecutionStatus": ctypes.WINFUNCTYPE(HRESULT, PULONG)(49, "GetExecutionStatus"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff543208%28v=vs.85%29.aspx
        "Execute": ctypes.WINFUNCTYPE(HRESULT, ULONG, c_char_p, ULONG)(66, "Execute"),
        # https://msdn.microsoft.com/en-us/library/windows/hardware/ff561229%28v=vs.85%29.aspx
        "WaitForEvent": WINFUNCTYPE(HRESULT, DWORD, DWORD)(93, "WaitForEvent")
    }
class DummyIATEntry(ctypes.Structure):
    """A single DWORD mapped over an import-address-table slot so the
    generic IATHook machinery can hook an arbitrary address."""
    _fields_ = [
        ("value", DWORD)]
    @classmethod
    def create(cls, addr, dll, name):
        """Overlay an entry on the IAT slot at *addr* for dll!name."""
        self = cls.from_address(addr)
        self.addr = addr
        self.hook = None
        # Resolved (un-hooked) address of dll!name, kept for pass-through.
        self.nonhookvalue = windows.utils.get_func_addr(dll, name)
        return self
    def set_hook(self, callback, types=None):
        """Install an IAT hook on this entry, enable it and return it."""
        hook = windows.hooks.IATHook(self, callback, types)
        self.hook = hook
        hook.enable()
        return hook
@windows.hooks.GetModuleFileNameWCallback
def EmulateWinDBGName(hModule, lpFilename, nSize, real_function):
    """GetModuleFileNameW hook: pretend the current executable is windbg.

    When hModule is None (caller asks about the current process image) the
    output buffer is filled with "C:\\windbg.exe"; every other query is
    forwarded to the real API.
    """
    if hModule is not None:
        return real_function()
    ptr_addr = ctypes.cast(lpFilename, ctypes.c_void_p).value
    v = (c_char * 100).from_address(ptr_addr)
    path = "C:\\windbg.exe"
    # Hand-rolled UTF-16-LE: interleave NUL bytes, then double-NUL terminate.
    path_wchar = "\x00".join(path) + "\x00\x00\x00"
    v[0:len(path_wchar)] = path_wchar
    return len(path_wchar)
def require_upgraded_driver(f):
    """Decorator for methods that need the upgraded helper driver.

    At runtime the wrapper raises ValueError when self.upgrader is missing
    or not upgraded.  Under a Sphinx build the function is returned
    unwrapped with a '<require upgraded driver>' note prepended to its
    docstring instead.
    """
    if IS_SPHINX_BUILD:
        # Fix: guard against f.__doc__ being None (undocumented function),
        # which crashed the original '.strip()' call.
        doc = f.__doc__ or ""
        if doc.strip().startswith("| "):
            nextline = ""
        else:
            nextline = "| "
        # Strip all leading space for rst parsing by sphinx
        new_doc = "| <require upgraded driver>\n" + nextline + doc + "\n"
        new_doc = "\n".join([l.strip() for l in new_doc.split("\n")])
        f.__doc__ = new_doc
        return f
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, 'upgrader') or not self.upgrader.is_upgraded:
            raise ValueError('Cannot call {0} without upgraded driver'.format(f.__name__))
        return f(self, *args, **kwargs)
    return wrapper
# experimental decorator
# Just used to inform you that we are not sure if this code really works
# and that we are currently working on it
# (yes dbgengine type API is not simple nor complete)
def experimental(f):
    """Mark *f* as experimental: during a Sphinx build the function is
    flagged so no documentation is generated for it.  The callable itself
    is always returned unchanged."""
    if IS_SPHINX_BUILD:
        setattr(f, "_do_not_generate_doc", True)
    return f
class LocalKernelDebuggerBase(object):
DEBUG_DLL_PATH = None
DRIVER_FILENAME = None
DRIVER_RESOURCE = None
# Will be used if '_NT_SYMBOL_PATH' is not set
DEFAULT_SYMBOL_PATH = "SRV*{0}\\symbols*http://msdl.microsoft.com/download/symbols".format(realpath(dirname(__file__)))
SYMBOL_OPT = None
    def __init__(self, quiet=True):
        """Create the debug client and attach it to the local kernel.

        :param quiet: when True (default) the standard output callback
                      swallows engine output instead of printing it
        """
        self.quiet = quiet
        self._output_string = ""        # buffer for to_string execution
        self._output_callback = None    # currently registered callback
        self._load_debug_dll()
        self.DebugClient = self._do_debug_create()
        self._do_kernel_attach()
        self._ask_other_interface()
        self._setup_symbols_options()
        self.set_output_callbacks(self._standard_output_callback)
        self._wait_local_kernel_connection()
        self._load_modules_syms()
        self.reload()
        self._init_dbghelp_func()
        self._upgrade_driver()
    def _setup_driver_resource(self, dbgengmod, k32import):
        """Subclass hook (bitness-specific); not implemented on the base."""
        raise NotImplementedError("_setup_driver_resource")
    def _setup_name_imposture(self, dbgengmod, k32import):
        """Subclass hook (bitness-specific); not implemented on the base."""
        raise NotImplementedError("_setup_name_imposture")
# Change current process name via GetModuleFileNameW hook
    def _setup_windbg_imposture(self):
        """Find dbgeng.dll's kernel32 import table and hand it to the two
        subclass hooks that patch driver resource and process name."""
        dbgengmod = [i for i in windows.current_process.peb.modules if i.name == "dbgeng.dll"][0]
        k32import = dbgengmod.pe.imports['kernel32.dll']
        self._setup_driver_resource(dbgengmod, k32import)
        self._setup_name_imposture(dbgengmod, k32import)
    def _do_kernel_attach(self):
        """Impersonate windbg, then attach the client to the local kernel.

        Raises a WindowsError when AttachKernel returns a failing HRESULT.
        """
        self._setup_windbg_imposture()
        res = self.DebugClient.AttachKernel(DEBUG_ATTACH_LOCAL_KERNEL, None)
        if res:
            raise WinError(res)
    def _load_debug_dll(self):
        """Load the debug-engine DLLs (dbghelp, dbgeng, symsrv) from
        DEBUG_DLL_PATH and keep their module handles."""
        self.hmoduledbghelp = winproxy.LoadLibraryA(self.DEBUG_DLL_PATH + "dbghelp.dll")
        self.hmoduledbgeng = winproxy.LoadLibraryA(self.DEBUG_DLL_PATH + "dbgeng.dll")
        self.hmodulesymsrv = winproxy.LoadLibraryA(self.DEBUG_DLL_PATH + "symsrv.dll")
    def _do_debug_create(self):
        """Call dbgeng!DebugCreate and return the resulting IDebugClient."""
        DebugClient = IDebugClient(0)
        DebugCreateAddr = winproxy.GetProcAddress(self.hmoduledbgeng, "DebugCreate")
        DebugCreate = WINFUNCTYPE(HRESULT, PVOID, PVOID)(DebugCreateAddr)
        DebugCreate(IID_IDebugClient, byref(DebugClient))
        return DebugClient
    def _ask_other_interface(self):
        """QueryInterface the client for the DataSpaces2 / Symbols3 /
        Control interfaces and store them on self."""
        DebugClient = self.DebugClient
        self.DebugDataSpaces = IDebugDataSpaces2(0)
        self.DebugSymbols = IDebugSymbols3(0)
        self.DebugControl = IDebugControl(0)
        DebugClient.QueryInterface(IID_IDebugDataSpaces2, ctypes.byref(self.DebugDataSpaces))
        DebugClient.QueryInterface(IID_IDebugSymbols3, ctypes.byref(self.DebugSymbols))
        DebugClient.QueryInterface(IID_IDebugControl, ctypes.byref(self.DebugControl))
    def _wait_local_kernel_connection(self):
        """Block (infinite timeout) until the engine signals the session is
        ready, then report success."""
        self.DebugControl.WaitForEvent(0, 0xffffffff)
        return True
def _setup_symbols_options(self):
try:
symbol_path = os.environ['_NT_SYMBOL_PATH']
except KeyError:
symbol_path = self.DEFAULT_SYMBOL_PATH
self.DebugSymbols.SetSymbolPath(symbol_path)
self.DebugSymbols.SetSymbolOption(self.SYMBOL_OPT)
    def get_number_modules(self):
        """Get the number of loaded and unloaded modules

        :returns: Number of loaded, unloaded modules -- int, int
        """
        numModulesLoaded = DWORD(0)
        numModulesUnloaded = DWORD(0)
        self.DebugSymbols.GetNumberModules(byref(numModulesLoaded), byref(numModulesUnloaded))
        return (numModulesLoaded.value, numModulesUnloaded.value)
    def get_module_by_index(self, i):
        """Get the base of module number **i**"""
        currModuleBase = ULONG64(0)
        self.DebugSymbols.GetModuleByIndex(i, byref(currModuleBase))
        # trim: the engine sign-extends 32-bit kernel addresses to 64 bits
        return self.trim_ulong64_to_address(currModuleBase.value)
    def get_module_name_by_index(self, i):
        """Get the name of module number **i**

        :returns: (image name, module name, loaded image name) -- str tuple
        """
        currModuleBase = ULONG64(0)
        currModuleName = (c_char * 1024)()
        currImageName = (c_char * 1024)()
        currLoadedImageName = (c_char * 1024)()
        currModuleNameSize = DWORD(0)
        currImageNameSize = DWORD(0)
        currLoadedImageNameSize = DWORD(0)
        self.DebugSymbols.GetModuleByIndex(i, byref(currModuleBase))
        # Buffers are 1024 bytes; 1023 is passed, presumably to always leave
        # room for a terminating NUL -- TODO confirm.
        self.DebugSymbols.GetModuleNames(i, currModuleBase, byref(currImageName), 1023, byref(currImageNameSize),
            byref(currModuleName), 1023, byref(currModuleNameSize), byref(currLoadedImageName),
            1023, byref(currLoadedImageNameSize))
        return (currImageName.value, currModuleName.value, currLoadedImageName.value)
    def _load_modules_syms(self):
        """Force a symbol Reload for every currently loaded module."""
        currModuleName = (c_char * 1024)()
        currImageName = (c_char * 1024)()
        currLoadedImageName = (c_char * 1024)()
        currModuleNameSize = DWORD(0)
        currImageNameSize = DWORD(0)
        currLoadedImageNameSize = DWORD(0)
        currModuleBase = ULONG64(0)
        numModulesLoaded = DWORD(0)
        numModulesUnloaded = DWORD(0)
        self.DebugSymbols.GetNumberModules(byref(numModulesLoaded), byref(numModulesUnloaded))
        for i in range(numModulesLoaded.value):
            self.DebugSymbols.GetModuleByIndex(i, byref(currModuleBase))
            self.DebugSymbols.GetModuleNames(i, currModuleBase, byref(currImageName), 1023, byref(currImageNameSize),
                byref(currModuleName), 1023, byref(currModuleNameSize), byref(currLoadedImageName),
                1023, byref(currLoadedImageNameSize))
            # NOTE(review): this slice keeps the trailing NUL byte, while
            # get_modules() strips it with `- 1` -- confirm Reload accepts it.
            self.DebugSymbols.Reload(currModuleName[:currModuleNameSize.value])
# TODO: SymGetTypeInfo goto winfunc?
    def _init_dbghelp_func(self):
        """Bind dbghelp!SymGetTypeInfo directly through ctypes (this
        information is not exposed by the COM API)."""
        # We need to hack our way to some dbghelp functions
        # Some info are not reachable through COM API
        # 0xf0f0f0f0 is the magic handler used by dbgengine for dbghelp
        dbghelp = ctypes.windll.DbgHelp
        SymGetTypeInfoPrototype = WINFUNCTYPE(BOOL, HANDLE, DWORD64, ULONG, IMAGEHLP_SYMBOL_TYPE_INFO, PVOID)
        SymGetTypeInfoParams = ((1, 'hProcess'), (1, 'ModBase'), (1, 'TypeId'), (1, 'GetType'), (1, 'pInfo'))
        self.SymGetTypeInfo_ctypes = SymGetTypeInfoPrototype(("SymGetTypeInfo", dbghelp), SymGetTypeInfoParams)
# Internal helper
    def resolve_symbol(self, symbol):
        """| Return **symbol** if it's an :class:`int` else resolve it using :func:`get_symbol_offset`
        | Used by functions to either accept an :class:`int` or a windbg :class:`Symbol`

        Raises ValueError when the name cannot be resolved.
        """
        if isinstance(symbol, (int, long)):  # `long`: Python 2 only
            return symbol
        x = self.get_symbol_offset(symbol)
        if x is None:
            raise ValueError("Unknow symbol <{0}>".format(symbol))
        return x
    def resolve_type(self, imodule, itype):
        """| Return **imodule** and **itype** if they are an :class:`int` else
        | resolve them using respectively :func:`get_symbol_offset` and :func:`get_type_id`
        | Used by functions about types to either accept :class:`int` or windbg :class:`Symbol`

        Raises ValueError when the type name cannot be resolved.
        """
        module = self.resolve_symbol(imodule)
        if isinstance(itype, (int, long)):  # `long`: Python 2 only
            return self.expand_address_to_ulong64(module), itype
        try:
            type = self.get_type_id(module, itype)
        except WindowsError:
            raise ValueError("Unkown type: <{0}!{1}>".format(imodule, itype))
        return self.expand_address_to_ulong64(module), type
    def _get_kldbgdrv_handle(self):
        """Open (lazily, once) and cache a read/write handle to the
        kldbgdrv helper device."""
        if not hasattr(self, "_kldbgdrv_handle"):
            self._kldbgdrv_handle = windows.winproxy.CreateFileA("\\\\.\\kldbgdrv", GENERIC_READ | GENERIC_WRITE, FILE_SHARE_WRITE | FILE_SHARE_READ)
        return self._kldbgdrv_handle
# Actual Interface
    def execute(self, str, to_string=False):
        r"""| Execute a windbg command
        | if **to_string** is False, use the current output callback
        | (see :file:`example\\output_demo.py`)
        """
        # NOTE: the parameter is named `str` (shadows the builtin); kept
        # as-is because callers may pass it by keyword.
        if to_string:
            # Swap in the capture callback, remembering the current one.
            old_output = self._output_callback
            self._init_string_output_callback()
        self.DebugControl.Execute(0, str, 0)
        if to_string:
            # Restore the previous callback (default one when none was set).
            if old_output is None:
                old_output = self._standard_output_callback
            self.set_output_callbacks(old_output)
            return self._output_string
        return None
    def _standard_output_callback(self, x, y, msg):
        """Default output sink: print engine output unless self.quiet;
        always returns 0 (success)."""
        if not self.quiet:
            print msg,
        return 0
def _init_string_output_callback(self):
self._output_string = ""
self.set_output_callbacks(self._string_output_callback)
def _string_output_callback(self, x, y, msg):
self._output_string += msg
return 0
    def set_output_callbacks(self, callback):
        r"""| Register a new output callback, that must respect the interface of
        | :func:`IDebugOutputCallbacks::Output` `<https://msdn.microsoft.com/en-us/library/windows/hardware/ff550815%28v=vs.85%29.aspx>`_.
        | (see :file:`example\\output_demo.py`)
        """
        self._output_callback = callback
        my_idebugoutput_vtable = IDebugOutputCallbacksVtable.create_vtable(Output=callback)
        my_debugoutput_obj = ctypes.pointer(my_idebugoutput_vtable)
        # addressof(pointer-to-vtable) yields a COM-object-shaped pointer
        # (first field is the vtable pointer) -- presumably what the engine
        # expects here; confirm against the COM ABI.
        res = self.DebugClient.SetOutputCallbacks(ctypes.addressof(my_debugoutput_obj))
        # Need to keep reference to these object else our output callback will be
        # garbage collected leading to crash
        # Update self.keep_alive AFTER the call to SetOutputCallbacks because
        # SetOutputCallbacks may call methods of the old my_debugoutput_obj
        self.keep_alive = [my_idebugoutput_vtable, my_debugoutput_obj]
        return res
    def get_modules(self):
        """Return a list of (currModuleName, currImageName, currLoadedImageName)"""
        self.reload("")
        currModuleName = (c_char * 1024)()
        currImageName = (c_char * 1024)()
        currLoadedImageName = (c_char * 1024)()
        currModuleNameSize = DWORD(0)
        currImageNameSize = DWORD(0)
        currLoadedImageNameSize = DWORD(0)
        currModuleBase = ULONG64(0)
        numModulesLoaded = DWORD(0)
        numModulesUnloaded = DWORD(0)
        self.DebugSymbols.GetNumberModules(byref(numModulesLoaded), byref(numModulesUnloaded))
        res = []
        for i in range(numModulesLoaded.value):
            self.DebugSymbols.GetModuleByIndex(i, byref(currModuleBase))
            self.DebugSymbols.GetModuleNames(i, c_uint64(currModuleBase.value), byref(currImageName), 1023, byref(currImageNameSize),
                byref(currModuleName), 1023, byref(currModuleNameSize), byref(currLoadedImageName),
                1023, byref(currLoadedImageNameSize))
            # Removing trailing \x00 (the reported sizes include it)
            res.append((currModuleName[:currModuleNameSize.value - 1], currImageName[:currImageNameSize.value - 1], currLoadedImageName[:currLoadedImageNameSize.value - 1]))
        return res
def reload(self, module_to_reload=""):
"""Reload a module or all modules if **module_to_reload** is not specified"""
return self.DebugSymbols.Reload(module_to_reload)
    def detach(self):
        """End the Debugging session and detach the COM interface"""
        self.DebugClient.EndSession(DEBUG_END_PASSIVE)
        self.DebugClient.DetachProcesses()
        # Release each interface and drop our references so the COM objects
        # can actually be destroyed.
        self.DebugClient.Release()
        del self.DebugClient
        self.DebugSymbols.Release()
        del self.DebugSymbols
        self.DebugControl.Release()
        del self.DebugControl
        self.DebugDataSpaces.Release()
        del self.DebugDataSpaces
    def current_processor(self):
        """:returns: The number of the processor we are currently on -- :class:`int`"""
        # Thin wrapper over Win32 GetCurrentProcessorNumber via winproxy.
        return windows.winproxy.GetCurrentProcessorNumber()
    def set_current_processor(self, proc_nb):
        """Set the processor we want to be executed on

        :param proc_nb: the number of the processor
        :type proc_nb: int"""
        # Pin the calling thread via a single-bit affinity mask.
        return windows.winproxy.SetThreadAffinityMask(dwThreadAffinityMask=(1 << proc_nb))
    def number_processor(self):
        """:returns: The number of processors on the machine -- :class:`int`"""
        # Reads the kernel's own count from nt!KeNumberProcessors.  Callers
        # that iterate over the processors should use range() on this value.
        return self.read_dword("nt!KeNumberProcessors")
def on_each_processor(self):
"""Iter execution on every processor
:yield: current processor number"""
for nb_proc in self.number_processor():
self.set_current_processor(nb_proc)
yield nb_proc
# type stuff
    @experimental
    def get_type(self, module, typeid):
        """Resolve (module, typeid) and wrap them in a DbgEngType helper."""
        module, typeid = self.resolve_type(module, typeid)
        return DbgEngType(module, typeid, self)
    def get_type_id(self, module, type_name):
        """Get the typeid of a type

        :param module: the module containing the type
        :type module: Symbol
        :param type_name: the name of the type
        :type type_name: str
        :rtype: int"""
        module = self.resolve_symbol(module)
        res = ULONG(0)
        # GetTypeId wants the module base as a (sign-extended) ULONG64.
        self.DebugSymbols.GetTypeId(self.expand_address_to_ulong64(module), type_name, byref(res))
        return res.value
    def get_symbol_type_id(self, symtype):
        """Get the module and typeid of a symbol

        :param symtype: the name of the type
        :type symtype: str
        :rtype: int, int -- module ID, type ID"""
        typeid = ULONG(0)
        module = ULONG64(0)
        self.DebugSymbols.GetSymbolTypeId(symtype, byref(typeid), byref(module))
        return (module.value, typeid.value)
    def get_field_offset(self, module, typeid, field):
        """Get the offset of a field in a type

        :rtype: int"""
        module, typeid = self.resolve_type(module, typeid)
        res = ULONG(0)
        self.DebugSymbols.GetFieldOffset(module, typeid, field, byref(res))
        return res.value
    def get_type_name(self, module, typeid):
        """Get the name of a type

        :rtype: str"""
        module, typeid = self.resolve_type(module, typeid)
        buffer_size = 1024
        buffer = (c_char * buffer_size)()
        name_size = ULONG(0)
        self.DebugSymbols.GetTypeName(module, typeid, byref(buffer), buffer_size, byref(name_size))
        res = buffer[:name_size.value]
        # Strip the trailing NUL if present.  NOTE(review): res[-1] raises
        # IndexError when the returned name is empty.
        if res[-1] == "\x00":
            res = res[:-1]
        return res
    def get_type_size(self, module, typeid):
        """Get the size of a type

        :rtype: int"""
        module, typeid = self.resolve_type(module, typeid)
        res = ULONG(0)
        self.DebugSymbols.GetTypeSize(module, typeid, byref(res))
        return res.value
    def get_field_name(self, module, typeid, fieldindex):
        """Get the name of a field in a type

        :param fieldindex: Index of the field to retrieve
        :type fieldindex: int
        :rtype: str"""
        module, typeid = self.resolve_type(module, typeid)
        buffer_size = 1024
        buffer = (c_char * buffer_size)()
        name_size = ULONG(0)
        self.DebugSymbols.GetFieldName(module, typeid, fieldindex, byref(buffer), buffer_size, byref(name_size))
        res = buffer[:name_size.value]
        # Strip the trailing NUL if present (same caveat as get_type_name).
        if res[-1] == "\x00":
            res = res[:-1]
        return res
    def get_field_type_and_offset(self, module, typeid, fieldname):
        """Get the type and the offset of a field in a type

        :param fieldname: The name of the field we want
        :type fieldname: str
        :rtype: int, int -- type ID, field offset"""
        module, typeid = self.resolve_type(module, typeid)
        fieldtypeid = ULONG(0)
        fieldoffset = ULONG(0)
        self.DebugSymbols.GetFieldTypeAndOffset(module, typeid, fieldname, byref(fieldtypeid), byref(fieldoffset))
        return fieldtypeid.value, fieldoffset.value
    # Custom type functions: the COM API does not return all the information
# we may need to directly call Dbghelp
    @experimental
    def get_all_field_generator(self, module, typeid):
        """Yield the name of every field of (module, typeid), probing
        increasing field indexes until GetFieldName fails."""
        for i in itertools.count(0):
            try:
                name = self.get_field_name(module, typeid, i)
            except WindowsError:
                if i == 0: # Empty struct: Error in call args
                    raise
                return
            yield name
    @experimental
    def get_all_field(self, module, typeid):
        """Return the list of all field names of (module, typeid)."""
        return list(self.get_all_field_generator(module, typeid))
    @experimental
    def get_all_field_type_and_offset(self, module, typeid):
        """Return [(field name, field type id, field offset), ...] for every
        field of (module, typeid)."""
        fields = self.get_all_field(module, typeid)
        return [(f,) + self.get_field_type_and_offset(module, typeid, f) for f in fields]
# def old_tst(self, module, typeid):
# for name, type, offset in self.get_all_field_type_and_offset(module, typeid):
# type_name = self.get_type_name(module, type)
# #print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# if type_name.endswith("[]"):
# try:
# sub_module, sub_type = self.resolve_type(module, type_name[:-2]) # Get the subtype if not a basic C type
# size_of_elt = self.get_type_size(sub_module, sub_type)
# is_base_type = False
# except ValueError: # If it's an array of basic type
# sub_module, sub_type = self.get_symbol_type_id(type_name[:-2])
# size_of_elt = self.get_type_size(sub_module, sub_type)
# is_base_type = True
# nb_elt = self.get_type_size(module, type) / size_of_elt
# print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# print "ARRAY of {0} | {1}".format(nb_elt, "BASE_TYPE" if is_base_type else "NO BASE TYPE")
#
# if not is_base_type:
# print("PARSING {0}".format(type_name[:-2]))
# self.tst(sub_module, sub_type)
# # use kdbg.get_symbol_type_id("nt!_KPRCB.HalReserved[0]") ?
# # or look at ctypes.windll.Dbghelp.SymGetTypeInfo ?
# def tst(self, module, typeid):
# "Create Ctypes"
# next_offset = 0
# last_offset = -1
# for name, type, offset in self.get_all_field_type_and_offset(module, typeid):
# type_name = self.get_type_name(module, type)
#
# if offset == 0x2dec:
# print("----")
# print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# x = self.SymGetTypeInfo(module, type, TI_GET_BITPOSITION)
# print("TI_GET_BITPOSITION -> {0}".format(x))
# x = self.SymGetTypeInfo(module, type, TI_GET_LENGTH)
# print("TI_GET_LENGTH -> {0}".format(x))
#
# #if type_name.endswith("[]"):
# # print("-------")
# # print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# # sub_type = self.SymGetTypeInfo(module, type, TI_GET_TYPE)
# # print("ARRAY OF {0}".format(self.get_type_name(module, sub_type)))
#
# x = self.SymGetTypeInfo(module, type, TI_GET_BITPOSITION)
# #print(x)
# if x:
# print("-------")
# print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# print("BITFIELD POS {0}".format(x))
#
#
# # HANDLE ARRAY
# #if type_name.endswith("[]"):
# # try:
# # sub_module, sub_type = self.resolve_type(module, type_name[:-2]) # Get the subtype if not a basic C type
# # size_of_elt = self.get_type_size(sub_module, sub_type)
# # is_base_type = False
# # except ValueError: # If it's an array of basic type
# # sub_module, sub_type = self.get_symbol_type_id(type_name[:-2])
# # size_of_elt = self.get_type_size(sub_module, sub_type)
# # is_base_type = True
# # nb_elt = self.get_type_size(module, type) / size_of_elt
# # print("+{0} {1}: {2}({3})".format(hex(offset), name, type_name, type))
# # print "ARRAY of {0} | {1}".format(nb_elt, "BASE_TYPE" if is_base_type else "NO BASE TYPE")
# # # use kdbg.get_symbol_type_id("nt!_KPRCB.HalReserved[0]") ?
# # # or look at ctypes.windll.Dbghelp.SymGetTypeInfo ?
# Low level DbgHelp queries
    @experimental
    def SymGetTypeInfo(self, module, typeid, GetType, ires=None):
        """Low-level dbghelp!SymGetTypeInfo call.

        :param ires: optional pre-allocated ctypes result storage; when None
                     a result type is chosen from GetType (DWORD by default)
                     and its .value is returned instead of the raw object
        """
        # If not: result is a DWORD
        res = ires
        if res is None:
            if GetType == TI_FINDCHILDREN or GetType == TI_GET_VALUE:
                raise NotImplementedError("SymGetTypeInfo with GetType == TI_FINDCHILDREN need struct passed as argument")
            result_type = {
                TI_GET_SYMNAME: c_wchar_p, TI_GET_LENGTH: ULONG64,
                TI_GET_ADDRESS: ULONG64, TI_GTIEX_REQS_VALID: ULONG64,
            }
            res = result_type.get(GetType, DWORD)()
        module, typeid = self.resolve_type(module, typeid)
        # 0xf0f0f0f0 is the magic dbghelp process handle used by dbgeng.
        self.SymGetTypeInfo_ctypes(0xf0f0f0f0, module, typeid, GetType, byref(res))
        if ires is None:
            return res.value
        return res
    @experimental
    def get_number_chid(self, module, typeid):
        """Number of children of a type, via TI_GET_CHILDRENCOUNT.
        (sic: 'chid' kept for API compatibility)"""
        module, typeid = self.resolve_type(module, typeid)
        return self.SymGetTypeInfo(module, typeid, TI_GET_CHILDRENCOUNT)
@experimental
def get_childs_types(self, module, typeid):
nb_childs = self.get_number_chid(module, typeid)
class res_struct(Structure):
_fields_ = [("Count", ULONG), ("Start", ULONG), ("Types", (ULONG * nb_childs))]
_fields_ = [("Count", ULONG), ("Start", ULONG), ("Types", (ULONG * nb_childs))]
res = res_struct()
res.Count = nb_childs
self.SymGetTypeInfo(module, typeid, TI_FINDCHILDREN, ires=res)
return res
    def trim_ulong64_to_address(self, addr):
        """Convert a ULONG64 returned by the debug API to an actual address.

        | Problem is that in a 32bits kernel the kernel address are bit expended
        | :file:`nt` in 32bits kernel would not be :file:`0x8xxxxxxx` but :file:`0xffffffff8xxxxxxx`
        """
        # Overridden in the bitness-specific subclasses (LocalKernelDebugger32/64).
        raise NotImplementedError("bitness dependent")
    def expand_address_to_ulong64(self, addr):
        """Convert a symbol address to the ULONG64 form requested by the API.

        | Problem is that in a 32bits kernel the kernel address are bit expended
        | :file:`nt` in 32bits kernel would not be :file:`0x8xxxxxxx` but :file:`0xffffffff8xxxxxxx`
        """
        # Overridden in the bitness-specific subclasses (LocalKernelDebugger32/64).
        raise NotImplementedError("bitness dependent")
def get_symbol_offset(self, name):
"""Get the address of a symbol
:param name: Name of the symbol
:type name: str
:rtype: int"""
SymbolLocation = ctypes.c_uint64(0)
try:
self.DebugSymbols.GetOffsetByName(name, ctypes.byref(SymbolLocation))
except WindowsError:
return None
return self.trim_ulong64_to_address(SymbolLocation.value)
def get_symbol(self, addr):
"""Get the symbol and displacement of an address
:param addr: The address to lookup
:type addr: int
:rtype: str, int -- symbol name, displacement"""
addr = self.expand_address_to_ulong64(addr)
buffer_size = 1024
buffer = (c_char * buffer_size)()
name_size = ULONG()
displacement = ULONG64()
try:
self.DebugSymbols.GetNameByOffset(addr, byref(buffer), buffer_size, byref(name_size), byref(displacement))
except WindowsError as e:
if (e.winerror & 0xffffffff) == E_FAIL:
return (None, None)
return (buffer.value, displacement.value)
def symbol_match(self, symbol_pattern):
"""| <generator>
| List of symbol (name, address) that match a symbol pattern
:param symbol_pattern: The symbol pattern (nt!Create*, *!CreateFile, ..)
:type symbol_pattern: str
:yield: str, int -- symbol name, symbol address
"""
search_handle = ULONG64()
buffer_size = 1024
buffer = (c_char * buffer_size)()
match_size = ULONG()
symbol_addr = ULONG64()
self.DebugSymbols.StartSymbolMatch(symbol_pattern, byref(search_handle))
while True:
try:
self.DebugSymbols.GetNextSymbolMatch(search_handle, byref(buffer), buffer_size, byref(match_size), byref(symbol_addr))
except WindowsError as e:
if (e.winerror & 0xffffffff) == S_FALSE:
buffer_size = buffer_size
buffer = (c_char * buffer_size)()
continue
if (e.winerror & 0xffffffff) == E_NOINTERFACE:
self.DebugSymbols.EndSymbolMatch(search_handle)
return
yield (buffer.value, self.trim_ulong64_to_address(symbol_addr.value))
def read_virtual_memory(self, addr, size):
"""Read the memory at a given virtual address
:param addr: The Symbol to read from
:type addr: Symbol
:param size: The size to read
:type size: int
:returns: str
"""
addr = self.resolve_symbol(addr)
buffer = (c_char * size)()
read = DWORD(0)
self.DebugDataSpaces.ReadVirtual(c_uint64(addr), buffer, size, byref(read))
return buffer[0:read.value]
def write_virtual_memory(self, addr, data):
"""Write data to a given virtual address
:param addr: The Symbol to write to
:type addr: Symbol
:param size: The Data to write
:type size: str or ctypes.Structure
:returns: the size written -- :class:`int`
"""
try:
# ctypes structure
size = ctypes.sizeof(data)
buffer = ctypes.byref(data)
except TypeError:
# buffer
size = len(data)
buffer = data
written = ULONG(0)
addr = self.resolve_symbol(addr)
self.DebugDataSpaces.WriteVirtual(c_uint64(addr), buffer, size, byref(written))
return written.value
def write_pfv_memory(self, addr, data):
"""Write physical memory from virtual address
Exactly the same as write_physical(virtual_to_physical(addr), data)
"""
return self.write_physical_memory(self.virtual_to_physical(addr), data)
    def read_virtual_memory_into(self, addr, struct):
        """Read the memory at a given virtual address into a ctypes Structure.

        :param addr: The Symbol to read from
        :type addr: Symbol
        :param struct: The structure to fill
        :type struct: ctypes.Structure
        :returns: the size read -- :class:`int`
        """
        addr = self.resolve_symbol(addr)
        size = ctypes.sizeof(struct)
        read = ULONG(0)
        self.DebugDataSpaces.ReadVirtual(c_uint64(addr), byref(struct), size, byref(read))
        return read.value
def read_byte(self, addr):
"""Read a byte from virtual memory"""
sizeof_byte = sizeof(BYTE)
raw_data = self.read_virtual_memory(addr, sizeof_byte)
return struct.unpack("<B", raw_data)[0]
def read_byte_p(self, addr):
"""Read a byte from physical memory"""
sizeof_byte = sizeof(BYTE)
raw_data = self.read_physical_memory(addr, sizeof_byte)
return struct.unpack("<B", raw_data)[0]
def read_word(self, addr):
"""Read a word from virtual memory"""
sizeof_word = sizeof(WORD)
raw_data = self.read_virtual_memory(addr, sizeof_word)
return struct.unpack("<H", raw_data)[0]
def read_word_p(self, addr):
"""Read a word from physical memory"""
sizeof_word = sizeof(WORD)
raw_data = self.read_physical_memory(addr, sizeof_word)
return struct.unpack("<H", raw_data)[0]
def read_dword(self, addr):
"""Read a dword from virtual memory"""
sizeof_dword = ctypes.sizeof(DWORD)
raw_data = self.read_virtual_memory(addr, sizeof_dword)
return struct.unpack("<I", raw_data)[0]
def read_dword_p(self, addr):
"""Read a dword from physical memory"""
sizeof_dword = ctypes.sizeof(DWORD)
raw_data = self.read_physical_memory(addr, sizeof_dword)
return struct.unpack("<I", raw_data)[0]
def read_qword(self, addr):
"""Read a qword from virtual memory"""
sizeof_qword = sizeof(ULONG64)
raw_data = self.read_virtual_memory(addr, sizeof_qword)
return struct.unpack("<Q", raw_data)[0]
def read_qword_p(self, addr):
"""Read a qword from physical memory"""
sizeof_qword = sizeof(ULONG64)
raw_data = self.read_physical_memory(addr, sizeof_qword)
return struct.unpack("<Q", raw_data)[0]
def write_byte(self, addr, byte):
"""Read a byte to virtual memory"""
return self.write_virtual_memory(addr, struct.pack("<B", byte))
def write_byte_p(self, addr, byte):
"""write a byte to physical memory"""
return self.write_physical_memory(addr, struct.pack("<B", byte))
def write_word(self, addr, word):
"""write a word to virtual memory"""
return self.write_virtual_memory(addr, struct.pack("<H", word))
def write_word_p(self, addr, word):
"""write a word to physical memory"""
return self.write_physical_memory(addr, struct.pack("<H", word))
def write_dword(self, addr, dword):
"""write a dword to virtual memory"""
return self.write_virtual_memory(addr, struct.pack("<I", dword))
def write_dword_p(self, addr, dword):
"""write a dword to physical memory"""
return self.write_physical_memory(addr, struct.pack("<I", dword))
def write_qword(self, addr, qword):
"""write a qword to virtual memory"""
return self.write_virtual_memory(addr, struct.pack("<Q", qword))
def write_qword_p(self, addr, qword):
"""write a qword to physical memory"""
return self.write_physical_memory(addr, struct.pack("<Q", qword))
    def read_ptr(self, addr):
        """Read a pointer from virtual memory

        Implemented by the bitness-specific subclasses (dword on x86, qword on x64).
        """
        raise NotImplementedError("bitness dependent")
    def read_ptr_p(self, addr):
        """Read a pointer from physical memory

        Implemented by the bitness-specific subclasses (dword on x86, qword on x64).
        """
        raise NotImplementedError("bitness dependent")
    def write_ptr(self, addr, value):
        """Write a pointer to virtual memory

        Implemented by the bitness-specific subclasses (dword on x86, qword on x64).
        """
        raise NotImplementedError("bitness dependent")
    def write_ptr_p(self, addr, value):
        """Write a pointer to physical memory

        Implemented by the bitness-specific subclasses (dword on x86, qword on x64).
        """
        raise NotImplementedError("bitness dependent")
def write_msr(self, msr_id, value):
"""Write a Model Specific Register"""
return self.DebugDataSpaces.WriteMsr(msr_id, value)
def read_msr(self, msr_id):
"""Read a Model Specific Register"""
msr_value = ULONG64()
self.DebugDataSpaces.ReadMsr(msr_id, byref(msr_value))
return msr_value.value
def virtual_to_physical(self, virtual):
"""Get the physical address of a virtual one"""
virtual = self.resolve_symbol(virtual)
res = ULONG64(0)
self.DebugDataSpaces.VirtualToPhysical(c_uint64(virtual), byref(res))
return res.value
def read_physical_memory(self, addr, size):
"""Read the physical memory at a given address
:param addr: The Symbol to read from
:type addr: Symbol
:param size: The size to read
:type size: int
:returns: :class:`str`
"""
buffer = (c_char * size)()
read = DWORD(0)
self.DebugDataSpaces.ReadPhysical(c_uint64(addr), buffer, size, byref(read))
return buffer[0:read.value]
def write_physical_memory(self, addr, data):
"""Write data to a given physical address
:param addr: The Symbol to write to
:type addr: Symbol
:param size: The Data to write
:type size: str or ctypes.Structure
:returns: the size written -- :class:`int`
"""
try:
# ctypes structure
size = ctypes.sizeof(data)
buffer = ctypes.byref(data)
except TypeError:
# buffer
size = len(data)
buffer = data
written = ULONG(0)
self.DebugDataSpaces.WritePhysical(c_uint64(addr), buffer, size, byref(written))
return written.value
    def read_processor_system_data(self, processor, type):
        """| Returns a :class:`DEBUG_PROCESSOR_IDENTIFICATION_X86` if type is :class:`DEBUG_DATA_PROCESSOR_IDENTIFICATION`
        | else returns an :class:`int`

        (see :func:`ReadProcessorSystemData` `<https://msdn.microsoft.com/en-us/library/windows/hardware/ff554326%28v=vs.85%29.aspx>`_.)
        """
        # Pick an output buffer whose layout matches what the API writes for
        # this data type; everything but the identification data is scalar.
        if type == DEBUG_DATA_PROCESSOR_IDENTIFICATION:
            buffer = DEBUG_PROCESSOR_IDENTIFICATION_ALL()
        elif type == DEBUG_DATA_PROCESSOR_SPEED:
            buffer = ULONG(0)
        else:
            buffer = ULONG64(0)
        data_size = ULONG(0)
        self.DebugDataSpaces.ReadProcessorSystemData(processor, type, byref(buffer), sizeof(buffer), byref(data_size))
        # Unwrap scalar results; the identification struct is returned as-is.
        if type != DEBUG_DATA_PROCESSOR_IDENTIFICATION:
            buffer = buffer.value
        return buffer
def read_bus_data(self, datatype, busnumber, slot, offset, size):
r"""| Read on bus data, only current known use is to read on the PCI bus.
| (see :file:`example\\simple_pci_exploration.py`)
"""
buffer = (c_char * size)()
read = ULONG(0)
self.DebugDataSpaces.ReadBusData(datatype, busnumber, slot, offset, buffer, size, byref(read))
return buffer[0:read.value]
def write_bus_data(self, datatype, busnumber, slot, offset, data):
r"""| Write on bus data, only current known use is to write on the PCI bus.
| (see :file:`example\\simple_pci_exploration.py`)
"""
size = len(data)
written = ULONG(0)
self.DebugDataSpaces.ReadBusData(datatype, busnumber, slot, offset, buffer, size, byref(written))
return written.value
def read_io(self, port, size):
"""| Perform an IN operation
| might be subject to some restrictions
| (see :file:`README.md` :file:`do_in | do_out VS read_io | write_io`)
:param port: port to read
:param size: size to read
:type port: int
:type size: int - 1, 2 or 4
:returns: the value read -- :class:`int`
"""
InterfaceType = 1 # Isa
BusNumber = 0
AddressSpace = 1
Buffer = (c_char * size)()
BytesRead = ULONG()
self.DebugDataSpaces.ReadIo(InterfaceType, BusNumber, AddressSpace, port, Buffer, size, byref(BytesRead))
format = {1: '<B', 2: '<H', 4: '<I'}[size]
return struct.unpack(format, Buffer[:BytesRead.value])[0]
def write_io(self, port, value, size=None):
"""| Perform an OUT operation
| might be subject to some restrictions
| (see :file:`README.md` :file:`do_in | do_out VS read_io | write_io`)
:param port: port to write
:param size: size to write
:type port: int
:type size: int - 1, 2 or 4
:returns: the number of bytes written -- :class:`int`
"""
InterfaceType = 1 # Isa
BusNumber = 0
AddressSpace = 1
if size is None:
size = len(value)
Buffer = value
else:
format = {1: '<B', 2: '<H', 4: '<I'}[size]
Buffer = struct.pack(format, value)
BytesWritten = ULONG()
self.DebugDataSpaces.WriteIo(InterfaceType, BusNumber, AddressSpace, port, Buffer, size, byref(BytesWritten))
return BytesWritten.value
    @require_upgraded_driver
    def kcall(self, target, *args):
        """| Call target in kernel mode with given arguments.
        | KCall respect the calling convention but YOU must
        | pass the correct number of argument or the kernel
        | will likely crash.
        | (see :func:`map_page_to_userland`) for an example

        :param target: the Symbol to call
        :type target: Symbol
        :param args: the arguments of the call
        :type args: list of int
        :returns: :class:`int`
        """
        # Overridden by the bitness-specific subclasses (32/64-bit argument packing).
        raise NotImplementedError("bitness dependent")
# do_in | do_out
# There is already a COM API: WriteIo and ReadIo but it seems that
# these API check for some alignment in the port and the size.
# A ReadIo(size=4) must be on a port aligned on 4.
# It may caused by a bad call from use in write_io | read_io
# Anyway we implemented our own bypass in upgrade driver
    @require_upgraded_driver
    def do_in(self, port, size):
        """| Perform an IN operation

        :param port: port to read
        :param size: size to read
        :type port: int
        :type size: int - 1, 2 or 4
        :returns: the value read -- :class:`int`
        """
        # Overridden by the bitness-specific subclasses (upgraded-driver IOCTL).
        raise NotImplementedError("bitness dependent")
    @require_upgraded_driver
    def do_out(self, port, value, size):
        """| Perform an OUT operation

        :param port: port to write
        :param size: size to write
        :type port: int
        :type size: int - 1, 2 or 4
        :returns: the number of bytes written -- :class:`int`
        """
        # Overridden by the bitness-specific subclasses (upgraded-driver IOCTL).
        raise NotImplementedError("bitness dependent")
    @require_upgraded_driver
    def alloc_memory(self, size=0x1000):
        """Allocate **size** of NonPaged kernel memory"""
        # Overridden by the bitness-specific subclasses (upgraded-driver IOCTL).
        raise NotImplementedError("bitness dependent")
    @require_upgraded_driver
    def map_page_to_userland(self, virtual_addr, size):
        """Map **size** bytes of kernel memory **virtual_addr** in the current address space

        :param virtual_addr: kernel virtual addr
        :param size: size to map
        :type virtual_addr: int
        :type size: int
        :returns: address of the page in current process -- :class:`int`
        """
        # Check if all exports are known before calling everything
        IoAllocateMdl = self.resolve_symbol("IoAllocateMdl")
        MmBuildMdlForNonPagedPool = self.resolve_symbol("MmBuildMdlForNonPagedPool")
        MmMapLockedPagesSpecifyCache = self.resolve_symbol("MmMapLockedPagesSpecifyCache")
        # Build an MDL describing the kernel pages, then map them into user mode.
        mdl = self.kcall(IoAllocateMdl, virtual_addr, size, False, False, None)
        self.kcall(MmBuildMdlForNonPagedPool, mdl)
        mapped_addr = self.kcall(MmMapLockedPagesSpecifyCache, mdl, UserMode, MmNonCached, None, False, NormalPagePriority)
        return mapped_addr
class LocalKernelDebugger32(LocalKernelDebuggerBase):
    """Local kernel debugger specialization for a 32-bit (x86) kernel."""
    DEBUG_DLL_PATH = os.path.join(realpath(dirname(__file__)), "bin\\DBGDLL\\")
    DRIVER_FILENAME = os.path.join(realpath(dirname(__file__)), "bin\\windbg_driver_x86.sys")
    DRIVER_RESOURCE = resource_emulation.Ressource(DRIVER_FILENAME, 0x7777, 0x4444)
    SYMBOL_OPT = (SYMOPT_NO_IMAGE_SEARCH + SYMOPT_AUTO_PUBLICS + SYMOPT_FAIL_CRITICAL_ERRORS +
                  SYMOPT_OMAP_FIND_NEAREST + SYMOPT_LOAD_LINES + SYMOPT_DEFERRED_LOADS +
                  SYMOPT_UNDNAME + SYMOPT_CASE_INSENSITIVE)
    # In our 32bits dll, GetModuleFileNameW and FindResourceW are not in IAT
    # There is a safe and secure lazy resolution, so let's just hook this jump table
    GetModuleFileNameW_addr_jump_offset = 0x3374bc
    FindResourceW_addr_jump_offset = 0x3374a0
    # read_ptr and write_ptr real implementation (bitness dependant)
    read_ptr = LocalKernelDebuggerBase.read_dword
    read_ptr_p = LocalKernelDebuggerBase.read_dword_p
    write_ptr = LocalKernelDebuggerBase.write_dword
    write_ptr_p = LocalKernelDebuggerBase.write_dword_p
    def expand_address_to_ulong64(self, addr):
        """Sign-extend a 32-bit kernel address to the ULONG64 form the API expects."""
        if addr is None:
            return None
        # bit expansion: replicate the high bit (bit 31) into the upper dword
        return (0xFFFFFFFF00000000 * (addr >> 31)) | addr
    def trim_ulong64_to_address(self, addr):
        """Truncate a sign-extended ULONG64 back to a 32-bit address."""
        if addr is None:
            return None
        return addr & 0xffffffff
    # Setup DRIVER_RESOURCE as a resource using hooks
    def _setup_driver_resource(self, dbgengmod, k32import):
        """Hook the resource APIs so our driver file is served as an embedded resource."""
        SizeofResourceIAT = [x for x in k32import if x.name == "SizeofResource"][0]
        LoadResourceIAT = [x for x in k32import if x.name == "LoadResource"][0]
        LockResourceIAT = [x for x in k32import if x.name == "LockResource"][0]
        # FindResourceW is lazily resolved (not in the IAT): hook its jump-table slot.
        FindResourceW_addr_jump = dbgengmod.DllBase + self.FindResourceW_addr_jump_offset
        DummyFindResourceWIAT = DummyIATEntry.create(FindResourceW_addr_jump, "kernel32.dll", "FindResourceW")
        # Add our driver to emulated resources
        resource_emulation.resource_list.append(self.DRIVER_RESOURCE)
        # Setup Resource emulation into dbgeng.dll
        DummyFindResourceWIAT.set_hook(resource_emulation.FindResourceWHook)
        SizeofResourceIAT.set_hook(resource_emulation.SizeofResourceHook)
        LoadResourceIAT.set_hook(resource_emulation.LoadResourceHook)
        LockResourceIAT.set_hook(resource_emulation.LockResourceHook)
    def _setup_name_imposture(self, dbgengmod, k32import):
        """Hook GetModuleFileNameW so dbgeng believes it runs inside WinDBG."""
        GetModuleFileNameW_addr_jump = dbgengmod.DllBase + self.GetModuleFileNameW_addr_jump_offset
        DummyGetModuleFileNameWIAT = DummyIATEntry.create(GetModuleFileNameW_addr_jump, "kernel32.dll", "GetModuleFileNameW")
        DummyGetModuleFileNameWIAT.set_hook(EmulateWinDBGName)
    # Driver upgrade stuff
    def _upgrade_driver(self):
        """Install the upgraded driver that provides the DU_* IOCTLs."""
        self.upgrader = driver_upgrade.DriverUpgrader32(self)
        self.upgrader.upgrade_driver()
    # upgraded driver API
    @require_upgraded_driver
    def kcall(self, target, *args):
        """Call target in kernel mode with given arguments (32-bit packing)."""
        target = self.resolve_symbol(target)
        # None arguments are passed as NULL (0).
        args = [arg if arg is not None else 0 for arg in args]
        buffer = struct.pack("<" + "I" * (len(args) + 1), target, *args)
        h = self._get_kldbgdrv_handle()
        res = DWORD(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_KCALL_IOCTL, buffer, len(buffer), byref(res), ctypes.sizeof(res))
        return res.value
    @require_upgraded_driver
    def do_in(self, port, size):
        """Perform IN instruction in kernel mode"""
        if size not in [1, 2, 4]:
            raise ValueError("Invalid IN size: {0}".format(size))
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<II", size, port)
        res = DWORD(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_IN_IOCTL, buffer, len(buffer), byref(res), ctypes.sizeof(res))
        return res.value
    @require_upgraded_driver
    def do_out(self, port, value, size):
        """Perform OUT instruction in kernel mode"""
        if size not in [1, 2, 4]:
            raise ValueError("Invalid OUT size: {0}".format(size))
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<III", size, port, value)
        windows.winproxy.DeviceIoControl(h, DU_OUT_IOCTL, buffer, len(buffer), 0, 0)
        return None
    @require_upgraded_driver
    def alloc_memory(self, size=0x1000, type=0, tag=0x45544942):
        """Allocation <size> of NonPaged kernel memory"""
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<III", type, size, tag)
        res = DWORD(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_MEMALLOC_IOCTL, buffer, len(buffer), byref(res), 4)
        return res.value
class LocalKernelDebugger64(LocalKernelDebuggerBase):
    """Local kernel debugger specialization for a 64-bit (x64) kernel."""
    DEBUG_DLL_PATH = os.path.join(realpath(dirname(__file__)), "bin\\DBGDLL64\\")
    DRIVER_FILENAME = os.path.join(realpath(dirname(__file__)), "bin\\windbg_driver_x64.sys")
    DRIVER_RESOURCE = resource_emulation.Ressource(DRIVER_FILENAME, 0x7777, 0x4444)
    SYMBOL_OPT = (SYMOPT_NO_IMAGE_SEARCH + SYMOPT_AUTO_PUBLICS + SYMOPT_FAIL_CRITICAL_ERRORS +
                  SYMOPT_OMAP_FIND_NEAREST + SYMOPT_LOAD_LINES + SYMOPT_DEFERRED_LOADS +
                  SYMOPT_UNDNAME + SYMOPT_CASE_INSENSITIVE)
    # read_ptr and write_ptr real implementation (pointers are qwords on x64)
    read_ptr = LocalKernelDebuggerBase.read_qword
    read_ptr_p = LocalKernelDebuggerBase.read_qword_p
    write_ptr = LocalKernelDebuggerBase.write_qword
    write_ptr_p = LocalKernelDebuggerBase.write_qword_p
    # Setup DRIVER_RESOURCE as a resource using hooks
    def _setup_driver_resource(self, dbgengmod, k32import):
        """Hook the resource APIs so our driver file is served as an embedded resource."""
        SizeofResourceIAT = [x for x in k32import if x.name == "SizeofResource"][0]
        LoadResourceIAT = [x for x in k32import if x.name == "LoadResource"][0]
        LockResourceIAT = [x for x in k32import if x.name == "LockResource"][0]
        FindResourceWIAT = [x for x in k32import if x.name == "FindResourceW"][0]
        # Add our driver to emulated resources
        resource_emulation.resource_list.append(self.DRIVER_RESOURCE)
        # Setup Resource emulation into dbgeng.dll
        FindResourceWIAT.set_hook(resource_emulation.FindResourceWHook)
        SizeofResourceIAT.set_hook(resource_emulation.SizeofResourceHook)
        LoadResourceIAT.set_hook(resource_emulation.LoadResourceHook)
        LockResourceIAT.set_hook(resource_emulation.LockResourceHook)
    def _setup_name_imposture(self, dbgengmod, k32import):
        """Hook GetModuleFileNameW so dbgeng believes it runs inside WinDBG."""
        GetModuleFileNameWIAT = [x for x in k32import if x.name == "GetModuleFileNameW"][0]
        GetModuleFileNameWIAT.set_hook(EmulateWinDBGName)
    def _upgrade_driver(self):
        """Install the upgraded driver that provides the DU_* IOCTLs."""
        self.upgrader = driver_upgrade.DriverUpgrader64(self)
        self.upgrader.upgrade_driver()
    def expand_address_to_ulong64(self, addr):
        # 64-bit addresses already fit in a ULONG64: identity.
        return addr
    def trim_ulong64_to_address(self, addr):
        # 64-bit addresses already fit in a ULONG64: identity.
        return addr
    @require_upgraded_driver
    def alloc_memory(self, size=0x1000, type=0, tag=0x45544942):
        """Allocation <size> of NonPaged kernel memory"""
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<QQQ", type, size, tag)
        res = c_uint64(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_MEMALLOC_IOCTL, buffer, len(buffer), byref(res), sizeof(res))
        return res.value
    @require_upgraded_driver
    def kcall(self, target, *args):
        """Call target in kernel mode with given arguments (64-bit packing)."""
        target = self.resolve_symbol(target)
        # None arguments are passed as NULL (0).
        args = [arg if arg is not None else 0 for arg in args]
        buffer = struct.pack("<" + "Q" * (len(args) + 1), target, *args)
        h = self._get_kldbgdrv_handle()
        res = c_uint64(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_KCALL_IOCTL, buffer, len(buffer), byref(res), ctypes.sizeof(res))
        return res.value
    @require_upgraded_driver
    def do_in(self, port, size):
        """Perform IN instruction in kernel mode"""
        if size not in [1, 2, 4]:
            raise ValueError("Invalid IN size: {0}".format(size))
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<QQ", size, port)
        res = DWORD(0x44444444)
        windows.winproxy.DeviceIoControl(h, DU_IN_IOCTL, buffer, len(buffer), byref(res), ctypes.sizeof(res))
        return res.value
    @require_upgraded_driver
    def do_out(self, port, value, size):
        """Perform OUT instruction in kernel mode"""
        if size not in [1, 2, 4]:
            raise ValueError("Invalid OUT size: {0}".format(size))
        h = self._get_kldbgdrv_handle()
        buffer = struct.pack("<QQQ", size, port, value)
        windows.winproxy.DeviceIoControl(h, DU_OUT_IOCTL, buffer, len(buffer), 0, 0)
        return None
class LocalKernelDebuggerError(Exception):
    """Raised when the requirements for Local Kernel Debugging are not met."""
    pass
def LocalKernelDebugger(quiet=True):
    """| Check that all conditions to Local Kernel Debugging are met
    | and return a LKD (subclass of :class:`LocalKernelDebuggerBase`)
    """
    if not windows.utils.check_debug():
        raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging on kernel not in DEBUG mode")
    if not windows.utils.check_is_elevated():
        raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging from non-Admin process")
    windows.utils.enable_privilege(SE_DEBUG_NAME, True)
    # Pick the debugger class matching the kernel bitness.
    if windows.system.bitness != 64:
        return LocalKernelDebugger32(quiet)
    if windows.current_process.is_wow_64:
        raise LocalKernelDebuggerError("Cannot perform LocalKernelDebugging from SysWow64 process (please launch from 64bits python)")
    return LocalKernelDebugger64(quiet)
# # We are working on this part, we don't know if we will use it
# # We don't know if it really works
# # We keep that here for information purposes
# class RemoteDebugger(LocalKernelDebugger32):
# def __init__(self):
# enable_privilege(SE_DEBUG_NAME, True)
# self._load_debug_dll()
# self.DebugClient = self._do_debug_create()
#
# def _do_kernel_attach(self, str):
# #self._setup_windbg_imposture()
# DEBUG_ATTACH_LOCAL_KERNEL = 1
# DEBUG_ATTACH_KERNEL_CONNECTION = 0x00000000
# res = self.DebugClient.AttachKernel(DEBUG_ATTACH_KERNEL_CONNECTION, str)
# if res:
# raise WinError(res)
| |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import sys
import tabulate
import pb4py
import pb4py.exceptions
def add_device_commands(parsers):
	"""Register the `devices` subcommands: list, create, update, delete."""
	sub = parsers.add_subparsers()

	sub.add_parser(
		'list',
		help = 'List all devices.',
	).set_defaults(func = command_devices_list)

	creator = sub.add_parser('create', help = 'Create a new PushBullet device.')
	creator.add_argument('name', type = str, help = 'The name of the device')
	creator.add_argument('device_type', type = str, help = 'The type of device (ex: Windows, phone, etc)')
	creator.set_defaults(func = command_devices_create)

	updater = sub.add_parser('update', help = 'Update an existing PushBullet device.')
	updater.add_argument('iden', type = str, help = 'The ID of the device')
	updater.add_argument('--nickname', type = str, default = None, help = 'Set the device nickname')
	# --device_type is stored as args.type to match the API field name.
	updater.add_argument('--device_type', dest = 'type', type = str, default = None, help = 'Set the device type')
	updater.set_defaults(func = command_devices_update)

	deleter = sub.add_parser('delete', help = 'Delete an existing PushBullet device.')
	deleter.add_argument('iden', type = str, help = 'The ID of the device')
	deleter.set_defaults(func = command_devices_delete)
def add_contact_commands(parsers):
	"""Register the `contacts` subcommands: list, create, update, delete."""
	sub = parsers.add_subparsers()

	sub.add_parser(
		'list',
		help = 'List all contacts.',
	).set_defaults(func = command_contacts_list)

	creator = sub.add_parser('create', help = 'Create a new contact.')
	creator.add_argument('name', type = str, help = 'The name of the contact')
	creator.add_argument('email', type = str, help = 'The contact\'s email')
	creator.set_defaults(func = command_contacts_create)

	updater = sub.add_parser('update', help = 'Update an existing contact.')
	updater.add_argument('iden', type = str, help = 'The ID of the contact')
	updater.add_argument('--name', type = str, default = None, help = 'Set the name of the contact')
	updater.add_argument('--email', type = str, default = None, help = 'Set the email of the contact')
	updater.set_defaults(func = command_contacts_update)

	deleter = sub.add_parser('delete', help = 'Delete an existing contact.')
	deleter.add_argument('iden', type = str, help = 'The ID of the contact')
	deleter.set_defaults(func = command_contacts_delete)
def add_push_commands(parsers):
	"""Register the `pushes` subcommands: send, history, dismiss, delete."""
	push_parsers = parsers.add_subparsers()

	send_push = push_parsers.add_parser(
		'send',
		help = 'Send a push notification.',
	)
	# Exactly one push target must be given.
	target_group = send_push.add_mutually_exclusive_group(required = True)
	target_group.add_argument(
		'--device_iden',
		type = str,
		help = 'A device ID',
	)
	target_group.add_argument(
		'--email',
		type = str,
		help = 'An email address',
	)
	target_group.add_argument(
		'--channel_tag',
		type = str,
		help = 'A channel tag',
	)
	target_group.add_argument(
		'--client_iden',
		type = str,
		help = 'An OAuth client ID',
	)
	send_push.add_argument(
		'push_type',
		type = str,
		choices = pb4py.Client.PUSH_TYPES,
		help = 'The type of push to send',
	)
	send_push.add_argument(
		'--title',
		type = str,
		default = None,
		help = 'The title of the push (only used for note and link type pushes)',
	)
	send_push.add_argument(
		'--body',
		type = str,
		default = None,
		help = 'The body of the push',
	)
	send_push.add_argument(
		'--url',
		type = str,
		default = None,
		help = 'The URL of the push (only used for link pushes)',
	)
	send_push.add_argument(
		'--file-path',
		dest = 'file_path',
		type = str,
		help = 'The path to the file (only used for file pushes)',
	)
	send_push.add_argument(
		'--file-name',
		dest = 'file_name',
		type = str,
		default = None,
		# BUGFIX: user-facing typo "by dfeault" corrected.
		help = 'The name of the file (only used for file pushes), by default this is the name of the file',
	)
	send_push.add_argument(
		'--file-type',
		dest = 'file_type',
		type = str,
		default = None,
		help = 'The MIME type of the file (only used for file pushes), by default the MIME type is guessed',
	)
	send_push.set_defaults(func = command_push_send)

	push_history = push_parsers.add_parser(
		'history',
		help = 'Get your push history.',
	)
	push_history.add_argument(
		'--filter-type',
		type = str,
		choices = pb4py.Client.PUSH_TYPES,
		help = 'Filter the pushes by their type',
	)
	push_history.set_defaults(func = command_push_history)

	dismiss_push = push_parsers.add_parser(
		'dismiss',
		help = 'Dismiss a push.',
	)
	dismiss_push.add_argument(
		'iden',
		type = str,
		help = 'The push ID',
	)
	dismiss_push.set_defaults(func = command_push_dismiss)

	delete_push = push_parsers.add_parser(
		'delete',
		help = 'Delete a push',
	)
	delete_push.add_argument(
		'iden',
		type = str,
		help = 'The push ID',
	)
	delete_push.set_defaults(func = command_push_delete)
def add_subscription_commands(parsers):
	"""Register the `subscriptions` subcommands."""
	sub = parsers.add_subparsers()

	sub.add_parser(
		'list',
		help = 'List your subscriptions',
	).set_defaults(func = command_subscriptions_list)

	# NOTE(review): this subcommand is exposed as 'subscribe' but is wired to
	# command_subscriptions_unsubscribe -- confirm which behavior is intended.
	channel_cmd = sub.add_parser('subscribe', help = 'Subscribe to a channel')
	channel_cmd.add_argument('tag', help = 'The channel tag')
	channel_cmd.set_defaults(func = command_subscriptions_unsubscribe)
def add_channel_commands(parsers):
	"""Register the `channels` subcommands: info, subscribe."""
	sub = parsers.add_subparsers()

	info_cmd = sub.add_parser('info', help = 'Get information about a subscription.')
	info_cmd.add_argument('tag', type = str, help = 'The channel tag')
	info_cmd.set_defaults(func = command_channel_info)

	subscribe_cmd = sub.add_parser('subscribe', help = 'Subscribe to a channel.')
	subscribe_cmd.add_argument('tag', help = 'The channel tag')
	subscribe_cmd.set_defaults(func = command_channel_subscribe)
def add_parser_commands(parsers):
	"""Register every top-level CLI command on the given subparsers object."""
	parsers.add_parser(
		'generate-config',
		# BUGFIX: user-facing typo "can then bet put" corrected.
		help = 'Generate a sane set of config options. These can then be put in the ~/.pb4pyrc file.',
	).set_defaults(func = command_generate_default_config)

	add_device_commands(parsers.add_parser(
		'devices',
		help = 'Manage connected devices.',
	))

	add_contact_commands(parsers.add_parser(
		'contacts',
		help = 'Manage contacts.',
	))

	parsers.add_parser(
		'me',
		help = 'Get information about yourself.',
	).set_defaults(func = command_me_get)

	add_push_commands(parsers.add_parser(
		'pushes',
		help = 'Manage pushes',
	))

	add_subscription_commands(parsers.add_parser(
		'subscriptions',
		help = 'Manage subscriptions',
	))

	add_channel_commands(parsers.add_parser(
		'channels',
		help = 'Manage channels',
	))
def parse_args():
	"""Build the top-level argument parser, register all commands and parse sys.argv."""
	parser = argparse.ArgumentParser(
		description = 'A CLI command for working with PushBullet',
	)
	add_parser_commands(parser.add_subparsers(title = 'commands'))
	return parser.parse_args()
def prompt(question, default = "yes"):
	"""
	Ask a yes/no question on standard input and return the answer.

	"question" is a string that is presented to the user.
	"default" is the presumed answer if the user just hits <Enter>.
	It must be "yes" (the default), "no" or None (meaning
	an answer is required of the user).

	The "answer" return value is True for "yes" or False for "no".
	Raises ValueError for any other "default".
	"""
	valid = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
	if default is None:
		options = ' [y/n] '
	elif default == 'yes':
		options = ' [Y/n] '
	elif default == 'no':
		options = ' [y/N] '
	else:
		raise ValueError('invalid default answer: \'%s\'' % default)

	# BUGFIX: raw_input only exists on Python 2; fall back to input() on
	# Python 3 instead of crashing with NameError.
	try:
		read_line = raw_input
	except NameError:
		read_line = input

	while True:
		sys.stdout.write(question + options)
		choice = read_line().lower()
		if default is not None and choice == '':
			return valid[default]
		elif choice in valid:
			return valid[choice]
		else:
			sys.stdout.write('Please respond with \'yes\' or \'no\' (or \'y\' or \'n\').\n')
def build_table(data, columns):
	"""Render `data` (a list of dicts) as a text table; missing keys render as ''."""
	headers = [col.replace('_', ' ').capitalize() for col in columns]
	rows = [
		[row[column] if column in row else '' for column in columns]
		for row in data
	]
	return tabulate.tabulate(rows, headers)
def client_command(func):
	"""Decorator: call the wrapped command with a fresh pb4py.Client prepended.

	BUGFIX: uses functools.wraps so the wrapped command keeps its name and
	docstring (useful for debugging and help output).
	"""
	import functools

	@functools.wraps(func)
	def wrapper(*args, **kwargs):
		client = pb4py.Client()
		return func(client, *args, **kwargs)
	return wrapper
def command_generate_default_config(args):
	"""Print a skeleton configuration suitable for the ~/.pb4pyrc file."""
	default_config = {
		'auth': {
			'type': 'basic',
			'access_token': 'deadbeef',
		},
	}
	print(json.dumps(default_config, indent = 4, sort_keys = False))
@client_command
def command_devices_list(client, args):
	"""Print a table of all registered devices."""
	print(build_table(client.devices(), ['iden', 'nickname', 'type']))
@client_command
def command_devices_create(client, args):
	"""Create a device from the parsed CLI arguments."""
	client.create_device(args.name, args.device_type)
	print('Device created')
@client_command
def command_devices_update(client, args):
	"""Apply any provided --nickname/--device_type updates to the device."""
	changed_fields = {}
	for field in ('nickname', 'type'):
		value = getattr(args, field)
		if value is not None:
			changed_fields[field] = value
	client.update_device(args.iden, **changed_fields)
	print('Device updated')
@client_command
def command_devices_delete(client, args):
	"""Delete a device after asking for confirmation (defaults to 'no')."""
	if not prompt('Are you sure you want to delete device {}?'.format(args.iden), default = 'no'):
		return
	client.delete_device(args.iden)
	print('Device deleted')
@client_command
def command_contacts_list(client, args):
	"""Print a table of the account's contacts."""
	print(build_table(client.contacts(), ['iden', 'name', 'email']))
@client_command
def command_contacts_create(client, args):
	"""Create a contact from the given name and e-mail address."""
	name, email = args.name, args.email
	client.create_contact(name, email)
	print('Contact created')
@client_command
def command_contacts_update(client, args):
	"""Apply any supplied contact fields (name/email) to a contact."""
	updates = {}
	for field in ('name', 'email'):
		value = getattr(args, field)
		if value is not None:
			updates[field] = value
	client.update_contact(args.iden, **updates)
	print('Contact updated')
@client_command
def command_contacts_delete(client, args):
	"""Delete a contact after interactive confirmation."""
	target = args.iden
	if not prompt('Are you sure you want to delete contact {}?'.format(target), default = 'no'):
		return
	client.delete_contact(target)
	print('Contact deleted')
@client_command
def command_me_get(client, args):
	"""Show the authenticated user's name and e-mail address."""
	profile = client.me()
	print(build_table([profile], ['name', 'email']))
@client_command
def command_push_send(client, args):
	"""Send a push of the requested type (file, link, or note).

	For file pushes the file is uploaded first and the resulting URL is
	attached to the push. Raises ValueError when a required argument for
	the chosen type is missing or the type is unknown.
	"""
	push_type = args.push_type

	def optional_fields(names):
		# Only forward arguments the user actually supplied.
		return {
			name: getattr(args, name)
			for name in names
			if getattr(args, name) is not None
		}

	if push_type == 'file':
		if not args.file_path:
			raise ValueError('No file was specified')

		args.file_url = client.upload_file(
			args.file_path,
			filename = args.file_name,
			file_type = args.file_type,
		)['file_url']

		push_data = optional_fields(['title', 'body', 'file_name', 'file_type', 'file_url'])
	elif push_type == 'link':
		if not args.url:
			raise ValueError('No URL was given')

		push_data = optional_fields(['title', 'body', 'url'])
	elif push_type == 'note':
		push_data = optional_fields(['title', 'body'])
	else:
		# BUG FIX: an unrecognized type previously fell through and hit an
		# UnboundLocalError on push_data; fail with a clear message instead.
		raise ValueError('Unknown push type {}'.format(push_type))

	push_data.update(optional_fields(['device_iden', 'email', 'channel_tag', 'client_iden']))

	client.push(push_type, **push_data)
	print('Sent push')
@client_command
def command_push_history(client, args):
	"""Print the push history, grouped into one table per push type."""
	pushes = client.push_history()
	if len(pushes) == 0:
		print('No pushes :\'(')
		return

	if args.filter_type:
		pushes = [push for push in pushes if push['type'] == args.filter_type]

	# Columns shown for each known push type; any other type in
	# pb4py.Client.PUSH_TYPES falls back to the common sender columns.
	type_columns = {
		'file': ['iden', 'sender_name', 'sender_email', 'file_url'],
		'link': ['iden', 'sender_name', 'sender_email', 'title', 'body', 'url'],
		'note': ['iden', 'sender_name', 'sender_email', 'title', 'body'],
	}
	default_columns = ['iden', 'sender_name', 'sender_email']

	grouped = {
		push_type: [push for push in pushes if push['type'] == push_type]
		for push_type in pb4py.Client.PUSH_TYPES
	}

	for push_type in pb4py.Client.PUSH_TYPES:
		print()

		pushes_of_type = grouped[push_type]
		if len(pushes_of_type) == 0:
			continue

		# BUG FIX: `columns` previously leaked from the prior iteration (or
		# was unbound on the first) for types other than file/link/note.
		columns = type_columns.get(push_type, default_columns)

		print('===== ' + push_type.capitalize() + ' =====')
		print(build_table(pushes_of_type, columns))
@client_command
def command_push_dismiss(client, args):
	"""Dismiss a push after interactive confirmation."""
	target = args.iden
	if not prompt('Are you sure you want to dismiss push {}?'.format(target), default = 'no'):
		return
	client.dismiss_push(target)
	print('Push dismissed')
@client_command
def command_push_delete(client, args):
	"""Delete a push after interactive confirmation."""
	target = args.iden
	if not prompt('Are you sure you want to delete push {}?'.format(target), default = 'no'):
		return
	client.delete_push(target)
	print('Push deleted')
@client_command
def command_subscriptions_list(client, args):
	"""Print a table of channel subscriptions with channel details."""
	subscriptions = client.subscriptions()
	channel_fields = ('name', 'tag', 'website_url', 'description')
	for subscription in subscriptions:
		# Flatten the nested channel record into the subscription row so
		# build_table can pick the fields up directly.
		channel = subscription['channel']
		for field in channel_fields:
			subscription[field] = channel.get(field, '')
	print(build_table(subscriptions, ['iden', 'name', 'tag', 'website_url', 'description']))
@client_command
def command_subscriptions_unsubscribe(client, args):
	"""Unsubscribe from a channel after interactive confirmation."""
	tag = args.tag
	if not prompt('Are you sure you want to unsubscribe from this channel?', default = 'no'):
		return
	client.unsubscribe_to_channel(tag)
	print('Unsubscribed')
@client_command
def command_channel_info(client, args):
	"""Show details for a single channel identified by its tag."""
	info = client.get_channel_info(args.tag)
	columns = ['iden', 'name', 'tag', 'subscriber_count', 'website_url', 'description']
	print(build_table([info], columns))
@client_command
def command_channel_subscribe(client, args):
	"""Subscribe to the channel identified by its tag."""
	client.subscribe_to_channel(args.tag)
	print('Subscribed')
def main():
	"""CLI entry point: parse arguments and dispatch the sub-command."""
	args = parse_args()
	try:
		args.func(args)
	except pb4py.exceptions.PB4PyException as ex:
		# Known client errors are reported cleanly, without a traceback.
		print(ex)
if __name__ == '__main__':
	main()
| |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import datetime
import os
import sys
import uuid
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_log import log
from oslo_serialization import jsonutils
import pbr.version
from keystone.cmd import bootstrap
from keystone.cmd import doctor
from keystone.common import driver_hints
from keystone.common import fernet_utils
from keystone.common import jwt_utils
from keystone.common import sql
from keystone.common.sql import upgrades
from keystone.common import utils
import keystone.conf
from keystone.credential.providers import fernet as credential_fernet
from keystone import exception
from keystone.federation import idp
from keystone.federation import utils as mapping_engine
from keystone.i18n import _
from keystone.server import backends
# Global oslo.config object holding all keystone configuration options.
CONF = keystone.conf.CONF
# Module-level logger for keystone-manage output.
LOG = log.getLogger(__name__)
# Base class for keystone-manage sub-commands. Subclasses set ``name``
# and provide a docstring, which argparse displays as the command help.
# (Deliberately no class docstring here: subclasses without one would
# inherit it as their help text.)
class BaseApp(object):
    name = None

    @classmethod
    def add_argument_parser(cls, subparsers):
        # Register the command under its name and remember which class to
        # run via the ``cmd_class`` default on the parsed namespace.
        command_parser = subparsers.add_parser(cls.name, help=cls.__doc__)
        command_parser.set_defaults(cmd_class=cls)
        return command_parser
class BootStrap(BaseApp):
    """Perform the basic bootstrap process."""
    name = "bootstrap"
    def __init__(self):
        # All real resource creation is delegated to the bootstrapper.
        self.bootstrapper = bootstrap.Bootstrapper()
    @classmethod
    def add_argument_parser(cls, subparsers):
        # Each option's metavar names the OS_BOOTSTRAP_* environment
        # variable that may be used instead of the flag; the environment
        # takes precedence when both are set (see do_bootstrap).
        parser = super(BootStrap, cls).add_argument_parser(subparsers)
        parser.add_argument('--bootstrap-username', default='admin',
                            metavar='OS_BOOTSTRAP_USERNAME',
                            help=('The username of the initial keystone '
                                  'user during bootstrap process.'))
        # NOTE(morganfainberg): See below for ENV Variable that can be used
        # in lieu of the command-line arguments.
        parser.add_argument('--bootstrap-password', default=None,
                            metavar='OS_BOOTSTRAP_PASSWORD',
                            help='The bootstrap user password')
        parser.add_argument('--bootstrap-project-name', default='admin',
                            metavar='OS_BOOTSTRAP_PROJECT_NAME',
                            help=('The initial project created during the '
                                  'keystone bootstrap process.'))
        parser.add_argument('--bootstrap-role-name', default='admin',
                            metavar='OS_BOOTSTRAP_ROLE_NAME',
                            help=('The initial role-name created during the '
                                  'keystone bootstrap process.'))
        parser.add_argument('--bootstrap-service-name', default='keystone',
                            metavar='OS_BOOTSTRAP_SERVICE_NAME',
                            help=('The initial name for the initial identity '
                                  'service created during the keystone '
                                  'bootstrap process.'))
        parser.add_argument('--bootstrap-admin-url',
                            metavar='OS_BOOTSTRAP_ADMIN_URL',
                            help=('The initial identity admin url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-public-url',
                            metavar='OS_BOOTSTRAP_PUBLIC_URL',
                            help=('The initial identity public url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-internal-url',
                            metavar='OS_BOOTSTRAP_INTERNAL_URL',
                            help=('The initial identity internal url created '
                                  'during the keystone bootstrap process. '
                                  'e.g. http://127.0.0.1:5000/v3'))
        parser.add_argument('--bootstrap-region-id',
                            metavar='OS_BOOTSTRAP_REGION_ID',
                            help=('The initial region_id endpoints will be '
                                  'placed in during the keystone bootstrap '
                                  'process.'))
        # Immutable default roles are on by default (--immutable-roles is
        # effectively always True); --no-immutable-roles is the actual
        # opt-out switch, consulted in do_bootstrap.
        parser.add_argument('--immutable-roles',
                            default=True,
                            action='store_true',
                            help=('Whether default roles (admin, member, and '
                                  'reader) should be immutable. This is the '
                                  'default.'))
        parser.add_argument('--no-immutable-roles',
                            default=False,
                            action='store_true',
                            help=('Whether default roles (admin, member, and '
                                  'reader) should be immutable. Immutable '
                                  'default roles is the default, use this '
                                  'flag to opt out of immutable default '
                                  'roles.'))
        return parser
    def do_bootstrap(self):
        """Perform the bootstrap actions.
        Create bootstrap user, project, and role so that CMS, humans, or
        scripts can continue to perform initial setup (domains, projects,
        services, endpoints, etc) of Keystone when standing up a new
        deployment.
        """
        # For every setting the OS_BOOTSTRAP_* environment variable takes
        # precedence over the corresponding command-line option.
        self.username = (
            os.environ.get('OS_BOOTSTRAP_USERNAME') or
            CONF.command.bootstrap_username)
        self.project_name = (
            os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or
            CONF.command.bootstrap_project_name)
        self.role_name = (
            os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or
            CONF.command.bootstrap_role_name)
        self.password = (
            os.environ.get('OS_BOOTSTRAP_PASSWORD') or
            CONF.command.bootstrap_password)
        self.service_name = (
            os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or
            CONF.command.bootstrap_service_name)
        self.admin_url = (
            os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or
            CONF.command.bootstrap_admin_url)
        self.public_url = (
            os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or
            CONF.command.bootstrap_public_url)
        self.internal_url = (
            os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or
            CONF.command.bootstrap_internal_url)
        self.region_id = (
            os.environ.get('OS_BOOTSTRAP_REGION_ID') or
            CONF.command.bootstrap_region_id)
        self.service_id = None
        self.endpoints = None
        # The password is the only setting with no usable default; bail
        # out early rather than bootstrapping an account nobody can use.
        if self.password is None:
            print(_('ERROR: Either --bootstrap-password argument or '
                    'OS_BOOTSTRAP_PASSWORD must be set.'))
            sys.exit(1)
        # Copy the resolved settings onto the bootstrapper and run it.
        self.bootstrapper.admin_password = self.password
        self.bootstrapper.admin_username = self.username
        self.bootstrapper.project_name = self.project_name
        self.bootstrapper.admin_role_name = self.role_name
        self.bootstrapper.service_name = self.service_name
        self.bootstrapper.service_id = self.service_id
        self.bootstrapper.admin_url = self.admin_url
        self.bootstrapper.public_url = self.public_url
        self.bootstrapper.internal_url = self.internal_url
        self.bootstrapper.region_id = self.region_id
        if CONF.command.no_immutable_roles:
            self.bootstrapper.immutable_roles = False
        else:
            self.bootstrapper.immutable_roles = True
        self.bootstrapper.bootstrap()
        # Expose the ids of the created resources for callers/tests.
        self.reader_role_id = self.bootstrapper.reader_role_id
        self.member_role_id = self.bootstrapper.member_role_id
        self.role_id = self.bootstrapper.admin_role_id
        self.project_id = self.bootstrapper.project_id
    @classmethod
    def main(cls):
        klass = cls()
        klass.do_bootstrap()
class Doctor(BaseApp):
    """Diagnose common problems with keystone deployments."""

    name = 'doctor'

    @classmethod
    def add_argument_parser(cls, subparsers):
        # No options beyond the base command registration.
        return super(Doctor, cls).add_argument_parser(subparsers)

    @staticmethod
    def main():
        # diagnose() returns a non-zero value when symptoms are detected,
        # which becomes the process exit code via SystemExit.
        raise SystemExit(doctor.diagnose())
class DbSync(BaseApp):
    """Sync the database."""
    name = 'db_sync'
    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(DbSync, cls).add_argument_parser(subparsers)
        parser.add_argument(
            'version',
            default=None,
            nargs='?',
            help=(
                'Migrate the database up to a specified version. '
                'If not provided, db_sync will migrate the database to the '
                'latest known version. '
                'Schema downgrades are not supported.'
            ),
        )
        # --expand/--migrate/--contract are the three phases of a rolling
        # upgrade; --check reports which phase is next. They are mutually
        # exclusive; with none given, an offline sync is performed.
        group = parser.add_mutually_exclusive_group()
        group.add_argument(
            '--expand',
            default=False,
            action='store_true',
            help=(
                'Expand the database schema in preparation for data migration.'
            ),
        )
        group.add_argument(
            '--migrate',
            default=False,
            action='store_true',
            help=(
                'Copy all data that needs to be migrated within the database '
                'ahead of starting the first keystone node upgraded to the '
                'new release. '
                'This command should be run after the --expand command. '
                'Once the --migrate command has completed, you can upgrade '
                'all your keystone nodes to the new release and restart them.'
            ),
        )
        group.add_argument(
            '--contract',
            default=False,
            action='store_true',
            help=(
                'Remove any database tables and columns that are no longer '
                'required. This command should be run after all keystone '
                'nodes are running the new release.'
            ),
        )
        group.add_argument(
            '--check',
            default=False,
            action='store_true',
            help=(
                'Check for outstanding database actions that still need to be '
                'executed. This command can be used to verify the condition '
                'of the current database state.'
            ),
        )
        return parser
    @classmethod
    def check_db_sync_status(cls):
        """Report where the deployment is in the expand/migrate/contract
        upgrade sequence.
        :returns: 0 when fully up to date, 1 when the branch versions are
            inconsistent, 2 when --expand is the next step, 3 when
            --migrate is next, 4 when --contract is next. Used as the
            process exit code by main().
        """
        status = 0
        # No expand version at all means the database has never been
        # placed under version control.
        try:
            expand_version = upgrades.get_db_version(branch='expand')
        except db_exception.DBMigrationError:
            LOG.info(
                'Your database is not currently under version '
                'control or the database is already controlled. Your '
                'first step is to run `keystone-manage db_sync --expand`.'
            )
            return 2
        # Missing migrate/contract branches are treated as version 0 so
        # the ordering comparisons below still work.
        try:
            migrate_version = upgrades.get_db_version(
                branch='data_migration')
        except db_exception.DBMigrationError:
            migrate_version = 0
        try:
            contract_version = upgrades.get_db_version(branch='contract')
        except db_exception.DBMigrationError:
            contract_version = 0
        migration_script_version = upgrades.LATEST_VERSION
        # The invariant is expand >= migrate >= contract; anything else
        # means the phases were run out of order.
        if (
            contract_version > migrate_version or
            migrate_version > expand_version
        ):
            LOG.info('Your database is out of sync. For more information '
                     'refer to https://docs.openstack.org/keystone/'
                     'latest/admin/identity-upgrading.html')
            status = 1
        elif migration_script_version > expand_version:
            LOG.info('Your database is not up to date. Your first step is '
                     'to run `keystone-manage db_sync --expand`.')
            status = 2
        elif expand_version > migrate_version:
            LOG.info('Expand version is ahead of migrate. Your next step '
                     'is to run `keystone-manage db_sync --migrate`.')
            status = 3
        elif migrate_version > contract_version:
            LOG.info('Migrate version is ahead of contract. Your next '
                     'step is to run `keystone-manage db_sync --contract`.')
            status = 4
        elif (
            migration_script_version == expand_version == migrate_version ==
            contract_version
        ):
            LOG.info('All db_sync commands are upgraded to the same '
                     'version and up-to-date.')
        LOG.info(
            'The latest installed migration script version is: %(script)d.\n'
            'Current repository versions:\n'
            'Expand: %(expand)d\n'
            'Migrate: %(migrate)d\n'
            'Contract: %(contract)d',
            {
                'script': migration_script_version,
                'expand': expand_version,
                'migrate': migrate_version,
                'contract': contract_version,
            },
        )
        return status
    @staticmethod
    def main():
        # Dispatch on the mutually exclusive flags; with none set, do a
        # full offline sync to the requested (or latest) version.
        if CONF.command.check:
            sys.exit(DbSync.check_db_sync_status())
        elif CONF.command.expand:
            upgrades.expand_schema()
        elif CONF.command.migrate:
            upgrades.migrate_data()
        elif CONF.command.contract:
            upgrades.contract_schema()
        else:
            upgrades.offline_sync_database_to_version(
                CONF.command.version)
class DbVersion(BaseApp):
    """Print the current migration version of the database."""

    name = 'db_version'

    @staticmethod
    def main():
        # Look up the database's current migration version and display it.
        current_version = upgrades.get_db_version()
        print(current_version)
class BasePermissionsSetup(BaseApp):
    """Common user/group setup for file permissions."""
    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(BasePermissionsSetup,
                       cls).add_argument_parser(subparsers)
        # When the command runs as root the target user/group must be
        # given explicitly, so the options become mandatory.
        running_as_root = (os.geteuid() == 0)
        parser.add_argument('--keystone-user', required=running_as_root)
        parser.add_argument('--keystone-group', required=running_as_root)
        return parser
    @staticmethod
    def get_user_group():
        """Resolve --keystone-user/--keystone-group to numeric ids.
        :returns: (user_id, group_id) tuple; either element is None when
            the corresponding option was not supplied.
        :raises ValueError: if a supplied name is unknown to the system
            (utils.get_unix_user/get_unix_group raise KeyError).
        """
        keystone_user_id = None
        keystone_group_id = None
        try:
            a = CONF.command.keystone_user
            if a:
                keystone_user_id = utils.get_unix_user(a)[0]
        except KeyError:
            raise ValueError("Unknown user '%s' in --keystone-user" % a)
        try:
            a = CONF.command.keystone_group
            if a:
                keystone_group_id = utils.get_unix_group(a)[0]
        except KeyError:
            raise ValueError("Unknown group '%s' in --keystone-group" % a)
        return keystone_user_id, keystone_group_id
    @classmethod
    def initialize_fernet_repository(
            cls, keystone_user_id, keystone_group_id, config_group=None):
        """Create and seed the key repository for *config_group*.
        Only initializes keys when the repository validates as writable.
        """
        conf_group = getattr(CONF, config_group)
        futils = fernet_utils.FernetUtils(
            conf_group.key_repository,
            conf_group.max_active_keys,
            config_group
        )
        futils.create_key_directory(keystone_user_id, keystone_group_id)
        if futils.validate_key_repository(requires_write=True):
            futils.initialize_key_repository(
                keystone_user_id, keystone_group_id)
    @classmethod
    def rotate_fernet_repository(
            cls, keystone_user_id, keystone_group_id, config_group=None):
        """Rotate the keys in the repository for *config_group*.
        No-op unless the repository validates as writable.
        """
        conf_group = getattr(CONF, config_group)
        futils = fernet_utils.FernetUtils(
            conf_group.key_repository,
            conf_group.max_active_keys,
            config_group
        )
        if futils.validate_key_repository(requires_write=True):
            futils.rotate_keys(keystone_user_id, keystone_group_id)
class FernetSetup(BasePermissionsSetup):
    """Setup key repositories for Fernet tokens and auth receipts.
    This also creates a primary key used for both creating and validating
    Fernet tokens and auth receipts. To improve security, you should rotate
    your keys (using keystone-manage fernet_rotate, for example).
    """

    name = 'fernet_setup'

    @classmethod
    def main(cls):
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_tokens')

        token_repo = os.path.abspath(CONF.fernet_tokens.key_repository)
        receipt_repo = os.path.abspath(CONF.fernet_receipts.key_repository)
        if token_repo != receipt_repo:
            # Separate directories: receipts get their own repository.
            cls.initialize_fernet_repository(
                user_id, group_id, 'fernet_receipts')
        elif (CONF.fernet_tokens.max_active_keys !=
                CONF.fernet_receipts.max_active_keys):
            # WARNING(adriant): If the directories are the same,
            # 'max_active_keys' is ignored from fernet_receipts in favor of
            # fernet_tokens to avoid a potential mismatch. Only if the
            # directories are different do we create a different one for
            # receipts, and then respect 'max_active_keys' for receipts.
            LOG.warning(
                "Receipt and Token fernet key directories are the same "
                "but `max_active_keys` is different. Receipt "
                "`max_active_keys` will be ignored in favor of Token "
                "`max_active_keys`."
            )
class FernetRotate(BasePermissionsSetup):
    """Rotate Fernet encryption keys.
    This assumes you have already run keystone-manage fernet_setup.
    A new primary key is placed into rotation, which is used for new tokens.
    The old primary key is demoted to secondary, which can then still be used
    for validating tokens. Excess secondary keys (beyond [fernet_tokens]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate tokens. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.
    Rotating keys too frequently, or with [fernet_tokens] max_active_keys set
    too low, will cause tokens to become invalid prior to their expiration.
    """

    name = 'fernet_rotate'

    @classmethod
    def main(cls):
        user_id, group_id = cls.get_user_group()
        cls.rotate_fernet_repository(user_id, group_id, 'fernet_tokens')

        # Receipts only need their own rotation when they live in a
        # different directory than tokens.
        token_repo = os.path.abspath(CONF.fernet_tokens.key_repository)
        receipt_repo = os.path.abspath(CONF.fernet_receipts.key_repository)
        if token_repo != receipt_repo:
            cls.rotate_fernet_repository(
                user_id, group_id, 'fernet_receipts')
class CreateJWSKeyPair(BasePermissionsSetup):
    """Create a key pair for signing and validating JWS tokens.
    This command creates a public and private key pair to use for signing and
    validating JWS token signatures. The key pair is written to the directory
    where the command is invoked.
    """

    name = 'create_jws_keypair'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(CreateJWSKeyPair, cls).add_argument_parser(subparsers)
        parser.add_argument(
            '--force', action='store_true',
            help=('Forcibly overwrite keys if they already exist')
        )
        return parser

    @classmethod
    def main(cls):
        cwd = os.getcwd()
        private_key_path = os.path.join(cwd, 'private.pem')
        public_key_path = os.path.join(cwd, 'public.pem')

        # Refuse to clobber existing key material unless --force was given.
        overwrite = CONF.command.force
        if os.path.isfile(private_key_path) and not overwrite:
            raise SystemExit(_('Private key %(path)s already exists')
                             % {'path': private_key_path})
        if os.path.isfile(public_key_path) and not overwrite:
            raise SystemExit(_('Public key %(path)s already exists')
                             % {'path': public_key_path})

        jwt_utils.create_jws_keypair(private_key_path, public_key_path)
class TokenSetup(BasePermissionsSetup):
    """Setup a key repository for tokens.
    This also creates a primary key used for both creating and validating
    tokens. To improve security, you should rotate your keys (using
    keystone-manage token_rotate, for example).
    """

    name = 'token_setup'

    @classmethod
    def main(cls):
        # Token keys live in the [fernet_tokens] repository.
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_tokens')
class TokenRotate(BasePermissionsSetup):
    """Rotate token encryption keys.
    This assumes you have already run keystone-manage token_setup.
    A new primary key is placed into rotation, which is used for new tokens.
    The old primary key is demoted to secondary, which can then still be used
    for validating tokens. Excess secondary keys (beyond [token]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate tokens. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.
    Rotating keys too frequently, or with [token] max_active_keys set
    too low, will cause tokens to become invalid prior to their expiration.
    """

    name = 'token_rotate'

    @classmethod
    def main(cls):
        # Rotate the [fernet_tokens] repository only.
        user_id, group_id = cls.get_user_group()
        cls.rotate_fernet_repository(user_id, group_id, 'fernet_tokens')
class ReceiptSetup(BasePermissionsSetup):
    """Setup a key repository for auth receipts.
    This also creates a primary key used for both creating and validating
    receipts. To improve security, you should rotate your keys (using
    keystone-manage receipt_rotate, for example).
    """

    name = 'receipt_setup'

    @classmethod
    def main(cls):
        # Receipt keys live in the [fernet_receipts] repository.
        user_id, group_id = cls.get_user_group()
        cls.initialize_fernet_repository(user_id, group_id, 'fernet_receipts')
class ReceiptRotate(BasePermissionsSetup):
    """Rotate auth receipts encryption keys.
    This assumes you have already run keystone-manage receipt_setup.
    A new primary key is placed into rotation, which is used for new receipts.
    The old primary key is demoted to secondary, which can then still be used
    for validating receipts. Excess secondary keys (beyond [receipt]
    max_active_keys) are revoked. Revoked keys are permanently deleted. A new
    staged key will be created and used to validate receipts. The next time key
    rotation takes place, the staged key will be put into rotation as the
    primary key.
    Rotating keys too frequently, or with [receipt] max_active_keys set
    too low, will cause receipts to become invalid prior to their expiration.
    """

    name = 'receipt_rotate'

    @classmethod
    def main(cls):
        # Rotate the [fernet_receipts] repository only.
        user_id, group_id = cls.get_user_group()
        cls.rotate_fernet_repository(user_id, group_id, 'fernet_receipts')
class CredentialSetup(BasePermissionsSetup):
    """Setup a Fernet key repository for credential encryption.
    The purpose of this command is very similar to `keystone-manage
    fernet_setup` only the keys included in this repository are for encrypting
    and decrypting credential secrets instead of token payloads. Keys can be
    rotated using `keystone-manage credential_rotate`.
    """

    name = 'credential_setup'

    @classmethod
    def main(cls):
        user_id, group_id = cls.get_user_group()
        repo_utils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        repo_utils.create_key_directory(user_id, group_id)
        # Seed keys only once the repository validates as writable.
        if repo_utils.validate_key_repository(requires_write=True):
            repo_utils.initialize_key_repository(user_id, group_id)
class CredentialRotate(BasePermissionsSetup):
    """Rotate Fernet encryption keys for credential encryption.
    This assumes you have already run `keystone-manage credential_setup`.
    A new primary key is placed into rotation only if all credentials are
    encrypted with the current primary key. If any credentials are encrypted
    with a secondary key the rotation will abort. This protects against
    removing a key that is still required to decrypt credentials. Once a key is
    removed from the repository, it is impossible to recover the original data
    without restoring from a backup external to keystone (more on backups
    below). To make sure all credentials are encrypted with the latest primary
    key, please see the `keystone-manage credential_migrate` command. Since the
    maximum number of keys in the credential repository is 3, once all
    credentials are encrypted with the latest primary key we can safely
    introduce a new primary key. All credentials will still be decryptable
    since they are all encrypted with the only secondary key in the repository.
    It is imperitive to understand the importance of backing up keys used to
    encrypt credentials. In the event keys are overrotated, applying a key
    repository from backup can help recover otherwise useless credentials.
    Persisting snapshots of the key repository in secure and encrypted source
    control, or a dedicated key management system are good examples of
    encryption key backups.
    The `keystone-manage credential_rotate` and `keystone-manage
    credential_migrate` commands are intended to be done in sequence. After
    performing a rotation, a migration must be done before performing another
    rotation. This ensures we don't over-rotate encryption keys.
    """
    name = 'credential_rotate'
    def __init__(self):
        drivers = backends.load_backends()
        self.credential_provider_api = drivers['credential_provider_api']
        self.credential_api = drivers['credential_api']
    def validate_primary_key(self):
        """Exit with an error unless every stored credential is encrypted
        with the current primary key (i.e. its key_hash matches).
        :raises SystemExit: when any credential still uses a secondary key.
        """
        crypto, keys = credential_fernet.get_multi_fernet_keys()
        primary_key_hash = credential_fernet.primary_key_hash(keys)
        # Lists *all* credentials; each record's key_hash identifies which
        # key it was encrypted with.
        credentials = self.credential_api.driver.list_credentials(
            driver_hints.Hints()
        )
        for credential in credentials:
            if credential['key_hash'] != primary_key_hash:
                msg = _('Unable to rotate credential keys because not all '
                        'credentials are encrypted with the primary key. '
                        'Please make sure all credentials have been encrypted '
                        'with the primary key using `keystone-manage '
                        'credential_migrate`.')
                raise SystemExit(msg)
    @classmethod
    def main(cls):
        futils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        keystone_user_id, keystone_group_id = cls.get_user_group()
        if futils.validate_key_repository(requires_write=True):
            # Only rotate after confirming no credential still depends on
            # a secondary key (see validate_primary_key).
            klass = cls()
            klass.validate_primary_key()
            futils.rotate_keys(keystone_user_id, keystone_group_id)
class CredentialMigrate(BasePermissionsSetup):
    """Provides the ability to encrypt credentials using a new primary key.
    This assumes that there is already a credential key repository in place and
    that the database backend has been upgraded to at least the Newton schema.
    If the credential repository doesn't exist yet, you can use
    ``keystone-manage credential_setup`` to create one.
    """
    name = 'credential_migrate'
    def __init__(self):
        drivers = backends.load_backends()
        self.credential_provider_api = drivers['credential_provider_api']
        self.credential_api = drivers['credential_api']
    def migrate_credentials(self):
        """Re-encrypt every credential not already using the primary key.
        Each matching credential is decrypted with its current key and
        pushed through the normal update path, which re-encrypts it with
        the primary key.
        """
        crypto, keys = credential_fernet.get_multi_fernet_keys()
        primary_key_hash = credential_fernet.primary_key_hash(keys)
        # FIXME(lbragstad): We *should* be able to use Hints() to ask only for
        # credentials that have a key_hash equal to a secondary key hash or
        # None, but Hints() doesn't seem to honor None values. See
        # https://bugs.launchpad.net/keystone/+bug/1614154. As a workaround -
        # we have to ask for *all* credentials and filter them ourselves.
        credentials = self.credential_api.driver.list_credentials(
            driver_hints.Hints()
        )
        for credential in credentials:
            if credential['key_hash'] != primary_key_hash:
                # If the key_hash isn't None but doesn't match the
                # primary_key_hash, then we know the credential was encrypted
                # with a secondary key. Let's decrypt it, and send it through
                # the update path to re-encrypt it with the new primary key.
                decrypted_blob = self.credential_provider_api.decrypt(
                    credential['encrypted_blob']
                )
                cred = {'blob': decrypted_blob}
                self.credential_api.update_credential(
                    credential['id'],
                    cred
                )
    @classmethod
    def main(cls):
        # Check to make sure we have a repository that works...
        futils = fernet_utils.FernetUtils(
            CONF.credential.key_repository,
            credential_fernet.MAX_ACTIVE_KEYS,
            'credential'
        )
        futils.validate_key_repository(requires_write=True)
        klass = cls()
        klass.migrate_credentials()
class TrustFlush(BaseApp):
    """Flush expired and non-expired soft deleted trusts from the backend."""

    name = 'trust_flush'

    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(TrustFlush, cls).add_argument_parser(subparsers)
        parser.add_argument('--project-id', default=None,
                            help=('The id of the project of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        parser.add_argument('--trustor-user-id', default=None,
                            help=('The id of the trustor of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        parser.add_argument('--trustee-user-id', default=None,
                            help=('The id of the trustee of which the '
                                  'expired or non-expired soft-deleted '
                                  'trusts is to be purged'))
        # NOTE: this default is evaluated once, when the parser is built,
        # so "now" is the time keystone-manage started.
        parser.add_argument('--date', default=datetime.datetime.utcnow(),
                            help=('The date of which the expired or '
                                  'non-expired soft-deleted trusts older '
                                  'than that will be purged. The format of '
                                  'the date to be "DD-MM-YYYY". If no date '
                                  'is supplied keystone-manage will use the '
                                  'system clock time at runtime'))
        return parser

    @classmethod
    def main(cls):
        drivers = backends.load_backends()
        trust_manager = drivers['trust_api']
        if CONF.command.date:
            # A user-supplied --date arrives as a string; parse it.
            if not isinstance(CONF.command.date, datetime.datetime):
                try:
                    CONF.command.date = datetime.datetime.strptime(
                        CONF.command.date, '%d-%m-%Y')
                except ValueError:
                    # BUG FIX: strptime signals a malformed date with
                    # ValueError, not KeyError, so the old handler never
                    # fired. Also build the message with % formatting
                    # instead of passing the date as a spare argument.
                    raise ValueError("'%s' is an invalid input for date, "
                                     "should be DD-MM-YYYY" %
                                     CONF.command.date)
        else:
            LOG.info("No date is supplied, keystone-manage will use the "
                     "system clock time at runtime ")
        trust_manager.flush_expired_and_soft_deleted_trusts(
            project_id=CONF.command.project_id,
            trustor_user_id=CONF.command.trustor_user_id,
            trustee_user_id=CONF.command.trustee_user_id,
            date=CONF.command.date
        )
class MappingPurge(BaseApp):
    """Purge the mapping table."""
    name = 'mapping_purge'
    @classmethod
    def add_argument_parser(cls, subparsers):
        parser = super(MappingPurge, cls).add_argument_parser(subparsers)
        parser.add_argument('--all', default=False, action='store_true',
                            help=('Purge all mappings.'))
        parser.add_argument('--domain-name', default=None,
                            help=('Purge any mappings for the domain '
                                  'specified.'))
        parser.add_argument('--public-id', default=None,
                            help=('Purge the mapping for the Public ID '
                                  'specified.'))
        parser.add_argument('--local-id', default=None,
                            help=('Purge the mappings for the Local ID '
                                  'specified.'))
        parser.add_argument('--type', default=None, choices=['user', 'group'],
                            help=('Purge any mappings for the type '
                                  'specified.'))
        return parser
    @staticmethod
    def main():
        def validate_options():
            # NOTE(henry-nash): It would be nice to use the argparse automated
            # checking for this validation, but the only way I can see doing
            # that is to make the default (i.e. if no optional parameters
            # are specified) to purge all mappings - and that sounds too
            # dangerous as a default. So we use it in a slightly
            # unconventional way, where all parameters are optional, but you
            # must specify at least one.
            if (CONF.command.all is False and
                    CONF.command.domain_name is None and
                    CONF.command.public_id is None and
                    CONF.command.local_id is None and
                    CONF.command.type is None):
                raise ValueError(_('At least one option must be provided'))
            if (CONF.command.all is True and
                    (CONF.command.domain_name is not None or
                     CONF.command.public_id is not None or
                     CONF.command.local_id is not None or
                     CONF.command.type is not None)):
                raise ValueError(_('--all option cannot be mixed with '
                                   'other options'))
        def get_domain_id(name):
            # Translate a domain name into its id for the mapping filter.
            try:
                return resource_manager.get_domain_by_name(name)['id']
            except KeyError:
                raise ValueError(_("Unknown domain '%(name)s' specified by "
                                   "--domain-name") % {'name': name})
        validate_options()
        drivers = backends.load_backends()
        resource_manager = drivers['resource_api']
        mapping_manager = drivers['id_mapping_api']
        # Now that we have validated the options, we know that at least one
        # option has been specified, and if it was the --all option then this
        # was the only option specified.
        #
        # The mapping dict is used to filter which mappings are purged, so
        # leaving it empty means purge them all
        mapping = {}
        if CONF.command.domain_name is not None:
            mapping['domain_id'] = get_domain_id(CONF.command.domain_name)
        if CONF.command.public_id is not None:
            mapping['public_id'] = CONF.command.public_id
        if CONF.command.local_id is not None:
            mapping['local_id'] = CONF.command.local_id
        if CONF.command.type is not None:
            mapping['entity_type'] = CONF.command.type
        mapping_manager.purge_mappings(mapping)
# Domain-specific config files are named 'keystone.<domain_name>.conf';
# these constants are the fixed head and tail of that filename pattern.
DOMAIN_CONF_FHEAD = 'keystone.'
DOMAIN_CONF_FTAIL = '.conf'
def _domain_config_finder(conf_dir):
    """Yield (pathname, domain_name) for every domain config file found.

    Domain configs match the filename pattern of
    'keystone.<domain_name>.conf'.

    :returns: generator yielding (filename, domain_name) tuples
    """
    LOG.info('Scanning %r for domain config files', conf_dir)
    for dirpath, _dirs, filenames in os.walk(conf_dir):
        for fname in filenames:
            # A valid name carries the fixed head/tail plus a non-empty
            # domain part, i.e. at least two dots overall ('keystone.conf'
            # itself has only one and is the primary config, not a domain
            # config).
            is_domain_config = (fname.startswith(DOMAIN_CONF_FHEAD) and
                                fname.endswith(DOMAIN_CONF_FTAIL) and
                                fname.count('.') >= 2)
            if is_domain_config:
                domain_name = fname[len(DOMAIN_CONF_FHEAD):
                                    -len(DOMAIN_CONF_FTAIL)]
                yield (os.path.join(dirpath, fname), domain_name)
            else:
                LOG.warning('Ignoring file (%s) while scanning '
                            'domain config directory', fname)
class DomainConfigUploadFiles(object):
    """Upload domain-specific configuration files from disk to the database."""
    def __init__(self, domain_config_finder=_domain_config_finder):
        # domain_config_finder is injectable for testing; it must be a
        # callable yielding (filename, domain_name) tuples for a directory.
        super(DomainConfigUploadFiles, self).__init__()
        self.load_backends()
        self._domain_config_finder = domain_config_finder
    def load_backends(self):
        """Cache the resource and domain-config managers from the backends."""
        drivers = backends.load_backends()
        self.resource_manager = drivers['resource_api']
        self.domain_config_manager = drivers['domain_config_api']
    def valid_options(self):
        """Validate the options, returning True if they are indeed valid.

        It would be nice to use the argparse automated checking for this
        validation, but the only way I can see doing that is to make the
        default (i.e. if no optional parameters are specified) to upload
        all configuration files - and that sounds too dangerous as a
        default. So we use it in a slightly unconventional way, where all
        parameters are optional, but you must specify at least one.
        """
        if (CONF.command.all is False and
                CONF.command.domain_name is None):
            print(_('At least one option must be provided, use either '
                    '--all or --domain-name'))
            return False
        if (CONF.command.all is True and
                CONF.command.domain_name is not None):
            print(_('The --all option cannot be used with '
                    'the --domain-name option'))
            return False
        return True
    def _upload_config_to_database(self, file_name, domain_name):
        """Upload a single config file to the database.

        :param file_name: the file containing the config options
        :param domain_name: the domain name
        :returns: a boolean indicating if the upload succeeded
        """
        try:
            domain_ref = (
                self.resource_manager.get_domain_by_name(domain_name))
        except exception.DomainNotFound:
            print(_('Invalid domain name: %(domain)s found in config file '
                    'name: %(file)s - ignoring this file.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False
        # Refuse to overwrite a configuration that already exists for the
        # domain; the operator must purge it first.
        if self.domain_config_manager.get_config_with_sensitive_info(
                domain_ref['id']):
            print(_('Domain: %(domain)s already has a configuration '
                    'defined - ignoring file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False
        sections = {}
        try:
            parser = cfg.ConfigParser(file_name, sections)
            parser.parse()
        except Exception:
            # We explicitly don't try and differentiate the error cases, in
            # order to keep the code in this tool more robust as oslo.config
            # changes.
            print(_('Error parsing configuration file for domain: %(domain)s, '
                    'file: %(file)s.') % {
                        'domain': domain_name,
                        'file': file_name})
            return False
        try:
            # oslo.config's parser stores each option value as a list of raw
            # strings; flatten to the first entry before persisting.
            for group in sections:
                for option in sections[group]:
                    sections[group][option] = sections[group][option][0]
            self.domain_config_manager.create_config(domain_ref['id'],
                                                     sections)
            return True
        except Exception as e:
            msg = ('Error processing config file for domain: '
                   '%(domain_name)s, file: %(filename)s, error: %(error)s')
            LOG.error(msg,
                      {'domain_name': domain_name,
                       'filename': file_name,
                       'error': e},
                      exc_info=True)
            return False
    def read_domain_configs_from_files(self):
        """Read configs from file(s) and load into database.

        The command line parameters have already been parsed and the CONF
        command option will have been set. It is either set to the name of an
        explicit domain, or it's None to indicate that we want all domain
        config files.
        """
        domain_name = CONF.command.domain_name
        conf_dir = CONF.identity.domain_config_dir
        if not os.path.exists(conf_dir):
            print(_('Unable to locate domain config directory: %s') % conf_dir)
            raise ValueError
        if domain_name:
            # Request is to upload the configs for just one domain
            fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL
            if not self._upload_config_to_database(
                    os.path.join(conf_dir, fname), domain_name):
                return False
            return True
        success_cnt = 0
        failure_cnt = 0
        for filename, domain_name in self._domain_config_finder(conf_dir):
            if self._upload_config_to_database(filename, domain_name):
                success_cnt += 1
                LOG.info('Successfully uploaded domain config %r',
                         filename)
            else:
                failure_cnt += 1
        # Any single failure makes the whole run fail (return False below),
        # but we still attempt every file first.
        if success_cnt == 0:
            LOG.warning('No domain configs uploaded from %r', conf_dir)
        if failure_cnt:
            return False
        return True
    def run(self):
        """Upload the requested configs; return 1 on failure, None on success."""
        # First off, let's just check we can talk to the domain database
        try:
            self.resource_manager.list_domains(driver_hints.Hints())
        except Exception:
            # It is likely that there is some SQL or other backend error
            # related to set up
            print(_('Unable to access the keystone database, please check it '
                    'is configured correctly.'))
            raise
        if not self.valid_options():
            return 1
        if not self.read_domain_configs_from_files():
            return 1
class DomainConfigUpload(BaseApp):
    """Upload the domain specific configuration files to the database."""

    name = 'domain_config_upload'

    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register the sub-command with its two mutually exclusive options."""
        parser = super(DomainConfigUpload, cls).add_argument_parser(subparsers)
        parser.add_argument('--all', action='store_true', default=False,
                            help='Upload contents of all domain specific '
                                 'configuration files. Either use this option '
                                 'or use the --domain-name option to choose a '
                                 'specific domain.')
        parser.add_argument('--domain-name', default=None,
                            help='Upload contents of the specific '
                                 'configuration file for the given domain. '
                                 'Either use this option or use the --all '
                                 'option to upload contents for all domains.')
        return parser

    @staticmethod
    def main():
        """Run the uploader, propagating a non-None status as the exit code."""
        uploader = DomainConfigUploadFiles()
        exit_status = uploader.run()
        if exit_status is not None:
            sys.exit(exit_status)
class SamlIdentityProviderMetadata(BaseApp):
    """Generate Identity Provider metadata."""

    name = 'saml_idp_metadata'

    @staticmethod
    def main():
        """Print the generated SAML IdP metadata document to stdout."""
        generator = idp.MetadataGenerator()
        print(generator.generate_metadata())
class MappingEngineTester(BaseApp):
    """Execute mapping engine locally."""
    name = 'mapping_engine'
    def __init__(self):
        super(MappingEngineTester, self).__init__()
        # Throwaway mapping id; the engine only needs it for reporting.
        self.mapping_id = uuid.uuid4().hex
        self.rules_pathname = None
        self.rules = None
        self.assertion_pathname = None
        self.assertion = None
    def read_rules(self, path):
        """Load the JSON mapping rules from *path* into self.rules."""
        self.rules_pathname = path
        try:
            with open(path, "rb") as file:
                self.rules = jsonutils.load(file)
        except ValueError as e:
            raise SystemExit(_('Error while parsing rules '
                               '%(path)s: %(err)s') % {'path': path, 'err': e})
    def read_assertion(self, path):
        """Read the raw assertion text from *path* into self.assertion."""
        self.assertion_pathname = path
        try:
            with open(path) as file:
                self.assertion = file.read().strip()
        except IOError as e:
            raise SystemExit(_("Error while opening file "
                               "%(path)s: %(err)s") % {'path': path, 'err': e})
    def normalize_assertion(self):
        """Convert the 'key: value' assertion text into a dict in place.

        When --prefix was supplied, lines whose key lacks the prefix are
        silently dropped.
        """
        def split(line, line_num):
            # Split one 'key: value' line, exiting with a helpful message
            # when the line is malformed.
            try:
                k, v = line.split(':', 1)
                return k.strip(), v.strip()
            except ValueError:
                msg = _("assertion file %(pathname)s at line %(line_num)d "
                        "expected 'key: value' but found '%(line)s' "
                        "see help for file format")
                raise SystemExit(msg % {'pathname': self.assertion_pathname,
                                        'line_num': line_num,
                                        'line': line})
        assertion = self.assertion.splitlines()
        assertion_dict = {}
        prefix = CONF.command.prefix
        for line_num, line in enumerate(assertion, 1):
            line = line.strip()
            if line == '':
                continue
            k, v = split(line, line_num)
            if prefix:
                if k.startswith(prefix):
                    assertion_dict[k] = v
            else:
                assertion_dict[k] = v
        self.assertion = assertion_dict
    def normalize_rules(self):
        """Accept a bare list of rules by wrapping it as {'rules': [...]}."""
        if isinstance(self.rules, list):
            self.rules = {'rules': self.rules}
    @classmethod
    def main(cls):
        """Run the mapping engine over the supplied rules and assertion."""
        if CONF.command.engine_debug:
            mapping_engine.LOG.logger.setLevel('DEBUG')
        else:
            mapping_engine.LOG.logger.setLevel('WARN')
        tester = cls()
        tester.read_rules(CONF.command.rules)
        tester.normalize_rules()
        mapping_engine.validate_mapping_structure(tester.rules)
        tester.read_assertion(CONF.command.input)
        tester.normalize_assertion()
        if CONF.command.engine_debug:
            print("Using Rules:\n%s" % (
                jsonutils.dumps(tester.rules, indent=2)))
            print("Using Assertion:\n%s" % (
                jsonutils.dumps(tester.assertion, indent=2)))
        rp = mapping_engine.RuleProcessor(tester.mapping_id,
                                          tester.rules['rules'])
        mapped = rp.process(tester.assertion)
        print(jsonutils.dumps(mapped, indent=2))
    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register the sub-command; raw formatting keeps the \\n in help text."""
        parser = super(MappingEngineTester,
                       cls).add_argument_parser(subparsers)
        parser.formatter_class = argparse.RawTextHelpFormatter
        parser.add_argument('--rules', default=None, required=True,
                            help=("Path to the file with "
                                  "rules to be executed. "
                                  "Content must be\na proper JSON structure, "
                                  "with a top-level key 'rules' and\n"
                                  "corresponding value being a list."))
        parser.add_argument('--input', default=None, required=True,
                            help=("Path to the file with input attributes. "
                                  "The content\nconsists of ':' separated "
                                  "parameter names and their values.\nThere "
                                  "is only one key-value pair per line. "
                                  "A ';' in the\nvalue is a separator and "
                                  "then a value is treated as a list.\n"
                                  "Example:\n"
                                  "\tEMAIL: me@example.com\n"
                                  "\tLOGIN: me\n"
                                  "\tGROUPS: group1;group2;group3"))
        parser.add_argument('--prefix', default=None,
                            help=("A prefix used for each environment "
                                  "variable in the\nassertion. For example, "
                                  "all environment variables may have\nthe "
                                  "prefix ASDF_."))
        parser.add_argument('--engine-debug',
                            default=False, action="store_true",
                            help=("Enable debug messages from the mapping "
                                  "engine."))
class MappingPopulate(BaseApp):
    """Pre-populate entries from domain-specific backends.

    Running this command is not required. It should only be run right after
    the LDAP was configured, when many new users were added, or when
    "mapping_purge" is run.

    This command will take a while to run. It is perfectly fine for it to run
    more than several minutes.
    """
    name = "mapping_populate"
    @classmethod
    def load_backends(cls):
        """Cache the identity and resource managers on the class."""
        drivers = backends.load_backends()
        cls.identity_api = drivers['identity_api']
        cls.resource_api = drivers['resource_api']
    @classmethod
    def add_argument_parser(cls, subparsers):
        """Register the sub-command and its required --domain-name option."""
        parser = super(MappingPopulate, cls).add_argument_parser(
            subparsers)
        parser.add_argument('--domain-name', default=None, required=True,
                            help=("Name of the domain configured to use "
                                  "domain-specific backend"))
        return parser
    @classmethod
    def main(cls):
        """Process entries for id_mapping_api."""
        cls.load_backends()
        domain_name = CONF.command.domain_name
        try:
            domain_id = cls.resource_api.get_domain_by_name(domain_name)['id']
        except exception.DomainNotFound:
            print(_('Invalid domain name: %(domain)s') % {
                'domain': domain_name})
            return False
        # We don't actually need to tackle id_mapping_api in order to get
        # entries there, because list_users does this anyway. That's why it
        # will be enough to just make the call below.
        cls.identity_api.list_users(domain_scope=domain_id)
# Every keystone-manage sub-command; each entry is a BaseApp subclass whose
# add_argument_parser registers its CLI options and whose main() runs it.
CMDS = [
    BootStrap,
    CredentialMigrate,
    CredentialRotate,
    CredentialSetup,
    DbSync,
    DbVersion,
    Doctor,
    DomainConfigUpload,
    FernetRotate,
    FernetSetup,
    CreateJWSKeyPair,
    MappingPopulate,
    MappingPurge,
    MappingEngineTester,
    ReceiptRotate,
    ReceiptSetup,
    SamlIdentityProviderMetadata,
    TokenRotate,
    TokenSetup,
    TrustFlush
]
def add_command_parsers(subparsers):
    """Register an argument sub-parser for every keystone-manage command."""
    for command in CMDS:
        command.add_argument_parser(subparsers)
# The single positional 'command' CLI option; add_command_parsers wires in
# one sub-parser per entry in CMDS.
command_opt = cfg.SubCommandOpt('command',
                                title='Commands',
                                help='Available commands',
                                handler=add_command_parsers)
def main(argv=None, developer_config_file=None):
    """Main entry point into the keystone-manage CLI utility.

    :param argv: Arguments supplied via the command line using the ``sys``
                 standard library.
    :type argv: list
    :param developer_config_file: The location of a configuration file normally
                                  found in development environments.
    :type developer_config_file: string
    """
    CONF.register_cli_opt(command_opt)
    keystone.conf.configure()
    sql.initialize()
    keystone.conf.set_default_for_default_log_levels()
    # BUG FIX: the original dereferenced argv[1:] below unconditionally, so
    # calling main() with the default argv=None raised TypeError. Fall back
    # to the real process arguments instead.
    if argv is None:
        argv = sys.argv
    user_supplied_config_file = False
    for argument in argv:
        if argument == '--config-file':
            user_supplied_config_file = True
    if developer_config_file:
        # oslo.config expects a list of config file locations.
        developer_config_file = [developer_config_file]
    # NOTE(lbragstad): At this point in processing, the first element of argv
    # is the binary location of keystone-manage, which oslo.config doesn't need
    # and is keystone specific. Only pass a list of arguments so that
    # oslo.config can determine configuration file locations based on user
    # provided arguments, if present.
    CONF(args=argv[1:],
         project='keystone',
         version=pbr.version.VersionInfo('keystone').version_string(),
         usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']',
         default_config_files=developer_config_file)
    if not CONF.default_config_files and not user_supplied_config_file:
        LOG.warning('Config file not found, using default configs.')
    keystone.conf.setup_logging()
    CONF.command.cmd_class.main()
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
from contextlib import contextmanager
import pytest
import simplejson
from pyramid.httpexceptions import exception_response
from webtest.utils import NoDefault
from pyramid_swagger import exceptions
def build_test_app(swagger_versions, **overrides):
    """Build a webtest TestApp for the acceptance app with the given settings.

    Keyword overrides win over the defaults below.
    """
    from tests.acceptance.app import main
    from webtest import TestApp as App
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid_swagger.enable_request_validation': True,
        'pyramid_swagger.enable_response_validation': False,
        'pyramid_swagger.enable_swagger_spec_validation': False,
        'pyramid_swagger.swagger_versions': swagger_versions,
    }
    settings.update(overrides)
    return App(main({}, **settings))
# Run every test once per supported swagger version combination.
@pytest.fixture(
    params=[['1.2'], ['2.0'], ['1.2', '2.0']],
    ids=['1.2', '2.0', '1.2-2.0'],
)
def test_app(request):
    """Fixture building a test app for the parametrized swagger versions."""
    return build_test_app(swagger_versions=request.param)
@contextmanager
def validation_context(request, response=None):
    """Remap pyramid_swagger validation failures to 206, anything else to 400."""
    try:
        yield
    except (exceptions.RequestValidationError,
            exceptions.ResponseValidationError,
            exceptions.PathNotFoundError):
        raise exception_response(206)
    except Exception:
        raise exception_response(400)
# Dotted path to validation_context above, handed to pyramid_swagger via the
# 'pyramid_swagger.validation_context_path' setting in tests below.
validation_ctx_path = 'tests.acceptance.request_test.validation_context'
def test_echo_date_with_pyramid_swagger_renderer(test_app):
    """Dates round-trip unchanged through the pyramid_swagger renderer."""
    payload = {'date': datetime.date.today().isoformat()}
    response = test_app.post_json('/echo_date', payload)
    assert response.status_code == 200
    assert response.json == payload
def test_echo_date_with_json_renderer(test_app):
    """The plain JSON renderer cannot serialize converted date objects.

    Swagger 2.0 converts the body value to datetime.date before the view, so
    the default JSON renderer raises TypeError; Swagger 1.2 performs no
    conversion and the payload round-trips.
    """
    today = datetime.date.today()
    input_object = {'date': today.isoformat()}
    exc = None
    response = None
    try:
        response = test_app.post_json('/echo_date_json_renderer', input_object)
    except TypeError as exception:
        exc = exception
    app_settings = test_app.app.registry.settings
    served_swagger_versions = app_settings['pyramid_swagger.swagger_versions']
    if '2.0' in served_swagger_versions:
        assert exc.args == ('{!r} is not JSON serializable'.format(today), )
    else:
        assert response.status_code == 200
        assert response.json == input_object
@pytest.mark.parametrize(
    'body, expected_length',
    [
        [NoDefault, 0],
        [{}, 2],
    ],
)
def test_post_endpoint_with_optional_body(test_app, body, expected_length):
    """Optional request bodies are accepted whether present or absent."""
    response = test_app.post_json('/post_endpoint_with_optional_body', body)
    assert response.json == expected_length
def test_200_with_form_params(test_app):
    """Form-encoded parameters validate successfully."""
    response = test_app.post('/post_with_form_params', {'form_param': 42})
    assert response.status_code == 200
def test_200_with_file_upload(test_app):
    """Multipart file uploads pass request validation."""
    response = test_app.post(
        '/post_with_file_upload',
        upload_files=[('photo_file', 'photo.jpg', b'<binary data goes here>')],
    )
    assert response.status_code == 200
def test_400_with_form_params_wrong_type(test_app):
    """A non-numeric value for a numeric form param is rejected."""
    response = test_app.post(
        '/post_with_form_params',
        {'form_param': "not a number"},
        expect_errors=True,
    )
    assert response.status_code == 400
def test_400_if_json_body_for_form_parms(test_app):
    """Sending a JSON body where form parameters are expected fails."""
    response = test_app.post_json(
        '/post_with_form_params',
        {'form_param': 42},
        expect_errors=True,
    )
    assert response.status_code == 400
def test_400_if_required_query_args_absent(test_app):
    """Omitting a required query argument yields a 400."""
    response = test_app.get('/sample/path_arg1/resource', expect_errors=True)
    assert response.status_code == 400
def test_200_if_optional_query_args_absent(test_app):
    """Only the required query arguments are needed for a 200."""
    response = test_app.get(
        '/sample/path_arg1/resource',
        params={'required_arg': 'test'},  # no `optional_arg` arg
    )
    assert response.status_code == 200
def test_200_if_request_arg_is_wrong_type(test_app):
    """A castable non-string value is accepted for a query argument."""
    response = test_app.get(
        '/sample/path_arg1/resource',
        params={'required_arg': 1.0},
    )
    assert response.status_code == 200
def test_200_if_request_arg_types_are_not_strings(test_app):
    """String-encoded ints/floats/bools are cast to their declared types."""
    query = {
        'int_arg': '5',
        'float_arg': '3.14',
        'boolean_arg': 'true',
    }
    response = test_app.get('/get_with_non_string_query_args', params=query)
    assert response.status_code == 200
def test_404_if_path_not_in_swagger(test_app):
    """Paths absent from the swagger spec produce a 404."""
    response = test_app.get('/undefined/path', expect_errors=True)
    assert response.status_code == 404
def test_200_skip_validation_with_excluded_path():
    """Excluded paths bypass swagger path validation entirely."""
    overrides = {'pyramid_swagger.exclude_paths': [r'^/undefined/path']}
    app = build_test_app(swagger_versions=['2.0'], **overrides)
    assert app.get('/undefined/path').status_code == 200
def test_400_if_request_arg_is_wrong_type_but_not_castable(test_app):
    """A value that cannot be cast to the declared type is rejected."""
    response = test_app.get(
        '/get_with_non_string_query_args',
        params={'float_arg': 'foobar'},
        expect_errors=True,
    )
    assert response.status_code == 400
def test_400_if_path_arg_not_valid_enum(test_app):
    """Path arguments outside the declared enum are rejected."""
    response = test_app.get(
        '/sample/invalid_arg/resource',
        params={'required_arg': 'test'},
        expect_errors=True,
    )
    assert response.status_code == 400
def test_200_if_path_arg_is_wrong_type_but_castable(test_app):
    """Castable path arguments validate against their declared types."""
    response = test_app.get('/sample/nonstring/3/1.4/false')
    assert response.status_code == 200
def test_400_if_required_body_is_missing(test_app):
    """An empty JSON object fails validation when a body is required."""
    response = test_app.post_json('/sample', {}, expect_errors=True)
    assert response.status_code == 400
def test_200_on_json_body_without_contenttype_header(test_app):
    """See https://github.com/striglia/pyramid_swagger/issues/49."""
    # We use .post to avoid sending a Content Type of application/json.
    response = test_app.post(
        '/sample?optional_string=bar',
        simplejson.dumps({'foo': 'test'}),
        {'Content-Type': ''},
    )
    assert response.status_code == 200
def test_400_if_body_has_missing_required_arg(test_app):
    """A body missing a required field is rejected."""
    payload = {'bar': 'test'}
    response = test_app.post_json('/sample', payload, expect_errors=True)
    assert response.status_code == 400
def test_200_if_body_has_missing_optional_arg(test_app):
    """A body missing only optional fields passes validation."""
    response = test_app.post_json('/sample', {'foo': 'test'})
    assert response.status_code == 200
def test_200_if_required_body_is_model(test_app):
    """A complete model body passes validation."""
    response = test_app.post_json('/sample', {'foo': 'test', 'bar': 'test'})
    assert response.status_code == 200
def test_200_if_required_body_is_primitives(test_app):
    """Primitive (non-object) bodies are supported."""
    response = test_app.post_json('/post_with_primitive_body', ['foo', 'bar'])
    assert response.status_code == 200
def test_400_if_extra_body_args(test_app):
    """Unknown fields in the request body are rejected."""
    payload = {'foo': 'test', 'bar': 'test', 'made_up_argument': 1}
    response = test_app.post_json('/sample', payload, expect_errors=True)
    assert response.status_code == 400
def test_400_if_extra_query_args(test_app):
    """Unknown query arguments are rejected."""
    response = test_app.get(
        '/sample/path_arg1/resource?made_up_argument=1',
        expect_errors=True,
    )
    assert response.status_code == 400
def test_400_if_missing_required_header(test_app):
    """Omitting a required header yields a 400."""
    response = test_app.get('/sample/header', expect_errors=True)
    assert response.status_code == 400
def test_200_with_required_header(test_app):
    """Supplying the required header yields a 200."""
    response = test_app.get(
        '/sample/header',
        headers={'X-Force': 'True'},
        expect_errors=True,
    )
    assert response.status_code == 200
def test_200_skip_validation_when_disabled():
    """With request validation off, missing required args are ignored."""
    # calling endpoint with required args missing
    app = build_test_app(
        swagger_versions=['2.0'],
        **{
            'pyramid_swagger.enable_request_validation': False,
            'skip_swagger_data_assert': True,
        }
    )
    response = app.get('/get_with_non_string_query_args', params={})
    assert response.status_code == 200
def test_path_validation_context():
    """A custom validation context can remap PathNotFoundError responses."""
    overrides = {'pyramid_swagger.validation_context_path': validation_ctx_path}
    app = build_test_app(swagger_versions=['2.0'], **overrides)
    assert app.get('/does_not_exist').status_code == 206
def test_request_validation_context():
    """A custom validation context can remap request validation failures."""
    overrides = {'pyramid_swagger.validation_context_path': validation_ctx_path}
    app = build_test_app(swagger_versions=['2.0'], **overrides)
    response = app.get('/get_with_non_string_query_args', params={})
    assert response.status_code == 206
def test_request_to_authenticated_endpoint_without_authentication():
    """Protected endpoints return 401 when no credentials are supplied."""
    app = build_test_app(swagger_versions=['2.0'])
    response = app.get('/sample/authentication', expect_errors=True)
    assert response.status_code == 401
def test_request_to_endpoint_with_no_response_schema():
    """Endpoints without a response schema skip response validation."""
    app = build_test_app(swagger_versions=['2.0'])
    assert app.get('/sample/no_response_schema').status_code == 200
| |
# coding=utf-8
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo.config import cfg
from nova import context as nova_context
from nova import exception
from nova import network
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import utils
from nova.virt.baremetal import db as bmdb
from nova.virt.libvirt import utils as libvirt_utils
# Bare-metal volume options, registered under the [baremetal] config group.
opts = [
    cfg.BoolOpt('use_unsafe_iscsi',
                default=False,
                help='Do not set this out of dev/test environments. '
                     'If a node does not have a fixed PXE IP address, '
                     'volumes are exported with globally opened ACL'),
    cfg.StrOpt('iscsi_iqn_prefix',
               default='iqn.2010-10.org.openstack.baremetal',
               help='The iSCSI IQN prefix used in baremetal volume '
                    'connections.'),
    ]
baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
# Options defined by other nova modules that this driver reads.
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('volume_drivers', 'nova.virt.libvirt.driver', group='libvirt')
LOG = logging.getLogger(__name__)
def _get_baremetal_node_by_instance_uuid(instance_uuid):
    """Look up the bare-metal node DB record for the given instance UUID."""
    admin_ctx = nova_context.get_admin_context()
    return bmdb.bm_node_get_by_instance_uuid(admin_ctx, instance_uuid)
def _create_iscsi_export_tgtadm(path, tid, iqn):
    """Create iSCSI target *tid* named *iqn*, backed by *path*, via tgtadm."""
    target_args = ['tgtadm', '--lld', 'iscsi',
                   '--mode', 'target',
                   '--op', 'new',
                   '--tid', tid,
                   '--targetname', iqn]
    utils.execute(*target_args, run_as_root=True)
    # Attach the backing store as LUN 1 of the new target.
    lun_args = ['tgtadm', '--lld', 'iscsi',
                '--mode', 'logicalunit',
                '--op', 'new',
                '--tid', tid,
                '--lun', '1',
                '--backing-store', path]
    utils.execute(*lun_args, run_as_root=True)
def _allow_iscsi_tgtadm(tid, address):
    """Grant the initiator at *address* access to iSCSI target *tid*."""
    bind_args = ['tgtadm', '--lld', 'iscsi',
                 '--mode', 'target',
                 '--op', 'bind',
                 '--tid', tid,
                 '--initiator-address', address]
    utils.execute(*bind_args, run_as_root=True)
def _delete_iscsi_export_tgtadm(tid):
    """Tear down iSCSI target *tid*, verifying it is really gone.

    The LUN and target deletions are best-effort; the final 'show' call is
    the authoritative check, relying on tgtadm exiting with code 22 when
    the target no longer exists.

    :raises exception.NovaException: if the target still exists afterwards
    """
    try:
        utils.execute('tgtadm', '--lld', 'iscsi',
                      '--mode', 'logicalunit',
                      '--op', 'delete',
                      '--tid', tid,
                      '--lun', '1',
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        pass
    try:
        utils.execute('tgtadm', '--lld', 'iscsi',
                      '--mode', 'target',
                      '--op', 'delete',
                      '--tid', tid,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        pass
    # Check if the tid is deleted, that is, check the tid no longer exists.
    # If the tid dose not exist, tgtadm returns with exit_code 22.
    # utils.execute() can check the exit_code if check_exit_code parameter is
    # passed. But, regardless of whether check_exit_code contains 0 or not,
    # if the exit_code is 0, the function dose not report errors. So we have to
    # catch a ProcessExecutionError and test its exit_code is 22.
    try:
        utils.execute('tgtadm', '--lld', 'iscsi',
                      '--mode', 'target',
                      '--op', 'show',
                      '--tid', tid,
                      run_as_root=True)
    except processutils.ProcessExecutionError as e:
        if e.exit_code == 22:
            # OK, the tid is deleted
            return
        raise
    raise exception.NovaException(_(
        'baremetal driver was unable to delete tid %s') % tid)
def _show_tgtadm():
    """Return tgtadm's full target listing (stdout) as a string."""
    # BUG FIX: the discarded stderr used to be bound to '_', shadowing the
    # gettext translation function imported at module level; use a
    # non-conflicting throwaway name instead.
    out, _err = utils.execute('tgtadm', '--lld', 'iscsi',
                              '--mode', 'target',
                              '--op', 'show',
                              run_as_root=True)
    return out
def _list_backingstore_path():
    """Return the backing store paths of all targets known to tgtadm."""
    # IMPROVEMENT: the accumulator was named 'l' (PEP 8 E741 ambiguous
    # single-letter name); renamed for readability. Behavior is unchanged.
    out = _show_tgtadm()
    paths = []
    for line in out.split('\n'):
        m = re.search(r'Backing store path: (.*)$', line)
        # tgtadm reports 'None' for targets without a backing store; a real
        # path always contains '/'.
        if m and '/' in m.group(1):
            paths.append(m.group(1))
    return paths
def _get_next_tid():
    """Return one more than the highest target id currently known to tgtadm."""
    out = _show_tgtadm()
    highest = 0
    for line in out.split('\n'):
        match = re.search(r'^Target (\d+):', line)
        if match:
            highest = max(highest, int(match.group(1)))
    return highest + 1
def _find_tid(iqn):
    """Return the tgtadm target id for *iqn*, or None if no target matches."""
    pattern = r'^Target (\d+): *' + re.escape(iqn)
    for line in _show_tgtadm().split('\n'):
        match = re.search(pattern, line)
        if match is not None:
            return int(match.group(1))
    return None
def _get_iqn(instance_name, mountpoint):
    """Build the iSCSI qualified name for an instance/mountpoint pair."""
    # '/dev/vdb' -> 'dev-vdb', so the IQN stays a single token.
    mount_part = mountpoint.replace('/', '-').strip('-')
    return '%s:%s-%s' % (CONF.baremetal.iscsi_iqn_prefix,
                         instance_name,
                         mount_part)
def _get_fixed_ips(instance):
    """Return the fixed IPs currently assigned to *instance*."""
    admin_ctx = nova_context.get_admin_context()
    nw_info = network.API().get_instance_nw_info(admin_ctx, instance)
    return nw_info.fixed_ips()
class VolumeDriver(object):
    """Base class for bare-metal volume drivers."""

    def __init__(self, virtapi):
        super(VolumeDriver, self).__init__()
        self.virtapi = virtapi
        # Lazily resolved iSCSI initiator name; see get_volume_connector.
        self._initiator = None

    def get_volume_connector(self, instance):
        """Return the connector properties (ip/initiator/host) for this host."""
        if not self._initiator:
            self._initiator = libvirt_utils.get_iscsi_initiator()
            if not self._initiator:
                LOG.warn(_('Could not determine iscsi initiator name'),
                         instance=instance)
        connector = {
            'ip': CONF.my_ip,
            'initiator': self._initiator,
            'host': CONF.host,
        }
        return connector

    def attach_volume(self, connection_info, instance, mountpoint):
        """Subclasses must implement volume attach."""
        raise NotImplementedError()

    def detach_volume(self, connection_info, instance, mountpoint):
        """Subclasses must implement volume detach."""
        raise NotImplementedError()
class LibvirtVolumeDriver(VolumeDriver):
    """The VolumeDriver delegates to nova.virt.libvirt.volume."""
    def __init__(self, virtapi):
        # Build the driver_volume_type -> libvirt volume driver mapping from
        # CONF entries of the form '<driver_volume_type>=<class path>'.
        super(LibvirtVolumeDriver, self).__init__(virtapi)
        self.volume_drivers = {}
        for driver_str in CONF.libvirt.volume_drivers:
            driver_type, _sep, driver = driver_str.partition('=')
            driver_class = importutils.import_class(driver)
            self.volume_drivers[driver_type] = driver_class(self)
    def _volume_driver_method(self, method_name, connection_info,
                              *args, **kwargs):
        """Dispatch *method_name* to the driver matching the volume type.

        :raises exception.VolumeDriverNotFound: for unknown volume types
        """
        driver_type = connection_info.get('driver_volume_type')
        if driver_type not in self.volume_drivers:
            raise exception.VolumeDriverNotFound(driver_type=driver_type)
        driver = self.volume_drivers[driver_type]
        method = getattr(driver, method_name)
        return method(connection_info, *args, **kwargs)
    def attach_volume(self, connection_info, instance, mountpoint):
        """Connect the volume, then export it to the node via iSCSI."""
        fixed_ips = _get_fixed_ips(instance)
        if not fixed_ips:
            # Without a fixed IP the export would have to be world-open;
            # only allowed when use_unsafe_iscsi is explicitly set.
            if not CONF.baremetal.use_unsafe_iscsi:
                raise exception.NovaException(_(
                    'No fixed PXE IP is associated to %s') % instance['uuid'])
        mount_device = mountpoint.rpartition("/")[2]
        disk_info = {
            'dev': mount_device,
            'bus': 'baremetal',
            'type': 'baremetal',
            }
        conf = self._connect_volume(connection_info, disk_info)
        self._publish_iscsi(instance, mountpoint, fixed_ips,
                            conf.source_path)
    def _connect_volume(self, connection_info, disk_info):
        # Delegate to the libvirt volume driver for this volume type.
        return self._volume_driver_method('connect_volume',
                                          connection_info,
                                          disk_info)
    def _publish_iscsi(self, instance, mountpoint, fixed_ips, device_path):
        """Create an iSCSI export for *device_path* and open ACLs for the node."""
        iqn = _get_iqn(instance['name'], mountpoint)
        tid = _get_next_tid()
        _create_iscsi_export_tgtadm(device_path, tid, iqn)
        if fixed_ips:
            for ip in fixed_ips:
                _allow_iscsi_tgtadm(tid, ip['address'])
        else:
            # NOTE(NTTdocomo): Since nova-compute does not know the
            # instance's initiator ip, it allows any initiators
            # to connect to the volume. This means other bare-metal
            # instances that are not attached the volume can connect
            # to the volume. Do not set CONF.baremetal.use_unsafe_iscsi
            # out of dev/test environments.
            # TODO(NTTdocomo): support CHAP
            _allow_iscsi_tgtadm(tid, 'ALL')
    def detach_volume(self, connection_info, instance, mountpoint):
        """Remove the iSCSI export, always disconnecting the volume after."""
        mount_device = mountpoint.rpartition("/")[2]
        try:
            self._depublish_iscsi(instance, mountpoint)
        finally:
            self._disconnect_volume(connection_info, mount_device)
    def _disconnect_volume(self, connection_info, disk_dev):
        # Delegate to the libvirt volume driver for this volume type.
        return self._volume_driver_method('disconnect_volume',
                                          connection_info,
                                          disk_dev)
    def _depublish_iscsi(self, instance, mountpoint):
        """Delete the iSCSI target for this attachment, if one exists."""
        iqn = _get_iqn(instance['name'], mountpoint)
        tid = _find_tid(iqn)
        if tid is not None:
            _delete_iscsi_export_tgtadm(tid)
        else:
            # Nothing to tear down; the export may never have been created.
            LOG.warn(_('detach volume could not find tid for %s'), iqn,
                     instance=instance)
    def get_all_block_devices(self):
        """
        Return all block devices in use on this node.
        """
        return _list_backingstore_path()
    def get_hypervisor_version(self):
        """
        A dummy method for LibvirtBaseVolumeDriver.connect_volume.
        """
        return 1
| |
#!/usr/bin/env python
"""
This module contains tests for djenerator app.
"""
import datetime
import itertools
import os
import random
import re
import tempfile
import uuid
from decimal import Decimal
from django.conf import settings
from django.db import models
from django.db.models import Model
from django.db.models.fields import BigIntegerField
from django.db.models.fields import BooleanField
from django.db.models.fields import BinaryField
from django.db.models.fields import CharField
from django.db.models.fields import CommaSeparatedIntegerField
from django.db.models.fields import DateField
from django.db.models.fields import DateTimeField
from django.db.models.fields import DecimalField
from django.db.models.fields import DurationField
from django.db.models.fields import EmailField
from django.db.models.fields import FilePathField
from django.db.models.fields import FloatField
from django.db.models.fields import GenericIPAddressField
from django.db.models.fields import IntegerField
from django.db.models.fields import NullBooleanField
from django.db.models.fields import PositiveIntegerField
from django.db.models.fields import PositiveSmallIntegerField
from django.db.models.fields import SmallIntegerField
from django.db.models.fields import SlugField
from django.db.models.fields import TextField
from django.db.models.fields import TimeField
from django.db.models.fields import URLField
from django.db.models.fields import UUIDField
from django.db.models.fields.files import FieldFile
from django.db.models.fields.files import FileField
from django.db.models.fields.files import ImageField
from django.db.models.fields.files import ImageFieldFile
from django.test import TestCase
from djenerator.djenerator import djenerator
from djenerator.fields_generator import generate_random_values
from djenerator.generate_test_data import create_model
from djenerator.generate_test_data import dependencies
from djenerator.generate_test_data import dfs
from djenerator.generate_test_data import field_sample_values
from djenerator.generate_test_data import generate_model
from djenerator.generate_test_data import generate_test_data
from djenerator.generate_test_data import recompute
from djenerator.generate_test_data import topological_sort
from djenerator.model_reader import field_type
from djenerator.model_reader import is_auto_field
from djenerator.model_reader import is_instance_of_django_model
from djenerator.model_reader import is_related
from djenerator.model_reader import is_required
from djenerator.model_reader import is_reverse_related
from djenerator.model_reader import list_of_fields
from djenerator.model_reader import list_of_models
from djenerator.model_reader import module_import
from djenerator.model_reader import names_of_fields
from djenerator.model_reader import relation_type
from djenerator.values_generator import generate_big_integer
from djenerator.values_generator import generate_boolean
from djenerator.values_generator import generate_comma_separated_int
from djenerator.values_generator import generate_date
from djenerator.values_generator import generate_date_time
from djenerator.values_generator import generate_decimal
from djenerator.values_generator import generate_email
from djenerator.values_generator import generate_file_name
from djenerator.values_generator import generate_file_path
from djenerator.values_generator import generate_int
from djenerator.values_generator import generate_integer
from djenerator.values_generator import generate_ip
from djenerator.values_generator import generate_png
from djenerator.values_generator import generate_positive_integer
from djenerator.values_generator import generate_positive_small_integer
from djenerator.values_generator import generate_small_integer
from djenerator.values_generator import generate_sentence
from djenerator.values_generator import generate_string
from djenerator.values_generator import generate_text
from djenerator.values_generator import generate_time
from djenerator.values_generator import generate_url
from djenerator.utility import sort_unique_tuple
from djenerator.utility import sort_unique_tuples
from djenerator.utility import unique_items
import models as mdls
from models import AllFieldsModel
from models import CycleA
from models import CycleB
from models import CycleC
from models import CycleD
from models import CycleE
from models import CycleF
from models import ExtendAbstract
from models import ExtendSuperClass
from models import ExtendingModel
from models import NotExtendingModel
from models import ProxyExtend
from models import SuperAbstract
from models import SuperClass
from models import TestModel0
from models import TestModel1
from models import TestModelA
from models import TestModelB
from models import TestModelC
from models import TestModelD
from models import TestModelE
from models import TestModelFields
from models import TestModelFieldsTwo
from models import TestModelX
from models import TestModelY
class TestFieldToRandomGeneratorMatcher(TestCase):
    """Checks generate_random_values for every field type declared on
    AllFieldsModel: each generated value must have the expected Python
    type and respect the field's constraints (length, range, format).
    """
    def test(self):
        fields = list_of_fields(AllFieldsModel)
        present_types = list(map(lambda field: field.__class__, fields))
        # Every field type listed here must exist on AllFieldsModel,
        # otherwise some generator would go untested.
        field_types = [BigIntegerField, BooleanField, CharField,
                       CommaSeparatedIntegerField, DateField, DateTimeField,
                       DecimalField, DurationField, EmailField, FloatField,
                       GenericIPAddressField, IntegerField, NullBooleanField,
                       PositiveIntegerField, PositiveSmallIntegerField,
                       SmallIntegerField, TextField, TimeField, BinaryField,
                       FileField, ImageField, FilePathField, SlugField,
                       URLField, UUIDField]
        self.assertFalse(set(field_types) - set(present_types),
                         "All types should be present." +
                         str(set(field_types) - set(present_types)))
        for field in fields:
            if isinstance(field, FileField) or isinstance(field, ImageField):
                # Snapshot files already present in the upload directory so
                # that newly generated files can be detected below.
                path = os.path.join(settings.MEDIA_ROOT, field.upload_to)
                files = next((filenames for dirpath, dirnames, filenames in
                              os.walk(path) if dirpath == path), None)
                files = list(map(lambda file: os.path.join(path, file), files))
            sample_siz = 10
            values = generate_random_values(field, sample_siz)
            # Fewer values than requested are acceptable (the generator may
            # deduplicate), but at least one value must be produced.
            self.assertLessEqual(len(values), sample_siz)
            self.assertGreaterEqual(len(values), 1)
            for val in values:
                if isinstance(field, IntegerField):
                    self.assertTrue(isinstance(val, int), val)
                if isinstance(field, EmailField):
                    self.assertTrue(isinstance(val, str), val)
                    email_reg = r'^\w+(?:\.\w+)*@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$'
                    self.assertRegexpMatches(val, email_reg, val)
                if isinstance(field, BooleanField):
                    self.assertTrue(isinstance(val, bool), val)
                if isinstance(field, NullBooleanField):
                    self.assertTrue(isinstance(val, bool) or val is None, val)
                if isinstance(field, CharField):
                    self.assertTrue(isinstance(val, str), val)
                    self.assertLessEqual(len(val), field.max_length, val)
                if isinstance(field, CommaSeparatedIntegerField):
                    self.assertTrue(isinstance(val, str), val)
                    comma_sep_int_re = r'^\d{1,3}(?:,\d{3})*$'
                    self.assertRegexpMatches(val, comma_sep_int_re, val)
                if isinstance(field, DateField):
                    self.assertTrue(isinstance(val, datetime.date), val)
                if isinstance(field, DateTimeField):
                    self.assertTrue(isinstance(val, datetime.datetime), val)
                if isinstance(field, DecimalField):
                    self.assertTrue(isinstance(val, Decimal), val)
                if isinstance(field, FloatField):
                    self.assertTrue(isinstance(val, float), val)
                if isinstance(field, GenericIPAddressField):
                    self.assertTrue(isinstance(val, str), val)
                    ip_regex = r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$'
                    self.assertRegexpMatches(val, ip_regex, val)
                if isinstance(field, PositiveIntegerField):
                    # Range of a 32-bit signed int, non-negative half.
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 2147483647, val)
                    self.assertGreaterEqual(val, 0, val)
                if isinstance(field, PositiveSmallIntegerField):
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 32767, val)
                    self.assertGreaterEqual(val, 0, val)
                if isinstance(field, SmallIntegerField):
                    self.assertTrue(isinstance(val, int), val)
                    self.assertLessEqual(val, 32767, val)
                    self.assertGreaterEqual(val, -32768, val)
                if isinstance(field, TimeField):
                    self.assertTrue(isinstance(val, datetime.time), val)
                if isinstance(field, TextField):
                    self.assertTrue(isinstance(val, str), val)
                    self.assertLessEqual(len(val), field.max_length)
                    text_re = r'^(?:(?:\w+\s?)+\.\s?)+$'
                    self.assertRegexpMatches(val, text_re, val)
                if isinstance(field, DurationField):
                    self.assertTrue(isinstance(val, datetime.timedelta), val)
                if isinstance(field, SlugField):
                    self.assertTrue(isinstance(val, str), val)
                    slug_re = r'^[a-zA-Z0-9_\-]+$'
                    self.assertRegexpMatches(val, slug_re, val)
                if isinstance(field, URLField):
                    url_re = r'^(?:http|ftp|https)://(?:[a-z0-9_\-]+\.?)+/?'
                    url_re += r'(?:/[a-z0-9_\-]+)*/?$'
                    self.assertTrue(isinstance(val, str), val)
                    self.assertRegexpMatches(val, url_re, val)
                if isinstance(field, UUIDField):
                    self.assertTrue(isinstance(val, uuid.UUID), val)
                if isinstance(field, FilePathField):
                    self.assertTrue(isinstance(val, str), val)
                    self.assertTrue(os.path.exists(val), val)
                if isinstance(field, ImageField):
                    # PNG signature bytes.
                    beg = b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
                    full_path = os.path.join(settings.MEDIA_ROOT, val.name)
                    img = val.file.read()
                    self.assertTrue(full_path not in files,
                                    val.name + " shouldn't already exist")
                    self.assertTrue(os.path.exists(full_path))
                    self.assertTrue(isinstance(val, ImageFieldFile), val)
                    self.assertTrue(val.name.startswith(field.upload_to))
                    self.assertTrue(isinstance(val.field, ImageField))
                    # NOTE: str/buffer checks assume Python 2 byte strings.
                    self.assertTrue(isinstance(img, str))
                    self.assertTrue(img.startswith(beg))
                    self.assertTrue(val.name.endswith('.png'))
                if isinstance(field, FileField):
                    # ImageField values also reach this branch; the text
                    # content checks below are skipped for them.
                    content = val.file.read()
                    full_path = os.path.join(settings.MEDIA_ROOT, val.name)
                    self.assertTrue(isinstance(val, FieldFile), val)
                    self.assertTrue(isinstance(val.field, FileField))
                    self.assertTrue(full_path not in files,
                                    val.name + " shouldn't already exist")
                    self.assertTrue(os.path.exists(full_path))
                    self.assertTrue(val.name.startswith(field.upload_to))
                    self.assertTrue(isinstance(content, str))
                    if not isinstance(field, ImageField):
                        text_re = r'^(?:(?:\w+\s?)+\.\s?)+$'
                        self.assertRegexpMatches(content, text_re, content)
                        self.assertTrue(val.name.endswith('.txt'))
                if isinstance(field, BinaryField):
                    # NOTE: `buffer` exists only on Python 2.
                    self.assertTrue(isinstance(val, buffer), val)
class TestInstanceOfDjangoModel(TestCase):
    """is_instance_of_django_model should accept Django models and reject
    plain classes and functions.
    """
    def test(self):
        django_models = [TestModel0, TestModel1, TestModelA, TestModelB,
                         TestModelC, TestModelD, TestModelE, TestModelX,
                         TestModelY, ExtendingModel]
        for candidate in django_models:
            self.assertTrue(is_instance_of_django_model(candidate))
        # A class that does not extend Model is rejected...
        self.assertFalse(is_instance_of_django_model(NotExtendingModel))

        # ...and so is a plain function.
        def plain_function():
            pass

        self.assertFalse(is_instance_of_django_model(plain_function))
class TestListOfModels(TestCase):
    """list_of_models should discover every model defined in the app,
    optionally including abstract base classes.
    """
    def test(self):
        # Models reported whether or not abstract ones are kept.
        concrete = [ExtendingModel, TestModel0, TestModel1, TestModelA,
                    TestModelB, TestModelC, TestModelD, TestModelE,
                    TestModelX, TestModelY, TestModelFields, SuperClass,
                    ExtendAbstract, ExtendSuperClass, ProxyExtend,
                    TestModelFieldsTwo, CycleA, CycleB, CycleC, CycleD,
                    CycleE, CycleF, AllFieldsModel]
        # keep_abstract=True additionally reports the abstract base.
        self.assertEqual(set(concrete + [SuperAbstract]),
                         set(list_of_models(mdls, keep_abstract=True)))
        self.assertEqual(set(concrete),
                         set(list_of_models(mdls)))
class TestListOfFields(TestCase):
    """list_of_fields should yield the model's fields with the expected
    field classes, in declaration order (pk AutoField first).
    """
    def test(self):
        expected = [
            (TestModel1, [models.AutoField, models.CharField,
                          models.IntegerField, models.ForeignKey]),
            (TestModel0, [models.AutoField, models.BooleanField,
                          models.EmailField]),
            (TestModelE, [models.AutoField, models.OneToOneField,
                          models.ForeignKey, models.IntegerField,
                          models.ManyToManyField]),
        ]
        for model, field_classes in expected:
            for field, field_cls in zip(list_of_fields(model), field_classes):
                self.assertTrue(isinstance(field, field_cls))
class TestNamesOfFields(TestCase):
    """names_of_fields should list field names in declaration order."""
    def test(self):
        cases = [
            (TestModelE, ['id', 'field1E', 'field3E', 'field4E', 'field2E']),
            (TestModel1, ['id', 'field1', 'field2', 'field3']),
            (TestModel0, ['id', 'field1', 'field2']),
        ]
        for model, expected_names in cases:
            self.assertEqual(expected_names, names_of_fields(model))
class TestFieldType(TestCase):
    """field_type should report the internal type name of a field."""
    def test(self):
        cases = [
            (models.CharField(), 'CharField'),
            (models.IntegerField(), 'IntegerField'),
            # EmailField reports CharField as its internal type.
            (models.EmailField(), 'CharField'),
            (models.BooleanField(), 'BooleanField'),
            (models.ForeignKey(ExtendingModel), 'ForeignKey'),
            (models.OneToOneField(ExtendingModel), 'OneToOneField'),
            (models.ManyToManyField(ExtendingModel), 'ManyToManyField'),
        ]
        for field, expected_type in cases:
            self.assertEqual(field_type(field), expected_type)
class TestIsAutoField(TestCase):
    """is_auto_field should be true only for AutoField instances."""
    def test(self):
        self.assertTrue(is_auto_field(models.AutoField(primary_key=True)))
        non_auto_fields = (models.CharField(), models.BooleanField(),
                           models.IntegerField(),
                           models.ForeignKey(ExtendingModel))
        for field in non_auto_fields:
            self.assertFalse(is_auto_field(field))
class TestIsRelated(TestCase):
    """is_related should flag only the relational field classes."""
    def test(self):
        relational = (models.ForeignKey, models.OneToOneField,
                      models.ManyToManyField)
        plain = (models.CharField, models.BooleanField,
                 models.EmailField, models.IntegerField)
        for field_cls in relational:
            self.assertTrue(is_related(field_cls))
        for field_cls in plain:
            self.assertFalse(is_related(field_cls))
class TestRelationType(TestCase):
    """relation_type should name the reverse relation class of a related
    field.
    """
    def test(self):
        cases = [
            (models.OneToOneField(ExtendingModel), 'OneToOneRel'),
            (models.ManyToManyField(ExtendingModel), 'ManyToManyRel'),
            (models.ForeignKey(ExtendingModel), 'ManyToOneRel'),
        ]
        for field, expected_rel in cases:
            self.assertEqual(relation_type(field), expected_rel)
class TestIsRequired(TestCase):
    """is_required should be false exactly when the field allows null."""
    def test(self):
        nullable = (models.CharField(max_length=20, null=True),
                    models.IntegerField(null=True),
                    models.ForeignKey(ExtendingModel, null=True))
        required = (models.IntegerField(),
                    models.ForeignKey(ExtendingModel))
        for field in nullable:
            self.assertFalse(is_required(field))
        for field in required:
            self.assertTrue(is_required(field))
class TestModuleImport(TestCase):
    """module_import should resolve a dotted path to the module object."""
    def test(self):
        imported = module_import('tests.models')
        self.assertEqual(mdls, imported)
class TestListOfSampleFieldValues(TestCase):
    """Checks field_sample_values against the sample data declared for the
    test models, and against database rows for relational fields.
    """
    def test(self):
        Y = list_of_fields(TestModelY)
        X = list_of_fields(TestModelX)
        A = list_of_fields(TestModelA)
        B = list_of_fields(TestModelB)
        C = list_of_fields(TestModelC)
        D = list_of_fields(TestModelD)
        E = list_of_fields(TestModelE)
        # Index 0 is the pk AutoField; it has no sample values.
        self.assertFalse(field_sample_values(X[0]))
        self.assertEqual(field_sample_values(Y[1]), [2, 3, 5, 7, 11, 13])
        self.assertEqual(field_sample_values(Y[2]), ['MMa', 'XXa', 'azz'])
        self.assertEqual(field_sample_values(X[1]),
                         [x * x * x for x in range(10)])
        self.assertEqual(field_sample_values(E[3]), [1000000009, 1000003, 101])
        self.assertEqual(field_sample_values(D[1]),
                         [x * x * x for x in range(10)])
        self.assertEqual(field_sample_values(C[1]),
                         ['Hello I am C', 'MUHAHAHAHAHA', 'CCCC', '^_^'])
        self.assertEqual(field_sample_values(B[1]),
                         ['Hello Universe', 'Hello Parallel Universe!'])
        self.assertEqual(field_sample_values(A[1]),
                         ['Hello World', 'Hello Africa', 'axxx!!'])
        self.assertEqual(field_sample_values(A[2]),
                         ['Hello Second Field', 'field 2'])
        # Relational fields sample from rows present in the database.
        a = TestModelX(field1X=12)
        b = TestModelX(field1X=15)
        a.save()
        b.save()
        self.assertEqual((field_sample_values(models.ForeignKey(TestModelX))),
                         ([a, b]))
        # For ManyToMany, element 0 holds a subset of the existing rows.
        fld = models.ManyToManyField(TestModelX)
        self.assertTrue(all([x in [a, b]
                             for x in field_sample_values(fld)[0]]))
        # NOTE(review): relies on Python 2 range() returning a list.
        vals = [int(x) for x in field_sample_values(list_of_fields(CycleF)[2])]
        self.assertEqual(vals, range(4000, 5000))
class TestCreateModel(TestCase):
    """create_model should save an instance retrievable by the exact
    keyword arguments it was created from.
    """
    def test(self):
        kwargsa = {'field1A': 'Hrr', 'field2A': 'HxxA'}
        atest = create_model(TestModelA, kwargsa.items())
        self.assertEqual(atest, TestModelA.objects.get(**kwargsa))
        # Each following model references the instance created just above.
        kwargsa = {'field1B': 'Hello Worrd', 'field2B': atest}
        btest = create_model(TestModelB, kwargsa.items())
        self.assertEqual(btest, TestModelB.objects.get(**kwargsa))
        kwargsa = {'field1C': 'Hello Egypt!!', 'field2C': btest}
        ctest = create_model(TestModelC, kwargsa.items())
        self.assertEqual(ctest, TestModelC.objects.get(**kwargsa))
        # field2D receives a queryset of TestModelA rows.
        kwargsa = {'field1D': 77, 'field2D': TestModelA.objects.all()}
        dtest = create_model(TestModelD, kwargsa.items())
        self.assertEqual(dtest, TestModelD.objects.get(**kwargsa))
class TestDependencies(TestCase):
    """dependencies should list the models that must exist before a given
    model can be generated.
    """
    def test(self):
        # Models without required relations have no dependencies.
        self.assertEqual(dependencies(TestModelD), [])
        self.assertFalse(dependencies(CycleC))
        # Single-dependency chains.
        self.assertEqual(dependencies(TestModelC), [TestModelB])
        self.assertEqual(dependencies(TestModelB), [TestModelA])
        self.assertEqual(dependencies(CycleD), [CycleC])
        # Multi-dependency models, compared order-insensitively.
        self.assertEqual(set(dependencies(TestModelE)),
                         set([TestModelB, TestModelC]))
        self.assertEqual(set(dependencies(TestModelFields)),
                         set([TestModelY, TestModelX]))
class TestTopologicalSorting(TestCase):
    """topological_sort should order models so that every model appears
    after its dependencies.
    """
    def test(self):
        # Small inputs with a single valid ordering.
        self.assertEqual(topological_sort([ExtendingModel, TestModel1,
                                           TestModel0]),
                         [ExtendingModel, TestModel0, TestModel1])
        self.assertEqual(topological_sort([TestModel1, TestModel0]),
                         [TestModel0, TestModel1])
        self.assertEqual(topological_sort([TestModel0, TestModel1]),
                         [TestModel0, TestModel1])

        def check_partial_order(ordering):
            # A before B; B before C and E; C before E.
            pos = ordering.index
            self.assertTrue(pos(TestModelA) < pos(TestModelB))
            self.assertTrue(pos(TestModelB) < pos(TestModelC))
            self.assertTrue(pos(TestModelB) < pos(TestModelE))
            self.assertTrue(pos(TestModelC) < pos(TestModelE))
            self.assertTrue(ExtendingModel in ordering)

        # Any permutation of the input must satisfy the partial order.
        model_set = [TestModelA, TestModelB, TestModelD, TestModelC,
                     TestModelE, ExtendingModel]
        for perm in itertools.permutations(model_set):
            check_partial_order(topological_sort(list(perm)))
class TestUniqueConstraints(TestCase):
    """unique_items should reject value combinations already present in
    the database for the unique-together tuple it guards.
    """
    def test(self):
        constraint = unique_items(('fieldA', 'fieldD',))
        # Seed one row so the combination ('A', 5) is taken.
        model = TestModelFieldsTwo(fieldA='A', fieldD=5, fieldB=10,
                                   fieldC='Winner', fieldE=True, fieldF=6,
                                   fieldG='Mathematics', fieldH=False)
        model.save()
        # NOTE(review): fields are taken from TestModelFields while most
        # checks target TestModelFieldsTwo -- confirm this mix is intended.
        fields = list_of_fields(TestModelFields)
        self.assertFalse(constraint([('fieldA', 'A'), ('fieldD', 5)],
                                    TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A')],
                                   TestModelFields, fields[5]))
        # NOTE(review): exact duplicate of the first assertFalse above.
        self.assertFalse(constraint([('fieldA', 'A'), ('fieldD', 5)],
                                    TestModelFieldsTwo, fields[5]))
        # A different fieldD value does not collide with the saved row.
        self.assertTrue(constraint([('fieldA', 'A'), ('fieldD', 3)],
                                   TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A')],
                                   TestModelFieldsTwo, fields[5]))
        self.assertTrue(constraint([('fieldA', 'A'), ('fieldD', 3)],
                                   TestModelFieldsTwo, fields[5]))
class TestSortTuple(TestCase):
    """sort_unique_tuple should sort a tuple of field names into the
    model's canonical order.
    """
    def test(self):
        declared = tuple(names_of_fields(TestModelFields))
        # Reversing the full field tuple sorts back to canonical order.
        self.assertEqual(sort_unique_tuple(declared[::-1], TestModelFields),
                         declared)
        self.assertEqual(sort_unique_tuple(('fieldA', 'fieldX', 'fieldG',
                                            'fieldD'), TestModelFields),
                         ('fieldA', 'fieldD', 'fieldG', 'fieldX'))
        self.assertEqual(sort_unique_tuple(('fieldD', 'fieldH', 'fieldF'),
                                           TestModelFields),
                         ('fieldD', 'fieldF', 'fieldH'))
class TestSortTuples(TestCase):
    """sort_unique_tuples should sort each tuple's members and then order
    the tuples themselves deterministically.
    """
    def test(self):
        self.assertEqual(sort_unique_tuples((('fieldA',), ('fieldA', 'fieldD'),
                                             ('fieldC', 'fieldX', 'fieldB'),
                                             ('fieldC', 'fieldE', 'fieldH'),
                                             ('fieldA', 'fieldX', 'fieldC')),
                                            TestModelFields),
                         (('fieldA',), ('fieldA', 'fieldC', 'fieldX'),
                          ('fieldA', 'fieldD'), ('fieldB', 'fieldC', 'fieldX'),
                          ('fieldC', 'fieldE', 'fieldH')))
        # Already-ordered input comes back unchanged.
        self.assertEqual(sort_unique_tuples((('fieldA', 'fieldD'),
                                             ('fieldA', 'fieldE', 'fieldX')),
                                            TestModelFields),
                         (('fieldA', 'fieldD'),
                          ('fieldA', 'fieldE', 'fieldX')))
        self.assertEqual(sort_unique_tuples((('fieldA', 'fieldE', 'fieldX'),
                                             ('fieldA', 'fieldD')),
                                            TestModelFields),
                         (('fieldA', 'fieldD'),
                          ('fieldA', 'fieldE', 'fieldX')))
        # A tuple that is a prefix of another sorts before it.
        self.assertEqual(sort_unique_tuples((('fieldA', 'fieldD', 'fieldX'),
                                             ('fieldA', 'fieldD')),
                                            TestModelFields),
                         (('fieldA', 'fieldD'),
                          ('fieldA', 'fieldD', 'fieldX')))
        self.assertEqual(sort_unique_tuples((('fieldA', 'fieldE'),
                                             ('fieldA', 'fieldE', 'fieldX')),
                                            TestModelFields),
                         (('fieldA', 'fieldE'),
                          ('fieldA', 'fieldE', 'fieldX')))
        # Duplicate tuples are preserved, not collapsed.
        self.assertEqual(sort_unique_tuples((('fieldA', 'fieldD'),
                                             ('fieldA', 'fieldD')),
                                            TestModelFields),
                         (('fieldA', 'fieldD'), ('fieldA', 'fieldD')))
class TestDFS(TestCase):
    """Exercises dfs with a custom constraint plus the model's
    unique-together constraints: exactly 30 rows must be created and every
    row must satisfy the constraint function below.
    """
    def test(self):
        def func(cur_tuple, models, field):
            # Accept a partial assignment unless it violates:
            # fieldD % 3 == 1, and fieldE must equal fieldH (no XOR).
            dic = dict(cur_tuple)
            keys = dic.keys()
            if not 'fieldD' in keys:
                return True
            elif dic['fieldD'] % 3 != 1:
                return False
            if not ('fieldE' in keys and 'fieldH' in keys):
                return True
            elif dic['fieldE'] ^ dic['fieldH']:
                return False
            return True
        # dfs keeps its row quota / progress in function attributes.
        dfs.size = 30
        dfs.total = 0
        to_be_computed = []
        # Pinned prefix assignment for the first three fields.
        cur_tup = [('fieldA', 'X'), ('fieldB', 199), ('fieldC', 'general')]
        unique_together = TestModelFieldsTwo._meta.unique_together
        unique = list(unique_together)
        unique = sort_unique_tuples(unique, TestModelFieldsTwo)
        unique_constraints = [unique_items(un_tuple) for un_tuple in unique]
        constraints = [func] + unique_constraints
        dfs(30, cur_tup, 4, to_be_computed, constraints,
            TestModelFieldsTwo, False)
        self.assertEqual(len(list(TestModelFieldsTwo.objects.all())), 30)
        for mdl in list(TestModelFieldsTwo.objects.all()):
            # The pinned prefix values must survive unchanged.
            self.assertEqual(mdl.fieldA, 'X')
            self.assertEqual(mdl.fieldB, 199)
            self.assertEqual(mdl.fieldC, 'general')
            self.assertTrue(mdl.fieldD in [13, 19, 31, 43])
            self.assertTrue(mdl.fieldF in [6, 28, 496, 8128, 33550336])
            self.assertTrue(mdl.fieldG in ['Mathematics', 'Physics',
                                           'Chemistry', 'Biology'])
            self.assertTrue(not (mdl.fieldE ^ mdl.fieldH))
class TestGenerateModel(TestCase):
    """generate_model should create up to the requested number of rows,
    capped by the number of distinct sample-value combinations.
    """
    def test(self):
        generate_model(TestModelX, 5)
        self.assertEqual(len(TestModelX.objects.all()), 5)
        # 6 field1Y values * 3 field2Y values -> 18 rows per X row.
        generate_model(TestModelY, 95)
        generated_models = list(TestModelY.objects.all())
        length = len(generated_models)
        self.assertEqual(len(TestModelX.objects.all()) * 18, length)
        # Requests above the number of distinct combinations are capped.
        generate_model(TestModelA, 7)
        self.assertEqual(len(TestModelA.objects.all()), 6)
        generate_model(TestModelB, 17)
        self.assertEqual(len(TestModelB.objects.all()), 12)
        generate_model(TestModelC, 53)
        self.assertEqual(len(TestModelC.objects.all()), 12)
        for model in generated_models:
            self.assertTrue(isinstance(model, TestModelY))
            self.assertTrue(model.field1Y in [2, 3, 5, 7, 11, 13])
            self.assertTrue(model.field2Y in ['MMa', 'XXa', 'azz'])
            self.assertTrue(model.field3Y in TestModelX.objects.all())
        # ManyToMany fields that cannot be filled yet are returned as
        # (model, fields) for later computation.
        to_be_computed_test = generate_model(TestModelFieldsTwo, 50)
        self.assertTrue(to_be_computed_test)
        self.assertEqual(TestModelFieldsTwo, to_be_computed_test[0])
        self.assertTrue(to_be_computed_test[1])
        for fld in to_be_computed_test[1]:
            self.assertTrue(is_related(fld)
                            and 'ManyToMany' in relation_type(fld))
            self.assertEqual(fld.rel.to, TestModelE)
        generate_model(TestModelE, 2, shuffle=False)[0]
        generated_models = list(TestModelE.objects.all())
        for model in generated_models:
            self.assertTrue(isinstance(model, TestModelE))
            self.assertTrue(model.field4E in [1000000009, 1000003, 101])
            self.assertTrue(model.field1E in TestModelB.objects.all())
            self.assertTrue(all([x in TestModelA.objects.all()
                                 for x in model.field2E.all()]))
            self.assertTrue(model.field3E in TestModelC.objects.all())
class TestRecompute(TestCase):
    """recompute should fill relation fields that belong to a dependency
    cycle once all cycle members exist.
    """
    def test(self):
        # Build one instance of each cycle model in dependency order.
        c = CycleC(c='3.14159')
        c.save()
        d = CycleD(d=53, dc=c)
        d.save()
        b = CycleB(b=1000000009, bc=c)
        b.save()
        e = CycleE(e=17, ec=c, ed=d)
        e.save()
        a = CycleA(a=999, ab=b, ae=e)
        a.save()
        f = CycleF(f=123, fd=d)
        f.save()
        # Fill the relations that could not be set at creation time.
        recompute(CycleD, list_of_fields(CycleD)[2])
        recompute(CycleC, list_of_fields(CycleC)[1])
        recompute(CycleC, list_of_fields(CycleC)[3])
        self.assertTrue(CycleD.objects.all()[0].df)
        self.assertTrue(CycleC.objects.all()[0].ca)
        self.assertTrue(CycleC.objects.all()[0].cc.all())
class TestGenerateData(TestCase):
    """End-to-end check of generate_test_data: every model must receive
    rows, every non-auto field value must come from its sample values, and
    relational fields must reference generated rows.
    """
    def test(self):
        generate_test_data('tests.models', 10)
        length = len(list_of_models(mdls))
        # Track which model classes actually received generated rows.
        visited = dict(zip(list_of_models(mdls), length * [False]))
        pairs = []
        data_base = dict([(mdl, list(mdl.objects.all()))
                          for mdl in list_of_models(mdls)])
        generated_data = data_base.values()
        nodes = 0
        edges = 0
        for list_model in generated_data:
            for model in list_model:
                visited[model.__class__] = True
                fields = list_of_fields(model.__class__)
                nodes += 1
                for field in fields:
                    # Skip the pk and reverse relations.
                    if (not is_auto_field(field) and
                            not is_reverse_related(field)):
                        val = getattr(model, field.name)
                        if is_related(field):
                            if 'ManyToMany' in relation_type(field):
                                # M2M must be non-empty and reference
                                # generated rows only.
                                r = data_base[field.rel.to]
                                self.assertTrue(list(val.all()))
                                self.assertTrue(all([x in r for
                                                     x in list(val.all())]))
                            else:
                                r = data_base[field.rel.to]
                                self.assertTrue(val in r)
                            edges += 1
                        else:
                            # Walk up the inheritance chain until a class
                            # declaring sample data (TestData attribute or
                            # a TestTemplates sample file) is found.
                            this_model = field.model
                            while (this_model != Model and not
                                   (hasattr(this_model, 'TestData') and
                                    hasattr(this_model.TestData, field.name))
                                   and not os.path.exists(
                                   '%s/TestTemplates/sample__%s__%s' %
                                   (this_model._meta.app_label,
                                    this_model.__name__, field.name))):
                                this_model = this_model.__base__
                            if this_model == Model:
                                # No explicit samples: only AllFieldsModel
                                # relies on random generation; compare by
                                # value type instead of exact value.
                                self.assertEqual(model.__class__,
                                                 AllFieldsModel)
                                sample_values = field_sample_values(field)
                                # NOTE: unicode is Python 2 only.
                                if val.__class__ == unicode:
                                    val = str(val)
                                self.assertTrue(val.__class__ in
                                                map(lambda val: val.__class__,
                                                    sample_values))
                            elif (field.__class__.__name__ == 'DecimalField' or
                                  field.__class__.__name__ == 'FloatField'):
                                # Floating types are compared with a
                                # tolerance rather than exact equality.
                                sample_values = map(float,
                                                    field_sample_values(field))
                                val = float(val)
                                self.assertTrue(any(abs(val - fld_value) < 1e-5
                                                    for fld_value in
                                                    sample_values))
                            else:
                                sample_values = map(str,
                                                    field_sample_values(field))
                                val = str(val)
                                self.assertTrue(val in sample_values)
                if model.__class__ == TestModelFields:
                    # (fieldC, fieldA) must be unique across generated rows.
                    pr = (model.fieldC, model.fieldA)
                    self.assertFalse(pr in pairs)
                    pairs.append(pr)
                    self.assertTrue((model.fieldB < 50)
                                    or (model.fieldD / 2 % 2 == 1))
        self.assertTrue(all(visited.values()),
                        "Not all the models with sample data are generated.")
class TestDjenerator(TestCase):
    """djenerator should generate data for the whole app and dump a
    non-trivial fixture into the given file object.
    """
    def test(self):
        fl = tempfile.TemporaryFile()
        # Keyword overrides set a per-model row count (20 rows here).
        djenerator('tests', 1, fl, **{'AllFieldsModel': 20})
        self.assertEqual(len(AllFieldsModel.objects.all()), 20)
        fl.seek(0)
        length = len(fl.read())
        # The dumped output must have substantial content.
        self.assertGreater(length, 600)
class TestFieldsGeneratorNumbers(TestCase):
    """Checks the numeric value generators for type and range.

    NOTE: this module targets Python 2 (xrange / long).
    """
    def test(self):
        counts = {}
        for times in xrange(100):
            for bits in xrange(2, 64):
                for negative_allowed in xrange(0, 2):
                    gen_val = generate_integer(bits, negative_allowed)
                    self.assertIn(gen_val.__class__, [int, long])
                    if not negative_allowed:
                        self.assertGreaterEqual(gen_val, 0)
                        self.assertLess(gen_val, 2 ** (bits - 1))
                    else:
                        self.assertGreaterEqual(gen_val, -2 ** (bits - 1))
                        self.assertLess(gen_val, 2 ** (bits - 1))
            gen_val = generate_int()
            self.assertEqual(gen_val.__class__, int)
            self.assertLessEqual(abs(gen_val), 2 ** 31)
            self.assertLess(gen_val, 2 ** 31)
            gen_val = generate_big_integer()
            self.assertIn(gen_val.__class__, [int, long])
            self.assertLessEqual(abs(gen_val), 2 ** 63)
            self.assertLess(gen_val, 2 ** 63)
            gen_val = generate_small_integer()
            self.assertEqual(gen_val.__class__, int)
            self.assertLessEqual(abs(gen_val), 2 ** 15)
            self.assertLess(gen_val, 2 ** 15)
            gen_val = generate_positive_integer()
            self.assertIn(gen_val.__class__, [int, long])
            self.assertLess(gen_val, 2 ** 31)
            self.assertGreaterEqual(gen_val, 0)
            gen_val = generate_positive_small_integer()
            self.assertEqual(gen_val.__class__, int)
            self.assertLess(gen_val, 2 ** 15)
            self.assertGreaterEqual(gen_val, 0)
            gen_val = generate_boolean()
            self.assertEqual(gen_val.__class__, bool)
            # With the flag set, None is also an allowed result.
            gen_val = generate_boolean(True)
            self.assertTrue((gen_val is None) or (gen_val.__class__ == bool))
            gen_val = generate_ip()
            self.assertEqual(gen_val.__class__, str)
            ip_regex = r'^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$'
            match = re.search(ip_regex, gen_val)
            self.assertRegexpMatches(gen_val, ip_regex)
            self.assertIsNotNone(match)
            # Every dotted component must be a valid octet (0..255).
            match = map(int, match.groups())
            self.assertTrue(all([x in range(256) for x in match]))
            gen_val = generate_comma_separated_int(random.randint(1, 1000))
            self.assertEqual(gen_val.__class__, str)
            comma_sep_regex = r'^\d{1,3}(?:,\d{3})*$'
            self.assertRegexpMatches(gen_val, comma_sep_regex)
        for digits in xrange(50):
            for decimal in xrange(1, digits):
                gen_val = generate_decimal(digits, decimal)
                self.assertEqual(gen_val.__class__, Decimal)
                gen_val = str(gen_val)
                # Tally iterations via dict.get instead of a key-membership
                # branch.  NOTE(review): despite the key name, no check for
                # a '.' in the value is performed and `counts` is never
                # asserted on -- confirm whether this was intended.
                counts['decimal_contains_dot'] = (
                    counts.get('decimal_contains_dot', 0) + 1)
                # Total width (including the dot) and the number of
                # decimal places are both capped.
                self.assertLessEqual(len(gen_val), digits + 1, gen_val)
                self.assertLessEqual(len(gen_val.split('.')[1]),
                                     decimal + (decimal == 0), gen_val)
class TestFieldsGeneratorStringGenerators(TestCase):
    """Checks generate_sentence / generate_text output length and shape
    for various lengths, separators and end characters.
    """
    def test(self):
        # Very short lengths produce a sentence of exactly that length.
        for length in xrange(1, 3):
            gen_sentence = generate_sentence(length)
            self.assertEqual(len(gen_sentence), length)
        for length in xrange(3, 50):
            # NOTE: 'seperators' spelling matches the generator's keyword.
            seperators = [['.'], ['-', '_'], ['@']]
            for sep in seperators:
                for _ in xrange(20):
                    gen_val = generate_sentence(length, seperators=sep)
                    self.assertEqual(gen_val.__class__, str)
                    self.assertLessEqual(len(gen_val), length * 2)
                    reg = r'^(?:\w+(?:%s))*\w+\.$' % str.join('|', sep)
                    self.assertRegexpMatches(gen_val, reg)
            gen_text = generate_text(length)
            txt_re = r'^(?:(?:\w+\s?)+\.)+(?:\s(?:\w+\s?)+\.)*$'
            self.assertLessEqual(len(gen_text), length)
            self.assertRegexpMatches(gen_text, txt_re, gen_text)
            gen_sentence = generate_sentence(length)
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+\.$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # Optional terminal character: may end with '.' or nothing.
            gen_sentence = generate_sentence(length, end_char=['', '.'])
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+\.?$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # No terminal character at all.
            gen_sentence = generate_sentence(length, end_char=None)
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
            # Terminal character chosen from a custom set.
            gen_sentence = generate_sentence(length, end_char=['.', ','])
            self.assertLessEqual(len(gen_sentence), length)
            sent_re = r'^(?:\w+\s?)+[\.,]$'
            self.assertRegexpMatches(gen_sentence, sent_re, gen_sentence)
class TestFieldsGeneratorChar(TestCase):
    """Checks generate_string character-class options and length handling,
    plus the derived email/URL generators.
    """
    def test(self):
        # Map ASCII chars to codes and build inclusive character ranges.
        ascii_val = dict([(chr(n), n) for n in xrange(128)])
        ascii_rng = lambda beg, end: xrange(ascii_val[beg], ascii_val[end] + 1)
        chr_range = lambda beg, end: map(chr, ascii_rng(beg, end))
        for log in xrange(0, 6):
            # Sample a few lengths from each decade [10^log, 10^(log+1)].
            lengths = random.sample(range(10 ** log,
                                          10 ** (log + 1) + 1 - bool(log)), 10)
            for length in lengths:
                # Exercise every combination of the six boolean options.
                for tup in itertools.product(*zip(6 * [True], 6 * [False])):
                    lower, upper, digits, special, null_allowed, exact = tup
                    if random.randint(1, 6) < 3:
                        # Occasionally use an explicit special-char list.
                        special = ['@', '!', '~']
                    if not (lower or upper or digits or special):
                        continue
                    gen_val = generate_string(length, lower, upper, digits,
                                              special, null_allowed, exact)
                    existing_chars = set([])
                    for char in gen_val:
                        existing_chars.add(char)
                    # Build the set of characters that must NOT appear.
                    excluded = []
                    if not upper:
                        excluded.extend(chr_range('A', 'Z'))
                    if not lower:
                        excluded.extend(chr_range('a', 'z'))
                    if not digits:
                        excluded.extend(chr_range('0', '9'))
                    if not special:
                        # All four ASCII punctuation ranges are forbidden.
                        excluded.extend(chr_range('!', '/'))
                        excluded.extend(chr_range(':', '@'))
                        excluded.extend(chr_range('[', '`'))
                        excluded.extend(chr_range('{', '~'))
                    else:
                        if isinstance(special, list):
                            # Only the explicitly listed specials are
                            # allowed; exclude the rest of the punctuation.
                            special_excluded = []
                            special_excluded.extend(chr_range('!', '/'))
                            special_excluded.extend(chr_range(':', '@'))
                            special_excluded.extend(chr_range('[', '`'))
                            special_excluded.extend(chr_range('{', '~'))
                            special_excluded = set(special_excluded)
                            special_excluded = special_excluded - set(special)
                            excluded.extend(list(special_excluded))
                    self.assertFalse(existing_chars & set(excluded),
                                     str(existing_chars) +
                                     str(set(excluded) & existing_chars))
                    if exact:
                        self.assertEqual(len(gen_val), length)
                    elif not null_allowed:
                        self.assertGreater(len(gen_val), 0)
                    self.assertGreaterEqual(len(gen_val), 0)
                    self.assertLessEqual(len(gen_val), length)
                email = generate_email(length)
                self.assertTrue(isinstance(email, str), email)
                self.assertLessEqual(len(email), length)
                if length >= 7:
                    # Only lengths >= 7 can hold a well-formed address.
                    email_reg = r'^\w+(?:\.\w+)*@(?:[A-Za-z0-9]+\.)+[A-Za-z]+$'
                    self.assertRegexpMatches(email, email_reg)
                url = generate_url(length)
                self.assertTrue(isinstance(url, str), url)
                self.assertLessEqual(len(url), length)
                if length >= 16:
                    url_re = r'^(?:http|ftp|https)://(?:[a-z0-9_\-]+\.?)+/?'
                    url_re += r'(?:/[a-z0-9_\-]+)*/?$'
                    self.assertRegexpMatches(url, url_re)
class TestFieldsGeneratorDateTime(TestCase):
    """Checks the date/time generators' result types, and that the
    True-flag variants track the current clock (asserted below).
    """
    def test(self):
        for _ in xrange(10000):
            gen_val = generate_date_time()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.datetime)
            gen_val = generate_time()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.time)
            gen_val = generate_date()
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.date)
        for _ in xrange(100):
            # With the flag set, the value must be within ~1ms of now.
            gen_val = generate_date_time(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.datetime)
            now = datetime.datetime.now()
            self.assertLess(abs((gen_val - now).total_seconds()), 10e-4)
            gen_val = generate_time(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.time)
            now = datetime.datetime.now().time()
            # Compare times as seconds since midnight.
            gen_val_hash = gen_val.second
            gen_val_hash += gen_val.hour * 3600 + gen_val.minute * 60
            now_hash = now.hour * 3600 + now.minute * 60 + now.second
            self.assertLessEqual(gen_val_hash, now_hash + 1)
            gen_val = generate_date(True)
            self.assertTrue(gen_val)
            self.assertEqual(gen_val.__class__, datetime.date)
            now = datetime.datetime.now().date()
            self.assertEqual(gen_val, now)
class TestFileGenerators(TestCase):
    """Checks the file-name, file-path and PNG-bytes generators."""
    def test(self):
        for _ in xrange(20):
            ext = random.choice(['txt', 'rst', 'md'])
            name = generate_file_name(12, ext)
            self.assertTrue(isinstance(name, str), name)
            self.assertLessEqual(len(name), 12, name)
            self.assertRegexpMatches(name, r'[a-zA-Z_]*\.' + ext, name)
        path = generate_file_path()
        self.assertTrue(os.path.exists(path), path)
        img = generate_png(500, 120)
        # PNG signature followed by the mandatory chunk types.
        beg = b'\x89\x50\x4e\x47\x0d\x0a\x1a\x0a'
        self.assertTrue(img.startswith(beg))
        self.assertTrue(b'IHDR' in img)
        self.assertTrue(b'IEND' in img)
        self.assertTrue(b'IDAT' in img)
| |
import os
import re
import json
from collections import Iterable
from charmhelpers.core import host
from charmhelpers.core import hookenv
__all__ = ['ServiceManager', 'ManagerCallback',
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
'service_restart', 'service_stop']
class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Traditional charm authoring is focused on implementing hooks. That is,
        the charm author is thinking in terms of "What hook am I handling; what
        does this hook need to do?" However, in most cases, the real question
        should be "Do I have the information I need to configure and start this
        piece of software and, if so, what are the steps for doing so?" The
        ServiceManager framework tries to bring the focus to the data and the
        setup tasks, in the most declarative way possible.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
        information.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`. Each callback will be called with the service name as the
        only parameter. After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete. Each callback will be called with the service
        name as the only parameter. This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service. If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage. The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.

        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        # JSON file used to persist which services were data_ready across
        # separate hook invocations.
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None  # lazily-loaded set of ready service names
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service
    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        try:
            if hook_name == 'stop':
                self.stop_services()
            else:
                self.update_status_working()
                self.provide_data()
                self.reconfigure_services()
                self.update_status_done()
        except Exception as e:
            # Surface the failure in the unit's status before re-raising so
            # the hook still fails visibly.
            # NOTE(review): ``hookenv.juju_status`` is not part of stock
            # charmhelpers -- presumably a project-local extension; confirm.
            hookenv.juju_status('error', message=str(e))
            raise
    def provide_data(self):
        # Publish each service's 'provided_data' onto the relation, but only
        # while the matching <name>-relation-joined/changed hook is running
        # and only if the provider considers the data complete.
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    if provider._is_ready(data):
                        hookenv.relation_set(None, data)
    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.
        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                # Only fire 'data_lost'/'stop' on a ready -> not-ready
                # transition; a service that was never ready stays untouched.
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)
    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.
        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])
    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.

        Raises KeyError if the service was never registered.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service
    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.

        Plain callbacks receive the service name; ManagerCallback instances
        additionally receive this manager and the event name.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        if not isinstance(callbacks, Iterable):
            # single callback given; normalise to a list
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)
    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.
        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)
    def _load_ready_file(self):
        # Populate self._ready from disk (once); missing file means no
        # service has ever been data_ready.
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()
    def _save_ready_file(self):
        # Persist self._ready back to disk; no-op if never loaded.
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)
    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()
    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()
    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready
    def update_status_working(self):
        # Flip a 'blocked' unit to 'churning' while we reconfigure.
        # NOTE(review): see juju_status note in manage() -- non-stock API.
        if hookenv.juju_status()['status'] == 'blocked':
            hookenv.juju_status('churning')
    def update_status_done(self):
        # Report 'up' unless some required_data is missing, in which case
        # report 'blocked' along with the names of the blocking contexts.
        status = 'up'
        manual = False
        blockers = []
        for service in self.services.values():
            for req in service.get('required_data', []):
                if not bool(req):
                    status = 'blocked'
                    if getattr(req, 'manual', False):
                        manual = True
                    if hasattr(req, 'name'):
                        blockers.append(req.name)
        hookenv.juju_status(status, manual=manual, blockers=blockers)
class ManagerCallback(object):
    """
    Base class for callbacks that need the `ServiceManager` itself.

    Unlike plain callbacks (which are invoked with just the service name),
    instances of this class are invoked with three arguments:

        * `manager`      the `ServiceManager` instance
        * `service_name` the service the event is firing for
        * `event_name`   the event being handled (e.g. 'start', 'stop')

    Subclasses must override `__call__`.
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.

    A per-service dotfile (``.<service>.ports``) in the charm directory
    remembers which ports were previously managed, so ports removed from
    the service definition get closed on the next invocation.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            # Close any previously-opened port that is no longer listed.
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):  # skip the '' produced by an empty file
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        # Record the current port list for the next invocation.
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        # Open or close the whole current list depending on the event.
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
def service_stop(service_name):
    """
    Stop a service via host.service_stop, but only when it is actually
    running -- avoids spurious "unknown service" messages in the logs.
    """
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
def service_restart(service_name):
    """
    Restart (or start, if stopped) a service via host, guarding against
    services unknown to the init system to avoid spurious log messages.
    """
    if not host.service_available(service_name):
        return
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
# Convenience aliases
# A single shared PortManagerCallback instance backs all three names; it
# decides whether to open or close ports from the event name it receives.
open_ports = close_ports = manage_ports = PortManagerCallback()
| |
# coding: utf-8
"""
"""
import ctypes
import time
import sys
import argparse
import re
parser = argparse.ArgumentParser()
parser.add_argument("--file", "-f", type=str, required=True)
parser.add_argument("--title", "-t", type=str, required=True)
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
VK_LBUTTON = 0x01 # Left mouse button
VK_RBUTTON = 0x02 # Right mouse button
VK_CANCEL = 0x03 # Control-break processing
VK_MBUTTON = 0x04 # Middle mouse button (three-button mouse)
VK_XBUTTON1 = 0x05 # X1 mouse button
VK_XBUTTON2 = 0x06 # X2 mouse button
VK_BACK = 0x08 # BACKSPACE key
VK_TAB = 0x09 # TAB key
VK_CLEAR = 0x0C # CLEAR key
VK_RETURN = 0x0D # ENTER key
VK_SHIFT = 0x10 # SHIFT key
VK_CONTROL = 0x11 # CTRL key
VK_MENU = 0x12 # ALT key
VK_PAUSE = 0x13 # PAUSE key
VK_CAPITAL = 0x14 # CAPS LOCK key
VK_KANA = 0x15 # IME Kana mode
VK_HANGUL = 0x15 # IME Hangul mode
VK_JUNJA = 0x17 # IME Junja mode
VK_FINAL = 0x18 # IME final mode
VK_HANJA = 0x19 # IME Hanja mode
VK_KANJI = 0x19 # IME Kanji mode
VK_ESCAPE = 0x1B # ESC key
VK_CONVERT = 0x1C # IME convert
VK_NONCONVERT = 0x1D # IME nonconvert
VK_ACCEPT = 0x1E # IME accept
VK_MODECHANGE = 0x1F # IME mode change request
VK_SPACE = 0x20 # SPACEBAR
VK_PRIOR = 0x21 # PAGE UP key
VK_NEXT = 0x22 # PAGE DOWN key
VK_END = 0x23 # END key
VK_HOME = 0x24 # HOME key
VK_LEFT = 0x25 # LEFT ARROW key
VK_UP = 0x26 # UP ARROW key
VK_RIGHT = 0x27 # RIGHT ARROW key
VK_DOWN = 0x28 # DOWN ARROW key
VK_SELECT = 0x29 # SELECT key
VK_PRINT = 0x2A # PRINT key
VK_EXECUTE = 0x2B # EXECUTE key
VK_SNAPSHOT = 0x2C # PRINT SCREEN key
VK_INSERT = 0x2D # INS key
VK_DELETE = 0x2E # DEL key
VK_HELP = 0x2F # HELP key
VK_LWIN = 0x5B # Left Windows key (Natural keyboard)
VK_RWIN = 0x5C # Right Windows key (Natural keyboard)
VK_APPS = 0x5D # Applications key (Natural keyboard)
VK_SLEEP = 0x5F # Computer Sleep key
VK_NUMPAD0 = 0x60 # Numeric keypad 0 key
VK_NUMPAD1 = 0x61 # Numeric keypad 1 key
VK_NUMPAD2 = 0x62 # Numeric keypad 2 key
VK_NUMPAD3 = 0x63 # Numeric keypad 3 key
VK_NUMPAD4 = 0x64 # Numeric keypad 4 key
VK_NUMPAD5 = 0x65 # Numeric keypad 5 key
VK_NUMPAD6 = 0x66 # Numeric keypad 6 key
VK_NUMPAD7 = 0x67 # Numeric keypad 7 key
VK_NUMPAD8 = 0x68 # Numeric keypad 8 key
VK_NUMPAD9 = 0x69 # Numeric keypad 9 key
VK_MULTIPLY = 0x6A # Multiply key
VK_ADD = 0x6B # Add key
VK_SEPARATOR = 0x6C # Separator key
VK_SUBTRACT = 0x6D # Subtract key
VK_DECIMAL = 0x6E # Decimal key
VK_DIVIDE = 0x6F # Divide key
VK_F1 = 0x70 # F1 key
VK_F2 = 0x71 # F2 key
VK_F3 = 0x72 # F3 key
VK_F4 = 0x73 # F4 key
VK_F5 = 0x74 # F5 key
VK_F6 = 0x75 # F6 key
VK_F7 = 0x76 # F7 key
VK_F8 = 0x77 # F8 key
VK_F9 = 0x78 # F9 key
VK_F10 = 0x79 # F10 key
VK_F11 = 0x7A # F11 key
VK_F12 = 0x7B # F12 key
VK_F13 = 0x7C # F13 key
VK_F14 = 0x7D # F14 key
VK_F15 = 0x7E # F15 key
VK_F16 = 0x7F # F16 key
VK_F17 = 0x80 # F17 key
VK_F18 = 0x81 # F18 key
VK_F19 = 0x82 # F19 key
VK_F20 = 0x83 # F20 key
VK_F21 = 0x84 # F21 key
VK_F22 = 0x85 # F22 key
VK_F23 = 0x86 # F23 key
VK_F24 = 0x87 # F24 key
VK_NUMLOCK = 0x90 # NUM LOCK key
VK_SCROLL = 0x91 # SCROLL LOCK key
VK_LSHIFT = 0xA0 # Left SHIFT key
VK_RSHIFT = 0xA1 # Right SHIFT key
VK_LCONTROL = 0xA2 # Left CONTROL key
VK_RCONTROL = 0xA3 # Right CONTROL key
VK_LMENU = 0xA4 # Left MENU key
VK_RMENU = 0xA5 # Right MENU key
VK_BROWSER_BACK = 0xA6 # Browser Back key
VK_BROWSER_FORWARD = 0xA7 # Browser Forward key
VK_BROWSER_REFRESH = 0xA8 # Browser Refresh key
VK_BROWSER_STOP = 0xA9 # Browser Stop key
VK_BROWSER_SEARCH = 0xAA # Browser Search key
VK_BROWSER_FAVORITES = 0xAB # Browser Favorites key
VK_BROWSER_HOME = 0xAC # Browser Start and Home key
VK_VOLUME_MUTE = 0xAD # Volume Mute key
VK_VOLUME_DOWN = 0xAE # Volume Down key
VK_VOLUME_UP = 0xAF # Volume Up key
VK_MEDIA_NEXT_TRACK = 0xB0 # Next Track key
VK_MEDIA_PREV_TRACK = 0xB1 # Previous Track key
VK_MEDIA_STOP = 0xB2 # Stop Media key
VK_MEDIA_PLAY_PAUSE = 0xB3 # Play/Pause Media key
VK_LAUNCH_MAIL = 0xB4 # Start Mail key
VK_LAUNCH_MEDIA_SELECT = 0xB5 # Select Media key
VK_LAUNCH_APP1 = 0xB6 # Start Application 1 key
VK_LAUNCH_APP2 = 0xB7 # Start Application 2 key
VK_OEM_1 = 0xBA # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the ';:' key
VK_OEM_PLUS = 0xBB # For any country/region, the '+' key
VK_OEM_COMMA = 0xBC # For any country/region, the ',' key
VK_OEM_MINUS = 0xBD # For any country/region, the '-' key
VK_OEM_PERIOD = 0xBE # For any country/region, the '.' key
VK_OEM_2 = 0xBF # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '/?' key
VK_OEM_3 = 0xC0 # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '`~' key
VK_OEM_4 = 0xDB # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '[{' key
VK_OEM_5 = 0xDC # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the '\|' key
VK_OEM_6 = 0xDD # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the ']}' key
VK_OEM_7 = 0xDE # Used for miscellaneous characters; it can vary by keyboard.
# For the US standard keyboard, the 'single-quote/double-quote' key
VK_OEM_8 = 0xDF # Used for miscellaneous characters; it can vary by keyboard.
VK_OEM_102 = 0xE2 # Either the angle bracket key or the backslash key on the RT 102-key keyboard
VK_PROCESSKEY = 0xE5 # IME PROCESS key
VK_PACKET = 0xE7 # Used to pass Unicode characters as if they were keystrokes. The VK_PACKET key is the low word of a 32-bit Virtual Key value used for non-keyboard input methods. For more information, see Remark in KEYBDINPUT, SendInput, WM_KEYDOWN, and WM_KEYUP
VK_ATTN = 0xF6 # Attn key
VK_CRSEL = 0xF7 # CrSel key
VK_EXSEL = 0xF8 # ExSel key
VK_EREOF = 0xF9 # Erase EOF key
VK_PLAY = 0xFA # Play key
VK_ZOOM = 0xFB # Zoom key
VK_PA1 = 0xFD # PA1 key
VK_OEM_CLEAR = 0xFE # Clear key
# NOTE(review): these four KEYEVENTF_* flags are exact re-definitions of
# the identical assignments made earlier in this module; harmless but
# redundant.
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_SCANCODE = 0x0008
KEYEVENTF_UNICODE = 0x0004
KEY_0 = 0x30
KEY_1 = 0x31
KEY_2 = 0x32
KEY_3 = 0x33
KEY_4 = 0x34
KEY_5 = 0x35
KEY_6 = 0x36
KEY_7 = 0x37
KEY_8 = 0x38
KEY_9 = 0x39
KEY_A = 0x41
KEY_B = 0x42
KEY_C = 0x43
KEY_D = 0x44
KEY_E = 0x45
KEY_F = 0x46
KEY_G = 0x47
KEY_H = 0x48
KEY_I = 0x49
KEY_J = 0x4A
KEY_K = 0x4B
KEY_L = 0x4C
KEY_M = 0x4D
KEY_N = 0x4E
KEY_O = 0x4F
KEY_P = 0x50
KEY_Q = 0x51
KEY_R = 0x52
KEY_S = 0x53
KEY_T = 0x54
KEY_U = 0x55
KEY_V = 0x56
KEY_W = 0x57
KEY_X = 0x58
KEY_Y = 0x59
KEY_Z = 0x5A
class MOUSEINPUT(ctypes.Structure):
    # ctypes mirror of the Win32 MOUSEINPUT structure (used with SendInput).
    _fields_ = (('dx', LONG),
                ('dy', LONG),
                ('mouseData', DWORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
    # ctypes mirror of the Win32 KEYBDINPUT structure (used with SendInput).
    _fields_ = (('wVk', WORD),
                ('wScan', WORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
    # ctypes mirror of the Win32 HARDWAREINPUT structure.
    _fields_ = (('uMsg', DWORD),
                ('wParamL', WORD),
                ('wParamH', WORD))
class _INPUTunion(ctypes.Union):
    # Anonymous union from the Win32 INPUT structure: exactly one of the
    # mouse/keyboard/hardware payloads is meaningful at a time.
    _fields_ = (('mi', MOUSEINPUT),
                ('ki', KEYBDINPUT),
                ('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
    # Win32 INPUT structure: 'type' selects which union member is active
    # (INPUT_MOUSE / INPUT_KEYBOARD / INPUT_HARDWARE).
    _fields_ = (('type', DWORD),
                ('union', _INPUTunion))
def send_input(*inputs):
    """Inject the given INPUT events via the Win32 SendInput API.

    Returns SendInput's return value: the number of events successfully
    inserted into the input stream.
    """
    count = len(inputs)
    event_array = (INPUT * count)(*inputs)
    struct_size = ctypes.c_int(ctypes.sizeof(INPUT))
    return ctypes.windll.user32.SendInput(count, event_array, struct_size)
def input_structure(structure):
    """Wrap a MOUSEINPUT/KEYBDINPUT/HARDWAREINPUT payload in a typed INPUT.

    Raises TypeError for any other payload type.
    """
    if isinstance(structure, MOUSEINPUT):
        wrapped = INPUT(INPUT_MOUSE, _INPUTunion(mi=structure))
    elif isinstance(structure, KEYBDINPUT):
        wrapped = INPUT(INPUT_KEYBOARD, _INPUTunion(ki=structure))
    elif isinstance(structure, HARDWAREINPUT):
        wrapped = INPUT(INPUT_HARDWARE, _INPUTunion(hi=structure))
    else:
        raise TypeError('Cannot create INPUT structure!')
    return wrapped
def keyboard_input(code, flags):
    # Build a KEYBDINPUT with wVk=0 and the code in the wScan field.
    # NOTE(review): placing the code in wScan rather than wVk only makes
    # sense together with KEYEVENTF_SCANCODE or KEYEVENTF_UNICODE -- confirm
    # callers always pass one of those flags.
    return KEYBDINPUT(0, code, flags, 0, None)
def mouse_input(flags, x, y, data):
    # Build a MOUSEINPUT: x/y map to dx/dy, `data` to mouseData
    # (e.g. a wheel delta), with time=0 (system supplies a timestamp).
    return MOUSEINPUT(x, y, data, flags, 0, None)
def hardware_input(message, parameter):
    """Build a HARDWAREINPUT from a message and a packed 32-bit parameter.

    The low word of `parameter` becomes wParamL, the high word wParamH.
    """
    low_word = parameter & 0xFFFF
    high_word = (parameter >> 16) & 0xFFFF
    return HARDWAREINPUT(message & 0xFFFFFFFF, low_word, high_word)
def Mouse(flags, x=0, y=0, data=0):
    # Convenience wrapper: a ready-to-send INPUT holding a mouse event.
    return input_structure(mouse_input(flags, x, y, data))
def Keyboard(code, flags=0):
    # Convenience wrapper: a ready-to-send INPUT holding a keyboard event.
    return input_structure(keyboard_input(code, flags))
def Hardware(message, parameter=0):
    # Convenience wrapper: a ready-to-send INPUT holding a hardware event.
    return input_structure(hardware_input(message, parameter))
class WindowMgr:
    """Encapsulates some calls to the winapi for window management"""
    # Win32 constants used by this class (values from winuser.h).
    BLACK_BRUSH = 4
    CS_HREDRAW = 2
    CS_VREDRAW = 1
    PM_NOREMOVE = 0
    SM_CXSCREEN = 0
    SM_CYSCREEN = 1
    SW_SHOW = 5
    SW_SHOWNORMAL = 1
    SWP_NOMOVE = 2
    SWP_NOSIZE = 1
    WM_CLOSE = 16
    WM_DESTROY = 2
    WM_KEYDOWN = 256
    WM_KEYUP = 257
    WM_CHAR = 0x0102
    WM_LBUTTONDOWN = 513
    WM_MBUTTONDOWN = 519
    WM_MOUSEMOVE = 512
    WM_PAINT = 15
    WM_QUIT = 18
    WM_RBUTTONDOWN = 516
    WS_EX_TOPMOST = 8
    WS_POPUP = 0x80000000
    WS_VISIBLE = 0x10000000
    def __init__ (self):
        """Constructor"""
        # Handle (HWND) of the most recently found window, or None.
        self._handle = None
    def ZeroChk (self, result):
        # Raise the last Win32 error if a winapi call returned 0 (failure).
        if result == 0:
            print(result)
            raise ctypes.WinError(ctypes.windll.kernel32.GetLastError())
        return result
    def find_window(self, class_name, window_name = None):
        """find a window by its class_name"""
        self._handle = ctypes.windll.user32.FindWindow(class_name, window_name)
    def _window_enum_callback(self, hwnd, wildcard):
        # EnumWindows callback: record hwnd if the window title ends with
        # the wildcard string (both sides encoded to bytes for comparison).
        wildcard = ctypes.cast(wildcard, ctypes.c_wchar_p).value.encode('utf-8', 'replace')
        length = ctypes.windll.user32.GetWindowTextLengthW(hwnd) + 1
        buff = ctypes.create_unicode_buffer(length)
        ctypes.windll.user32.GetWindowTextW(hwnd, buff, length)
        wintitle = ctypes.cast(buff, ctypes.c_wchar_p).value.encode('utf-8', 'replace')
        #print("len =", length, repr(buff.value))
        #if re.search(wildcard, wintitle) is not None:
        # NOTE(review): EnumWindows keeps enumerating only while the
        # callback returns TRUE; returning False here for a NON-matching
        # window stops enumeration at the first mismatch and makes
        # EnumWindows report failure (tripping ZeroChk).  The True/False
        # returns look inverted -- confirm against the EnumWindowsProc docs.
        if wintitle.endswith(wildcard):
            #print(repr(buff.value))
            self._handle = hwnd
            return True
        else:
            return False
    def find_window_wildcard(self, wildcard):
        # Enumerate all top-level windows, matching titles via
        # _window_enum_callback.
        # NOTE(review): the WINFUNCTYPE prototype uses POINTER(c_int) for
        # both HWND and LPARAM; works in practice but the documented
        # EnumWindowsProc signature is (HWND, LPARAM) -- confirm.
        self._handle = None
        enum_windows_proc = \
            ctypes.WINFUNCTYPE(ctypes.c_bool, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
        self.ZeroChk(ctypes.windll.user32.EnumWindows(enum_windows_proc(self._window_enum_callback), wildcard))
    def set_foreground(self):
        """put the window in the foreground"""
        ctypes.windll.user32.ShowWindow(self._handle, self.SW_SHOWNORMAL);
        ctypes.windll.user32.BringWindowToTop(self._handle);
        self.ZeroChk(ctypes.windll.user32.SetForegroundWindow(self._handle))
    def send_keypress(self, vkCode, flags=0):
        # Post a window message (``flags`` is actually the message id,
        # e.g. WM_KEYDOWN/WM_KEYUP) carrying ``vkCode`` as wParam.
        hwnd = self._handle
        #keyState = ctypes.windll.user32.GetAsyncKeyState(vkCode)
        # NOTE(review): 0x101 is WM_KEYUP; the special-cased lParam 0x31
        # (repeat count 49?) is unexplained -- confirm intent.
        if vkCode==0x101:
            lParam = 0x31
        else:
            lParam = 0
        #hwnd = ctypes.windll.user32.GetForegroundWindow()
        #print("hwnd:", hwnd, "self:", ctypes.cast(self._handle, ctypes.c_void_p).value)
        #print("key state: ",keyState)
        self.ZeroChk(ctypes.windll.user32.PostMessageW(hwnd, flags, vkCode, lParam))
        #self.ZeroChk(ctypes.windll.user32.PostMessageW(hwnd, self.WM_CHAR, code, ctypes.windll.user32.MapVirtualKeyW(code, 1)))
        #time.sleep(0.1)
        #self.ZeroChk(ctypes.windll.user32.PostMessageW(hwnd, self.WM_KEYUP, code, 65539) - 1)
def main():
    """Replay an input-event script (--file) against the window whose title
    ends with --title.

    Each non-comment script line has the form ``evt,flags,data,hold`` where
    ``evt`` is a hex INPUT_* type, ``flags`` a hex message id, ``data`` a
    ':'-separated list of hex key codes, and ``hold`` a post-line delay in
    milliseconds.
    """
    args = parser.parse_args()
    w = WindowMgr()
    w.find_window_wildcard(args.title)  # e.g. "Deus Ex: Revision"
    w.set_foreground()
    delay = 10  # inter-event delay, milliseconds
    # BUG FIX: the original tested ``line.startswith(("#",",",":","/n"))``
    # -- "/n" is a typo for the newline escape, so blank lines were never
    # skipped.  It also closed the file in a ``finally`` that raised
    # NameError whenever open() itself failed; ``with`` handles both.
    with open(args.file, 'r') as script:
        for line in script:
            if not line.strip() or line.startswith(("#", ",", ":")):
                continue
            evt, flags, data, hold = line.split(",")
            sdata = data.split(":")
            if int(evt, 16) == INPUT_MOUSE:
                # NOTE(review): mouse replay looks unfinished -- events are
                # only printed, never sent.  (The original unpacked each
                # *string* element as ``x, y, code``, which crashes unless
                # every element is exactly three characters.)
                for item in sdata:
                    print(item)
                    time.sleep(int(delay) / 1000)
            elif int(evt, 16) == INPUT_KEYBOARD:
                for code in sdata:
                    print(code, flags)
                    #send_input(Keyboard(int(code,16),int(flags,16)-0x100))
                    w.send_keypress(int(code, 16), int(flags, 16))
                    time.sleep(int(delay) / 1000)
            time.sleep(int(hold) / 1000)


if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
import asyncio
import datetime
from irc3.plugins.command import command
from irc3.utils import IrcString
from irc3 import event
import irc3
# str.format templates for announcing motion results to the channel.
MOTION_RESULT_LIST = 'Ayes: {ayes}; Nays: {nays}; Abstains: {abstains}'
MOTION_RESULT_COUNT = 'Ayes: {ayes}; Nays: {nays}; Abstains: {abstains}; TOTAL: {total}'
MOTION_EXTERNAL_VOTES = '[+] External ayes: {ayes}; External nays: {nays}'
MOTION_LAPSES_QUORUM = '*** Result: Motion lapses. Quorum of {quorum} not met.'
MOTION_LAPSES_PC = '*** Result: Motion lapses. {in_favour:.2f}% in favour.'
MOTION_CARRIES = '*** Result: Motion carries. {in_favour:.2f}% in favour.'
@irc3.plugin
class Motions(object):
    """irc3 plugin that chairs IRC meetings.

    Tracks per-channel state: recognised (auto-voiced) users, the current
    meeting (name + quorum) and the current motion (text, proposer, votes).
    Ops drive the meeting with the add/quorum/meeting/motion/start/stop
    commands; voiced users vote by saying "aye", "nay" or "abstain".
    """
    def __init__(self, bot):
        bot.include('casemapping')
        bot.include('mappinguserlist')
        bot.include('irc3.plugins.async')
        bot.include('irc3.plugins.core')
        self.bot = bot
        self.name = bot.config.get('name', 'motionbot')
        # per-channel state dicts, keyed by casefolded channel name
        self.states = {}
        self.db = None
        # setup database if we're using one
        db_uri = self.bot.config.get('database', None)
        if db_uri:
            from pymongo import MongoClient
            self.db_client = MongoClient(db_uri)
            self.db = self.db_client.motionbot
            print('Using database')
        else:
            print('Not using database')

    def _async_plugin(self):
        # ``self.bot.async`` cannot be written as an attribute access on
        # Python >= 3.7, where ``async`` is a reserved keyword (it was a
        # SyntaxError waiting to happen); newer irc3 exposes the plugin as
        # ``async_cmds``.  Support both via getattr.
        return getattr(self.bot, 'async_cmds', None) or getattr(self.bot, 'async')

    # channel permissions
    def is_voice(self, mask, target):
        """Return True if the masked user has voice (+v) in target."""
        target = self.bot.casefold(target)
        if not isinstance(mask, IrcString):
            mask = IrcString(mask)
        return mask.nick in self.bot.channels[target].modes['+']

    def is_admin(self, mask, target):
        """Return True if the masked user is halfop/op or above in target."""
        target = self.bot.casefold(target)
        if not isinstance(mask, IrcString):
            mask = IrcString(mask)
        # we consider halfop/op and above admins
        prefixes = self.bot.config['server_config'].get('PREFIX', '(ov)@+')
        prefixes = prefixes.split(')')[1]
        # NOTE(review): assumes the server always advertises '@' (or '%')
        # in PREFIX; otherwise .index() raises ValueError.
        admin_prefix_index = prefixes.index('%') if '%' in prefixes else prefixes.index('@')
        admin_prefixes = prefixes[:admin_prefix_index + 1]
        for prefix in admin_prefixes:
            if mask.nick in self.bot.channels[target].modes[prefix]:
                return True
        return False

    # channel info init
    @event(irc3.rfc.JOIN)
    def join_chan(self, mask=None, channel=None):
        """Upon joining a channel, keep track of the channel state."""
        channel = self.bot.casefold(channel)
        if mask.nick == self.bot.nick:
            # our own join: initialise fresh per-channel state
            self.states[channel] = {
                'recognised': [],
                'meeting': {
                    'name': '',
                    'started': False,
                    'quorum': 0,
                },
                'motion': {
                    'text': '',
                    'put_by': '',
                    'started': False,
                    'votes': {},
                },
            }
        else:
            # someone else joined: re-voice them if previously recognised
            userhost = mask.split('!', 1)[1]
            if userhost in self.states[channel]['recognised']:
                nick = self.bot.casefold(mask.nick)
                self.bot.mode(channel, '+v {}'.format(nick))
        if self.db:
            # sync the recognised-user list with the database
            doc = self.db.recognised.find_one({
                'bot': self.name,
                'channel': channel,
            })
            if doc:
                self.states[channel]['recognised'] = doc['users']
            else:
                self.db.recognised.insert_one({
                    'bot': self.name,
                    'channel': channel,
                    'users': [],
                })

    # op commands
    @command()
    @asyncio.coroutine
    def add(self, mask, target, args):
        """Recognise a user.
        %%add <nick>
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        channel = self.bot.casefold(target)
        # voice user
        nick = args['<nick>'].lower()
        if not (self.is_admin(nick, channel) or self.is_voice(nick, channel)):
            if self.is_admin(self.bot.nick, channel):
                self.bot.mode(channel, '+v {}'.format(nick))
            else:
                self.bot.notice(channel, '*** I am not opped and cannot voice user.')
        # add user to our recognised list (whois gives us user@host)
        info = yield from self._async_plugin().whois(nick=nick)
        if not info['success']:
            self.bot.notice(channel, '*** Could not add user to recognised list.')
            return
        userhost = '{username}@{host}'.format(**info)
        if userhost not in self.states[channel]['recognised']:
            self.states[channel]['recognised'].append(userhost)
            if self.db:
                self.db.recognised.update_one({
                    'bot': self.name,
                    'channel': channel,
                }, {
                    '$push': {
                        'users': userhost,
                    }
                })

    @command()
    def quorum(self, mask, target, args):
        """Set or see the quorum for the current meeting.
        %%quorum [<number>]
        """
        # we only care about ops and commands to channels
        if not target.is_channel:
            return
        number = args['<number>']
        target = self.bot.casefold(target)
        if number:
            # setting the quorum is op-only; reading it is open to all
            if self.is_admin(mask, target):
                try:
                    number = int(number)
                except ValueError:
                    self.bot.notice(target, '*** Quorum must be an integer')
                    return
                self.states[target]['meeting']['quorum'] = number
                self.bot.notice(target, '*** Quorum now set to: {}'.format(number))
        else:
            current_number = self.states[target]['meeting']['quorum']
            self.bot.notice(target, '*** Quorum is: {}'.format(current_number))

    @command()
    def meeting(self, mask, target, args):
        """Set or see the name for the current meeting.
        %%meeting [<name>...]
        """
        # we only care about ops and commands to channels
        if not target.is_channel:
            return
        name = ' '.join(args['<name>'])
        target = self.bot.casefold(target)
        if name:
            if self.is_admin(mask, target):
                self.states[target]['meeting']['name'] = name
                self.bot.notice(target, '*** Meeting: ' + name)
        else:
            current_name = self.states[target]['meeting']['name']
            self.bot.notice(target, '*** Current meeting: ' + current_name)

    @command()
    def motion(self, mask, target, args):
        """Set or see the text for the current motion.
        %%motion [<text>...]
        """
        # we only care about ops and commands to channels
        if not target.is_channel:
            return
        text = ' '.join(args['<text>'])
        target = self.bot.casefold(target)
        if text:
            if self.is_admin(mask, target):
                self.states[target]['motion']['text'] = text
                self.states[target]['motion']['put_by'] = mask.nick
                self.bot.notice(target, '*** Motion: ' + text)
        else:
            current_text = self.states[target]['motion']['text']
            self.bot.notice(target, '*** Current motion: ' + current_text)

    @command()
    def ayes(self, mask, target, args):
        """Set external ayes for the current motion.
        %%ayes <votes>
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        target = self.bot.casefold(target)
        if not self.states[target]['meeting']['started']:
            self.bot.notice(target, '*** No meeting started.')
            return
        if not self.states[target]['motion']['started']:
            self.bot.notice(target, '*** No motion started.')
            return
        self.states[target]['motion']['extra_ayes'] = int(args['<votes>'])
        self.bot.notice(target, '*** Extra ayes: ' + args['<votes>'])

    @command()
    def nays(self, mask, target, args):
        """Set external nays for the current motion.
        %%nays <votes>
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        target = self.bot.casefold(target)
        if not self.states[target]['meeting']['started']:
            self.bot.notice(target, '*** No meeting started.')
            return
        if not self.states[target]['motion']['started']:
            self.bot.notice(target, '*** No motion started.')
            return
        self.states[target]['motion']['extra_nays'] = int(args['<votes>'])
        self.bot.notice(target, '*** Extra nays: ' + args['<votes>'])

    @command()
    def start(self, mask, target, args):
        """Start a meeting or motion.
        %%start [meeting|motion]
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        target = self.bot.casefold(target)
        if args['meeting']:
            self.states[target]['meeting']['started'] = True
            self.bot.notice(target, '*** Meeting started.')
        elif args['motion']:
            # a motion can only run inside a meeting
            if not self.states[target]['meeting']['started']:
                self.bot.notice(target, '*** No meeting started.')
                return
            self.states[target]['motion']['started'] = True
            self.bot.notice(target, '*** MOTION: ' + self.states[target]['motion']['text'])
            self.bot.notice(target, '*** Put by: ' + self.states[target]['motion']['put_by'])
            self.bot.notice(target, '*** Please now respond either "aye", "nay" or "abstain" '
                                    'to record a vote.')

    @command()
    def cancel(self, mask, target, args):
        """Cancel a motion.
        %%cancel motion
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        channel = self.bot.casefold(target)
        if args['motion']:
            # discard all motion state without announcing a result
            self.states[channel]['motion'] = {
                'text': '',
                'put_by': '',
                'started': False,
                'votes': {},
            }
            self.bot.notice(channel, '*** Motion cancelled.')

    @command()
    def stop(self, mask, target, args):
        """Stop a meeting or motion.
        %%stop [meeting|motion]
        """
        # we only care about ops and commands to channels
        if not (target.is_channel and self.is_admin(mask, target)):
            return
        channel = self.bot.casefold(target)
        if args['meeting']:
            if not self.states[channel]['meeting']['started']:
                self.bot.notice(channel, '*** No meeting started.')
                return
            # XXX - save the meeting data
            self.states[channel]['meeting'] = {
                'name': '',
                'started': False,
                'quorum': 0,
            }
            self.states[channel]['motion'] = {
                'text': '',
                'put_by': '',
                'started': False,
                'votes': {},
            }
            self.bot.notice(channel, '*** Meeting ended.')
        elif args['motion']:
            if not self.states[channel]['motion']['started']:
                self.bot.notice(channel, '*** There is no motion to stop.')
                return
            # construct aye/nay list
            ayes = []
            nays = []
            abstains = []
            for nick, vote in self.states[channel]['motion']['votes'].items():
                # user left channel
                if nick not in self.bot.channels[channel]:
                    continue
                if vote is True:
                    ayes.append(nick)
                elif vote is False:
                    nays.append(nick)
                else:
                    abstains.append(nick)
            # count it up
            extra_ayes = self.states[channel]['motion'].get('extra_ayes', 0)
            extra_nays = self.states[channel]['motion'].get('extra_nays', 0)
            aye_count = len(ayes) + extra_ayes
            nay_count = len(nays) + extra_nays
            abstain_count = len(abstains)
            self.bot.notice(channel, '*** Votes')
            self.bot.notice(channel, MOTION_RESULT_LIST.format(**{
                'ayes': ', '.join(ayes) if ayes else 'none',
                'nays': ', '.join(nays) if nays else 'none',
                'abstains': ', '.join(abstains) if abstains else 'none',
            }))
            if extra_ayes or extra_nays:
                self.bot.notice(channel, MOTION_EXTERNAL_VOTES.format(**{
                    'ayes': extra_ayes,
                    'nays': extra_nays,
                }))
            total = aye_count + nay_count + abstain_count
            quorum = self.states[channel]['meeting']['quorum']
            self.bot.notice(channel, '*** Tally')
            self.bot.notice(channel, MOTION_RESULT_COUNT.format(**{
                'ayes': aye_count,
                'nays': nay_count,
                'abstains': abstain_count,
                'total': total,
            }))
            # BUG FIX: guard against ZeroDivisionError when a motion is
            # stopped with no aye/nay votes at all (only abstentions, or
            # no votes whatsoever).
            decisive = aye_count + nay_count
            pc_in_favour = (aye_count / decisive * 100) if decisive else 0.0
            if total < quorum:
                self.bot.notice(channel, MOTION_LAPSES_QUORUM.format(quorum=quorum))
            elif (aye_count - nay_count) > 0:
                self.bot.notice(channel, MOTION_CARRIES.format(in_favour=pc_in_favour))
            else:
                self.bot.notice(channel, MOTION_LAPSES_PC.format(in_favour=pc_in_favour))
            # reset motion state for the next motion
            self.states[channel]['motion'] = {
                'text': '',
                'put_by': '',
                'started': False,
                'votes': {},
            }

    # everyone commands
    @irc3.event(irc3.rfc.PRIVMSG)
    def aye_nay_abstain(self, mask, event, target, data):
        """Accept aye/nay/abstain in regards to a motion."""
        # we only care about messages to channels
        if not target.is_channel:
            return
        target = self.bot.casefold(target)
        nick = self.bot.casefold(mask.nick)
        # ROBUSTNESS FIX: an empty/whitespace-only message used to raise
        # IndexError on [0]; also guard against channels with no state yet.
        words = data.casefold().split()
        if not words:
            return
        cmd = words[0]
        state = self.states.get(target)
        if state is None or not state['motion']['started'] or cmd not in ('aye', 'nay', 'abstain'):
            return
        if not (self.is_voice(mask, target) or self.is_admin(mask, target)):
            self.bot.privmsg(nick, 'You are not recognised; your vote has not been '
                                   'counted. If this a mistake, inform the operators.')
            return
        if cmd == 'aye':
            state['motion']['votes'][nick] = True
        elif cmd == 'nay':
            state['motion']['votes'][nick] = False
        elif cmd == 'abstain':
            state['motion']['votes'][nick] = None

    @irc3.event(irc3.rfc.NEW_NICK)
    def track_nick(self, nick, new_nick):
        """Track nick changes in regard to all motions."""
        old_nick = self.bot.casefold(nick.nick)
        new_nick = self.bot.casefold(new_nick)
        for channel in self.states:
            if old_nick in self.states[channel]['motion']['votes']:
                # carry the recorded vote over to the new nick
                self.states[channel]['motion']['votes'][new_nick] = self.states[channel]['motion']['votes'][old_nick]
                del self.states[channel]['motion']['votes'][old_nick]

    # This is just to help me debug, it prints everything, every event
    @event(r'(?P<message>.*)')
    def debug(self, message=None):
        print(datetime.datetime.now().strftime("[%H:%M:%S]"), message)
        print('    ', self.states)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
from abc import abstractmethod
from collections import defaultdict, namedtuple
import copy
import fnmatch
import os
import sys
from twitter.common.collections import OrderedSet
from twitter.common.dirutil import safe_delete, safe_rmtree
from pants import binary_util
from pants.backend.jvm.targets.java_tests import JavaTests as junit_tests
from pants.backend.jvm.tasks.jvm_task import JvmTask
from pants.backend.jvm.tasks.jvm_tool_task_mixin import JvmToolTaskMixin
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.java.util import execute_java
from pants.util.contextutil import temporary_file
from pants.util.dirutil import safe_mkdir, safe_open
_CWD_NOT_PRESENT='CWD NOT PRESENT'
# TODO(ji): Add unit tests.
# TODO(ji): Add coverage in ci.run (https://github.com/pantsbuild/pants/issues/83)
# The helper classes (_JUnitRunner and its subclasses) need to use
# methods inherited by JUnitRun from Task. Rather than pass a reference
# to the entire Task instance, we isolate the methods that are used
# in a named tuple and pass that one around.
# TODO(benjy): Why? This seems unnecessarily clunky. The runners only exist because we can't
# (yet?) pick a Task type based on cmd-line flags. But they act "as-if" they were Task types,
# so it seems perfectly reasonable for them to have a reference to the task.
# This trick just makes debugging harder, and requires extra work when a runner implementation
# needs some new thing from the task.
_TaskExports = namedtuple('_TaskExports',
['classpath',
'task_options',
'jvm_options',
'args',
'confs',
'get_base_classpath_for_target',
'register_jvm_tool',
'tool_classpath',
'workdir'])
def _classfile_to_classname(cls):
clsname, _ = os.path.splitext(cls.replace('/', '.'))
return clsname
class _JUnitRunner(object):
  """Helper class to run JUnit tests with or without coverage.

  The default behavior is to just run JUnit tests.
  """

  @classmethod
  def register_options(cls, register):
    # Coverage flags are registered via _Coverage so every runner variant
    # (plain, Emma, Cobertura) accepts the same command line.
    _Coverage.register_options(register)
    register('--skip', action='store_true', help='Skip running junit.')
    register('--fail-fast', action='store_true',
             help='Fail fast on the first test failure in a suite.')
    register('--batch-size', type=int, default=sys.maxint,
             help='Run at most this many tests in a single test process.')
    register('--test', action='append',
             help='Force running of just these tests. Tests can be specified using any of: '
                  '[classname], [classname]#[methodname], [filename] or [filename]#[methodname]')
    register('--xml-report', action='store_true', help='Output an XML report for the test run.')
    register('--per-test-timer', action='store_true', help='Show progress and timer for each test.')
    register('--default-parallel', action='store_true',
             help='Run classes without @TestParallel or @TestSerial annotations in parallel.')
    register('--parallel-threads', type=int, default=0,
             help='Number of threads to run tests in parallel. 0 for autoset.')
    register('--test-shard',
             help='Subset of tests to run, in the form M/N, 0 <= M < N. '
                  'For example, 1/3 means run tests number 2, 5, 8, 11, ...')
    register('--suppress-output', action='store_true', default=True,
             help='Redirect test output to files in .pants.d/test/junit. Implied by --xml-report.')
    register('--cwd', default=_CWD_NOT_PRESENT, nargs='?',
             help='Set the working directory. If no argument is passed, use the first target path.')

  def register_jvm_tool(self, key, ini_section, ini_key, default):
    # Look up the tool spec in pants.ini (falling back to default) and
    # register it for bootstrapping via the owning task.
    tool = self._context.config.getlist(ini_section, ini_key, default)
    self._task_exports.register_jvm_tool(key,
                                         tool,
                                         ini_section=ini_section,
                                         ini_key=ini_key)

  def __init__(self, task_exports, context):
    """Build the ConsoleRunner argument list from the parsed task options.

    :param task_exports: a _TaskExports namedtuple exposing the slice of the
      owning Task this runner needs.
    :param context: the pants run context.
    """
    self._task_exports = task_exports
    self._context = context
    self._junit_bootstrap_key = 'junit'
    self.register_jvm_tool(key=self._junit_bootstrap_key,
                           ini_section='junit-run',
                           ini_key='junit-bootstrap-tools',
                           default=['//:junit'])
    options = task_exports.task_options
    self._tests_to_run = options.test
    self._batch_size = options.batch_size
    self._fail_fast = options.fail_fast
    self._cwd_opt = options.cwd
    # Translate options into ConsoleRunner command-line flags; order of the
    # appends below is preserved on the java command line.
    self._args = copy.copy(task_exports.args)
    if options.xml_report or options.suppress_output:
      if self._fail_fast:
        self._args.append('-fail-fast')
      if options.xml_report:
        self._args.append('-xmlreport')
      self._args.append('-suppress-output')
      self._args.append('-outdir')
      self._args.append(task_exports.workdir)
    if options.per_test_timer:
      self._args.append('-per-test-timer')
    if options.default_parallel:
      self._args.append('-default-parallel')
    self._args.append('-parallel-threads')
    self._args.append(str(options.parallel_threads))
    if options.test_shard:
      self._args.append('-test-shard')
      self._args.append(options.test_shard)

  def execute(self, targets):
    """Run JUnit over the test targets in ``targets``.

    Coverage subclasses hook in through instrument()/run()/report().
    """
    working_dir = None
    if self._cwd_opt != _CWD_NOT_PRESENT:
      working_dir = self._cwd_opt
      # --cwd was passed with no value (nargs='?'): fall back to the first
      # target's path.
      if not working_dir and targets:
        working_dir = targets[0].address.spec_path
    # For running the junit tests, we're only interested in
    # java_tests/junit_tests targets.
    #
    # But if coverage options are specified, the original
    # behavior is that in addition to the junit runs, the coverage
    # tools would also look into additional targets such as sources.
    #
    # Thus, we filter out the non-java-tests targets first but
    # keep the original targets set intact for coverages.
    java_tests_targets = list(self._test_target_candidates(targets))
    tests = list(self._get_tests_to_run() if self._tests_to_run
                 else self._calculate_tests_from_targets(java_tests_targets))
    if tests:
      bootstrapped_cp = self._task_exports.tool_classpath(self._junit_bootstrap_key)
      junit_classpath = self._task_exports.classpath(
        cp=bootstrapped_cp,
        confs=self._task_exports.confs,
        exclusives_classpath=self._task_exports.get_base_classpath_for_target(java_tests_targets[0]))
      self._context.release_lock()
      self.instrument(targets, tests, junit_classpath)

      def report():
        self.report(targets, tests, junit_classpath)

      try:
        self.run(tests, junit_classpath, cwd=working_dir)
      except TaskError:
        # Still emit the (coverage) report for a failed run, then
        # propagate the failure.
        report()
        raise
      else:
        report()

  def instrument(self, targets, tests, junit_classpath):
    """Called from coverage classes. Run any code instrumentation needed.

    Subclasses should override this if they need more work done.
    """
    pass

  def run(self, tests, junit_classpath, cwd=None):
    """Run the tests in the appropriate environment.

    Subclasses should override this if they need more work done.

    :param tests: an iterable that contains all the test class names
      extracted from the testing targets.
    :param junit_classpath: the collective classpath value under which
      the junit will be executed.
    """
    self._run_tests(tests, junit_classpath, JUnitRun._MAIN, cwd=cwd)

  def report(self, targets, tests, junit_classpath):
    """Post-processing of any test output.

    Subclasses should override this if they need anything done here.
    """
    pass

  def _run_tests(self, tests, classpath, main, extra_jvm_options=None, cwd=None):
    """Execute ``main`` over ``tests`` in batches.

    :raises TaskError: if any batch exits non-zero.
    """
    # TODO(John Sirois): Integrated batching with the test runner. As things stand we get
    # results summaries for example for each batch but no overall summary.
    # http://jira.local.twitter.com/browse/AWESOME-1114
    extra_jvm_options = extra_jvm_options or []
    result = 0
    cwd = cwd or get_buildroot()
    for batch in self._partition(tests):
      with binary_util.safe_args(batch) as batch_tests:
        # abs() folds negative JVM exit codes into the accumulated total so
        # the final non-zero check still fires.
        result += abs(execute_java(
          classpath=classpath,
          main=main,
          jvm_options=self._task_exports.jvm_options + extra_jvm_options,
          args=self._args + batch_tests,
          workunit_factory=self._context.new_workunit,
          workunit_name='run',
          workunit_labels=[WorkUnit.TEST],
          cwd=cwd
        ))
        if result != 0 and self._fail_fast:
          break
    if result != 0:
      raise TaskError('java %s ... exited non-zero (%i)' % (main, result))

  def _partition(self, tests):
    # Yield batches of at most --batch-size tests each.
    stride = min(self._batch_size, len(tests))
    for i in range(0, len(tests), stride):
      yield tests[i:i+stride]

  def _get_tests_to_run(self):
    # Expand each user-supplied --test spec into concrete test names.
    for test_spec in self._tests_to_run:
      for c in self._interpret_test_spec(test_spec):
        yield c

  def _test_target_candidates(self, targets):
    # Only java_tests/junit_tests targets contribute runnable tests.
    for target in targets:
      if isinstance(target, junit_tests):
        yield target

  def _calculate_tests_from_targets(self, targets):
    """Yield the class names compiled from the given test targets."""
    targets_to_classes = self._context.products.get_data('classes_by_target')
    for target in self._test_target_candidates(targets):
      target_products = targets_to_classes.get(target)
      if target_products:
        for _, classes in target_products.rel_paths():
          for cls in classes:
            yield _classfile_to_classname(cls)

  def _classnames_from_source_file(self, srcfile):
    """Yield the class names compiled from a single source file."""
    relsrc = os.path.relpath(srcfile, get_buildroot()) if os.path.isabs(srcfile) else srcfile
    source_products = self._context.products.get_data('classes_by_source').get(relsrc)
    if not source_products:
      # It's valid - if questionable - to have a source file with no classes when, for
      # example, the source file has all its code commented out.
      self._context.log.warn('Source file %s generated no classes' % srcfile)
    else:
      for _, classes in source_products.rel_paths():
        for cls in classes:
          yield _classfile_to_classname(cls)

  def _interpret_test_spec(self, test_spec):
    """Yield test names for one --test spec.

    A spec is [classname] or [filename], optionally followed by '#' and a
    method name.
    """
    components = test_spec.split('#', 2)
    classname_or_srcfile = components[0]
    methodname = '#' + components[1] if len(components) == 2 else ''
    if os.path.exists(classname_or_srcfile):  # It's a source file.
      srcfile = classname_or_srcfile  # Alias for clarity.
      for cls in self._classnames_from_source_file(srcfile):
        # Tack the methodname onto all classes in the source file, as we
        # can't know which method the user intended.
        yield cls + methodname
    else:  # It's a classname.
      classname = classname_or_srcfile
      yield classname + methodname
class _Coverage(_JUnitRunner):
  """Base class for emma-like coverage processors. Do not instantiate."""

  @classmethod
  def register_options(cls, register):
    """Register the coverage flags shared by all coverage processors."""
    # BUG FIX: the example text previously said '--coverage_patterns'
    # (underscore), which is not the registered flag name.
    register('--coverage-patterns', action='append',
             help='Restrict coverage measurement. Values are class name prefixes in dotted form '
                  'with ? and * wildcards. If preceded with a - the pattern is excluded. For '
                  'example, to include all code in com.pants.raven except claws and the eye you '
                  'would use: {flag}=com.pants.raven.* {flag}=-com.pants.raven.claw '
                  '{flag}=-com.pants.raven.Eye.'.format(flag='--coverage-patterns'))
    register('--coverage-console', action='store_true', default=True,
             help='Output a simple coverage report to the console.')
    register('--coverage-xml', action='store_true',
             help='Output an XML coverage report.')
    register('--coverage-html', action='store_true',
             help='Output an HTML coverage report.')
    register('--coverage-html-open', action='store_true',
             help='Open the generated HTML coverage report in a browser. Implies --coverage-html.')

  def __init__(self, task_exports, context):
    super(_Coverage, self).__init__(task_exports, context)
    options = task_exports.task_options
    self._coverage = options.coverage
    self._coverage_filters = options.coverage_patterns or []
    # All coverage artifacts live under <workdir>/coverage.
    self._coverage_dir = os.path.join(task_exports.workdir, 'coverage')
    self._coverage_instrument_dir = os.path.join(self._coverage_dir, 'classes')
    # TODO(ji): These may need to be transferred down to the Emma class, as the suffixes
    # may be emma-specific. Resolve when we also provide cobertura support.
    self._coverage_metadata_file = os.path.join(self._coverage_dir, 'coverage.em')
    self._coverage_file = os.path.join(self._coverage_dir, 'coverage.ec')
    self._coverage_report_console = options.coverage_console
    self._coverage_console_file = os.path.join(self._coverage_dir, 'coverage.txt')
    self._coverage_report_xml = options.coverage_xml
    self._coverage_xml_file = os.path.join(self._coverage_dir, 'coverage.xml')
    self._coverage_report_html_open = options.coverage_html_open
    # --coverage-html-open implies --coverage-html.
    self._coverage_report_html = self._coverage_report_html_open or options.coverage_html
    self._coverage_html_file = os.path.join(self._coverage_dir, 'html', 'index.html')

  @abstractmethod
  def instrument(self, targets, tests, junit_classpath):
    """Instrument the classes under test before running them."""
    pass

  @abstractmethod
  def run(self, tests, junit_classpath, cwd=None):
    """Run the instrumented tests."""
    pass

  @abstractmethod
  def report(self, targets, tests, junit_classpath):
    """Generate the requested coverage report(s)."""
    pass

  # Utility methods, called from subclasses
  def is_coverage_target(self, tgt):
    """True for JVM library targets whose code should be measured."""
    return (tgt.is_java or tgt.is_scala) and not tgt.is_test and not tgt.is_codegen

  def get_coverage_patterns(self, targets):
    """Return the explicit --coverage-patterns, or else every class under test."""
    if self._coverage_filters:
      return self._coverage_filters
    else:
      classes_under_test = set()
      classes_by_source = self._context.products.get_data('classes_by_source')

      def add_sources_under_test(tgt):
        if self.is_coverage_target(tgt):
          for source in tgt.sources_relative_to_buildroot():
            source_products = classes_by_source.get(source)
            if source_products:
              for _, classes in source_products.rel_paths():
                classes_under_test.update(_classfile_to_classname(cls) for cls in classes)

      for target in targets:
        target.walk(add_sources_under_test)
      return classes_under_test
class Emma(_Coverage):
  """Class to run coverage tests with Emma."""

  def __init__(self, task_exports, context):
    super(Emma, self).__init__(task_exports, context)
    self._emma_bootstrap_key = 'emma'
    self.register_jvm_tool(self._emma_bootstrap_key,
                           ini_section='junit-run',
                           ini_key='emma-bootstrap-tools',
                           default=['//:emma'])

  def instrument(self, targets, tests, junit_classpath):
    """Run 'emma instr' over the classes matching the coverage patterns.

    :raises TaskError: if the emma process exits non-zero.
    """
    safe_mkdir(self._coverage_instrument_dir, clean=True)
    self._emma_classpath = self._task_exports.tool_classpath(self._emma_bootstrap_key)
    with binary_util.safe_args(self.get_coverage_patterns(targets)) as patterns:
      args = [
        'instr',
        '-out', self._coverage_metadata_file,
        '-d', self._coverage_instrument_dir,
        '-cp', os.pathsep.join(junit_classpath),
        '-exit'
        ]
      for pattern in patterns:
        args.extend(['-filter', pattern])
      main = 'emma'
      result = execute_java(classpath=self._emma_classpath, main=main, args=args,
                            workunit_factory=self._context.new_workunit,
                            workunit_name='emma-instrument')
      if result != 0:
        raise TaskError("java %s ... exited non-zero (%i)"
                        " 'failed to instrument'" % (main, result))

  def run(self, tests, junit_classpath, cwd=None):
    # The instrumented classes dir is placed FIRST on the classpath so the
    # emma-instrumented versions win over the originals.
    self._run_tests(tests,
                    [self._coverage_instrument_dir] + junit_classpath + self._emma_classpath,
                    JUnitRun._MAIN,
                    extra_jvm_options=['-Demma.coverage.out.file={0}'.format(self._coverage_file)],
                    cwd=cwd)

  def report(self, targets, tests, junit_classpath):
    """Run 'emma report' producing console/xml/html output as requested.

    :raises TaskError: if the emma process exits non-zero.
    """
    args = [
      'report',
      '-in', self._coverage_metadata_file,
      '-in', self._coverage_file,
      '-exit'
      ]
    # Collect the source roots of all covered targets for emma's -sp flag.
    source_bases = set()

    def collect_source_base(target):
      if self.is_coverage_target(target):
        source_bases.add(target.target_base)

    for target in self._test_target_candidates(targets):
      target.walk(collect_source_base)
    for source_base in source_bases:
      args.extend(['-sp', source_base])
    sorting = ['-Dreport.sort', '+name,+class,+method,+block']
    if self._coverage_report_console:
      args.extend(['-r', 'txt',
                   '-Dreport.txt.out.file=%s' % self._coverage_console_file] + sorting)
    if self._coverage_report_xml:
      args.extend(['-r', 'xml', '-Dreport.xml.out.file=%s' % self._coverage_xml_file])
    if self._coverage_report_html:
      args.extend(['-r', 'html',
                   '-Dreport.html.out.file=%s' % self._coverage_html_file,
                   '-Dreport.out.encoding=UTF-8'] + sorting)
    main = 'emma'
    result = execute_java(classpath=self._emma_classpath, main=main, args=args,
                          workunit_factory=self._context.new_workunit,
                          workunit_name='emma-report')
    if result != 0:
      raise TaskError("java %s ... exited non-zero (%i)"
                      " 'failed to generate code coverage reports'" % (main, result))
    if self._coverage_report_console:
      # Mirror the generated text report onto stdout.
      with safe_open(self._coverage_console_file) as console_report:
        sys.stdout.write(console_report.read())
    if self._coverage_report_html_open:
      binary_util.ui_open(self._coverage_html_file)
class Cobertura(_Coverage):
  """Class to run coverage tests with cobertura."""

  def __init__(self, task_exports, context):
    super(Cobertura, self).__init__(task_exports, context)
    self._cobertura_bootstrap_key = 'cobertura'
    self._coverage_datafile = os.path.join(self._coverage_dir, 'cobertura.ser')
    self.register_jvm_tool(self._cobertura_bootstrap_key,
                           ini_section='junit-run',
                           ini_key='cobertura-bootstrap-tools',
                           default=['//:cobertura'])
    # Maps a classfile root dir to the set of classfiles found under it.
    self._rootdirs = defaultdict(OrderedSet)
    # Split --coverage-patterns into includes and ('-'-prefixed) excludes.
    self._include_filters = []
    self._exclude_filters = []
    for filt in self._coverage_filters:
      if filt[0] == '-':
        self._exclude_filters.append(filt[1:])
      else:
        self._include_filters.append(filt)

  def instrument(self, targets, tests, junit_classpath):
    """Instrument the selected classes in place with cobertura.

    :raises TaskError: if the cobertura instrumenter exits non-zero.
    """
    self._cobertura_classpath = self._task_exports.tool_classpath(self._cobertura_bootstrap_key)
    safe_delete(self._coverage_datafile)
    classes_by_target = self._context.products.get_data('classes_by_target')
    for target in targets:
      if self.is_coverage_target(target):
        classes_by_rootdir = classes_by_target.get(target)
        if classes_by_rootdir:
          for root, products in classes_by_rootdir.rel_paths():
            self._rootdirs[root].update(products)
    # Cobertura uses regular expressions for filters, and even then there are still problems
    # with filtering. It turned out to be easier to just select which classes to instrument
    # by filtering them here.
    # TODO(ji): Investigate again how we can use cobertura's own filtering mechanisms.
    if self._coverage_filters:
      for basedir, classes in self._rootdirs.items():
        updated_classes = []
        for cls in classes:
          # A class survives if it matches any include and no exclude.
          does_match = False
          for positive_filter in self._include_filters:
            if fnmatch.fnmatchcase(_classfile_to_classname(cls), positive_filter):
              does_match = True
          for negative_filter in self._exclude_filters:
            if fnmatch.fnmatchcase(_classfile_to_classname(cls), negative_filter):
              does_match = False
          if does_match:
            updated_classes.append(cls)
        self._rootdirs[basedir] = updated_classes
    for basedir, classes in self._rootdirs.items():
      if not classes:
        continue  # No point in running instrumentation if there is nothing to instrument!
      args = [
        '--basedir',
        basedir,
        '--datafile',
        self._coverage_datafile,
        ]
      with temporary_file() as fd:
        fd.write('\n'.join(classes) + '\n')
        args.append('--listOfFilesToInstrument')
        args.append(fd.name)
        main = 'net.sourceforge.cobertura.instrument.InstrumentMain'
        result = execute_java(classpath=self._cobertura_classpath + junit_classpath,
                              main=main,
                              args=args,
                              workunit_factory=self._context.new_workunit,
                              workunit_name='cobertura-instrument')
      if result != 0:
        raise TaskError("java %s ... exited non-zero (%i)"
                        " 'failed to instrument'" % (main, result))

  def run(self, tests, junit_classpath, cwd=None):
    # Run with the cobertura jars on the classpath and point cobertura at
    # the datafile produced by instrument().
    self._run_tests(tests,
                    self._cobertura_classpath + junit_classpath,
                    JUnitRun._MAIN,
                    extra_jvm_options=['-Dnet.sourceforge.cobertura.datafile=' + self._coverage_datafile],
                    cwd=cwd)

  def _build_sources_by_class(self):
    """Invert classes_by_source: map each classfile to its source file.

    Classfiles containing '$' (inner classes) are skipped since they share
    the source of their $-less outer class.
    """
    classes_by_source = self._context.products.get_data('classes_by_source')
    source_by_class = dict()
    for source_file, source_products in classes_by_source.items():
      for root, products in source_products.rel_paths():
        for product in products:
          if not '$' in product:
            if source_by_class.get(product):
              if source_by_class.get(product) != source_file:
                # BUG FIX: the format args were previously passed as a
                # separate, un-applied tuple so the template was never
                # filled in; apply the % formatting directly.
                self._context.log.warn(
                  'Inconsistency finding source for class %s: already had %s, also found %s'
                  % (product, source_by_class.get(product), source_file))
            else:
              source_by_class[product] = source_file
    return source_by_class

  def report(self, targets, tests, junit_classpath):
    """Generate the cobertura xml/html reports.

    :raises TaskError: if the cobertura reporter exits non-zero.
    """
    # Link files in the real source tree to files named using the classname.
    # Do not include class file names containing '$', as these will always have
    # a corresponding $-less class file, and they all point back to the same
    # source.
    # Put all these links to sources under self._coverage_dir/src
    all_classes = set()
    for basedir, classes in self._rootdirs.items():
      all_classes.update([cls for cls in classes if '$' not in cls])
    sources_by_class = self._build_sources_by_class()
    coverage_source_root_dir = os.path.join(self._coverage_dir, 'src')
    safe_rmtree(coverage_source_root_dir)
    for cls in all_classes:
      source_file = sources_by_class.get(cls)
      if source_file:
        # the class in @cls
        # (e.g., 'com/pants/example/hello/welcome/WelcomeEverybody.class')
        # was compiled from the file in @source_file
        # (e.g., 'src/scala/com/pants/example/hello/welcome/Welcome.scala')
        # Note that, in the case of scala files, the path leading up to Welcome.scala does not
        # have to match the path in the corresponding .class file AT ALL. In this example,
        # @source_file could very well have been 'src/hello-kitty/Welcome.scala'.
        # However, cobertura expects the class file path to match the corresponding source
        # file path below the source base directory(ies) (passed as (a) positional argument(s)),
        # while it still gets the source file basename from the .class file.
        # Here we create a fake hierarchy under coverage_dir/src to mimic what cobertura expects.
        class_dir = os.path.dirname(cls)  # e.g., 'com/pants/example/hello/welcome'
        fake_source_directory = os.path.join(coverage_source_root_dir, class_dir)
        safe_mkdir(fake_source_directory)
        fake_source_file = os.path.join(fake_source_directory, os.path.basename(source_file))
        try:
          os.symlink(os.path.relpath(source_file, fake_source_directory),
                     fake_source_file)
        except OSError as e:
          # These warnings appear when source files contain multiple classes.
          self._context.log.warn(
            'Could not symlink %s to %s: %s' %
            (source_file, fake_source_file, e))
      else:
        self._context.log.error('class %s does not exist in a source file!' % cls)
    report_formats = []
    if self._coverage_report_xml:
      report_formats.append('xml')
    if self._coverage_report_html:
      report_formats.append('html')
    for report_format in report_formats:
      report_dir = os.path.join(self._coverage_dir, report_format)
      safe_mkdir(report_dir, clean=True)
      args = [
        coverage_source_root_dir,
        '--datafile',
        self._coverage_datafile,
        '--destination',
        report_dir,
        '--format',
        report_format,
        ]
      main = 'net.sourceforge.cobertura.reporting.ReportMain'
      result = execute_java(classpath=self._cobertura_classpath,
                            main=main,
                            args=args,
                            workunit_factory=self._context.new_workunit,
                            workunit_name='cobertura-report-' + report_format)
      if result != 0:
        raise TaskError("java %s ... exited non-zero (%i)"
                        " 'failed to report'" % (main, result))
class JUnitRun(JvmTask, JvmToolTaskMixin):
  """Task entry point: runs JUnit tests, optionally under a coverage processor."""

  # Main class of the JUnit console runner tool.
  _MAIN = 'com.twitter.common.junit.runner.ConsoleRunner'

  @classmethod
  def register_options(cls, register):
    super(JUnitRun, cls).register_options(register)
    # The runner helpers own most of the flags.
    _JUnitRunner.register_options(register)
    register('--coverage', action='store_true', help='Collect code coverage data.')
    register('--coverage-processor', default='emma', help='Which coverage subsystem to use.')

  def __init__(self, *args, **kwargs):
    """Select the runner implementation (plain/Emma/Cobertura) from options.

    :raises TaskError: if --coverage-processor names an unknown processor.
    """
    super(JUnitRun, self).__init__(*args, **kwargs)
    # Bundle up just the slice of this Task that the runner helpers use.
    task_exports = _TaskExports(classpath=self.classpath,
                                task_options=self.get_options(),
                                jvm_options=self.jvm_options,
                                args=self.args,
                                confs=self.confs,
                                get_base_classpath_for_target=self.get_base_classpath_for_target,
                                register_jvm_tool=self.register_jvm_tool,
                                tool_classpath=self.tool_classpath,
                                workdir=self.workdir)
    options = self.get_options()
    # --coverage-html-open enables coverage even without --coverage.
    if options.coverage or options.coverage_html_open:
      coverage_processor = options.coverage_processor
      if coverage_processor == 'emma':
        self._runner = Emma(task_exports, self.context)
      elif coverage_processor == 'cobertura':
        self._runner = Cobertura(task_exports, self.context)
      else:
        raise TaskError('unknown coverage processor %s' % coverage_processor)
    else:
      self._runner = _JUnitRunner(task_exports, self.context)

  def prepare(self, round_manager):
    """Declare the upstream products this task consumes."""
    super(JUnitRun, self).prepare(round_manager)
    round_manager.require_data('resources_by_target')

    # List of FQCN, FQCN#method, sourcefile or sourcefile#method.
    round_manager.require_data('classes_by_target')
    round_manager.require_data('classes_by_source')

  def execute(self):
    # Honor --skip; otherwise delegate the whole run to the chosen runner.
    if not self.get_options().skip:
      targets = self.context.targets()
      self._runner.execute(targets)
| |
#!/usr/bin/env python
from nose.tools import *
import networkx
from test_multigraph import BaseMultiGraphTester, TestMultiGraph
class BaseMultiDiGraphTester(BaseMultiGraphTester):
    """Shared tests for MultiDiGraph-like classes.

    BUG FIX: test_edges, test_out_edges and test_in_edges were each defined
    twice; each later definition silently shadowed the earlier one, so those
    earlier assertions never ran. The duplicates are renamed with a _multi
    suffix so all of them execute.
    """
    def test_edges(self):
        G=self.K3
        assert_equal(sorted(G.edges()),[(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
        assert_raises((KeyError,networkx.NetworkXError), G.edges,-1)

    def test_edges_data(self):
        G=self.K3
        assert_equal(sorted(G.edges(data=True)),
                     [(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),(2,0,{}),(2,1,{})])
        assert_equal(sorted(G.edges(0,data=True)),[(0,1,{}),(0,2,{})])
        assert_raises((KeyError,networkx.NetworkXError), G.neighbors,-1)

    def test_edges_multi(self):
        # Renamed from a duplicate test_edges definition.
        G=self.K3
        assert_equal(sorted(G.edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.edges(0)),[(0,1),(0,2)])
        G.add_edge(0,1)
        assert_equal(sorted(G.edges()),
                     [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])

    def test_out_edges(self):
        G=self.K3
        assert_equal(sorted(G.out_edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
        assert_raises((KeyError,networkx.NetworkXError), G.out_edges,-1)
        assert_equal(sorted(G.out_edges(0,keys=True)),[(0,1,0),(0,2,0)])

    def test_out_edges_multi(self):
        # Renamed from a duplicate test_out_edges definition.
        G=self.K3
        assert_equal(sorted(G.out_edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.out_edges(0)),[(0,1),(0,2)])
        G.add_edge(0,1,2)
        assert_equal(sorted(G.out_edges()),
                     [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])

    def test_in_edges(self):
        G=self.K3
        assert_equal(sorted(G.in_edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.in_edges(0)),[(1,0),(2,0)])
        assert_raises((KeyError,networkx.NetworkXError), G.in_edges,-1)
        G.add_edge(0,1,2)
        assert_equal(sorted(G.in_edges()),
                     [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.in_edges(0,keys=True)),[(1,0,0),(2,0,0)])

    def test_in_edges_multi(self):
        # Renamed from a duplicate test_in_edges definition.
        G=self.K3
        assert_equal(sorted(G.in_edges()),
                     [(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.in_edges(0)),[(1,0),(2,0)])
        G.add_edge(0,1,2)
        assert_equal(sorted(G.in_edges()),
                     [(0,1),(0,1),(0,2),(1,0),(1,2),(2,0),(2,1)])
        assert_equal(sorted(G.in_edges(data=True,keys=False)),
                     [(0,1,{}),(0,1,{}),(0,2,{}),(1,0,{}),(1,2,{}),
                      (2,0,{}),(2,1,{})])

    def is_shallow(self,H,G):
        """Assert H shares (aliases) G's graph/node/edge attribute dicts."""
        # graph
        assert_equal(G.graph['foo'],H.graph['foo'])
        G.graph['foo'].append(1)
        assert_equal(G.graph['foo'],H.graph['foo'])
        # node
        assert_equal(G.node[0]['foo'],H.node[0]['foo'])
        G.node[0]['foo'].append(1)
        assert_equal(G.node[0]['foo'],H.node[0]['foo'])
        # edge
        assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
        G[1][2][0]['foo'].append(1)
        assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])

    def is_deep(self,H,G):
        """Assert H holds independent copies of G's attribute dicts."""
        # graph
        assert_equal(G.graph['foo'],H.graph['foo'])
        G.graph['foo'].append(1)
        assert_not_equal(G.graph['foo'],H.graph['foo'])
        # node
        assert_equal(G.node[0]['foo'],H.node[0]['foo'])
        G.node[0]['foo'].append(1)
        assert_not_equal(G.node[0]['foo'],H.node[0]['foo'])
        # edge
        assert_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])
        G[1][2][0]['foo'].append(1)
        assert_not_equal(G[1][2][0]['foo'],H[1][2][0]['foo'])

    def test_to_undirected(self):
        # MultiDiGraph -> MultiGraph changes number of edges so it is
        # not a copy operation... use is_shallow, not is_shallow_copy
        G=self.K3
        self.add_attributes(G)
        H=networkx.MultiGraph(G)
        self.is_shallow(H,G)
        H=G.to_undirected()
        self.is_deep(H,G)

    def test_has_successor(self):
        G=self.K3
        assert_equal(G.has_successor(0,1),True)
        assert_equal(G.has_successor(0,-1),False)

    def test_successors(self):
        G=self.K3
        assert_equal(sorted(G.successors(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.successors,-1)

    def test_has_predecessor(self):
        G=self.K3
        assert_equal(G.has_predecessor(0,1),True)
        assert_equal(G.has_predecessor(0,-1),False)

    def test_predecessors(self):
        G=self.K3
        assert_equal(sorted(G.predecessors(0)),[1,2])
        assert_raises((KeyError,networkx.NetworkXError), G.predecessors,-1)

    def test_degree(self):
        G=self.K3
        assert_equal(list(G.degree()),[(0,4),(1,4),(2,4)])
        assert_equal(dict(G.degree()),{0:4,1:4,2:4})
        assert_equal(G.degree(0), 4)
        assert_equal(list(G.degree(iter([0]))), [(0, 4)])
        G.add_edge(0,1,weight=0.3,other=1.2)
        assert_equal(list(G.degree(weight='weight')),[(0,4.3),(1,4.3),(2,4)])
        assert_equal(list(G.degree(weight='other')),[(0,5.2),(1,5.2),(2,4)])

    def test_in_degree(self):
        G=self.K3
        assert_equal(list(G.in_degree()),[(0,2),(1,2),(2,2)])
        assert_equal(dict(G.in_degree()),{0:2,1:2,2:2})
        assert_equal(G.in_degree(0), 2)
        assert_equal(list(G.in_degree(iter([0]))), [(0, 2)])
        assert_equal(G.in_degree(0,weight='weight'), 2)

    def test_out_degree(self):
        G=self.K3
        assert_equal(list(G.out_degree()),[(0,2),(1,2),(2,2)])
        assert_equal(dict(G.out_degree()),{0:2,1:2,2:2})
        assert_equal(G.out_degree(0), 2)
        assert_equal(list(G.out_degree(iter([0]))), [(0, 2)])
        assert_equal(G.out_degree(0,weight='weight'), 2)

    def test_size(self):
        G=self.K3
        assert_equal(G.size(),6)
        assert_equal(G.number_of_edges(),6)
        G.add_edge(0,1,weight=0.3,other=1.2)
        assert_equal(G.size(weight='weight'),6.3)
        assert_equal(G.size(weight='other'),7.2)

    def test_to_undirected_reciprocal(self):
        G=self.Graph()
        G.add_edge(1,2)
        assert_true(G.to_undirected().has_edge(1,2))
        assert_false(G.to_undirected(reciprocal=True).has_edge(1,2))
        G.add_edge(2,1)
        assert_true(G.to_undirected(reciprocal=True).has_edge(1,2))

    def test_reverse_copy(self):
        G=networkx.MultiDiGraph([(0,1),(0,1)])
        R=G.reverse()
        assert_equal(sorted(R.edges()),[(1,0),(1,0)])
        R.remove_edge(1,0)
        assert_equal(sorted(R.edges()),[(1,0)])
        # The original is untouched by a copying reverse.
        assert_equal(sorted(G.edges()),[(0,1),(0,1)])

    def test_reverse_nocopy(self):
        G=networkx.MultiDiGraph([(0,1),(0,1)])
        R=G.reverse(copy=False)
        assert_equal(sorted(R.edges()),[(1,0),(1,0)])
        R.remove_edge(1,0)
        assert_equal(sorted(R.edges()),[(1,0)])
        # copy=False shares storage, so the original reflects the removal.
        assert_equal(sorted(G.edges()),[(1,0)])
class TestMultiDiGraph(BaseMultiDiGraphTester,TestMultiGraph):
    def setUp(self):
        # Class under test for the inherited test suites.
        self.Graph=networkx.MultiDiGraph
        # build K3
        self.k3edges=[(0, 1), (0, 2), (1, 2)]
        self.k3nodes=[0, 1, 2]
        self.K3=self.Graph()
        # Wire the internal succ/pred dicts of a complete digraph on 3
        # nodes by hand rather than via add_edge.
        self.K3.adj={0:{},1:{},2:{}}
        self.K3.succ=self.K3.adj
        self.K3.pred={0:{},1:{},2:{}}
        for u in self.k3nodes:
            for v in self.k3nodes:
                if u==v: continue
                # The key-0 edge-data dict is SHARED between succ and pred,
                # mirroring networkx internals so mutations stay consistent.
                d={0:{}}
                self.K3.succ[u][v]=d
                self.K3.pred[v][u]=d
        self.K3.adj=self.K3.succ
        self.K3.edge=self.K3.adj
        self.K3.node={}
        self.K3.node[0]={}
        self.K3.node[1]={}
        self.K3.node[2]={}
def test_add_edge(self):
G=self.Graph()
G.add_edge(0,1)
assert_equal(G.adj,{0: {1: {0:{}}}, 1: {}})
assert_equal(G.succ,{0: {1: {0:{}}}, 1: {}})
assert_equal(G.pred,{0: {}, 1: {0:{0:{}}}})
G=self.Graph()
G.add_edge(*(0,1))
assert_equal(G.adj,{0: {1: {0:{}}}, 1: {}})
assert_equal(G.succ,{0: {1: {0:{}}}, 1: {}})
assert_equal(G.pred,{0: {}, 1: {0:{0:{}}}})
def test_add_edges_from(self):
    """Each add_edges_from call appends new parallel edges with
    auto-incremented integer keys; a keyword attribute is a default that
    per-edge dicts override."""
    G = self.Graph()
    G.add_edges_from([(0, 1), (0, 1, {'weight': 3})])
    assert_equal(G.adj, {0: {1: {0: {}, 1: {'weight': 3}}}, 1: {}})
    assert_equal(G.succ, {0: {1: {0: {}, 1: {'weight': 3}}}, 1: {}})
    assert_equal(G.pred, {0: {}, 1: {0: {0: {}, 1: {'weight': 3}}}})
    # Second call: keys continue at 2 and 3; weight=2 applies to the bare
    # tuple while the explicit {'weight': 3} dict wins for the other.
    G.add_edges_from([(0, 1), (0, 1, {'weight': 3})], weight=2)
    assert_equal(G.succ, {0: {1: {0: {},
                                  1: {'weight': 3},
                                  2: {'weight': 2},
                                  3: {'weight': 3}}},
                          1: {}})
    assert_equal(G.pred, {0: {}, 1: {0: {0: {}, 1: {'weight': 3},
                                         2: {'weight': 2},
                                         3: {'weight': 3}}}})
    # Malformed edge tuples must raise.
    assert_raises(networkx.NetworkXError, G.add_edges_from, [(0,)])  # too few in tuple
    assert_raises(networkx.NetworkXError, G.add_edges_from, [(0, 1, 2, 3, 4)])  # too many in tuple
    assert_raises(TypeError, G.add_edges_from, [0])  # not a tuple
def test_remove_edge(self):
    """Removing 0->1 deletes it from both succ and pred; removing a
    missing edge or a missing key raises."""
    G = self.K3
    G.remove_edge(0, 1)
    assert_equal(G.succ, {0: {2: {0: {}}},
                          1: {0: {0: {}}, 2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    assert_equal(G.pred, {0: {1: {0: {}}, 2: {0: {}}},
                          1: {2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    # Nonexistent endpoint.
    assert_raises((KeyError, networkx.NetworkXError), G.remove_edge, -1, 0)
    # Existing endpoints but nonexistent multi-edge key.
    assert_raises((KeyError, networkx.NetworkXError), G.remove_edge, 0, 2,
                  key=1)
def test_remove_multiedge(self):
    """Removing by explicit key deletes only that parallel edge; a keyless
    remove then deletes one remaining 0->1 edge from succ and pred."""
    G = self.K3
    G.add_edge(0, 1, key='parallel edge')
    G.remove_edge(0, 1, key='parallel edge')
    # Back to the pristine K3 structure after the keyed round-trip.
    assert_equal(G.adj, {0: {1: {0: {}}, 2: {0: {}}},
                         1: {0: {0: {}}, 2: {0: {}}},
                         2: {0: {0: {}}, 1: {0: {}}}})
    assert_equal(G.succ, {0: {1: {0: {}}, 2: {0: {}}},
                          1: {0: {0: {}}, 2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    assert_equal(G.pred, {0: {1: {0: {}}, 2: {0: {}}},
                          1: {0: {0: {}}, 2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    G.remove_edge(0, 1)
    assert_equal(G.succ, {0: {2: {0: {}}},
                          1: {0: {0: {}}, 2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    assert_equal(G.pred, {0: {1: {0: {}}, 2: {0: {}}},
                          1: {2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    assert_raises((KeyError, networkx.NetworkXError), G.remove_edge, -1, 0)
def test_remove_edges_from(self):
    """remove_edges_from deletes listed edges from succ and pred and
    silently ignores edges that are not present."""
    G = self.K3
    G.remove_edges_from([(0, 1)])
    assert_equal(G.succ, {0: {2: {0: {}}},
                          1: {0: {0: {}}, 2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    assert_equal(G.pred, {0: {1: {0: {}}, 2: {0: {}}},
                          1: {2: {0: {}}},
                          2: {0: {0: {}}, 1: {0: {}}}})
    G.remove_edges_from([(0, 0)])  # silent fail
| |
import contextlib
import re
import socket
import uuid
from django.conf import settings
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import AnonymousUser
from django.contrib.sessions.middleware import SessionMiddleware
from django.db import transaction
from django.urls import is_valid_path
from django.http import (
HttpResponsePermanentRedirect, HttpResponseRedirect,
JsonResponse)
from django.middleware import common
from django.utils.cache import patch_cache_control, patch_vary_headers
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_bytes, iri_to_uri
from django.utils.translation import activate, ugettext_lazy as _
from rest_framework import permissions
from six.moves.urllib.parse import quote
import MySQLdb as mysql
from corsheaders.middleware import CorsMiddleware as _CorsMiddleware
from olympia import amo
from olympia.amo.utils import render
from . import urlresolvers
from .templatetags.jinja_helpers import urlparams
# Requests matching this path keep normal session authentication even when
# they target the API (see AuthenticationMiddlewareWithoutAPI.process_request).
auth_path = re.compile('%saccounts/authenticate/?$' % settings.DRF_API_REGEX)
class LocaleAndAppURLMiddleware(MiddlewareMixin):
    """
    1. search for locale first
    2. see if there are acceptable apps
    3. save those matched parameters in the request
    4. strip them from the URL so we can do stuff
    """

    def process_request(self, request):
        # Find locale, app
        prefixer = urlresolvers.Prefixer(request)
        # Always use a 302 redirect to avoid users being stuck in case of
        # accidental misconfiguration.
        redirect_type = HttpResponseRedirect
        urlresolvers.set_url_prefix(prefixer)
        full_path = prefixer.fix(prefixer.shortened_path)

        # Legacy /mobile paths are redirected to /android.
        if (prefixer.app == amo.MOBILE.short and
                request.path.rstrip('/').endswith('/' + amo.MOBILE.short)):
            return redirect_type(request.path.replace('/mobile', '/android'))

        if ('lang' in request.GET and not re.match(
                settings.SUPPORTED_NONAPPS_NONLOCALES_REGEX,
                prefixer.shortened_path)):
            # Blank out the locale so that we can set a new one. Remove lang
            # from query params so we don't have an infinite loop.
            prefixer.locale = ''
            new_path = prefixer.fix(prefixer.shortened_path)
            query = dict((force_bytes(k), request.GET[k]) for k in request.GET)
            query.pop('lang')
            return redirect_type(urlparams(new_path, **query))

        if full_path != request.path:
            # The canonical (locale/app-prefixed) path differs from what was
            # requested: redirect to it, preserving the query string.
            query_string = request.META.get('QUERY_STRING', '')
            full_path = quote(full_path.encode('utf-8'))

            if query_string:
                # NOTE(review): assumes a py2 byte-string QUERY_STRING;
                # str.decode would fail on py3 — confirm target runtime.
                query_string = query_string.decode('utf-8', 'ignore')
                full_path = u'%s?%s' % (full_path, query_string)

            response = redirect_type(full_path)
            # Cache the redirect for a year.
            if not settings.DEBUG:
                patch_cache_control(response, max_age=60 * 60 * 24 * 365)

            # Vary on Accept-Language or User-Agent if we changed the locale
            # or app.
            old_app = prefixer.app
            old_locale = prefixer.locale
            # NOTE: the local '_' here shadows the ugettext_lazy alias within
            # this scope.
            new_locale, new_app, _ = prefixer.split_path(full_path)

            if old_locale != new_locale:
                patch_vary_headers(response, ['Accept-Language'])
            if old_app != new_app:
                patch_vary_headers(response, ['User-Agent'])
            return response

        # No redirect needed: strip the prefix so URL resolution works on the
        # shortened path, and record the language/app on the request.
        request.path_info = '/' + prefixer.shortened_path
        request.LANG = prefixer.locale or prefixer.get_language()
        activate(request.LANG)
        request.APP = amo.APPS.get(prefixer.app, amo.FIREFOX)
        # Match legacy api requests too - IdentifyAPIRequestMiddleware is v3+
        # TODO - remove this when legacy_api goes away
        # https://github.com/mozilla/addons-server/issues/9274
        request.is_legacy_api = request.path_info.startswith('/api/')
class AuthenticationMiddlewareWithoutAPI(AuthenticationMiddleware):
    """
    Like AuthenticationMiddleware, but disabled for the API, which uses its
    own authentication mechanism.
    """
    def process_request(self, request):
        # API traffic (legacy or DRF) authenticates on its own — except for
        # the authentication endpoint itself, which needs the session user.
        is_any_api = request.is_api or request.is_legacy_api
        if is_any_api and not auth_path.match(request.path):
            request.user = AnonymousUser()
            return None
        return super(
            AuthenticationMiddlewareWithoutAPI, self).process_request(request)
class NoVarySessionMiddleware(SessionMiddleware):
    """
    SessionMiddleware sets Vary: Cookie anytime request.session is accessed.
    request.session is accessed indirectly anytime request.user is touched.
    We always touch request.user to see if the user is authenticated, so every
    request would be sending vary, so we'd get no caching.
    We skip the cache in Zeus if someone has an AMOv3+ cookie, so varying on
    Cookie at this level only hurts us.
    """
    def process_response(self, request, response):
        # In read-only mode, skip session processing entirely.
        if settings.READ_ONLY:
            return response
        # Let SessionMiddleware do its processing but prevent it from changing
        # the Vary header.
        vary = None
        if hasattr(response, 'get'):
            vary = response.get('Vary', None)
        new_response = (
            super(NoVarySessionMiddleware, self)
            .process_response(request, response))
        if vary:
            # Restore whatever Vary value existed before the parent ran.
            new_response['Vary'] = vary
        else:
            # Drop any Vary the parent may have added.
            del new_response['Vary']
        return new_response
class RemoveSlashMiddleware(MiddlewareMixin):
    """
    Middleware that tries to remove a trailing slash if there was a 404.
    If the response is a 404 because url resolution failed, we'll look for a
    better url without a trailing slash.
    """
    def process_response(self, request, response):
        # Only rewrite when: it 404'd, the path ends in '/', the slashed
        # path does NOT resolve, and the unslashed one DOES.
        if (response.status_code == 404 and
                request.path_info.endswith('/') and
                not is_valid_path(request.path_info) and
                is_valid_path(request.path_info[:-1])):
            # Use request.path because we munged app/locale in path_info.
            newurl = request.path[:-1]
            if request.GET:
                # Re-attach the query string, temporarily made ascii-safe so
                # it can go into the Location header.
                with safe_query_string(request):
                    newurl += '?' + request.META.get('QUERY_STRING', '')
            return HttpResponsePermanentRedirect(newurl)
        else:
            return response
@contextlib.contextmanager
def safe_query_string(request):
    """
    Turn the QUERY_STRING into a unicode- and ascii-safe string.
    We need unicode so it can be combined with a reversed URL, but it has to be
    ascii to go in a Location header. iri_to_uri seems like a good compromise.
    """
    qs = request.META.get('QUERY_STRING', '')
    try:
        request.META['QUERY_STRING'] = iri_to_uri(qs)
        yield
    finally:
        # Always restore the original value, even if the body raised.
        request.META['QUERY_STRING'] = qs
class CommonMiddleware(common.CommonMiddleware):
    """Django's CommonMiddleware, run with an ascii-safe QUERY_STRING."""
    def process_request(self, request):
        # Temporarily swap in the sanitised query string while the parent
        # middleware does its work; safe_query_string restores it after.
        with safe_query_string(request):
            result = super(CommonMiddleware, self).process_request(request)
        return result
class NonAtomicRequestsForSafeHttpMethodsMiddleware(MiddlewareMixin):
    """
    Middleware to make the view non-atomic if the HTTP method used is safe,
    in order to avoid opening and closing a useless transaction.
    """
    def process_view(self, request, view_func, view_args, view_kwargs):
        # This uses undocumented django APIS:
        # - transaction.get_connection() followed by in_atomic_block property,
        #   which we need to make sure we're not messing with a transaction
        #   that has already started (which happens in tests using the regular
        #   TestCase class)
        # - _non_atomic_requests(), which set the property to prevent the
        #   transaction on the view itself. We can't use non_atomic_requests
        #   (without the '_') as it returns a *new* view, and we can't do that
        #   in a middleware, we need to modify it in place and return None so
        #   that the rest of the middlewares are run.
        is_method_safe = request.method in ('HEAD', 'GET', 'OPTIONS', 'TRACE')
        if is_method_safe and not transaction.get_connection().in_atomic_block:
            transaction._non_atomic_requests(view_func, using='default')
        # Returning None lets Django continue through the middleware chain.
        return None
class ReadOnlyMiddleware(MiddlewareMixin):
    """Middleware that announces a downtime which for us usually means
    putting the site into read only mode.
    Supports issuing `Retry-After` header.
    """
    # NOTE(review): the docstring mentions Retry-After but no such header is
    # set anywhere in this class — confirm whether that is still intended.
    ERROR_MSG = _(
        u'Some features are temporarily disabled while we '
        u'perform website maintenance. We\'ll be back to '
        u'full capacity shortly.')

    def _render_api_error(self):
        # Single place to build the API "read only" 503 response.
        # Bug fix: process_exception already called this helper, but it was
        # never defined, so read-only DB errors on API requests raised
        # AttributeError instead of returning a 503.
        return JsonResponse({'error': self.ERROR_MSG}, status=503)

    def process_request(self, request):
        # Reject writes while the site is in read-only mode.
        if not settings.READ_ONLY:
            return
        if request.is_api:
            writable_method = request.method not in permissions.SAFE_METHODS
            if writable_method:
                return self._render_api_error()
        elif request.method == 'POST':
            return render(request, 'amo/read-only.html', status=503)

    def process_exception(self, request, exception):
        # A MySQL OperationalError during read-only mode is expected (the
        # replica rejects writes); surface the friendly 503 instead.
        if not settings.READ_ONLY:
            return
        if isinstance(exception, mysql.OperationalError):
            if request.is_api:
                return self._render_api_error()
            return render(request, 'amo/read-only.html', status=503)
class SetRemoteAddrFromForwardedFor(MiddlewareMixin):
    """
    Set request.META['REMOTE_ADDR'] from request.META['HTTP_X_FORWARDED_FOR'].
    Our application servers should always be behind a load balancer that sets
    this header correctly.
    """
    def is_valid_ip(self, ip):
        """Return True when *ip* parses as IPv4 or IPv6."""
        for family in (socket.AF_INET, socket.AF_INET6):
            try:
                socket.inet_pton(family, ip)
            except socket.error:
                continue
            return True
        return False

    def process_request(self, request):
        # Without the header there is nothing to rewrite.
        if 'HTTP_X_FORWARDED_FOR' not in request.META:
            return
        candidates = [part.strip() for part in
                      request.META['HTTP_X_FORWARDED_FOR'].split(',')]
        chain = [ip for ip in candidates if self.is_valid_ip(ip)]
        chain.append(request.META['REMOTE_ADDR'])
        known_proxies = getattr(settings, 'KNOWN_PROXIES', [])
        # Walk from the nearest hop outward and stop at the first address
        # that is not one of our known proxies.
        for ip in reversed(chain):
            request.META['REMOTE_ADDR'] = ip
            if ip not in known_proxies:
                break
class ScrubRequestOnException(MiddlewareMixin):
    """
    Hide sensitive information so they're not recorded in error logging.
    * passwords in request.POST
    * sessionid in request.COOKIES
    """
    def process_exception(self, request, exception):
        # Get a copy so it's mutable.
        request.POST = request.POST.copy()
        # Materialize the keys first, then overwrite password-like fields.
        password_keys = [k for k in request.POST if 'password' in k.lower()]
        for key in password_keys:
            request.POST[key] = '******'

        # Remove session id from cookies
        if settings.SESSION_COOKIE_NAME in request.COOKIES:
            request.COOKIES[settings.SESSION_COOKIE_NAME] = '******'

        # Clearing out all cookies in request.META. They will already
        # be sent with request.COOKIES.
        request.META['HTTP_COOKIE'] = '******'
class RequestIdMiddleware(MiddlewareMixin):
    """Middleware that tags every incoming request with a unique id.

    The id can correlate a request across system layers (logs, sentry),
    and is exposed to clients in the `X-AMO-Request-ID` response header.
    """
    def process_request(self, request):
        # Tag as early as possible so every later layer can log it.
        request.request_id = uuid.uuid4().hex

    def process_response(self, request, response):
        request_id = getattr(request, 'request_id', None)
        if request_id:
            response['X-AMO-Request-ID'] = request_id
        return response
class CorsMiddleware(_CorsMiddleware, MiddlewareMixin):
    """Wrapper to allow old style Middleware to work with django 1.10+.
    Will be unneeded once
    https://github.com/mstriemer/django-cors-headers/pull/3 is merged and a
    new release of django-cors-headers-multi is available."""
    # Intentionally empty: MiddlewareMixin supplies the new-style adapter.
    pass
| |
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use: per-request access logs,
# application-level errors, and tornado's own general-purpose messages.
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
    """Best-effort conversion of *s* to a unicode string.

    Falls back to repr() when the bytes cannot be decoded, so callers
    always get something printable.
    """
    try:
        result = _unicode(s)
    except UnicodeDecodeError:
        result = repr(s)
    return result
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Map logging levels to ANSI color numbers (passed to curses.tparm).
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }

    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
            It will be applied to the attributes dict of log records. The
            text between ``%(color)s`` and ``%(end_color)s`` will be colored
            depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
            code
        :arg string datefmt: Datetime format.
            Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
           Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt

        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            # No color: the escape-sequence fields render as empty strings.
            self._normal = ''

    def format(self, record):
        try:
            message = record.getMessage()
            assert isinstance(message, basestring_type)  # guaranteed by logging
            # Encoding notes:  The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring.  In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs.  Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings.  This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings whereever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            # Never let a bad message take down logging itself.
            record.message = "Bad message (%r): %r" % (e, record.__dict__)

        record.asctime = self.formatTime(record, self.datefmt)

        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''

        formatted = self._fmt % record.__dict__

        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines.  We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        # Indent continuation lines so multi-line records stay visually
        # grouped under their prefix.
        return formatted.replace("\n", "\n    ")
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.

    This is called automaticaly by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))

    if options.log_file_prefix:
        # Rotating file output, uncolored.
        file_channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        file_channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(file_channel)

    want_stderr = options.log_to_stderr or (
        options.log_to_stderr is None and not logger.handlers)
    if want_stderr:
        # Set up color if we are in a tty and curses is installed
        stderr_channel = logging.StreamHandler()
        stderr_channel.setFormatter(LogFormatter())
        logger.addHandler(stderr_channel)
def define_logging_options(options=None):
    """Register tornado's logging command-line options on *options*
    (defaults to the global tornado.options singleton) and hook
    enable_pretty_logging to run after parsing."""
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    # Apply the configuration automatically once options are parsed.
    options.add_parse_callback(enable_pretty_logging)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.SetKey import SetKey
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.sara_set_expression import SetExpression
from sara_flexbe_states.sara_move_base import SaraMoveBase
from sara_flexbe_states.sara_set_head_angle_key import SaraSetHeadAngleKey
from flexbe_states.calculation_state import CalculationState
from sara_flexbe_states.GetClosestObstacle import GetClosestObstacle
from flexbe_states.check_condition_state import CheckConditionState
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.WonderlandGetEntityVerbal import WonderlandGetEntityVerbal
from sara_flexbe_states.GetAttribute import GetAttribute
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Tue Oct 24 2017
@author: Philippe La Madeleine
'''
class Action_MoveSM(Behavior):
    '''
    Move Sara to a specific point.
    You can give the name of a place instead of a position (Thanks Phil).
    '''

    def __init__(self):
        # Generated FlexBE boilerplate: register the behavior name only.
        super(Action_MoveSM, self).__init__()
        self.name = 'Action_Move'

        # parameters of this behavior

        # references to used behaviors

        # Additional initialization code can be added inside the following tags
        # [MANUAL_INIT]

        # [/MANUAL_INIT]

        # Behavior comments:

    def create(self):
        """Assemble the hierarchical state machine: resolve the target pose
        (possibly by place name via Wonderland), then run a concurrent
        move + look-around container with a bounded retry loop."""
        # x:860 y:152, x:755 y:568
        _state_machine = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['pose'])
        _state_machine.userdata.pose = "crowd"
        _state_machine.userdata.name = "destination"

        # Additional creation code can be added inside the following tags
        # [MANUAL_CREATE]

        # [/MANUAL_CREATE]

        # Sub-machine: continuously point the head at the closest obstacle.
        # x:130 y:365
        _sm_look_around_0 = OperatableStateMachine(outcomes=['failed'])

        with _sm_look_around_0:
            # x:78 y:40
            OperatableStateMachine.add('set pitch',
                                       SetKey(Value=0.6),
                                       transitions={'done': 'get angle'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'pitch'})

            # x:348 y:210
            OperatableStateMachine.add('set head',
                                       SaraSetHeadAngleKey(),
                                       transitions={'done': 'get angle'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'yaw': 'yaw', 'pitch': 'pitch'})

            # x:204 y:248
            # Clamp the head yaw to [-1, 1] radians.
            OperatableStateMachine.add('limit yaw',
                                       CalculationState(calculation=lambda x: max(min(x, 1), -1)),
                                       transitions={'done': 'set head'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'yaw', 'output_value': 'yaw'})

            # x:191 y:126
            OperatableStateMachine.add('get angle',
                                       GetClosestObstacle(topic="/scan", maximumDistance=2),
                                       transitions={'done': 'limit yaw'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Angle': 'yaw', 'distance': 'distance', 'position': 'position'})

        # Sub-machine: single move_base navigation attempt.
        # x:30 y:365, x:130 y:365
        _sm_move_1 = OperatableStateMachine(outcomes=['arrived', 'failed'], input_keys=['pose'])

        with _sm_move_1:
            # x:95 y:122
            OperatableStateMachine.add('move',
                                       SaraMoveBase(reference="map"),
                                       transitions={'arrived': 'arrived', 'failed': 'failed'},
                                       autonomy={'arrived': Autonomy.Off, 'failed': Autonomy.Off},
                                       remapping={'pose': 'pose'})

        # Sub-machine: turn a place name (string or [name, containers] list)
        # into a pose via the Wonderland knowledge base.
        # x:259 y:573, x:491 y:362, x:491 y:453
        _sm_manage_name_2 = OperatableStateMachine(outcomes=['done', 'too much', 'not found'], input_keys=['pose'], output_keys=['pose', 'name'])

        with _sm_manage_name_2:
            # x:39 y:48
            OperatableStateMachine.add('check if Pose',
                                       CheckConditionState(predicate=lambda x: type(x) is type([])),
                                       transitions={'true': 'getname', 'false': 'check if string'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'pose'})

            # x:257 y:264
            OperatableStateMachine.add('getcontainers',
                                       CalculationState(calculation=lambda x: x[1:]),
                                       transitions={'done': 'get wonderland entity'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'pose', 'output_value': 'containers'})

            # x:232 y:354
            OperatableStateMachine.add('get wonderland entity',
                                       WonderlandGetEntityVerbal(),
                                       transitions={'one': 'get waypoint', 'multiple': 'too much', 'none': 'not found', 'error': 'not found'},
                                       autonomy={'one': Autonomy.Off, 'multiple': Autonomy.Off, 'none': Autonomy.Off, 'error': Autonomy.Off},
                                       remapping={'name': 'name', 'containers': 'containers', 'entities': 'entities', 'firstEntity': 'firstEntity'})

            # x:238 y:442
            OperatableStateMachine.add('get waypoint',
                                       GetAttribute(attributes=["waypoint"]),
                                       transitions={'done': 'done'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'object': 'firstEntity', 'waypoint': 'pose'})

            # x:256 y:166
            OperatableStateMachine.add('getname',
                                       CalculationState(calculation=lambda x: x[0]),
                                       transitions={'done': 'getcontainers'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'pose', 'output_value': 'name'})

            # x:42 y:180
            OperatableStateMachine.add('check if string',
                                       CheckConditionState(predicate=lambda x: type(x) is type("")),
                                       transitions={'true': 'remap to name', 'false': 'done'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'pose'})

            # x:40 y:251
            OperatableStateMachine.add('remap to name',
                                       CalculationState(calculation=lambda x: x),
                                       transitions={'done': 'set containers empty'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'pose', 'output_value': 'name'})

            # x:30 y:326
            OperatableStateMachine.add('set containers empty',
                                       SetKey(Value=[]),
                                       transitions={'done': 'get wonderland entity'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'containers'})

        # Concurrent container: navigate while scanning for obstacles.
        # x:30 y:365, x:130 y:365, x:230 y:365, x:330 y:365, x:430 y:365
        _sm_move_concurent_3 = ConcurrencyContainer(outcomes=['arrived', 'failed'], input_keys=['pose'], conditions=[
            ('arrived', [('Move', 'arrived')]),
            ('failed', [('Move', 'failed')]),
            ('failed', [('Look around', 'failed')])
        ])

        with _sm_move_concurent_3:
            # x:30 y:40
            OperatableStateMachine.add('Move',
                                       _sm_move_1,
                                       transitions={'arrived': 'arrived', 'failed': 'failed'},
                                       autonomy={'arrived': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'pose': 'pose'})

            # x:268 y:74
            OperatableStateMachine.add('Look around',
                                       _sm_look_around_0,
                                       transitions={'failed': 'failed'},
                                       autonomy={'failed': Autonomy.Inherit})

        with _state_machine:
            # x:54 y:27
            # 'Count' bounds the number of retries after a failed move.
            OperatableStateMachine.add('SetCount',
                                       SetKey(Value=2),
                                       transitions={'done': 'manage name'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'Key': 'Count'})

            # x:258 y:250
            OperatableStateMachine.add('stuck',
                                       SaraSay(sentence="I'm getting stuck.", input_keys=[], emotion=2, block=True),
                                       transitions={'done': 'Count--'},
                                       autonomy={'done': Autonomy.Off})

            # x:49 y:251
            OperatableStateMachine.add('try again',
                                       SaraSay(sentence="But I'm still going.", input_keys=[], emotion=1, block=False),
                                       transitions={'done': 'Move concurent'},
                                       autonomy={'done': Autonomy.Off})

            # x:360 y:508
            OperatableStateMachine.add('sorry',
                                       SaraSay(sentence="Well. It seem's I can't go there.", input_keys=[], emotion=2, block=True),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off})

            # x:672 y:147
            OperatableStateMachine.add('set blink',
                                       SetExpression(emotion=6, brightness=-1),
                                       transitions={'done': 'finished'},
                                       autonomy={'done': Autonomy.Off})

            # x:252 y:146
            OperatableStateMachine.add('Move concurent',
                                       _sm_move_concurent_3,
                                       transitions={'arrived': 'reset head', 'failed': 'stuck'},
                                       autonomy={'arrived': Autonomy.Inherit, 'failed': Autonomy.Inherit},
                                       remapping={'pose': 'pose'})

            # x:254 y:353
            OperatableStateMachine.add('Count--',
                                       CalculationState(calculation=lambda x: x - 1),
                                       transitions={'done': 'check count'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'input_value': 'Count', 'output_value': 'Count'})

            # x:37 y:351
            OperatableStateMachine.add('check count',
                                       CheckConditionState(predicate=lambda x: x >= 0),
                                       transitions={'true': 'try again', 'false': 'sorry'},
                                       autonomy={'true': Autonomy.Off, 'false': Autonomy.Off},
                                       remapping={'input_value': 'Count'})

            # x:497 y:149
            OperatableStateMachine.add('reset head',
                                       SaraSetHeadAngle(pitch=0.1, yaw=0),
                                       transitions={'done': 'set blink'},
                                       autonomy={'done': Autonomy.Off})

            # x:269 y:21
            OperatableStateMachine.add('manage name',
                                       _sm_manage_name_2,
                                       transitions={'done': 'set head', 'too much': 'say too much', 'not found': 'say not known'},
                                       autonomy={'done': Autonomy.Inherit, 'too much': Autonomy.Inherit, 'not found': Autonomy.Inherit},
                                       remapping={'pose': 'pose', 'name': 'name'})

            # x:46 y:147
            OperatableStateMachine.add('set head',
                                       SaraSetHeadAngle(pitch=0.8, yaw=0),
                                       transitions={'done': 'Move concurent'},
                                       autonomy={'done': Autonomy.Off})

            # x:477 y:333
            OperatableStateMachine.add('say too much',
                                       SaraSay(sentence=lambda x: "There is more than one " + x[0] + ".", input_keys=["poseName"], emotion=3, block=True),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'poseName': 'name'})

            # x:445 y:418
            OperatableStateMachine.add('say not known',
                                       SaraSay(sentence=lambda x: "I don't know where the " + x[0] + " is.", input_keys=["poseName"], emotion=0, block=True),
                                       transitions={'done': 'failed'},
                                       autonomy={'done': Autonomy.Off},
                                       remapping={'poseName': 'name'})

        return _state_machine

    # Private functions can be added inside the following tags
    # [MANUAL_FUNC]

    # [/MANUAL_FUNC]
| |
import requests
import json
import sys
class airbnbScraper:
def __init__(self, init_rooms):
self.rooms = []
self.results = {}
self.baseURL = 'https://www.airbnb.co.uk/rooms/'
# required data with key and value for prettyprint
self.requiredData = {
'name':'Property Name',
'localized_room_type':'Property Type',
'bedrooms':'# of Bedrooms',
'beds':'# of Beds',
'bathroom_label':'# of Bathrooms'
}
# New line variable for handling prettyprint
self.newLine = '\n\r'
if len(init_rooms) > 0:
#for room in init_rooms:
# self.rooms.append(room)
self.rooms = init_rooms
else:
# given room list, hard coded for now
self.rooms = [
'https://www.airbnb.co.uk/rooms/14531512?s=51',
'https://www.airbnb.co.uk/rooms/19278160?s=51',
'https://www.airbnb.co.uk/rooms/19292873?s=51'
]
def validateParameter(self, parameter):
# we're only interested in URLs which are in the format
# https://www.airbnb.co.uk/rooms/<ROOM_ID>
# or room id's
urlAirbnbValidator = parameter.find(self.baseURL)
# if the URL is in the parameter
if urlAirbnbValidator > -1:
return parameter
else:
# if not, check if the parameter is a room id
try:
return self.baseURL + str( int(parameter) )
# if not, throw error.
except:
raise ValueError('Only full Airbnb.co.uk URLs (' + self.baseURL + '<ROOM_ID>) or room IDs are accepted.', parameter)
def getPrettyAttributeName(self, attribute):
return self.requiredData[attribute]
def getRoomJson(self, request):
startTag = "<!--"
endTag = "-->"
# check request response
request.raise_for_status()
# grab text from http request
rText = request.text
# check for the JSON holding the room data
scriptLoc = rText.find('<script type="application/json" data-hypernova-key="p3show_marketplacebundlejs"')
if scriptLoc > -1:
# trim down the html page
trimmed = rText[scriptLoc:]
# extract the data we need by the HTML comment tags (remembering to include the length of the start tag itself)
data = trimmed[ trimmed.find(startTag) + len(startTag) : trimmed.find(endTag) ]
# load it as json and return it
return json.loads(data)
else:
# throw error
raise ValueError('Room JSON not found')
def getListingJson(self, roomJson):
# return the data we're interested in from the overall JSON
return roomJson['bootstrapData']['reduxData']['marketplacePdp']['listingInfo']['listing']
def getAmenityJson(self, listingJson):
# return the amenity data
return listingJson['listing_amenities']
def getListingId(self,listingJson):
return listingJson['id']
def getListingData(self, listingJson):
# results
listingData = {}
# loop through each attribute in the listing
for attribute in listingJson:
# if it's something we're interested in, add it to the results
if attribute in self.requiredData:
# grabs the human friendly name for the attribute E.G. "Property Name" for "localized_room_type"
listingData[self.getPrettyAttributeName(attribute)] = listingJson[attribute]
return listingData
def printListingData(self, filename, listingJson):
filename.write( self.getListingData(listingJson) )
def getAmenityData(self, amenityJson):
# results
amenityData = []
# Iterator for unnamed elements in json
i = 0
# loop through each amenity
for amenity in amenityJson:
# all amenities are listed, but only those which 'is_present' are the ones we're interested in
if amenity['is_present']:
# amenity names are pretty enough, no need to tidy
amenityData.append(amenityJson[i]['name'])
i += 1
return amenityData
def printAmenityData(self, filename, amenityJson):
filename.write( self.getAmenityData(amenityJson) )
def getListingTitle(self, listingId):
return self.newLine + '==== ROOM: ' + str ( listingId ) + ' ====' + self.newLine
def printListingTitle(self, filename):
filename.write(self.getListingTitle())
    def getAmenityTitle(self):
        # Banner line printed before the amenity list, wrapped in newlines.
        return self.newLine + '==== AMENITIES ====' + self.newLine
    def printAmenityTitle(self, filename):
        # Write the amenity banner to an open file handle.
        filename.write(self.getAmenityTitle())
def scrapeUrl(self, url):
roomData = {}
# validate the parameter we were given
url = self.validateParameter(url)
# make the request, get the overall JSON
urlData = self.getRoomJson(requests.get(url))
# get the listing JSON
listingData = self.getListingJson(urlData)
# get the amenity JSON
amenityData = self.getAmenityJson(listingData)
# add listing data to the room object
roomData['Listing'] = self.getListingData(listingData)
# add amenity data to room object
roomData['Amenities'] = self.getAmenityData(amenityData)
# add room object to the overall results
self.results[self.getListingId(listingData)] = roomData
return self.results
def scrapeUrls(self):
for room in self.rooms:
self.scrapeUrl(room)
return self.results
def printResults(self):
# for each room
for entry in self.results:
#print the data
print( self.getListingTitle( entry ) )
# loop around the listing details
for key, value in self.results[entry]['Listing'].items():
print ( key + ': ' + str( value ) )
print( self.getAmenityTitle() )
# loop around the amenities
for amenity in self.results[entry]['Amenities']:
print ( str(amenity) )
if __name__ == "__main__":
    # Build the scraper from command-line arguments; sys.argv[1:] drops the
    # script name so only room URLs/ids are passed through (falling back to
    # the hard-coded room list when none are given).
    scraper = airbnbScraper(sys.argv[1:])
    # Scrape every configured room...
    scraper.scrapeUrls()
    # ...then dump everything collected to stdout.
    scraper.printResults()
| |
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A simple IPC mechanism for communicating between two local processes. We
use marshal to serialize data - this means that both client and server must
run the same Python version, and that clients must be trusted (as
un-marshalling untrusted data can result in arbitrary code execution).
"""
import asyncio
import fcntl
import json
import marshal
import os.path
import socket
import struct
from typing import Any, Optional, Tuple
from libqtile.log_utils import logger
from libqtile.utils import get_cache_dir
# Wire format: marshalled payloads are prefixed with a network-byte-order
# unsigned long holding the payload length.
HDRFORMAT = "!L"
HDRLEN = struct.calcsize(HDRFORMAT)
# Per-display socket file name template, e.g. "qtilesocket.:0".
SOCKBASE = "qtilesocket.%s"
class IPCError(Exception):
    """Raised for IPC failures: missing socket file, undecodable data,
    or an unresponsive server."""
    pass
def find_sockfile(display: Optional[str] = None):
    """
    Finds the appropriate socket file for the given display.

    If unspecified, the socket file is determined as follows:

    - If WAYLAND_DISPLAY is set, use it.
    - else if DISPLAY is set, use that.
    - else check for the existence of a socket file for WAYLAND_DISPLAY=wayland-0
      and if it exists, use it.
    - else check for the existence of a socket file for DISPLAY=:0
      and if it exists, use it.
    - else raise an IPCError.

    NOTE: the annotation was fixed from ``display: str = None`` to
    ``Optional[str]`` (PEP 484: a default of None implies Optional).
    """
    cache_directory = get_cache_dir()
    # An explicit display always wins.
    if display:
        return os.path.join(cache_directory, SOCKBASE % display)
    # Prefer the Wayland display, then X11.
    display = os.environ.get("WAYLAND_DISPLAY")
    if display:
        return os.path.join(cache_directory, SOCKBASE % display)
    display = os.environ.get("DISPLAY")
    if display:
        return os.path.join(cache_directory, SOCKBASE % display)
    # Neither env var set: fall back to conventional default display names,
    # accepting whichever socket file already exists.
    sockfile = os.path.join(cache_directory, SOCKBASE % "wayland-0")
    if os.path.exists(sockfile):
        return sockfile
    sockfile = os.path.join(cache_directory, SOCKBASE % ":0")
    if os.path.exists(sockfile):
        return sockfile
    raise IPCError("Could not find socket file.")
class _IPC:
"""A helper class to handle properly packing and unpacking messages"""
@staticmethod
def unpack(data: bytes, *, is_json: Optional[bool] = None) -> Tuple[Any, bool]:
"""Unpack the incoming message
Parameters
----------
data : bytes
The incoming message to unpack
is_json : Optional[bool]
If the message should be unpacked as json. By default, try to
unpack json and fallback gracefully to marshalled bytes.
Returns
-------
Tuple[Any, bool]
A tuple of the unpacked object and a boolean denoting if the
message was deserialized using json. If True, the return message
should be packed as json.
"""
if is_json is None or is_json:
try:
return json.loads(data.decode()), True
except ValueError as e:
if is_json:
raise IPCError("Unable to decode json data") from e
try:
assert len(data) >= HDRLEN
size = struct.unpack(HDRFORMAT, data[:HDRLEN])[0]
assert size >= len(data[HDRLEN:])
return marshal.loads(data[HDRLEN:HDRLEN + size]), False
except AssertionError as e:
raise IPCError(
"error reading reply! (probably the socket was disconnected)"
) from e
@staticmethod
def pack(msg: Any, *, is_json: bool = False) -> bytes:
"""Pack the object into a message to pass"""
if is_json:
json_obj = json.dumps(msg)
return json_obj.encode()
msg_bytes = marshal.dumps(msg)
size = struct.pack(HDRFORMAT, len(msg_bytes))
return size + msg_bytes
class Client:
    def __init__(self, socket_path: str, is_json=False) -> None:
        """Create a new IPC client

        Parameters
        ----------
        socket_path : str
            Path of the unix socket file used to reach the running IPC
            server.
        is_json : bool
            Pack and unpack messages as json rather than marshal.
        """
        self.socket_path = socket_path
        self.is_json = is_json
    def call(self, data: Any) -> Any:
        """Alias for send()."""
        return self.send(data)
    def send(self, msg: Any) -> Any:
        """Synchronously send *msg* and return the server's reply.

        Any exception raised by the server propagates out of this call.
        """
        return asyncio.run(self.async_send(msg))
    async def async_send(self, msg: Any) -> Any:
        """Connect to the server, send *msg*, and await its reply."""
        try:
            connection = asyncio.open_unix_connection(path=self.socket_path)
            reader, writer = await asyncio.wait_for(connection, timeout=3)
        except (ConnectionRefusedError, FileNotFoundError):
            raise IPCError("Could not open {}".format(self.socket_path))
        try:
            writer.write(_IPC.pack(msg, is_json=self.is_json))
            # EOF tells the server the request is complete.
            writer.write_eof()
            raw_reply = await asyncio.wait_for(reader.read(), timeout=10)
        except asyncio.TimeoutError:
            raise IPCError("Server not responding")
        finally:
            # see the note in Server._server_callback()
            writer.close()
            await writer.wait_closed()
        reply, _ = _IPC.unpack(raw_reply, is_json=self.is_json)
        return reply
class Server:
    def __init__(self, socket_path: str, handler) -> None:
        """Create the IPC server socket.

        Parameters
        ----------
        socket_path : str
            Path of the unix socket file to listen on; any stale file left
            by a previous run is removed first.
        handler
            Callable invoked with each unpacked request; its return value is
            sent back to the client.
        """
        self.socket_path = socket_path
        self.handler = handler
        self.server = None  # type: Optional[asyncio.AbstractServer]
        # Remove any stale socket file before binding.
        if os.path.exists(socket_path):
            os.unlink(socket_path)
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
        # Mark the listening fd close-on-exec so spawned children never
        # inherit it.
        fd_flags = fcntl.fcntl(self.sock.fileno(), fcntl.F_GETFD)
        fcntl.fcntl(self.sock.fileno(), fcntl.F_SETFD, fd_flags | fcntl.FD_CLOEXEC)
        self.sock.bind(self.socket_path)
    async def _server_callback(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """Handle one client connection: read, dispatch, reply, close."""
        try:
            logger.debug("Connection made to server")
            raw = await reader.read()
            logger.debug("EOF received by server")
            request, reply_as_json = _IPC.unpack(raw)
        except IPCError:
            logger.warning("Invalid data received, closing connection")
        else:
            # Reply using the same serialization the request arrived in.
            response = self.handler(request)
            packed = _IPC.pack(response, is_json=reply_as_json)
            logger.debug("Sending result on receive EOF")
            writer.write(packed)
            logger.debug("Closing connection on receive EOF")
            writer.write_eof()
        finally:
            writer.close()
            await writer.wait_closed()
    async def __aenter__(self) -> "Server":
        """Start and return the server"""
        await self.start()
        return self
    async def __aexit__(self, _exc_type, _exc_value, _tb) -> None:
        """Close and shutdown the server"""
        await self.close()
    async def start(self) -> None:
        """Begin serving on the pre-bound socket."""
        assert self.server is None
        logger.debug("Starting server")
        self.server = await asyncio.start_unix_server(
            self._server_callback, sock=self.sock
        )
    async def close(self) -> None:
        """Stop serving and release the asyncio server."""
        assert self.server is not None
        logger.debug("Stopping server on close")
        self.server.close()
        await self.server.wait_closed()
        self.server = None
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Ken Pepple
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for flavors code
"""
import sys
import time
from nova.compute import flavors
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova.openstack.common.db.sqlalchemy import session as sql_session
from nova import test
# Snapshot of the five default flavors as stored in the test database, in
# their default sort order; used by the get_all_flavors_sorted_list tests.
DEFAULT_FLAVORS = [
    {'memory_mb': 512, 'root_gb': 1, 'deleted_at': None, 'name': 'm1.tiny',
     'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
     'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0,
     'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '1',
     'vcpu_weight': None, 'id': 2},
    {'memory_mb': 2048, 'root_gb': 20, 'deleted_at': None, 'name': 'm1.small',
     'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
     'disabled': False, 'vcpus': 1, 'extra_specs': {}, 'swap': 0,
     'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '2',
     'vcpu_weight': None, 'id': 5},
    {'memory_mb': 4096, 'root_gb': 40, 'deleted_at': None, 'name': 'm1.medium',
     'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
     'disabled': False, 'vcpus': 2, 'extra_specs': {}, 'swap': 0,
     'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '3',
     'vcpu_weight': None, 'id': 1},
    {'memory_mb': 8192, 'root_gb': 80, 'deleted_at': None, 'name': 'm1.large',
     'deleted': 0, 'created_at': None, 'ephemeral_gb': 0, 'updated_at': None,
     'disabled': False, 'vcpus': 4, 'extra_specs': {}, 'swap': 0,
     'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '4',
     'vcpu_weight': None, 'id': 3},
    {'memory_mb': 16384, 'root_gb': 160, 'deleted_at': None,
     'name': 'm1.xlarge', 'deleted': 0, 'created_at': None, 'ephemeral_gb': 0,
     'updated_at': None, 'disabled': False, 'vcpus': 8, 'extra_specs': {},
     'swap': 0, 'rxtx_factor': 1.0, 'is_public': True, 'flavorid': '5',
     'vcpu_weight': None, 'id': 4}
]
class InstanceTypeTestCase(test.TestCase):
    """Test cases for flavor code."""
    def _generate_name(self):
        """return a name not in the DB."""
        nonexistent_flavor = str(int(time.time()))
        all_flavors = flavors.get_all_flavors()
        # NOTE: the `else` on this while loop runs once the condition fails,
        # so this returns the first suffixed candidate not already taken.
        while nonexistent_flavor in all_flavors:
            nonexistent_flavor += "z"
        else:
            return nonexistent_flavor
    def _generate_flavorid(self):
        """return a flavorid not in the DB."""
        nonexistent_flavor = 2700
        # NOTE: iteritems() is Python 2 only.
        flavor_ids = [value["id"] for key, value in
                      flavors.get_all_flavors().iteritems()]
        while nonexistent_flavor in flavor_ids:
            nonexistent_flavor += 1
        else:
            return nonexistent_flavor
    def _existing_flavor(self):
        """return first flavor name."""
        # NOTE: indexing keys() relies on Python 2 returning a list.
        return flavors.get_all_flavors().keys()[0]
    def test_add_instance_type_access(self):
        # Granting access links the project to the flavor's internal id.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        type_ref = flavors.create('some flavor', 256, 1, 120, 100,
                                  flavorid=flavor_id)
        access_ref = flavors.add_flavor_access(flavor_id,
                                               project_id,
                                               ctxt=ctxt)
        self.assertEqual(access_ref["project_id"], project_id)
        self.assertEqual(access_ref["instance_type_id"], type_ref["id"])
    def test_add_flavor_access_already_exists(self):
        # Granting the same access twice raises FlavorAccessExists.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
        flavors.add_flavor_access(flavor_id, project_id, ctxt=ctxt)
        self.assertRaises(exception.FlavorAccessExists,
                          flavors.add_flavor_access,
                          flavor_id, project_id, ctxt)
    def test_add_flavor_access_invalid_flavor(self):
        # Granting access to an unknown flavor raises FlavorNotFound.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'no_such_flavor'
        self.assertRaises(exception.FlavorNotFound,
                          flavors.add_flavor_access,
                          flavor_id, project_id, ctxt)
    def test_remove_flavor_access(self):
        # Removing access leaves the flavor with no project entries.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
        flavors.add_flavor_access(flavor_id, project_id, ctxt)
        flavors.remove_flavor_access(flavor_id, project_id, ctxt)
        projects = flavors.get_flavor_access_by_flavor_id(flavor_id,
                                                          ctxt)
        self.assertEqual([], projects)
    def test_remove_flavor_access_doesnt_exists(self):
        # Removing access that was never granted raises FlavorAccessNotFound.
        user_id = 'fake'
        project_id = 'fake'
        ctxt = context.RequestContext(user_id, project_id, is_admin=True)
        flavor_id = 'flavor1'
        flavors.create('some flavor', 256, 1, 120, 100, flavorid=flavor_id)
        self.assertRaises(exception.FlavorAccessNotFound,
                          flavors.remove_flavor_access,
                          flavor_id, project_id, ctxt=ctxt)
    def test_get_all_instance_types(self):
        # Ensures that all flavors can be retrieved.
        session = sql_session.get_session()
        total_instance_types = session.query(models.InstanceTypes).count()
        inst_types = flavors.get_all_flavors()
        self.assertEqual(total_instance_types, len(inst_types))
    def test_non_existent_inst_type_shouldnt_delete(self):
        # Ensures that flavor creation fails with invalid args.
        self.assertRaises(exception.FlavorNotFoundByName,
                          flavors.destroy,
                          'unknown_flavor')
    def test_will_not_destroy_with_no_name(self):
        # Ensure destroy said path of no name raises error.
        self.assertRaises(exception.FlavorNotFoundByName,
                          flavors.destroy, None)
    def test_will_not_get_bad_default_instance_type(self):
        # ensures error raised on bad default flavor.
        self.flags(default_flavor='unknown_flavor')
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_default_flavor)
    def test_will_get_flavor_by_id(self):
        # The default flavor round-trips through get_flavor by id.
        default_instance_type = flavors.get_default_flavor()
        instance_type_id = default_instance_type['id']
        fetched = flavors.get_flavor(instance_type_id)
        self.assertEqual(default_instance_type, fetched)
    def test_will_not_get_flavor_by_unknown_id(self):
        # Ensure get by name returns default flavor with no name.
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_flavor, 10000)
    def test_will_not_get_flavor_with_bad_id(self):
        # Ensure get by name returns default flavor with bad name.
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_flavor, 'asdf')
    def test_flavor_get_by_None_name_returns_default(self):
        # Ensure get by name returns default flavor with no name.
        default = flavors.get_default_flavor()
        actual = flavors.get_flavor_by_name(None)
        self.assertEqual(default, actual)
    def test_will_not_get_flavor_with_bad_name(self):
        # Ensure get by name returns default flavor with bad name.
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_flavor_by_name, 10000)
    def test_will_not_get_instance_by_unknown_flavor_id(self):
        # Ensure get by flavor raises error with wrong flavorid.
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_flavor_by_flavor_id,
                          'unknown_flavor')
    def test_will_get_instance_by_flavor_id(self):
        # The default flavor round-trips through get_flavor_by_flavor_id.
        default_instance_type = flavors.get_default_flavor()
        flavorid = default_instance_type['flavorid']
        fetched = flavors.get_flavor_by_flavor_id(flavorid)
        self.assertEqual(default_instance_type, fetched)
    def test_can_read_deleted_types_using_flavor_id(self):
        # Ensure deleted flavors can be read when querying flavor_id.
        inst_type_name = "test"
        inst_type_flavor_id = "test1"
        inst_type = flavors.create(inst_type_name, 256, 1, 120, 100,
                                   inst_type_flavor_id)
        self.assertEqual(inst_type_name, inst_type["name"])
        # NOTE(jk0): The deleted flavor will show up here because the context
        # in get_flavor_by_flavor_id() is set to use read_deleted by
        # default.
        flavors.destroy(inst_type["name"])
        deleted_inst_type = flavors.get_flavor_by_flavor_id(
            inst_type_flavor_id)
        self.assertEqual(inst_type_name, deleted_inst_type["name"])
    def test_read_deleted_false_converting_flavorid(self):
        """
        Ensure deleted flavors are not returned when not needed (for
        example when creating a server and attempting to translate from
        flavorid to instance_type_id.
        """
        flavors.create("instance_type1", 256, 1, 120, 100, "test1")
        flavors.destroy("instance_type1")
        flavors.create("instance_type1_redo", 256, 1, 120, 100, "test1")
        instance_type = flavors.get_flavor_by_flavor_id(
            "test1", read_deleted="no")
        self.assertEqual("instance_type1_redo", instance_type["name"])
    def test_get_all_flavors_sorted_list_sort(self):
        # Test default sort
        all_flavors = flavors.get_all_flavors_sorted_list()
        self.assertEqual(DEFAULT_FLAVORS, all_flavors)
        # Test sorted by name
        all_flavors = flavors.get_all_flavors_sorted_list(sort_key='name')
        expected = sorted(DEFAULT_FLAVORS, key=lambda item: item['name'])
        self.assertEqual(expected, all_flavors)
    def test_get_all_flavors_sorted_list_limit(self):
        # limit= caps the number of returned flavors.
        limited_flavors = flavors.get_all_flavors_sorted_list(limit=2)
        self.assertEqual(2, len(limited_flavors))
    def test_get_all_flavors_sorted_list_marker(self):
        all_flavors = flavors.get_all_flavors_sorted_list()
        # Set the 3rd result as the marker
        marker_flavorid = all_flavors[2]['flavorid']
        marked_flavors = flavors.get_all_flavors_sorted_list(
            marker=marker_flavorid)
        # We expect everything /after/ the 3rd result
        expected_results = all_flavors[3:]
        self.assertEqual(expected_results, marked_flavors)
    def test_get_inactive_flavors(self):
        # Destroyed flavors only appear when inactive=True is passed.
        flav1 = flavors.create('flavor1', 256, 1, 120)
        flav2 = flavors.create('flavor2', 512, 4, 250)
        flavors.destroy('flavor1')
        returned_flavors_ids = flavors.get_all_flavors().keys()
        self.assertNotIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
        returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
        self.assertIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
    def test_get_inactive_flavors_with_same_name(self):
        # A destroyed and a live flavor may share a name; both are visible
        # only with inactive=True.
        flav1 = flavors.create('flavor', 256, 1, 120)
        flavors.destroy('flavor')
        flav2 = flavors.create('flavor', 512, 4, 250)
        returned_flavors_ids = flavors.get_all_flavors().keys()
        self.assertNotIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
        returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
        self.assertIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
    def test_get_inactive_flavors_with_same_flavorid(self):
        # Same as above, but the reused identifier is the flavorid.
        flav1 = flavors.create('flavor', 256, 1, 120, 100, "flavid")
        flavors.destroy('flavor')
        flav2 = flavors.create('flavor', 512, 4, 250, 100, "flavid")
        returned_flavors_ids = flavors.get_all_flavors().keys()
        self.assertNotIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
        returned_flavors_ids = flavors.get_all_flavors(inactive=True).keys()
        self.assertIn(flav1['id'], returned_flavors_ids)
        self.assertIn(flav2['id'], returned_flavors_ids)
class InstanceTypeToolsTest(test.TestCase):
    """Tests for the flavor <-> system_metadata conversion helpers."""
    def _dict_to_metadata(self, data):
        # Convert a plain dict into the list-of-{key, value} rows used by
        # instance system_metadata.
        return [{'key': key, 'value': value} for key, value in data.items()]
    def _test_extract_flavor(self, prefix):
        # Save the default flavor into metadata, then check extract_flavor
        # reconstructs exactly the saved properties.
        instance_type = flavors.get_default_flavor()
        metadata = {}
        flavors.save_flavor_info(metadata, instance_type,
                                 prefix)
        instance = {'system_metadata': self._dict_to_metadata(metadata)}
        _instance_type = flavors.extract_flavor(instance, prefix)
        props = flavors.system_metadata_flavor_props.keys()
        # NOTE: deleting while iterating .keys() relies on Python 2, where
        # keys() returns a list copy rather than a live view.
        for key in instance_type.keys():
            if key not in props:
                del instance_type[key]
        self.assertEqual(instance_type, _instance_type)
    def test_extract_flavor(self):
        self._test_extract_flavor('')
    def test_extract_flavor_prefix(self):
        self._test_extract_flavor('foo_')
    def test_save_flavor_info(self):
        # Keys are written as "<prefix>instance_type_<prop>".
        instance_type = flavors.get_default_flavor()
        example = {}
        example_prefix = {}
        for key in flavors.system_metadata_flavor_props.keys():
            example['instance_type_%s' % key] = instance_type[key]
            example_prefix['fooinstance_type_%s' % key] = instance_type[key]
        metadata = {}
        flavors.save_flavor_info(metadata, instance_type)
        self.assertEqual(example, metadata)
        metadata = {}
        flavors.save_flavor_info(metadata, instance_type, 'foo')
        self.assertEqual(example_prefix, metadata)
    def test_delete_flavor_info(self):
        # Deleting both the '' and '_' prefixed copies empties the metadata.
        instance_type = flavors.get_default_flavor()
        metadata = {}
        flavors.save_flavor_info(metadata, instance_type)
        flavors.save_flavor_info(metadata, instance_type, '_')
        flavors.delete_flavor_info(metadata, '', '_')
        self.assertEqual(metadata, {})
class InstanceTypeFilteringTest(test.TestCase):
    """Test cases for the filter option available for instance_type_get_all."""
    def setUp(self):
        super(InstanceTypeFilteringTest, self).setUp()
        self.context = context.get_admin_context()
    def assertFilterResults(self, filters, expected):
        # Fetch flavors under the given filters and compare the name lists.
        rows = db.flavor_get_all(
            self.context, filters=filters)
        names = [row['name'] for row in rows]
        self.assertEqual(names, expected)
    def test_no_filters(self):
        # With no filters, every default flavor comes back.
        self.assertFilterResults(None,
                                 ['m1.tiny', 'm1.small', 'm1.medium',
                                  'm1.large', 'm1.xlarge'])
    def test_min_memory_mb_filter(self):
        # Exclude tiny instance which is 512 MB.
        self.assertFilterResults({'min_memory_mb': 513},
                                 ['m1.small', 'm1.medium', 'm1.large',
                                  'm1.xlarge'])
    def test_min_root_gb_filter(self):
        # Exclude everything but large and xlarge which have >= 80 GB.
        self.assertFilterResults({'min_root_gb': 80},
                                 ['m1.large', 'm1.xlarge'])
    def test_min_memory_mb_AND_root_gb_filter(self):
        # Both constraints together leave only xlarge.
        self.assertFilterResults({'min_memory_mb': 16384, 'min_root_gb': 80},
                                 ['m1.xlarge'])
class CreateInstanceTypeTest(test.TestCase):
    """Input-validation tests for flavors.create()."""
    def assertInvalidInput(self, *create_args, **create_kwargs):
        # Helper: flavor creation with these args must raise InvalidInput.
        self.assertRaises(exception.InvalidInput, flavors.create,
                          *create_args, **create_kwargs)
    def test_create_with_valid_name(self):
        # Names can contain alphanumeric and [_.- ]
        flavors.create('azAZ09. -_', 64, 1, 120)
        # And they are not limited to ascii characters
        # E.g.: m1.huge in simplified Chinese
        flavors.create(u'm1.\u5DE8\u5927', 6400, 100, 12000)
    def test_name_with_special_characters(self):
        # Names can contain alphanumeric and [_.- ]
        flavors.create('_foo.bar-123', 64, 1, 120)
        # Ensure instance types raises InvalidInput for invalid characters.
        self.assertInvalidInput('foobar#', 64, 1, 120)
    def test_non_ascii_name_with_special_characters(self):
        self.assertInvalidInput(u'm1.\u5DE8\u5927 #', 64, 1, 120)
    def test_name_length_checks(self):
        MAX_LEN = 255
        # Flavor name with 255 characters or less is valid.
        flavors.create('a' * MAX_LEN, 64, 1, 120)
        # Flavor name which is more than 255 characters will cause error.
        self.assertInvalidInput('a' * (MAX_LEN + 1), 64, 1, 120)
        # Flavor name which is empty should cause an error
        self.assertInvalidInput('', 64, 1, 120)
    def test_all_whitespace_flavor_names_rejected(self):
        self.assertInvalidInput(' ', 64, 1, 120)
    def test_flavorid_with_invalid_characters(self):
        # Ensure Flavor ID can only contain [a-zA-Z0-9_.- ]
        self.assertInvalidInput('a', 64, 1, 120, flavorid=u'\u2605')
        self.assertInvalidInput('a', 64, 1, 120, flavorid='%%$%$@#$#@$@#$^%')
    def test_flavorid_length_checks(self):
        MAX_LEN = 255
        # Flavor ID which is more than 255 characters will cause error.
        self.assertInvalidInput('a', 64, 1, 120, flavorid='a' * (MAX_LEN + 1))
    def test_memory_must_be_positive_integer(self):
        # NOTE: sys.maxint is Python 2 only.
        self.assertInvalidInput('flavor1', 'foo', 1, 120)
        self.assertInvalidInput('flavor1', -1, 1, 120)
        self.assertInvalidInput('flavor1', 0, 1, 120)
        self.assertInvalidInput('flavor1', sys.maxint + 1, 1, 120)
        flavors.create('flavor1', 1, 1, 120)
    def test_vcpus_must_be_positive_integer(self):
        self.assertInvalidInput('flavor`', 64, 'foo', 120)
        self.assertInvalidInput('flavor1', 64, -1, 120)
        self.assertInvalidInput('flavor1', 64, 0, 120)
        self.assertInvalidInput('flavor1', 64, sys.maxint + 1, 120)
        flavors.create('flavor1', 64, 1, 120)
    def test_root_gb_must_be_nonnegative_integer(self):
        # root_gb may be zero, unlike memory_mb and vcpus.
        self.assertInvalidInput('flavor1', 64, 1, 'foo')
        self.assertInvalidInput('flavor1', 64, 1, -1)
        self.assertInvalidInput('flavor1', 64, 1, sys.maxint + 1)
        flavors.create('flavor1', 64, 1, 0)
        flavors.create('flavor2', 64, 1, 120)
    def test_ephemeral_gb_must_be_nonnegative_integer(self):
        self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo')
        self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1)
        self.assertInvalidInput('flavor1', 64, 1, 120,
                                ephemeral_gb=sys.maxint + 1)
        flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0)
        flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120)
    def test_swap_must_be_nonnegative_integer(self):
        self.assertInvalidInput('flavor1', 64, 1, 120, swap='foo')
        self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1)
        self.assertInvalidInput('flavor1', 64, 1, 120, swap=sys.maxint + 1)
        flavors.create('flavor1', 64, 1, 120, swap=0)
        flavors.create('flavor2', 64, 1, 120, swap=1)
    def test_rxtx_factor_must_be_positive_float(self):
        self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo')
        self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0)
        self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0)
        flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0)
        self.assertEqual(1.0, flavor['rxtx_factor'])
        flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1)
        self.assertEqual(1.1, flavor['rxtx_factor'])
    def test_is_public_must_be_valid_bool_string(self):
        # Any of the usual boolean spellings is accepted; others are not.
        self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo')
        flavors.create('flavor1', 64, 1, 120, is_public='TRUE')
        flavors.create('flavor2', 64, 1, 120, is_public='False')
        flavors.create('flavor3', 64, 1, 120, is_public='Yes')
        flavors.create('flavor4', 64, 1, 120, is_public='No')
        flavors.create('flavor5', 64, 1, 120, is_public='Y')
        flavors.create('flavor6', 64, 1, 120, is_public='N')
        flavors.create('flavor7', 64, 1, 120, is_public='1')
        flavors.create('flavor8', 64, 1, 120, is_public='0')
        flavors.create('flavor9', 64, 1, 120, is_public='true')
    def test_flavorid_populated(self):
        # An omitted or empty flavorid is auto-generated; a given one sticks.
        flavor1 = flavors.create('flavor1', 64, 1, 120)
        self.assertIsNot(None, flavor1['flavorid'])
        flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='')
        self.assertIsNot(None, flavor2['flavorid'])
        flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo')
        self.assertEqual('foo', flavor3['flavorid'])
    def test_default_values(self):
        flavor1 = flavors.create('flavor1', 64, 1, 120)
        self.assertIsNot(None, flavor1['flavorid'])
        self.assertEqual(flavor1['ephemeral_gb'], 0)
        self.assertEqual(flavor1['swap'], 0)
        self.assertEqual(flavor1['rxtx_factor'], 1.0)
    def test_basic_create(self):
        # Ensure instance types can be created.
        original_list = flavors.get_all_flavors()
        # Create new type and make sure values stick
        flavor = flavors.create('flavor', 64, 1, 120)
        self.assertEqual(flavor['name'], 'flavor')
        self.assertEqual(flavor['memory_mb'], 64)
        self.assertEqual(flavor['vcpus'], 1)
        self.assertEqual(flavor['root_gb'], 120)
        # Ensure new type shows up in list
        new_list = flavors.get_all_flavors()
        self.assertNotEqual(len(original_list), len(new_list),
                            'flavor was not created')
    def test_create_then_delete(self):
        original_list = flavors.get_all_flavors()
        flavor = flavors.create('flavor', 64, 1, 120)
        # Ensure new type shows up in list
        new_list = flavors.get_all_flavors()
        self.assertNotEqual(len(original_list), len(new_list),
                            'instance type was not created')
        flavors.destroy('flavor')
        self.assertRaises(exception.FlavorNotFound,
                          flavors.get_flavor, flavor['id'])
        # Deleted instance should not be in list anymore
        new_list = flavors.get_all_flavors()
        self.assertEqual(original_list, new_list)
    def test_duplicate_names_fail(self):
        # Ensures that name duplicates raise FlavorCreateFailed.
        flavors.create('flavor', 256, 1, 120, 200, 'flavor1')
        self.assertRaises(exception.FlavorExists,
                          flavors.create,
                          'flavor', 64, 1, 120)
    def test_duplicate_flavorids_fail(self):
        # Ensures that flavorid duplicates raise FlavorCreateFailed.
        flavors.create('flavor1', 64, 1, 120, flavorid='flavorid')
        self.assertRaises(exception.FlavorIdExists,
                          flavors.create,
                          'flavor2', 64, 1, 120, flavorid='flavorid')
| |
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generate melodies from a trained checkpoint of an improv RNN model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
import time
from magenta.models.improv_rnn import improv_rnn_config_flags
from magenta.models.improv_rnn import improv_rnn_model
from magenta.models.improv_rnn import improv_rnn_sequence_generator
from magenta.models.shared import sequence_generator
from magenta.models.shared import sequence_generator_bundle
import note_seq
from note_seq.protobuf import generator_pb2
from note_seq.protobuf import music_pb2
import tensorflow.compat.v1 as tf
# Text-annotation type used when copying the backing chords into the
# generated NoteSequence.
CHORD_SYMBOL = music_pb2.NoteSequence.TextAnnotation.CHORD_SYMBOL

# Velocity at which to play chord notes when rendering chords.
CHORD_VELOCITY = 50

FLAGS = tf.app.flags.FLAGS

# --- Checkpoint / bundle source flags ---
tf.app.flags.DEFINE_string(
    'run_dir', None,
    'Path to the directory where the latest checkpoint will be loaded from.')
tf.app.flags.DEFINE_string(
    'bundle_file', None,
    'Path to the bundle file. If specified, this will take priority over '
    'run_dir, unless save_generator_bundle is True, in which case both this '
    'flag and run_dir are required')
tf.app.flags.DEFINE_boolean(
    'save_generator_bundle', False,
    'If true, instead of generating a sequence, will save this generator as a '
    'bundle file in the location specified by the bundle_file flag')
tf.app.flags.DEFINE_string(
    'bundle_description', None,
    'A short, human-readable text description of the bundle (e.g., training '
    'data, hyper parameters, etc.).')

# --- Output flags ---
tf.app.flags.DEFINE_string(
    'output_dir', '/tmp/improv_rnn/generated',
    'The directory where MIDI files will be saved to.')
tf.app.flags.DEFINE_integer(
    'num_outputs', 10,
    'The number of lead sheets to generate. One MIDI file will be created for '
    'each.')

# --- Priming and backing-chord content flags ---
tf.app.flags.DEFINE_integer(
    'steps_per_chord', 16,
    'The number of melody steps to take per backing chord. Each step is a 16th '
    'of a bar, so if backing_chords = "C G Am F" and steps_per_chord = 16, '
    'four bars will be generated.')
tf.app.flags.DEFINE_string(
    'primer_melody', '', 'A string representation of a Python list of '
    'note_seq.Melody event values. For example: '
    '"[60, -2, 60, -2, 67, -2, 67, -2]". If specified, this melody will be '
    'used as the priming melody. If a priming melody is not specified, '
    'melodies will be generated from scratch.')
tf.app.flags.DEFINE_string(
    'backing_chords', 'C G Am F C G F C',
    'A string representation of a chord progression, with chord symbols '
    'separated by spaces. For example: "C Dm7 G13 Cmaj7". The duration of each '
    'chord, in steps, is specified by the steps_per_chord flag.')
tf.app.flags.DEFINE_string(
    'primer_midi', '',
    'The path to a MIDI file containing a melody that will be used as a '
    'priming melody. If a primer melody is not specified, melodies will be '
    'generated from scratch.')
tf.app.flags.DEFINE_boolean(
    'render_chords', False,
    'If true, the backing chords will also be rendered as notes in the output '
    'MIDI files.')
tf.app.flags.DEFINE_float(
    'qpm', None,
    'The quarters per minute to play generated output at. If a primer MIDI is '
    'given, the qpm from that will override this flag. If qpm is None, qpm '
    'will default to 120.')

# --- Sampling / beam-search flags ---
tf.app.flags.DEFINE_float(
    'temperature', 1.0,
    'The randomness of the generated melodies. 1.0 uses the unaltered softmax '
    'probabilities, greater than 1.0 makes melodies more random, less than 1.0 '
    'makes melodies less random.')
tf.app.flags.DEFINE_integer(
    'beam_size', 1,
    'The beam size to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
    'branch_factor', 1,
    'The branch factor to use for beam search when generating melodies.')
tf.app.flags.DEFINE_integer(
    'steps_per_iteration', 1,
    'The number of melody steps to take per beam search iteration.')

# --- Logging ---
tf.app.flags.DEFINE_string(
    'log', 'INFO',
    'The threshold for what messages will be logged DEBUG, INFO, WARN, ERROR, '
    'or FATAL.')
def get_checkpoint():
  """Return the training directory to load the model from, or None.

  Raises:
    sequence_generator.SequenceGeneratorError: if both --run_dir and
      --bundle_file are given without --save_generator_bundle.
  """
  run_dir = FLAGS.run_dir
  if run_dir and FLAGS.bundle_file and not FLAGS.save_generator_bundle:
    raise sequence_generator.SequenceGeneratorError(
        'Cannot specify both bundle_file and run_dir')
  if not run_dir:
    return None
  return os.path.join(os.path.expanduser(run_dir), 'train')
def get_bundle():
  """Read and return the generator bundle named by --bundle_file.

  Returns:
    Either a generator_pb2.GeneratorBundle or None if the bundle_file flag is
    not set or the save_generator_bundle flag is set.
  """
  if FLAGS.save_generator_bundle or FLAGS.bundle_file is None:
    return None
  return sequence_generator_bundle.read_bundle_file(
      os.path.expanduser(FLAGS.bundle_file))
def run_with_flags(generator):
  """Generates melodies and saves them as MIDI files.

  Uses the options specified by the flags defined in this module.

  Args:
    generator: The ImprovRnnSequenceGenerator to use for generation.
  """
  if not FLAGS.output_dir:
    tf.logging.fatal('--output_dir required')
    return
  FLAGS.output_dir = os.path.expanduser(FLAGS.output_dir)

  primer_midi = None
  if FLAGS.primer_midi:
    primer_midi = os.path.expanduser(FLAGS.primer_midi)

  if not tf.gfile.Exists(FLAGS.output_dir):
    tf.gfile.MakeDirs(FLAGS.output_dir)

  # Choose a priming sequence: an explicit --primer_melody wins over a primer
  # MIDI file; with neither, fall back to a single middle C (pitch 60).
  primer_sequence = None
  qpm = FLAGS.qpm if FLAGS.qpm else note_seq.DEFAULT_QUARTERS_PER_MINUTE
  if FLAGS.primer_melody:
    primer_melody = note_seq.Melody(ast.literal_eval(FLAGS.primer_melody))
    primer_sequence = primer_melody.to_sequence(qpm=qpm)
  elif primer_midi:
    primer_sequence = note_seq.midi_file_to_sequence_proto(primer_midi)
    # A tempo embedded in the primer MIDI overrides the --qpm flag.
    if primer_sequence.tempos and primer_sequence.tempos[0].qpm:
      qpm = primer_sequence.tempos[0].qpm
  else:
    tf.logging.warning(
        'No priming sequence specified. Defaulting to a single middle C.')
    primer_melody = note_seq.Melody([60])
    primer_sequence = primer_melody.to_sequence(qpm=qpm)

  # Create backing chord progression from flags. Each chord symbol is
  # repeated steps_per_chord times so the progression has one entry per
  # melody step.
  raw_chords = FLAGS.backing_chords.split()
  repeated_chords = [chord for chord in raw_chords
                     for _ in range(FLAGS.steps_per_chord)]
  backing_chords = note_seq.ChordProgression(repeated_chords)

  # Derive the total number of seconds to generate based on the QPM of the
  # priming sequence and the length of the backing chord progression.
  seconds_per_step = 60.0 / qpm / generator.steps_per_quarter
  total_seconds = len(backing_chords) * seconds_per_step

  # Specify start/stop time for generation based on starting generation at the
  # end of the priming sequence and continuing until the sequence is num_steps
  # long.
  generator_options = generator_pb2.GeneratorOptions()
  if primer_sequence:
    input_sequence = primer_sequence
    # Set the start time to begin on the next step after the last note ends.
    if primer_sequence.notes:
      last_end_time = max(n.end_time for n in primer_sequence.notes)
    else:
      last_end_time = 0
    generate_section = generator_options.generate_sections.add(
        start_time=last_end_time + seconds_per_step,
        end_time=total_seconds)

    if generate_section.start_time >= generate_section.end_time:
      tf.logging.fatal(
          'Priming sequence is longer than the total number of steps '
          'requested: Priming sequence length: %s, Generation length '
          'requested: %s',
          generate_section.start_time, total_seconds)
      return
  else:
    input_sequence = music_pb2.NoteSequence()
    input_sequence.tempos.add().qpm = qpm
    generate_section = generator_options.generate_sections.add(
        start_time=0,
        end_time=total_seconds)

  # Add the backing chords to the input sequence by copying over only the
  # chord-symbol text annotations.
  chord_sequence = backing_chords.to_sequence(sequence_start_time=0.0, qpm=qpm)
  for text_annotation in chord_sequence.text_annotations:
    if text_annotation.annotation_type == CHORD_SYMBOL:
      chord = input_sequence.text_annotations.add()
      chord.CopyFrom(text_annotation)
  input_sequence.total_time = len(backing_chords) * seconds_per_step

  # Forward the sampling / beam-search flags to the generator.
  generator_options.args['temperature'].float_value = FLAGS.temperature
  generator_options.args['beam_size'].int_value = FLAGS.beam_size
  generator_options.args['branch_factor'].int_value = FLAGS.branch_factor
  generator_options.args[
      'steps_per_iteration'].int_value = FLAGS.steps_per_iteration
  tf.logging.debug('input_sequence: %s', input_sequence)
  tf.logging.debug('generator_options: %s', generator_options)

  # Make the generate request num_outputs times and save the output as midi
  # files.
  date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
  digits = len(str(FLAGS.num_outputs))
  for i in range(FLAGS.num_outputs):
    generated_sequence = generator.generate(input_sequence, generator_options)
    if FLAGS.render_chords:
      # Also render the backing chords as audible notes.
      renderer = note_seq.BasicChordRenderer(velocity=CHORD_VELOCITY)
      renderer.render(generated_sequence)
    midi_filename = '%s_%s.mid' % (date_and_time, str(i + 1).zfill(digits))
    midi_path = os.path.join(FLAGS.output_dir, midi_filename)
    note_seq.sequence_proto_to_midi_file(generated_sequence, midi_path)

  tf.logging.info('Wrote %d MIDI files to %s',
                  FLAGS.num_outputs, FLAGS.output_dir)
def main(unused_argv):
  """Saves bundle or runs generator based on flags."""
  tf.logging.set_verbosity(FLAGS.log)

  bundle = get_bundle()

  if bundle:
    # When loading from a bundle, the model config is chosen by the id
    # recorded in the bundle itself.
    config_id = bundle.generator_details.id
    config = improv_rnn_model.default_configs[config_id]
    # NOTE(review): FLAGS.hparams is not defined in this module; presumably it
    # is registered by improv_rnn_config_flags — confirm before relying on it.
    config.hparams.parse(FLAGS.hparams)
  else:
    config = improv_rnn_config_flags.config_from_flags()
  # Having too large of a batch size will slow generation down unnecessarily.
  config.hparams.batch_size = min(
      config.hparams.batch_size, FLAGS.beam_size * FLAGS.branch_factor)

  generator = improv_rnn_sequence_generator.ImprovRnnSequenceGenerator(
      model=improv_rnn_model.ImprovRnnModel(config),
      details=config.details,
      steps_per_quarter=config.steps_per_quarter,
      checkpoint=get_checkpoint(),
      bundle=bundle)

  if FLAGS.save_generator_bundle:
    # Bundle-saving mode: package the generator instead of generating.
    bundle_filename = os.path.expanduser(FLAGS.bundle_file)
    if FLAGS.bundle_description is None:
      tf.logging.warning('No bundle description provided.')
    tf.logging.info('Saving generator bundle to %s', bundle_filename)
    generator.create_bundle_file(bundle_filename, FLAGS.bundle_description)
  else:
    run_with_flags(generator)
def console_entry_point():
  """Console-script entry point: run main() under TF1-compatible behavior."""
  tf.disable_v2_behavior()
  tf.app.run(main)


if __name__ == '__main__':
  console_entry_point()
| |
# coding=utf8
# Based on yibo's R script and JianXiao's Python script
from scipy import sparse
from sklearn.feature_selection import SelectPercentile, f_classif, chi2
import pandas as pd
import numpy as np
from scipy import sparse as ssp
import pylab as plt
from sklearn.preprocessing import LabelEncoder,LabelBinarizer,MinMaxScaler,OneHotEncoder
from sklearn.feature_extraction.text import TfidfVectorizer,CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import Pipeline,make_pipeline
from sklearn.cross_validation import StratifiedKFold,KFold
from sklearn.base import BaseEstimator
from sklearn.feature_selection import SelectFromModel,SelectPercentile,f_classif
from sklearn.linear_model import Ridge,LogisticRegression
from keras.preprocessing import sequence
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras import backend as K
from keras.layers import Input, Embedding, LSTM, Dense,Flatten, Dropout, merge,Convolution1D,MaxPooling1D,Lambda
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD,Nadam
from keras.layers.advanced_activations import PReLU,LeakyReLU,ELU,SReLU
from keras.models import Model
from keras.utils.visualize_util import plot
seed = 1024
path = "data/"
# import code; code.interact(local=dict(globals(), **locals()))

# Create bag-of-apps in character string format
# first by event
# then merge to generate larger bags by device

##################
#   App Events
##################
print("# Read App Events")
# device_id is read as string to avoid precision loss on 64-bit ids.
app_ev = pd.read_csv(path+"app_events.csv", dtype={'device_id': np.str})
# remove duplicates(app_id): one space-joined "app_id:<id>" bag per event
app_ev = app_ev.groupby("event_id")["app_id"].apply(
    lambda x: " ".join(set("app_id:" + str(s) for s in x)))

##################
#     Events
##################
print("# Read Events")
events = pd.read_csv(path+"events.csv", dtype={'device_id': np.str})
# Attach each event's app bag, then drop events with no app data.
events["app_id"] = events["event_id"].map(app_ev)
events = events.dropna()
del app_ev  # free memory; no longer needed

events = events[["device_id", "app_id"]]
# remove duplicates(app_id): merge all of a device's event bags into one
# deduplicated bag per device
events = events.groupby("device_id")["app_id"].apply(
    lambda x: " ".join(set(str(" ".join(str(s) for s in x)).split(" "))))
events = events.reset_index(name="app_id")

# expand to multiple rows: one (app_id, device_id) pair per row
events = pd.concat([pd.Series(row['device_id'], row['app_id'].split(' '))
                    for _, row in events.iterrows()]).reset_index()
events.columns = ['app_id', 'device_id']
##################
#   Phone Brand
##################
print("# Read Phone Brand")
pbd = pd.read_csv(path+"phone_brand_device_model.csv",
                  dtype={'device_id': np.str})
# A device can appear several times; keep its first brand/model record.
pbd.drop_duplicates('device_id', keep='first', inplace=True)
##################
#  Train and Test
##################
print("# Generate Train and Test")

# NOTE(review): np.str matches the other reads in this script but is removed
# in modern NumPy (>= 1.24); plain str is the forward-compatible spelling.
train = pd.read_csv(path+"gender_age_train.csv",
                    dtype={'device_id': np.str})
# Encode gender with .loc: the original chained indexing
# (train["gender"][mask] = 1) writes through an intermediate object and can
# silently fail to update the frame (pandas SettingWithCopy).
train.loc[train["gender"] == 'M', "gender"] = 1
train.loc[train["gender"] == 'F', "gender"] = 0
Y_gender = train["gender"]
# Ages are strictly positive; log-transform for the regression head.
Y_age = train["age"]
Y_age = np.log(Y_age)
train.drop(["age", "gender"], axis=1, inplace=True)

test = pd.read_csv(path+"gender_age_test.csv",
                   dtype={'device_id': np.str})
test["group"] = np.nan

split_len = len(train)

# Group Labels: integer-encode the 12 demographic groups.
Y = train["group"]
lable_group = LabelEncoder()
Y = lable_group.fit_transform(Y)
device_id = test["device_id"]
# Concat train and test so features are built over all devices at once.
Df = pd.concat((train, test), axis=0, ignore_index=True)

Df = pd.merge(Df, pbd, how="left", on="device_id")
# Prefix values so brand/model/app ids share one feature namespace.
Df["phone_brand"] = Df["phone_brand"].apply(lambda x: "phone_brand:" + str(x))
Df["device_model"] = Df["device_model"].apply(
    lambda x: "device_model:" + str(x))

###################
#  Concat Feature
###################

f1 = Df[["device_id", "phone_brand"]]   # phone_brand
f2 = Df[["device_id", "device_model"]]  # device_model
f3 = events[["device_id", "app_id"]]    # app_id

del Df  # free memory

# Rename the second column of each frame to "feature" so they stack.
# NOTE(review): assigning into columns.values mutates the index in place —
# it works here, but df.columns = [...] is the supported way.
f1.columns.values[1] = "feature"
f2.columns.values[1] = "feature"
f3.columns.values[1] = "feature"

FLS = pd.concat((f1, f2, f3), axis=0, ignore_index=True)

###################
# User-Item Feature
###################
print("# User-Item-Feature")

device_ids = FLS["device_id"].unique()
feature_cs = FLS["feature"].unique()

# Build a binary device x feature incidence matrix in CSR form.
data = np.ones(len(FLS))
# Keep the device_id encoder: it maps device ids to matrix rows below.
dec = LabelEncoder().fit(FLS["device_id"])
row = dec.transform(FLS["device_id"])
col = LabelEncoder().fit_transform(FLS["feature"])

sparse_matrix = sparse.csr_matrix(
    (data, (row, col)), shape=(len(device_ids), len(feature_cs)))
# Drop all-zero feature columns.
sparse_matrix = sparse_matrix[:, sparse_matrix.getnnz(0) > 0]

##################
#      Data
##################
# Row lookup uses the same device_id encoder as the matrix rows.
train_row = dec.transform(train["device_id"])
train_sp = sparse_matrix[train_row, :]

test_row = dec.transform(test["device_id"])
test_sp = sparse_matrix[test_row, :]
# Stratified split; only the first fold is used as a validation set.
skf = StratifiedKFold(Y, n_folds=10, shuffle=True, random_state=seed)
# skf = KFold(train.shape[0],n_folds=5, shuffle=True, random_state=seed)
for ind_tr, ind_te in skf:
    X_train = train_sp[ind_tr]
    X_val = train_sp[ind_te]

    y_train = Y[ind_tr]
    y_val = Y[ind_te]

    y_train_gender = Y_gender[ind_tr]
    y_val_gender = Y_gender[ind_te]

    y_train_age = Y_age[ind_tr]
    y_val_age = Y_age[ind_te]
    break

##################
#  Feature Sel
##################
print("# Feature Selection")
# Univariate (ANOVA F-test) selection; percentile tuned by hand.
selector = SelectPercentile(f_classif, percentile=23)

# NOTE(review): the selector is fit on the first fold's training split only,
# then applied to the full train/test matrices below.
selector.fit(X_train, y_train)

X_train = selector.transform(X_train).toarray()
X_val = selector.transform(X_val).toarray()

train_sp = selector.transform(train_sp)
test_sp = selector.transform(test_sp).toarray()

print("# Num of Features: ", X_train.shape[1])

# One-hot encode the group labels for the softmax head.
group_lb = LabelBinarizer()
labels = group_lb.fit_transform(Y)
y_train = group_lb.transform(y_train)
y_val = group_lb.transform(y_val)
# Shared trunk: dense -> SReLU -> dropout.
inputs = Input(shape=(X_train.shape[1],), dtype='float32')
fc1 = Dense(512)(inputs)
fc1 = SReLU()(fc1)
dp1 = Dropout(0.5)(fc1)

# Multi-task targets: group (12-way), gender (binary), log-age (regression).
y_train = [y_train,y_train_gender,y_train_age]
y_val = [y_val,y_val_gender,y_val_age]

# Auxiliary gender head directly off the shared trunk.
# fc1_g = Dense(256)(dp1)
# fc1_g = SReLU()(fc1_g)
# dp1_g = Dropout(0.5)(fc1_g)
outputs_gender = Dense(1,activation='sigmoid',name='outputs_gender')(dp1)

# Auxiliary age head directly off the shared trunk.
# fc1_a = Dense(256)(dp1)
# fc1_a = SReLU()(fc1_a)
# dp1_a = Dropout(0.5)(fc1_a)
outputs_age = Dense(1,activation='linear',name='outputs_age')(dp1)

# Main group head gets one extra dense block.
fc2 = Dense(512)(dp1)
fc2 = SReLU()(fc2)
dp2 = Dropout(0.5)(fc2)
outputs = Dense(12,activation='softmax',name='outputs')(dp2)

inputs = [
    inputs,
]
outputs = [
    outputs,
    outputs_gender,
    outputs_age,
]
# Keras 1.x API (input=/output=, nb_epoch=) throughout.
model = Model(input=inputs, output=outputs)

nadam = Nadam(lr=1e-4)
sgd = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)  # unused alternative

# model.compile(
#     optimizer=nadam,
#     loss={'outputs': 'categorical_crossentropy'}
#     )
# Equal weighting of the three task losses.
model.compile(
    optimizer=nadam,
    loss={'outputs': 'categorical_crossentropy', 'outputs_gender': 'binary_crossentropy','outputs_age':'mse'},
    loss_weights={'outputs': 1., 'outputs_gender': 1.,'outputs_age': 1.}
)

model_name = 'mlp_%s.hdf5'%'sparse'
# Checkpoint on the main (group) head's validation loss only; early stopping
# watches the combined validation loss.
model_checkpoint = ModelCheckpoint(path+model_name, monitor='val_outputs_loss', save_best_only=True)
early_stopping = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
plot(model, to_file=path+'%s.png'%model_name.replace('.hdf5',''),show_shapes=True)

nb_epoch = 20
batch_size = 128
load_model = False
if load_model:
    print('Load Model')
    model.load_weights(path+model_name)

model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=True,
          callbacks=[model_checkpoint, early_stopping],
          validation_data=[X_val,y_val]
          )
# Re-derive the group label encoder from the raw training file so the
# submission columns are named after the original group strings.
X_train = pd.read_csv(path+'gender_age_train.csv')
group_le = LabelEncoder()
group_lb = LabelBinarizer()
labels = group_le.fit_transform(X_train['group'].values)
labels = group_lb.fit_transform(labels)

device_id = pd.read_csv(path+'gender_age_test.csv')['device_id']

# First output head is the 12-way group softmax; the gender/age heads are
# auxiliary and discarded here.
y_preds = model.predict(test_sp)[0]

# Write results
submission = pd.DataFrame(y_preds, columns=group_le.classes_)
submission["device_id"] = device_id
submission = submission.set_index("device_id")
submission.to_csv('submission_mlp_sparse.csv', index=True, index_label='device_id')
| |
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parse the report output of the llvm test suite or regression tests,
filter out known failures, and check for new failures
pnacl/scripts/parse_llvm_test_report.py [options]+ reportfile
"""
from __future__ import print_function
import csv
import logging
import optparse
import os
import sys
import StringIO
# exclude these tests
EXCLUDES = {}
def ParseCommandLine(argv):
  """Parse the command line; return (options, positional_args)."""
  p = optparse.OptionParser(prog=argv[0])
  # Report format selectors.
  p.add_option('-t', '--testsuite', action='store_true', dest='testsuite',
               default=False)
  p.add_option('-l', '--lit', action='store_true', dest='lit',
               default=False)
  # Exclusion handling.
  p.add_option('-x', '--exclude', action='append', dest='excludes',
               default=[],
               help='Add list of excluded tests (expected fails)')
  p.add_option('-c', '--check-excludes', action='store_true',
               default=False, dest='check_excludes',
               help='Report tests which unexpectedly pass')
  p.add_option('-a', '--attribute', dest='attributes', action='append',
               default=[],
               help='Add attribute of test configuration (e.g. arch)')
  # Diagnostics.
  p.add_option('-v', '--verbose', action='store_true',
               default=False, dest='verbose',
               help='Print compilation/run logs of failing tests')
  p.add_option('-p', '--build-path', dest='buildpath',
               help='Path to test-suite build directory')
  return p.parse_args(argv[1:])
def Fatal(text):
  """Write a message to stderr and terminate with exit status 1."""
  sys.stderr.write('%s\n' % text)
  sys.exit(1)
def IsFullname(name):
  """True when the test name contains a path component."""
  return '/' in name
def GetShortname(fullname):
  """Return the component after the last '/' (the whole name if none)."""
  return fullname.rsplit('/', 1)[-1]
def ParseTestsuiteCSV(filecontents):
  ''' Parse a CSV file output by llvm testsuite with a record for each test.
  returns 2 dictionaries:
  1) a mapping from the short name of the test (without the path) to
  a list of full pathnames that match it. It contains all the tests.
  2) a mapping of all test failures, mapping full test path to the type
  of failure (compile or exec)
  '''
  # Use the Python 2 StringIO module when available; fall back to io so the
  # parser also works under Python 3 (the module-level StringIO import is
  # py2-only).
  try:
    from StringIO import StringIO
  except ImportError:
    from io import StringIO
  alltests = {}
  failures = {}
  reader = csv.DictReader(StringIO(filecontents))
  testcount = 0
  for row in reader:
    testcount += 1
    fullname = row['Program']
    shortname = GetShortname(fullname)
    # A shortname can match several tests; keep all full paths.
    alltests.setdefault(shortname, []).append(fullname)
    # '*' in the CC/Exec column marks a compile/execution failure.
    if row['CC'] == '*':
      failures[fullname] = 'compile'
    elif row['Exec'] == '*':
      failures[fullname] = 'exec'
  logging.info('%d tests, %d failures', testcount, len(failures))
  return alltests, failures
def ParseLit(filecontents):
''' Parse the output of the LLVM regression test runner (lit/make check).
returns a dictionary mapping test name to the type of failure
(Clang, LLVM, LLVMUnit, etc)
'''
alltests = {}
failures = {}
testcount = 0
for line in filecontents.splitlines():
l = line.split()
if len(l) < 4:
continue
if l[0] in ('PASS:', 'FAIL:', 'XFAIL:', 'XPASS:', 'UNSUPPORTED:'):
testcount += 1
fullname = ''.join(l[1:4])
shortname = GetShortname(fullname)
fullnames = alltests.get(shortname, [])
fullnames.append(fullname)
alltests[shortname] = fullnames
if l[0] in ('FAIL:', 'XPASS:'):
failures[fullname] = l[1]
logging.info('%d tests, %d failures', testcount, len(failures))
return alltests, failures
def ParseExcludeFile(filename, config_attributes,
                     check_test_names=False, alltests=None):
  ''' Parse a list of excludes (known test failures). Excludes can be specified
  by shortname (e.g. fbench) or by full path
  (e.g. SingleSource/Benchmarks/Misc/fbench) but if there is more than
  one test with the same shortname, the full name must be given.
  Errors are reported if an exclude does not match exactly one test
  in alltests, or if there are duplicate excludes.
  Returns:
    Number of failures in the exclusion file.
  '''
  # NOTE(review): check_test_names is accepted but unused here; kept for
  # interface compatibility with callers.
  errors = 0
  with open(filename) as f:
    for line in f:
      line = line.strip()
      if not line:
        continue
      if line.startswith('#'):
        continue
      tokens = line.split()
      if len(tokens) > 1:
        # Optional second token limits the exclude to configurations whose
        # attribute set (e.g. arch) includes all listed attributes.
        testname = tokens[0]
        attributes = set(tokens[1].split(','))
        if not attributes.issubset(config_attributes):
          continue
      else:
        testname = line
      if testname in EXCLUDES:
        logging.error('Duplicate exclude: %s', line)
        errors += 1
      if IsFullname(testname):
        shortname = GetShortname(testname)
        if shortname not in alltests or testname not in alltests[shortname]:
          logging.error('Exclude %s not found in list of tests', line)
          errors += 1
        fullname = testname
      else:
        # short name is specified
        shortname = testname
        if shortname not in alltests:
          logging.error('Exclude %s not found in list of tests', shortname)
          errors += 1
          # Bug fix: the original code fell through and indexed
          # alltests[shortname] below, raising KeyError for an unknown
          # shortname; record the error and skip this exclude instead.
          continue
        if len(alltests[shortname]) > 1:
          logging.error('Exclude %s matches more than one test: %s. ' +
                        'Specify full name in exclude file.',
                        shortname, str(alltests[shortname]))
          errors += 1
        fullname = alltests[shortname][0]
      if fullname in EXCLUDES:
        logging.error('Duplicate exclude %s', fullname)
        errors += 1
      EXCLUDES[fullname] = filename
  logging.info('Parsed %s: now %d total excludes', filename, len(EXCLUDES))
  return errors
def DumpFileContents(name):
  """Log the contents of 'name'; emit a bot-visible error if unreadable."""
  failed = not os.path.exists(name)
  logging.debug(name)
  try:
    with open(name, 'rb') as f:
      logging.debug(f.read())
  except IOError:
    failed = True
  if failed:
    logging.error("Couldn't open file: %s", name)
    # Make the bots go red
    logging.error('@@@STEP_FAILURE@@@')
def PrintTestsuiteCompilationResult(path, test):
  ''' Print the compilation and run results for the specified test in the
  LLVM testsuite.
  These results are left in several different log files by the testsuite
  driver, and are different for MultiSource/SingleSource tests
  '''
  logging.debug('RESULTS for %s', test)
  testpath = os.path.join(path, test)
  testdir, testname = os.path.split(testpath)
  # Testsuite logs live in an Output/ directory beside the test.
  outputdir = os.path.join(testdir, 'Output')
  logging.debug('COMPILE phase')
  logging.debug('OBJECT file phase')
  # MultiSource tests leave one .llvm.o.compile log per source file;
  # SingleSource tests leave exactly one, named after the test.
  if test.startswith('MultiSource'):
    for f in os.listdir(outputdir):
      if f.endswith('llvm.o.compile'):
        DumpFileContents(os.path.join(outputdir, f))
  elif test.startswith('SingleSource'):
    DumpFileContents(os.path.join(outputdir, testname + '.llvm.o.compile'))
  else:
    Fatal('ERROR: unrecognized test type ' + test)
  # Remaining phases use fixed per-test log filename suffixes.
  logging.debug('PEXE generation phase')
  DumpFileContents(os.path.join(outputdir,
                                testname + '.nonfinal.pexe.compile'))
  logging.debug('PEXE finalization phase')
  DumpFileContents(os.path.join(outputdir, testname + '.final.pexe.finalize'))
  logging.debug('TRANSLATION phase')
  DumpFileContents(os.path.join(outputdir, testname + '.nexe.translate'))
  logging.debug('EXECUTION phase')
  logging.debug('native output:')
  DumpFileContents(os.path.join(outputdir, testname + '.out-nat'))
  logging.debug('pnacl output:')
  DumpFileContents(os.path.join(outputdir, testname + '.out-pnacl'))
def main(argv):
  """Parse the command line and run Report() on the single input file."""
  options, args = ParseCommandLine(argv)
  if len(args) != 1:
    Fatal('Must specify filename to parse')
  return Report(vars(options), filename=args[0])
def Report(options, filename=None, filecontents=None):
  """Parse a test report, apply excludes, and log unexpected fails/passes.

  Args:
    options: dict of option values (see ParseCommandLine).
    filename: path of the report file (read when filecontents is not given).
    filecontents: report text to parse directly.

  Returns:
    True when problems were found (callers use this as a nonzero exit code).
  """
  loglevel = logging.INFO
  if options['verbose']:
    loglevel = logging.DEBUG
  logging.basicConfig(level=loglevel, format='%(message)s')

  if not (filename or filecontents):
    Fatal('ERROR: must specify filename or filecontents')

  failures = {}
  logging.debug('Full test results:')

  if not filecontents:
    with open(filename, 'rb') as f:
      filecontents = f.read();

  # get the set of tests and failures
  if options['testsuite']:
    if options['verbose'] and options['buildpath'] is None:
      Fatal('ERROR: must specify build path if verbose output is desired')
    alltests, failures = ParseTestsuiteCSV(filecontents)
    check_test_names = True
  elif options['lit']:
    alltests, failures = ParseLit(filecontents)
    check_test_names = True
  else:
    Fatal('Must specify either testsuite (-t) or lit (-l) output format')

  # get the set of excludes (populates the module-level EXCLUDES dict)
  exclusion_failures = 0
  for f in options['excludes']:
    exclusion_failures += ParseExcludeFile(f, set(options['attributes']),
                                           check_test_names=check_test_names,
                                           alltests=alltests)

  # Regardless of the verbose option, do a dry run of
  # PrintTestsuiteCompilationResult so we can catch errors when intermediate
  # filenames in the compilation pipeline change.
  # E.g. https://code.google.com/p/nativeclient/issues/detail?id=3659
  # NOTE(review): alltests.values()[0][0] and itervalues() below are
  # Python 2 only; this module targets py2.
  if len(alltests) and options['testsuite']:
    logging.disable(logging.INFO)
    PrintTestsuiteCompilationResult(options['buildpath'],
                                    alltests.values()[0][0])
    logging.disable(logging.NOTSET)

  # intersect them and check for unexpected fails/passes
  unexpected_failures = 0
  unexpected_passes = 0
  for tests in alltests.itervalues():
    for test in tests:
      if test in failures:
        if test not in EXCLUDES:
          unexpected_failures += 1
          logging.info('[ FAILED ] %s: %s failure', test, failures[test])
          if options['testsuite']:
            PrintTestsuiteCompilationResult(options['buildpath'], test)
      elif test in EXCLUDES:
        # Excluded (expected-fail) test that actually passed.
        unexpected_passes += 1
        logging.info('%s: unexpected success', test)

  logging.info('%d unexpected failures %d unexpected passes',
               unexpected_failures, unexpected_passes)
  if exclusion_failures:
    logging.info('%d problems in known_failures exclusion files',
                 exclusion_failures)

  if options['check_excludes']:
    return unexpected_failures + unexpected_passes + exclusion_failures > 0
  return unexpected_failures + exclusion_failures > 0
if __name__ == '__main__':
  # Exit status is 1 when new failures (or exclusion-file problems) were
  # found, 0 otherwise.
  sys.exit(main(sys.argv))
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import socket
import netaddr
from oslo.config import cfg
from neutron.agent.common import config
from neutron.agent.linux.dhcp import DictModel
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
DEVICE_OWNER_NETWORK_PROBE = 'network:probe'
DEVICE_OWNER_COMPUTE_PROBE = 'compute:probe'
class NeutronDebugAgent():
OPTS = [
# Needed for drivers
cfg.BoolOpt('use_namespaces', default=True,
help=_("Use Linux network namespaces")),
cfg.StrOpt('interface_driver',
help=_("The driver used to manage the virtual "
"interface.")),
cfg.StrOpt('external_network_bridge', default='br-ex',
help=_("Name of bridge used for external network "
"traffic.")),
]
def __init__(self, conf, client, driver):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
self.client = client
self.driver = driver
def _get_namespace(self, port):
return "qprobe-%s" % port.id
def create_probe(self, network_id, device_owner='network'):
network = self._get_network(network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
port = self._create_port(network, device_owner)
port.network = network
interface_name = self.driver.get_device_name(port)
namespace = None
if self.conf.use_namespaces:
namespace = self._get_namespace(port)
if ip_lib.device_exists(interface_name, self.root_helper, namespace):
LOG.debug(_('Reusing existing device: %s.'), interface_name)
else:
self.driver.plug(network.id,
port.id,
interface_name,
port.mac_address,
bridge=bridge,
namespace=namespace)
ip_cidrs = []
for fixed_ip in port.fixed_ips:
subnet = fixed_ip.subnet
net = netaddr.IPNetwork(subnet.cidr)
ip_cidr = '%s/%s' % (fixed_ip.ip_address, net.prefixlen)
ip_cidrs.append(ip_cidr)
self.driver.init_l3(interface_name, ip_cidrs, namespace=namespace)
return port
def _get_subnet(self, subnet_id):
subnet_dict = self.client.show_subnet(subnet_id)['subnet']
return DictModel(subnet_dict)
def _get_network(self, network_id):
network_dict = self.client.show_network(network_id)['network']
network = DictModel(network_dict)
network.external = network_dict.get('router:external')
obj_subnet = [self._get_subnet(s_id) for s_id in network.subnets]
network.subnets = obj_subnet
return network
def clear_probe(self):
ports = self.client.list_ports(
device_id=socket.gethostname(),
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
self.delete_probe(port['id'])
def delete_probe(self, port_id):
port = DictModel(self.client.show_port(port_id)['port'])
network = self._get_network(port.network_id)
bridge = None
if network.external:
bridge = self.conf.external_network_bridge
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces and ip.netns.exists(namespace):
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge,
namespace=namespace)
try:
ip.netns.delete(namespace)
except Exception:
LOG.warn(_('Failed to delete namespace %s'), namespace)
else:
self.driver.unplug(self.driver.get_device_name(port),
bridge=bridge)
self.client.delete_port(port.id)
def list_probes(self):
ports = self.client.list_ports(
device_owner=[DEVICE_OWNER_NETWORK_PROBE,
DEVICE_OWNER_COMPUTE_PROBE])
info = ports['ports']
for port in info:
port['device_name'] = self.driver.get_device_name(DictModel(port))
return info
def exec_command(self, port_id, command=None):
port = DictModel(self.client.show_port(port_id)['port'])
ip = ip_lib.IPWrapper(self.root_helper)
namespace = self._get_namespace(port)
if self.conf.use_namespaces:
if not command:
return "sudo ip netns exec %s" % self._get_namespace(port)
namespace = ip.ensure_namespace(namespace)
return namespace.netns.execute(shlex.split(command))
else:
return utils.execute(shlex.split(command))
def ensure_probe(self, network_id):
ports = self.client.list_ports(network_id=network_id,
device_id=socket.gethostname(),
device_owner=DEVICE_OWNER_NETWORK_PROBE)
info = ports.get('ports', [])
if info:
return DictModel(info[0])
else:
return self.create_probe(network_id)
def ping_all(self, network_id=None, timeout=1):
if network_id:
ports = self.client.list_ports(network_id=network_id)['ports']
else:
ports = self.client.list_ports()['ports']
result = ""
for port in ports:
probe = self.ensure_probe(port['network_id'])
if port['device_owner'] == DEVICE_OWNER_NETWORK_PROBE:
continue
for fixed_ip in port['fixed_ips']:
address = fixed_ip['ip_address']
subnet = self._get_subnet(fixed_ip['subnet_id'])
if subnet.ip_version == 4:
ping_command = 'ping'
else:
ping_command = 'ping6'
result += self.exec_command(probe.id,
'%s -c 1 -w %s %s' % (ping_command,
timeout,
address))
return result
def _create_port(self, network, device_owner):
    """Create a probe port on every subnet of 'network' and return it
    as a DictModel with 'network' and per-fixed-ip 'subnet' attached.
    """
    request = {
        'port': {
            'admin_state_up': True,
            'network_id': network.id,
            'device_id': '%s' % socket.gethostname(),
            'device_owner': '%s:probe' % device_owner,
            'tenant_id': network.tenant_id,
            'fixed_ips': [{'subnet_id': subnet.id}
                          for subnet in network.subnets],
        },
    }
    port = DictModel(self.client.create_port(request)['port'])
    port.network = network
    for fixed_ip in port.fixed_ips:
        fixed_ip.subnet = self._get_subnet(fixed_ip.subnet_id)
    return port
# (removed stray non-Python separator artifact)
# Copyright (C) 2019 Akamai Technologies, Inc.
# Copyright (C) 2011-2017 Nominum, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Nominum Command Channel Sessions"""
import socket
import sys
import threading
import nomcc.closer
import nomcc.connection
import nomcc.encryption
import nomcc.exceptions
import nomcc.message
import nomcc.sequence
class RequestState(object):
    """RequestState represents a request "in flight".

    When ready to wait for the response, call get_response() and the
    thread will block until the answer is available or a timeout occurs.

    The 'request' field is the request that was sent.

    The 'response' field is the response received, but is not valid
    until 'done' is set.  Typically you call get_response() instead of
    reading 'response' directly.
    """

    def __init__(self, session, request, return_data, raise_error,
                 sequence_ok):
        """Create a RequestState for 'request' on 'session'.

        If 'return_data' is true, then get_response() will return the
        _data section of the response.

        If 'raise_error' is true, then get_response() will raise a
        nomcc.exceptions.Error exception if the response has an 'err' field.
        """
        # Signaled by the reader thread (via __call__) or by
        # return_exception() once the outcome is known.
        self.done = threading.Event()
        self.request = request
        self.return_data = return_data
        self.raise_error = raise_error
        # If false, a '_seq' control field in the response is treated
        # as a protocol error (see get_response()).
        self.sequence_ok = sequence_ok
        self.response = None
        # Exception to re-raise in the waiting thread, if any.
        self.exception = None

    def wait(self, timeout=nomcc.closer.DEFAULT_TIMEOUT):
        """Wait for the request to complete, or the specified timeout
        to occur.
        """
        self.done.wait(timeout)

    def get_response(self, timeout=nomcc.closer.DEFAULT_TIMEOUT):
        """Get the response to the request.

        Waits until the request completes or the specified timeout occurs.

        Raises nomcc.exceptions.Timeout if no answer arrives in time;
        re-raises any exception delivered via return_exception();
        raises nomcc.exceptions.UnexpectedSequence if a sequence
        response arrives when sequence_ok is false; raises
        nomcc.exceptions.Error if raise_error is set and the response's
        _data has an 'err' field.
        """
        if not self.done.wait(timeout):
            raise nomcc.exceptions.Timeout
        if self.exception is not None:
            raise self.exception
        if not self.sequence_ok:
            _ctrl = self.response['_ctrl']
            if '_seq' in _ctrl:
                raise nomcc.exceptions.UnexpectedSequence
        _data = self.response['_data']
        if self.raise_error and 'err' in _data:
            raise nomcc.exceptions.Error(_data['err'])
        if self.return_data:
            return self.response['_data']
        else:
            return self.response

    def __call__(self, session, message):
        """Handle 'message' by binding it to the response attribute and
        waking up any waiters.
        """
        self.response = message
        self.done.set()
        return True

    def return_exception(self, exception):
        # Deliver 'exception' to the waiter in place of a response and
        # wake it up; get_response() will re-raise it.
        self.exception = exception
        self.done.set()
def _reader(session):
    """Reader thread.

    Loops reading (message, state) pairs from the connection and
    dispatching them: first to the per-request state callable (usually
    a RequestState), then to the session's dispatch function.  Most
    handler exceptions are converted into CC error responses; EOF and
    fatal errors end the loop, and the session is asked to close on
    the way out.
    """
    try:
        while True:
            (message, state) = session.connection.read()
            session.not_idle()
            handled = False
            # try to handle the message ...
            try:
                if callable(state):
                    handled = state(session, message)
                if not handled and session.dispatch is not None:
                    handled = session.dispatch(session, message, state)
            except (SystemExit, KeyboardInterrupt,
                    nomcc.exceptions.Closing):
                # ... passing a few exceptions on ...
                raise
            except Exception as e:
                # ... but turning most into error responses
                if nomcc.message.is_request(message):
                    response = nomcc.message.error(message, str(e))
                    session.write(response)
                    handled = True
                session.connection.trace("session reader thread",
                                         "handling error: %s" % str(e))
            if not handled:
                if nomcc.message.is_request(message):
                    response = nomcc.message.error(message,
                                                   "unknown request")
                    session.write(response)
                # otherwise we just drop the message
    except EOFError:
        # Peer closed the connection cleanly.
        session.connection.trace("session reader thread",
                                 "end-of-input")
    except Exception:
        (ty, va) = sys.exc_info()[:2]
        session.connection.trace("session reader thread",
                                 "exiting due to exception %s: %s" %
                                 (str(ty), str(va)))
    finally:
        # Always initiate session shutdown when the reader exits.
        session.request_close()
def _writer(session):
    """Writer thread.

    Blocks on the session's condition variable until the write queue is
    non-empty, then sends messages in order.  A (None, None) entry is
    the sentinel asking the thread to exit.  Socket errors are fatal;
    rendering errors are reported back to the originating request state
    when possible.
    """
    try:
        while True:
            with session.write_lock:
                # Sleep until Session.write() (or _close) queues work.
                while len(session.write_queue) == 0:
                    session.wake_writer.wait()
                (message, state) = session.write_queue.pop(0)
            if message is None:
                # request to exit
                break
            session.not_idle()
            try:
                session.connection.write(message, state)
            except (socket.error, socket.timeout):
                # socket problems are not something we can continue from
                # so reraise
                raise
            except Exception as e:
                # Something went wrong in rendering, but nothing was sent,
                # so the connection is still ok.  Try to inform the
                # originator.
                if state is not None:
                    try:
                        state.return_exception(e)
                    except Exception:
                        # We don't expect this path to happen very
                        # often, so we just trace it for now as
                        # opposed to trying to notify the session
                        # about the bad message some other way.
                        (ty, va) = sys.exc_info()[:2]
                        session.connection.trace("session writer thread",
                                                 "sending message threw " +
                                                 "exception %s: %s" %
                                                 (str(ty), str(va)))
    except Exception:
        (ty, va) = sys.exc_info()[:2]
        session.connection.trace("session writer thread",
                                 "exiting due to exception %s: %s" %
                                 (str(ty), str(va)))
    # We can't continue, so ask for shutdown.
    session.request_close()
class Session(nomcc.closer.ThreadedCloser):
    """A command channel session.

    Run a command channel session on 'connection'.  A session has
    independent reader and writer threads.  Writes are queued and then
    sent by the writer thread.

    Sessions support the context manager protocol.
    """

    def __init__(self, connection, dispatch=None, want_start=True):
        """Initialize a session.

        If 'dispatch' is not None, then it will be used to handle messages
        that don't have an associated RequestState.

        If 'want_start' is True, then the Session's service threads will
        be started.
        """
        super(Session, self).__init__()
        self.connection = connection
        self.dispatch = dispatch
        # sequence_lock covers sequences and next_id
        self.sequence_lock = threading.Lock()
        self.sequences = {}
        self.next_id = 1
        # write_lock covers write_queue and wake_writer
        self.write_lock = threading.Lock()
        self.write_queue = []
        self.wake_writer = threading.Condition(self.write_lock)
        # Daemon service threads so a stuck session cannot block
        # interpreter exit.
        self.reader = threading.Thread(target=_reader, args=[self],
                                       name="cc-reader")
        self.reader.daemon = True
        self.writer = threading.Thread(target=_writer, args=[self],
                                       name="cc-writer")
        self.writer.daemon = True
        self.started = False
        if want_start:
            self.start()

    def start(self):
        """Start the session (if not already started)."""
        if not self.started:
            self._start_closer()
            self.reader.start()
            self.writer.start()
            self.started = True

    def _close(self):
        # We seem to need to do the following shutdown on at
        # least OS X, Linux, and Solaris.
        #
        # If we don't do it, then the close() does nothing if there's
        # a recv() outstanding in another thread until either:
        #
        #     the recv() reads some data (assuming more ever gets sent)
        # or
        #     the other side closes
        #
        try:
            self.connection.shutdown()
        except socket.error:
            # This can happen e.g. if the socket is not connected any
            # more.  We don't care since we're closing, so just eat
            # the exception.
            pass
        self.reader.join()
        # Take possession of the reader's outstanding state and end any
        # requests
        outstanding = self.connection.take_outstanding()
        for state in outstanding.values():
            if state is not None:
                state.exception = nomcc.exceptions.Closing()
                state.done.set()
        with self.write_lock:
            # Tell writer to exit.
            self.write_queue.insert(0, (None, None))
            self.wake_writer.notify()
        self.writer.join()
        with self.write_lock:
            # take possession of the queue remnants
            wq = self.write_queue
            # prevent further write attempts
            self.write_queue = None
        # Fail any requests that never made it onto the wire.
        for (message, state) in wq:
            if state is not None:
                state.exception = nomcc.exceptions.Closing()
                state.done.set()
        # Take possession of sequences and close them
        with self.sequence_lock:
            sequences = self.sequences
            self.sequences = None
        for sequence in sequences.values():
            sequence.close()
        self.connection.close()

    def write(self, message, state=None):
        """Add 'message' to the write queue.

        Arbitrary state 'state' is associated with the message.

        Clients should NOT need to call this method directly.
        """
        with self.write_lock:
            if self.write_queue is None:
                # Session is closing; no further writes accepted.
                raise nomcc.exceptions.Closing
            self.write_queue.append((message, state))
            # Only the empty -> non-empty transition needs a wakeup;
            # the writer drains the queue before sleeping again.
            if len(self.write_queue) == 1:
                self.wake_writer.notify()

    def ask(self, request, raise_error=True, sequence_ok=False):
        """Send a request.

        Note that 'tell()' is usually the more appropriate method to call
        if you want to wait for the answer.

        'request' may be a string, a _data section dictionary, or a
        complete CC message dictionary.  If the request is a string,
        then its value will be treated as the desired CC 'type' and
        only the _data section will be returned.  If the request is
        just a _data section, then only a _data section will be
        returned in the response.

        If 'raise_error' is true, then get_response() will raise a
        nomcc.exceptions.Error exception if the response has an 'err' field.

        If 'sequence_ok' is true, then sequence responses are allowed,
        and the caller is expected to deal with the sequence protocol.
        Normally you should use sequence() to get a sequence.

        Returns a RequestState object that may be used later to retrieve
        the response.
        """
        if isinstance(request, str):
            request = {'_data': {'type': request}}
            return_data = True
        elif '_data' not in request:
            # Request is not a full message; caller prefers to deal
            # just with _data.  Wrap into a proper message, and
            # remember to unwrap later.
            request = {'_data': request}
            return_data = True
        else:
            return_data = False
        rstate = RequestState(self, request, return_data, raise_error,
                              sequence_ok)
        self.write(request, rstate)
        return rstate

    def tell(self, request, timeout=nomcc.closer.DEFAULT_TIMEOUT,
             raise_error=True, sequence_ok=False):
        """Send a request and wait for a response.

        'request' may be a string, a _data section dictionary, or a
        complete CC message dictionary.  If the request is a string,
        then its value will be treated as the desired CC 'type' and
        only the _data section will be returned.  If the request is
        just a _data section, then only a _data section will be
        returned in the response.

        The request will timeout and raise an exception if not answered
        within 'timeout' seconds.

        If 'raise_error' is true, then get_response() will raise a
        nomcc.exceptions.Error exception if the response has an 'err' field.

        If 'sequence_ok' is true, then sequence responses are allowed,
        and the caller is expected to deal with the sequence protocol.
        Normally you should use sequence() to get a sequence.

        Returns the response.
        """
        return self.ask(
            request, raise_error, sequence_ok
        ).get_response(timeout)

    def sequence(self, data, timeout=nomcc.closer.DEFAULT_TIMEOUT,
                 num=nomcc.sequence.DEFAULT_BATCHING,
                 raise_error=True):
        """Send a request for a multi-response question, returning a
        a nomcc.sequence.Reader object which may be used to iterate the
        responses.

        'data' is the _data section of the request to send, or a string.
        If 'data' is a string, then its value will be treated as the
        desired CC type.

        The request will timeout and raise an exception if the next response
        isn't answered within 'timeout' seconds.

        'num' is a hint about the number of responses to return per
        network round-trip.  The default is
        nomcc.sequence.DEFAULT_BATCHING.

        If 'raise_error' is true, then get_response() will raise a
        nomcc.exceptions.Error exception if the response has an 'err' field.

        Returns a nomcc.sequence.Reader object.
        """
        return nomcc.sequence.Reader(self, data, timeout, num, raise_error)

    def add_sequence(self, sequence):
        """Add the specified sequence object to the set of known sequences.

        Returns the sequence id.
        """
        with self.sequence_lock:
            # Ids are small decimal strings, unique per session.
            id = str(self.next_id)
            self.next_id += 1
            self.sequences[id] = sequence
            return id

    def delete_sequence(self, id):
        """Delete the sequence object for the specified id.

        A KeyError exception will be raised if the specified sequence does
        not exist.
        """
        with self.sequence_lock:
            sequence = self.sequences[id]
            del self.sequences[id]
        sequence.close()

    def get_sequence(self, id):
        """Get sequence object for the specified id."""
        with self.sequence_lock:
            return self.sequences.get(id)

    def getpeername(self):
        """Get the peername of the other half of the connection.

        Returns an address tuple appropriate to the address family of the
        connection.
        """
        return self.connection.getpeername()

    def set_dispatch(self, dispatch):
        """Set the dispatch function for this session.
        """
        self.dispatch = dispatch
def new(*args, **kwargs):
    """Create a new session.

    All arguments are forwarded unchanged to the Session constructor.
    """
    session = Session(*args, **kwargs)
    return session
def connect(*args, **kwargs):
    """Establish a command channel session with a server.

    All arguments are passed directly to nomcc.connection.connect(), whose
    documentation is reproduced here for convenience.

    'where' is a string or a channel object.  If a string, it can be the
    name of a channel to be retrieved from /etc/channel.conf, or a channel
    literal of the form address[#port[#secret]].

    'timeout' is the timeout for the initial socket.connect().  The
    default is None.

    'encryption_policy' specifies the encryption policy to use for the
    connection, the default is nomcc.encryption.DESIRED, which
    attempts to use encryption but will permit communication if the
    remote server does not allow encryption.

    'source' is the source address and port to use in standard Python
    tuple form.  The default is (0.0.0.0, 0) or (::0, 0, 0, 0) as
    appropriate.

    'tracer' is a method taking a connection object, an operation string,
    and a message string.  The method is invoked at various points during
    connection setup and can be used for debugging.

    Returns a Session object.
    """
    connection = nomcc.connection.connect(*args, **kwargs)
    return Session(connection)
# (removed stray non-Python separator artifact)
# NOTE(review): neutralized nested merge-conflict start markers ('<<<<<<< HEAD');
# the conflicting branches of this region are byte-identical — keep one copy and
# delete the duplicates below when resolving.
# Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
    """Tests that work for both bytes and buffer objects.
    See PEP 3137.

    Subclasses mix this into a unittest.TestCase and override marshal()
    to produce the concrete type under test (bytes, bytearray, ...).
    """

    def marshal(self, x):
        """Convert x into the appropriate type for these tests."""
        raise RuntimeError('test class must provide a marshal method')

    def test_islower(self):
        self.assertFalse(self.marshal(b'').islower())
        self.assertTrue(self.marshal(b'a').islower())
        self.assertFalse(self.marshal(b'A').islower())
        self.assertFalse(self.marshal(b'\n').islower())
        self.assertTrue(self.marshal(b'abc').islower())
        self.assertFalse(self.marshal(b'aBc').islower())
        self.assertTrue(self.marshal(b'abc\n').islower())
        # predicate methods take no arguments
        self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)

    def test_isupper(self):
        self.assertFalse(self.marshal(b'').isupper())
        self.assertFalse(self.marshal(b'a').isupper())
        self.assertTrue(self.marshal(b'A').isupper())
        self.assertFalse(self.marshal(b'\n').isupper())
        self.assertTrue(self.marshal(b'ABC').isupper())
        self.assertFalse(self.marshal(b'AbC').isupper())
        self.assertTrue(self.marshal(b'ABC\n').isupper())
        self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)

    def test_istitle(self):
        self.assertFalse(self.marshal(b'').istitle())
        self.assertFalse(self.marshal(b'a').istitle())
        self.assertTrue(self.marshal(b'A').istitle())
        self.assertFalse(self.marshal(b'\n').istitle())
        self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
        self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
        self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
        self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
        self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
        self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
        self.assertFalse(self.marshal(b'NOT').istitle())
        self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)

    def test_isspace(self):
        self.assertFalse(self.marshal(b'').isspace())
        self.assertFalse(self.marshal(b'a').isspace())
        self.assertTrue(self.marshal(b' ').isspace())
        self.assertTrue(self.marshal(b'\t').isspace())
        self.assertTrue(self.marshal(b'\r').isspace())
        self.assertTrue(self.marshal(b'\n').isspace())
        self.assertTrue(self.marshal(b' \t\r\n').isspace())
        self.assertFalse(self.marshal(b' \t\r\na').isspace())
        self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)

    def test_isalpha(self):
        self.assertFalse(self.marshal(b'').isalpha())
        self.assertTrue(self.marshal(b'a').isalpha())
        self.assertTrue(self.marshal(b'A').isalpha())
        self.assertFalse(self.marshal(b'\n').isalpha())
        self.assertTrue(self.marshal(b'abc').isalpha())
        self.assertFalse(self.marshal(b'aBc123').isalpha())
        self.assertFalse(self.marshal(b'abc\n').isalpha())
        self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)

    def test_isalnum(self):
        self.assertFalse(self.marshal(b'').isalnum())
        self.assertTrue(self.marshal(b'a').isalnum())
        self.assertTrue(self.marshal(b'A').isalnum())
        self.assertFalse(self.marshal(b'\n').isalnum())
        self.assertTrue(self.marshal(b'123abc456').isalnum())
        self.assertTrue(self.marshal(b'a1b3c').isalnum())
        self.assertFalse(self.marshal(b'aBc000 ').isalnum())
        self.assertFalse(self.marshal(b'abc\n').isalnum())
        self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)

    def test_isdigit(self):
        self.assertFalse(self.marshal(b'').isdigit())
        self.assertFalse(self.marshal(b'a').isdigit())
        self.assertTrue(self.marshal(b'0').isdigit())
        self.assertTrue(self.marshal(b'0123456789').isdigit())
        self.assertFalse(self.marshal(b'0123456789a').isdigit())
        self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)

    def test_lower(self):
        self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
        self.assertEqual(b'hello', self.marshal(b'hello').lower())
        self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)

    def test_upper(self):
        self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
        self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
        self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)

    def test_capitalize(self):
        self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
        self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)

    def test_ljust(self):
        self.assertEqual(b'abc       ', self.marshal(b'abc').ljust(10))
        self.assertEqual(b'abc   ', self.marshal(b'abc').ljust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
        self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').ljust)

    def test_rjust(self):
        self.assertEqual(b'       abc', self.marshal(b'abc').rjust(10))
        self.assertEqual(b'   abc', self.marshal(b'abc').rjust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
        self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').rjust)

    def test_center(self):
        self.assertEqual(b'   abc    ', self.marshal(b'abc').center(10))
        self.assertEqual(b' abc  ', self.marshal(b'abc').center(6))
        self.assertEqual(b'abc', self.marshal(b'abc').center(3))
        self.assertEqual(b'abc', self.marshal(b'abc').center(2))
        self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').center)

    def test_swapcase(self):
        self.assertEqual(b'hEllO CoMPuTErS',
                         self.marshal(b'HeLLo cOmpUteRs').swapcase())
        self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)

    def test_zfill(self):
        self.assertEqual(b'123', self.marshal(b'123').zfill(2))
        self.assertEqual(b'123', self.marshal(b'123').zfill(3))
        self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
        # sign characters stay in front of the inserted zeros
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
        self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
        self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
        self.assertEqual(b'000', self.marshal(b'').zfill(3))
        self.assertEqual(b'34', self.marshal(b'34').zfill(1))
        self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
        self.assertRaises(TypeError, self.marshal(b'123').zfill)

    def test_expandtabs(self):
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\r\nab  def\ng   hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
                         self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
        # check keyword args
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=4))
        self.assertEqual(b' a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))
        self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
        # This test is only valid when sizeof(int) == sizeof(void*) == 4.
        if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
            self.assertRaises(OverflowError,
                              self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)

    def test_title(self):
        self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
        self.assertEqual(b'Format This As Title String',
                         self.marshal(b'fOrMaT thIs aS titLe String').title())
        self.assertEqual(b'Format,This-As*Title;String',
                         self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
        self.assertEqual(b'Getint', self.marshal(b'getInt').title())
        self.assertRaises(TypeError, self.marshal(b'hello').title, 42)

    def test_splitlines(self):
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\rghi').splitlines())
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi\n').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi', b''],
                         self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(False))
        self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(True))
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=False))
        self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=True))
        self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
# NOTE(review): neutralized merge-conflict separator ('======='); the branch
# that follows is an exact duplicate of the class above and should be deleted.
# Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
"""Tests that work for both bytes and buffer objects.
See PEP 3137.
"""
def marshal(self, x):
"""Convert x into the appropriate type for these tests."""
raise RuntimeError('test class must provide a marshal method')
def test_islower(self):
self.assertFalse(self.marshal(b'').islower())
self.assertTrue(self.marshal(b'a').islower())
self.assertFalse(self.marshal(b'A').islower())
self.assertFalse(self.marshal(b'\n').islower())
self.assertTrue(self.marshal(b'abc').islower())
self.assertFalse(self.marshal(b'aBc').islower())
self.assertTrue(self.marshal(b'abc\n').islower())
self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)
def test_isupper(self):
self.assertFalse(self.marshal(b'').isupper())
self.assertFalse(self.marshal(b'a').isupper())
self.assertTrue(self.marshal(b'A').isupper())
self.assertFalse(self.marshal(b'\n').isupper())
self.assertTrue(self.marshal(b'ABC').isupper())
self.assertFalse(self.marshal(b'AbC').isupper())
self.assertTrue(self.marshal(b'ABC\n').isupper())
self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)
def test_istitle(self):
self.assertFalse(self.marshal(b'').istitle())
self.assertFalse(self.marshal(b'a').istitle())
self.assertTrue(self.marshal(b'A').istitle())
self.assertFalse(self.marshal(b'\n').istitle())
self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
self.assertFalse(self.marshal(b'NOT').istitle())
self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)
def test_isspace(self):
self.assertFalse(self.marshal(b'').isspace())
self.assertFalse(self.marshal(b'a').isspace())
self.assertTrue(self.marshal(b' ').isspace())
self.assertTrue(self.marshal(b'\t').isspace())
self.assertTrue(self.marshal(b'\r').isspace())
self.assertTrue(self.marshal(b'\n').isspace())
self.assertTrue(self.marshal(b' \t\r\n').isspace())
self.assertFalse(self.marshal(b' \t\r\na').isspace())
self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)
def test_isalpha(self):
self.assertFalse(self.marshal(b'').isalpha())
self.assertTrue(self.marshal(b'a').isalpha())
self.assertTrue(self.marshal(b'A').isalpha())
self.assertFalse(self.marshal(b'\n').isalpha())
self.assertTrue(self.marshal(b'abc').isalpha())
self.assertFalse(self.marshal(b'aBc123').isalpha())
self.assertFalse(self.marshal(b'abc\n').isalpha())
self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)
def test_isalnum(self):
self.assertFalse(self.marshal(b'').isalnum())
self.assertTrue(self.marshal(b'a').isalnum())
self.assertTrue(self.marshal(b'A').isalnum())
self.assertFalse(self.marshal(b'\n').isalnum())
self.assertTrue(self.marshal(b'123abc456').isalnum())
self.assertTrue(self.marshal(b'a1b3c').isalnum())
self.assertFalse(self.marshal(b'aBc000 ').isalnum())
self.assertFalse(self.marshal(b'abc\n').isalnum())
self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)
def test_isdigit(self):
self.assertFalse(self.marshal(b'').isdigit())
self.assertFalse(self.marshal(b'a').isdigit())
self.assertTrue(self.marshal(b'0').isdigit())
self.assertTrue(self.marshal(b'0123456789').isdigit())
self.assertFalse(self.marshal(b'0123456789a').isdigit())
self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)
def test_lower(self):
self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
self.assertEqual(b'hello', self.marshal(b'hello').lower())
self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)
def test_upper(self):
self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)
def test_capitalize(self):
self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)
def test_ljust(self):
self.assertEqual(b'abc ', self.marshal(b'abc').ljust(10))
self.assertEqual(b'abc ', self.marshal(b'abc').ljust(6))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').ljust)
def test_rjust(self):
self.assertEqual(b' abc', self.marshal(b'abc').rjust(10))
self.assertEqual(b' abc', self.marshal(b'abc').rjust(6))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').rjust)
def test_center(self):
self.assertEqual(b' abc ', self.marshal(b'abc').center(10))
self.assertEqual(b' abc ', self.marshal(b'abc').center(6))
self.assertEqual(b'abc', self.marshal(b'abc').center(3))
self.assertEqual(b'abc', self.marshal(b'abc').center(2))
self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, b'*'))
self.assertRaises(TypeError, self.marshal(b'abc').center)
def test_swapcase(self):
self.assertEqual(b'hEllO CoMPuTErS',
self.marshal(b'HeLLo cOmpUteRs').swapcase())
self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)
def test_zfill(self):
self.assertEqual(b'123', self.marshal(b'123').zfill(2))
self.assertEqual(b'123', self.marshal(b'123').zfill(3))
self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
self.assertEqual(b'000', self.marshal(b'').zfill(3))
self.assertEqual(b'34', self.marshal(b'34').zfill(1))
self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
self.assertRaises(TypeError, self.marshal(b'123').zfill)
def test_expandtabs(self):
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs())
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(8))
self.assertEqual(b'abc\r\nab def\ng hi',
self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
# check keyword args
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=8))
self.assertEqual(b'abc\rab def\ng hi',
self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=4))
self.assertEqual(b' a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))
self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
self.assertRaises(OverflowError,
self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)
def test_title(self):
self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
self.assertEqual(b'Format This As Title String',
self.marshal(b'fOrMaT thIs aS titLe String').title())
self.assertEqual(b'Format,This-As*Title;String',
self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
self.assertEqual(b'Getint', self.marshal(b'getInt').title())
self.assertRaises(TypeError, self.marshal(b'hello').title, 42)
def test_splitlines(self):
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\rghi').splitlines())
self.assertEqual([b'abc', b'def', b'', b'ghi'],
self.marshal(b'abc\ndef\n\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi').splitlines())
self.assertEqual([b'abc', b'def', b'ghi'],
self.marshal(b'abc\ndef\r\nghi\n').splitlines())
self.assertEqual([b'abc', b'def', b'ghi', b''],
self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(True))
self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=False))
self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=True))
self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
# >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453  -- NOTE(review): unresolved
# =======  -- merge-conflict markers neutralized as comments; the duplicated
# test methods above/below must still be reconciled against this revision.
# Tests that work for both bytes and buffer objects.
# See PEP 3137.
import struct
import sys
class MixinBytesBufferCommonTests(object):
    """Tests that work for both bytes and buffer objects.
    See PEP 3137.

    NOTE(review): the expected literals in test_ljust/test_rjust/test_center/
    test_expandtabs had their runs of spaces collapsed to single spaces (so
    those assertions could never pass); they are restored here to the values
    the corresponding bytes methods actually produce.
    """

    def marshal(self, x):
        """Convert x into the appropriate type for these tests."""
        raise RuntimeError('test class must provide a marshal method')

    def test_islower(self):
        self.assertFalse(self.marshal(b'').islower())
        self.assertTrue(self.marshal(b'a').islower())
        self.assertFalse(self.marshal(b'A').islower())
        self.assertFalse(self.marshal(b'\n').islower())
        self.assertTrue(self.marshal(b'abc').islower())
        self.assertFalse(self.marshal(b'aBc').islower())
        self.assertTrue(self.marshal(b'abc\n').islower())
        self.assertRaises(TypeError, self.marshal(b'abc').islower, 42)

    def test_isupper(self):
        self.assertFalse(self.marshal(b'').isupper())
        self.assertFalse(self.marshal(b'a').isupper())
        self.assertTrue(self.marshal(b'A').isupper())
        self.assertFalse(self.marshal(b'\n').isupper())
        self.assertTrue(self.marshal(b'ABC').isupper())
        self.assertFalse(self.marshal(b'AbC').isupper())
        self.assertTrue(self.marshal(b'ABC\n').isupper())
        self.assertRaises(TypeError, self.marshal(b'abc').isupper, 42)

    def test_istitle(self):
        self.assertFalse(self.marshal(b'').istitle())
        self.assertFalse(self.marshal(b'a').istitle())
        self.assertTrue(self.marshal(b'A').istitle())
        self.assertFalse(self.marshal(b'\n').istitle())
        self.assertTrue(self.marshal(b'A Titlecased Line').istitle())
        self.assertTrue(self.marshal(b'A\nTitlecased Line').istitle())
        self.assertTrue(self.marshal(b'A Titlecased, Line').istitle())
        self.assertFalse(self.marshal(b'Not a capitalized String').istitle())
        self.assertFalse(self.marshal(b'Not\ta Titlecase String').istitle())
        self.assertFalse(self.marshal(b'Not--a Titlecase String').istitle())
        self.assertFalse(self.marshal(b'NOT').istitle())
        self.assertRaises(TypeError, self.marshal(b'abc').istitle, 42)

    def test_isspace(self):
        self.assertFalse(self.marshal(b'').isspace())
        self.assertFalse(self.marshal(b'a').isspace())
        self.assertTrue(self.marshal(b' ').isspace())
        self.assertTrue(self.marshal(b'\t').isspace())
        self.assertTrue(self.marshal(b'\r').isspace())
        self.assertTrue(self.marshal(b'\n').isspace())
        self.assertTrue(self.marshal(b' \t\r\n').isspace())
        self.assertFalse(self.marshal(b' \t\r\na').isspace())
        self.assertRaises(TypeError, self.marshal(b'abc').isspace, 42)

    def test_isalpha(self):
        self.assertFalse(self.marshal(b'').isalpha())
        self.assertTrue(self.marshal(b'a').isalpha())
        self.assertTrue(self.marshal(b'A').isalpha())
        self.assertFalse(self.marshal(b'\n').isalpha())
        self.assertTrue(self.marshal(b'abc').isalpha())
        self.assertFalse(self.marshal(b'aBc123').isalpha())
        self.assertFalse(self.marshal(b'abc\n').isalpha())
        self.assertRaises(TypeError, self.marshal(b'abc').isalpha, 42)

    def test_isalnum(self):
        self.assertFalse(self.marshal(b'').isalnum())
        self.assertTrue(self.marshal(b'a').isalnum())
        self.assertTrue(self.marshal(b'A').isalnum())
        self.assertFalse(self.marshal(b'\n').isalnum())
        self.assertTrue(self.marshal(b'123abc456').isalnum())
        self.assertTrue(self.marshal(b'a1b3c').isalnum())
        self.assertFalse(self.marshal(b'aBc000 ').isalnum())
        self.assertFalse(self.marshal(b'abc\n').isalnum())
        self.assertRaises(TypeError, self.marshal(b'abc').isalnum, 42)

    def test_isdigit(self):
        self.assertFalse(self.marshal(b'').isdigit())
        self.assertFalse(self.marshal(b'a').isdigit())
        self.assertTrue(self.marshal(b'0').isdigit())
        self.assertTrue(self.marshal(b'0123456789').isdigit())
        self.assertFalse(self.marshal(b'0123456789a').isdigit())
        self.assertRaises(TypeError, self.marshal(b'abc').isdigit, 42)

    def test_lower(self):
        self.assertEqual(b'hello', self.marshal(b'HeLLo').lower())
        self.assertEqual(b'hello', self.marshal(b'hello').lower())
        self.assertRaises(TypeError, self.marshal(b'hello').lower, 42)

    def test_upper(self):
        self.assertEqual(b'HELLO', self.marshal(b'HeLLo').upper())
        self.assertEqual(b'HELLO', self.marshal(b'HELLO').upper())
        self.assertRaises(TypeError, self.marshal(b'hello').upper, 42)

    def test_capitalize(self):
        self.assertEqual(b' hello ', self.marshal(b' hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').capitalize())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'aaaa').capitalize())
        self.assertEqual(b'Aaaa', self.marshal(b'AaAa').capitalize())
        self.assertRaises(TypeError, self.marshal(b'hello').capitalize, 42)

    def test_ljust(self):
        # ljust pads with (width - len) fill characters on the right.
        self.assertEqual(b'abc       ', self.marshal(b'abc').ljust(10))
        self.assertEqual(b'abc   ', self.marshal(b'abc').ljust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').ljust(2))
        self.assertEqual(b'abc*******', self.marshal(b'abc').ljust(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').ljust)

    def test_rjust(self):
        self.assertEqual(b'       abc', self.marshal(b'abc').rjust(10))
        self.assertEqual(b'   abc', self.marshal(b'abc').rjust(6))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(3))
        self.assertEqual(b'abc', self.marshal(b'abc').rjust(2))
        self.assertEqual(b'*******abc', self.marshal(b'abc').rjust(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').rjust)

    def test_center(self):
        # Odd margins put the extra fill character on the right.
        self.assertEqual(b'   abc    ', self.marshal(b'abc').center(10))
        self.assertEqual(b' abc  ', self.marshal(b'abc').center(6))
        self.assertEqual(b'abc', self.marshal(b'abc').center(3))
        self.assertEqual(b'abc', self.marshal(b'abc').center(2))
        self.assertEqual(b'***abc****', self.marshal(b'abc').center(10, b'*'))
        self.assertRaises(TypeError, self.marshal(b'abc').center)

    def test_swapcase(self):
        self.assertEqual(b'hEllO CoMPuTErS',
                         self.marshal(b'HeLLo cOmpUteRs').swapcase())
        self.assertRaises(TypeError, self.marshal(b'hello').swapcase, 42)

    def test_zfill(self):
        self.assertEqual(b'123', self.marshal(b'123').zfill(2))
        self.assertEqual(b'123', self.marshal(b'123').zfill(3))
        self.assertEqual(b'0123', self.marshal(b'123').zfill(4))
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(3))
        self.assertEqual(b'+123', self.marshal(b'+123').zfill(4))
        self.assertEqual(b'+0123', self.marshal(b'+123').zfill(5))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(3))
        self.assertEqual(b'-123', self.marshal(b'-123').zfill(4))
        self.assertEqual(b'-0123', self.marshal(b'-123').zfill(5))
        self.assertEqual(b'000', self.marshal(b'').zfill(3))
        self.assertEqual(b'34', self.marshal(b'34').zfill(1))
        self.assertEqual(b'0034', self.marshal(b'34').zfill(4))
        self.assertRaises(TypeError, self.marshal(b'123').zfill)

    def test_expandtabs(self):
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs())
        self.assertEqual(b'abc\r\nab      def\ng       hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(8))
        self.assertEqual(b'abc\r\nab  def\ng   hi',
                         self.marshal(b'abc\r\nab\tdef\ng\thi').expandtabs(4))
        self.assertEqual(b'abc\r\nab\r\ndef\ng\r\nhi',
                         self.marshal(b'abc\r\nab\r\ndef\ng\r\nhi').expandtabs(4))
        # check keyword args
        self.assertEqual(b'abc\rab      def\ng       hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=8))
        self.assertEqual(b'abc\rab  def\ng   hi',
                         self.marshal(b'abc\rab\tdef\ng\thi').expandtabs(tabsize=4))
        self.assertEqual(b'  a\n b', self.marshal(b' \ta\n\tb').expandtabs(1))
        self.assertRaises(TypeError, self.marshal(b'hello').expandtabs, 42, 42)
        # This test is only valid when sizeof(int) == sizeof(void*) == 4.
        if sys.maxsize < (1 << 32) and struct.calcsize('P') == 4:
            self.assertRaises(OverflowError,
                              self.marshal(b'\ta\n\tb').expandtabs, sys.maxsize)

    def test_title(self):
        self.assertEqual(b' Hello ', self.marshal(b' hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'hello ').title())
        self.assertEqual(b'Hello ', self.marshal(b'Hello ').title())
        self.assertEqual(b'Format This As Title String',
                         self.marshal(b'fOrMaT thIs aS titLe String').title())
        self.assertEqual(b'Format,This-As*Title;String',
                         self.marshal(b'fOrMaT,thIs-aS*titLe;String').title())
        self.assertEqual(b'Getint', self.marshal(b'getInt').title())
        self.assertRaises(TypeError, self.marshal(b'hello').title, 42)

    def test_splitlines(self):
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\rghi').splitlines())
        self.assertEqual([b'abc', b'def', b'', b'ghi'],
                         self.marshal(b'abc\ndef\n\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi'],
                         self.marshal(b'abc\ndef\r\nghi\n').splitlines())
        self.assertEqual([b'abc', b'def', b'ghi', b''],
                         self.marshal(b'abc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines())
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(False))
        self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(True))
        self.assertEqual([b'', b'abc', b'def', b'ghi', b''],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=False))
        self.assertEqual([b'\n', b'abc\n', b'def\r\n', b'ghi\n', b'\r'],
                         self.marshal(b'\nabc\ndef\r\nghi\n\r').splitlines(keepends=True))
        self.assertRaises(TypeError, self.marshal(b'abc').splitlines, 42, 42)
# >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453  -- NOTE(review): unresolved
# merge-conflict marker neutralized as a comment; resolve the conflict properly.
| |
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from scanner import make_scanner
def _import_c_scanstring():
try:
from _speedups import scanstring
return scanstring
except ImportError:
return None
# C implementation of scanstring, or None if _speedups is unavailable.
c_scanstring = _import_c_scanstring()
# Only the decoder class is part of this module's public API.
__all__ = ['JSONDecoder']
# Regex flags shared by every pattern compiled in this module.
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
class JSONDecodeError(ValueError):
    """ValueError raised when a JSON document cannot be parsed.

    Additional attributes beyond a plain ValueError:

    msg: The unformatted error message
    doc: The JSON document being parsed
    pos: The start index of doc where parsing failed
    end: The end index of doc where parsing failed (may be None)
    lineno: The line corresponding to pos
    colno: The column corresponding to pos
    endlineno: The line corresponding to end (may be None)
    endcolno: The column corresponding to end (may be None)
    """
    def __init__(self, msg, doc, pos, end=None):
        # The formatted message (with line/column context) becomes args[0].
        ValueError.__init__(self, errmsg(msg, doc, pos, end=end))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.end = end
        self.lineno, self.colno = linecol(doc, pos)
        if end is None:
            self.endlineno = self.endcolno = None
        else:
            self.endlineno, self.endcolno = linecol(doc, end)
def linecol(doc, pos):
    """Return the 1-based (line, column) of character index *pos* in *doc*.

    Fix: columns are now 1-based on every line.  Previously the first line
    used the raw 0-based index (``colno = pos``) while subsequent lines were
    already 1-based (the off-by-one fixed upstream in simplejson).
    """
    lineno = doc.count('\n', 0, pos) + 1
    if lineno == 1:
        # +1 converts the 0-based string index into a 1-based column,
        # matching the computation for later lines below.
        colno = pos + 1
    else:
        # Distance from the preceding newline is already 1-based.
        colno = pos - doc.rindex('\n', 0, pos)
    return lineno, colno
def errmsg(msg, doc, pos, end=None):
    """Format *msg* with line/column context for position(s) in *doc*."""
    # Note that this function is called from _speedups
    lineno, colno = linecol(doc, pos)
    if end is None:
        # Single-position error.  (%-formatting kept for Python 2.5 compat.)
        fmt = '%s: line %d column %d (char %d)'
        return fmt % (msg, lineno, colno, pos)
    # Ranged error: include the end position as well.
    endlineno, endcolno = linecol(doc, end)
    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
# Non-standard JSON constants accepted by the decoder (outside the spec).
_CONSTANTS = {
    '-Infinity': NegInf,
    'Infinity': PosInf,
    'NaN': NaN,
}
# Matches a run of plain characters followed by a quote, backslash, or
# control character -- the three things that end a plain chunk of a string.
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
# Lookup table for single-character escape sequences (everything except \uXXXX).
BACKSLASH = {
    '"': u'"', '\\': u'\\', '/': u'/',
    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
# Encoding assumed for byte strings when the caller does not supply one.
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.

    Returns a tuple of the decoded string and the index of the character in s
    after the end quote.

    ``_b`` and ``_m`` are default-argument caches of the escape table and
    chunk matcher (a CPython micro-optimization: locals beat globals).
    """
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    _append = chunks.append
    # begin points at the opening quote, used only for error reporting.
    begin = end - 1
    while 1:
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content contains zero or more unescaped string characters
        if content:
            # Python 2 only: promote byte strings to unicode.
            if not isinstance(content, unicode):
                content = unicode(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at" % (terminator,)
                #msg = "Invalid control character {0!r} at".format(terminator)
                raise JSONDecodeError(msg, s, end)
            else:
                # Non-strict mode: keep the raw control character.
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\escape: " + repr(esc)
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            esc = s[end + 1:end + 5]
            next_end = end + 5
            if len(esc) != 4:
                msg = "Invalid \\uXXXX escape"
                raise JSONDecodeError(msg, s, end)
            uni = int(esc, 16)
            # Check for surrogate pair on UCS-4 systems
            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
                if not s[end + 5:end + 7] == '\\u':
                    raise JSONDecodeError(msg, s, end)
                esc2 = s[end + 7:end + 11]
                if len(esc2) != 4:
                    raise JSONDecodeError(msg, s, end)
                uni2 = int(esc2, 16)
                # Combine the high and low surrogates into one code point.
                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
                next_end += 6
            char = unichr(uni)
            end = next_end
        # Append the unescaped character
        _append(char)
    return u''.join(chunks), end
# Use speedup if available; fall back to the pure-Python implementation.
scanstring = c_scanstring or py_scanstring
# Matches a (possibly empty) run of JSON whitespace.
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
# Same set as a plain string, for cheap single-character membership tests.
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object from ``s`` starting just after its opening '{'.

    ``(s, end)`` is a (document, index) pair -- Python 2 tuple-parameter
    syntax.  Returns a tuple of the constructed mapping (or the result of
    object_pairs_hook/object_hook) and the index one past the closing '}'.
    """
    # Backwards compatibility
    if memo is None:
        memo = {}
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        # Reuse an existing equal key object so repeated keys share memory.
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting : delimiter", s, end)
        end += 1
        # Fast path: skip one or two whitespace characters by hand before
        # falling back to the regex for longer runs.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end - 1)
        # Skip whitespace after the comma, then require the next key's quote.
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError("Expecting property name", s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array from ``s`` starting just after its opening '['.

    ``(s, end)`` is a (document, index) pair -- Python 2 tuple-parameter
    syntax.  Returns (list of values, index one past the closing ']').
    """
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    _append = values.append
    while True:
        try:
            value, end = scan_once(s, end)
        except StopIteration:
            raise JSONDecodeError("Expecting object", s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting , delimiter", s, end)
        # Fast path: skip one or two whitespace characters after the comma
        # by hand before falling back to the regex for longer runs.
        try:
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder

    Performs the following translations in decoding by default:

    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+

    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """

    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.

        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.

        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).

        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.

        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).

        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).

        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.

        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        # Fall back to the builtin converters when no overrides are given.
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # Shared by JSONObject to intern repeated object keys.
        self.memo = {}
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)

        Raises :class:`JSONDecodeError` ("Extra data") if non-whitespace
        characters remain after the first complete document.
        """
        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
        end = _w(s, end).end()
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.

        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        try:
            obj, end = self.scan_once(s, idx)
        except StopIteration:
            raise JSONDecodeError("No JSON object could be decoded", s, idx)
        return obj, end
| |
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A setup module for the GRPC Python package."""
from distutils import cygwinccompiler
from distutils import extension as _extension
from distutils import util
import os
import os.path
import pkg_resources
import platform
import re
import shlex
import shutil
import sys
import sysconfig
import setuptools
from setuptools.command import egg_info
# Redirect the manifest template from MANIFEST.in to PYTHON-MANIFEST.in.
egg_info.manifest_maker.template = 'PYTHON-MANIFEST.in'
PY3 = sys.version_info.major == 3
PYTHON_STEM = os.path.join('src', 'python', 'grpcio')
CORE_INCLUDE = ('include', '.',)
BORINGSSL_INCLUDE = (os.path.join('third_party', 'boringssl', 'include'),)
ZLIB_INCLUDE = (os.path.join('third_party', 'zlib'),)
CARES_INCLUDE = (
os.path.join('third_party', 'cares'),
os.path.join('third_party', 'cares', 'cares'),)
if 'linux' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_linux'),)
if 'darwin' in sys.platform:
CARES_INCLUDE += (os.path.join('third_party', 'cares', 'config_darwin'),)
README = os.path.join(PYTHON_STEM, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath(PYTHON_STEM))
# Break import-style to ensure we can actually find our in-repo dependencies.
import _spawn_patch
import commands
import grpc_core_dependencies
import grpc_version
_spawn_patch.monkeypatch_spawn()
LICENSE = 'Apache License 2.0'
# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support. Even if this
# is set to false, if the script detects that the generated `.c` file isn't
# present, then it will still attempt to use Cython.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)
# Environment variable to determine whether or not to enable coverage analysis
# in Cython modules.
ENABLE_CYTHON_TRACING = os.environ.get(
'GRPC_PYTHON_ENABLE_CYTHON_TRACING', False)
# Environment variable specifying whether or not there's interest in setting up
# documentation building.
ENABLE_DOCUMENTATION_BUILD = os.environ.get(
'GRPC_PYTHON_ENABLE_DOCUMENTATION_BUILD', False)
# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
EXTRA_ENV_COMPILE_ARGS = ''
if 'win32' in sys.platform and sys.version_info < (3, 5):
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11'
# We use define flags here and don't directly add to DEFINE_MACROS below to
# ensure that the expert user/builder has a way of turning it off (via the
# envvars) without adding yet more GRPC-specific envvars.
# See https://sourceforge.net/p/mingw-w64/bugs/363/
if '32' in platform.architecture()[0]:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
else:
EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
elif 'win32' in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -D_PYTHON_MSVC'
elif "linux" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -std=c++11 -std=gnu99 -fvisibility=hidden -fno-wrapv -fno-exceptions'
elif "darwin" in sys.platform:
EXTRA_ENV_COMPILE_ARGS += ' -fvisibility=hidden -fno-wrapv -fno-exceptions'
if EXTRA_ENV_LINK_ARGS is None:
EXTRA_ENV_LINK_ARGS = ''
if "linux" in sys.platform or "darwin" in sys.platform:
EXTRA_ENV_LINK_ARGS += ' -lpthread'
elif "win32" in sys.platform and sys.version_info < (3, 5):
msvcr = cygwinccompiler.get_msvcr()[0]
# TODO(atash) sift through the GCC specs to see if libstdc++ can have any
# influence on the linkage outcome on MinGW for non-C++ programs.
EXTRA_ENV_LINK_ARGS += (
' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
'-static'.format(msvcr=msvcr))
if "linux" in sys.platform:
EXTRA_ENV_LINK_ARGS += ' -Wl,-wrap,memcpy -static-libgcc'
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
CYTHON_EXTENSION_PACKAGE_NAMES = ()
CYTHON_EXTENSION_MODULE_NAMES = ('grpc._cython.cygrpc',)
CYTHON_HELPER_C_FILES = ()
# Core C sources compiled into the extension (tuple: immutable and safe to
# consume repeatedly).
CORE_C_FILES = tuple(grpc_core_dependencies.CORE_SOURCE_FILES)
if "win32" in sys.platform and "64bit" in platform.architecture()[0]:
    # c-ares is disabled on 64-bit Windows (see GRPC_ARES macro below), so
    # drop its sources.  A tuple comprehension replaces the old `filter(...)`,
    # which on Python 3 left a one-shot iterator bound to a name that is later
    # re-read via list(CORE_C_FILES) -- fragile if consumed more than once.
    CORE_C_FILES = tuple(
        path for path in CORE_C_FILES if 'third_party/cares' not in path)
EXTENSION_INCLUDE_DIRECTORIES = (
(PYTHON_STEM,) + CORE_INCLUDE + BORINGSSL_INCLUDE + ZLIB_INCLUDE +
CARES_INCLUDE)
EXTENSION_LIBRARIES = ()
if "linux" in sys.platform:
EXTENSION_LIBRARIES += ('rt',)
if not "win32" in sys.platform:
EXTENSION_LIBRARIES += ('m',)
if "win32" in sys.platform:
EXTENSION_LIBRARIES += ('advapi32', 'ws2_32',)
DEFINE_MACROS = (
('OPENSSL_NO_ASM', 1), ('_WIN32_WINNT', 0x600),
('GPR_BACKWARDS_COMPATIBILITY_MODE', 1),)
if "win32" in sys.platform:
DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1), ('CARES_STATICLIB', 1),)
if '64bit' in platform.architecture()[0]:
# TODO(zyc): Re-enble c-ares on x64 windows after fixing the
# ares_library_init compilation issue
DEFINE_MACROS += (('MS_WIN64', 1), ('GRPC_ARES', 0),)
elif sys.version_info >= (3, 5):
# For some reason, this is needed to get access to inet_pton/inet_ntop
# on msvc, but only for 32 bits
DEFINE_MACROS += (('NTDDI_VERSION', 0x06000000),)
else:
DEFINE_MACROS += (('HAVE_CONFIG_H', 1),)
LDFLAGS = tuple(EXTRA_LINK_ARGS)
CFLAGS = tuple(EXTRA_COMPILE_ARGS)
if "linux" in sys.platform or "darwin" in sys.platform:
pymodinit_type = 'PyObject*' if PY3 else 'void'
pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type)
DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python3 was built with.
# For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
if 'darwin' in sys.platform and PY3:
mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if mac_target and (pkg_resources.parse_version(mac_target) <
pkg_resources.parse_version('10.7.0')):
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.7'
os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
r'macosx-[0-9]+\.[0-9]+-(.+)',
r'macosx-10.7-\1',
util.get_platform())
def cython_extensions_and_necessity():
    """Build the Cython Extension objects and report whether Cython is needed.

    Returns a tuple ``(extensions, need_cython)``: the (possibly cythonized)
    Extension list, and whether a Cython install is required to build them
    (either explicitly requested or because generated .c files are missing).
    """
    pyx_sources = [
        os.path.join(PYTHON_STEM, name.replace('.', '/') + '.pyx')
        for name in CYTHON_EXTENSION_MODULE_NAMES
    ]
    config = os.environ.get('CONFIG', 'opt')
    prefix = 'libs/' + config + '/'
    if "darwin" in sys.platform:
        # On macOS we link prebuilt static archives instead of compiling the
        # core C sources directly into the extension.
        extra_objects = [prefix + archive
                         for archive in ('libares.a', 'libboringssl.a',
                                         'libgpr.a', 'libgrpc.a')]
        core_c_files = []
    else:
        core_c_files = list(CORE_C_FILES)
        extra_objects = []
    extensions = []
    for module_name, module_file in zip(list(CYTHON_EXTENSION_MODULE_NAMES),
                                        pyx_sources):
        extensions.append(
            _extension.Extension(
                name=module_name,
                sources=[module_file] + list(CYTHON_HELPER_C_FILES) + core_c_files,
                include_dirs=list(EXTENSION_INCLUDE_DIRECTORIES),
                libraries=list(EXTENSION_LIBRARIES),
                define_macros=list(DEFINE_MACROS),
                extra_objects=extra_objects,
                extra_compile_args=list(CFLAGS),
                extra_link_args=list(LDFLAGS),
            ))
    need_cython = BUILD_WITH_CYTHON
    if not BUILD_WITH_CYTHON:
        # Even without an explicit request, Cython is needed when the
        # pre-generated .c files are absent or stale.
        need_cython = need_cython or not commands.check_and_update_cythonization(extensions)
    return (commands.try_cythonize(extensions,
                                   linetracing=ENABLE_CYTHON_TRACING,
                                   mandatory=BUILD_WITH_CYTHON),
            need_cython)
# Build the extension list and determine whether Cython must run.
CYTHON_EXTENSION_MODULES, need_cython = cython_extensions_and_necessity()
PACKAGE_DIRECTORIES = {
    '': PYTHON_STEM,
}
INSTALL_REQUIRES = (
    'six>=1.5.2',
    # TODO(atash): eventually split the grpcio package into a metapackage
    # depending on protobuf and the runtime component (independent of protobuf)
    'protobuf>=3.3.0',
)
if not PY3:
  # Backports needed only on Python 2: concurrent.futures and enum.
  INSTALL_REQUIRES += ('futures>=2.2.0', 'enum34>=1.0.4')
# NOTE(review): the conditional expression binds to the whole sum, so when
# ENABLE_DOCUMENTATION_BUILD is false SETUP_REQUIRES is the empty tuple and
# does NOT include INSTALL_REQUIRES -- presumably intentional; confirm.
SETUP_REQUIRES = INSTALL_REQUIRES + (
    'sphinx>=1.3',
    'sphinx_rtd_theme>=0.1.8',
    'six>=1.10',
) if ENABLE_DOCUMENTATION_BUILD else ()
try:
  import Cython
except ImportError:
  if BUILD_WITH_CYTHON:
    # The user explicitly asked for a Cython build but Cython is missing:
    # warn rather than fail so that non-build commands still work.
    sys.stderr.write(
      "You requested a Cython build via GRPC_PYTHON_BUILD_WITH_CYTHON, "
      "but do not have Cython installed. We won't stop you from using "
      "other commands, but the extension files will fail to build.\n")
  elif need_cython:
    # Cython is required (generated C sources stale/absent); fetch it at
    # setup time.
    sys.stderr.write(
      'We could not find Cython. Setup may take 10-20 minutes.\n')
    SETUP_REQUIRES += ('cython>=0.23',)
# Custom distutils/setuptools commands implemented in commands.py.
COMMAND_CLASS = {
  'doc': commands.SphinxDocumentation,
  'build_project_metadata': commands.BuildProjectMetadata,
  'build_py': commands.BuildPy,
  'build_ext': commands.BuildExt,
  'gather': commands.Gather,
}
# Ensure that package data is copied over before any commands have been run:
credentials_dir = os.path.join(PYTHON_STEM, 'grpc', '_cython', '_credentials')
try:
  os.mkdir(credentials_dir)
except OSError:
  # Directory probably exists already; any other failure will surface in the
  # copyfile call below.
  pass
shutil.copyfile(os.path.join('etc', 'roots.pem'),
                os.path.join(credentials_dir, 'roots.pem'))
PACKAGE_DATA = {
  # Binaries that may or may not be present in the final installation, but are
  # mentioned here for completeness.
  'grpc._cython': [
    '_credentials/roots.pem',
    '_windows/grpc_c.32.python',
    '_windows/grpc_c.64.python',
  ],
}
PACKAGES = setuptools.find_packages(PYTHON_STEM)
setuptools.setup(
  name='grpcio',
  version=grpc_version.VERSION,
  description='HTTP/2-based RPC framework',
  author='The gRPC Authors',
  author_email='grpc-io@googlegroups.com',
  url='http://www.grpc.io',
  license=LICENSE,
  long_description=open(README).read(),
  ext_modules=CYTHON_EXTENSION_MODULES,
  packages=list(PACKAGES),
  package_dir=PACKAGE_DIRECTORIES,
  package_data=PACKAGE_DATA,
  install_requires=INSTALL_REQUIRES,
  setup_requires=SETUP_REQUIRES,
  cmdclass=COMMAND_CLASS,
)
| |
"""
Network Users
=============
Manage the users configuration on network devices via the NAPALM proxy.
:codeauthor: Mircea Ulinic <ping@mirceaulinic.net>
:maturity: new
:depends: napalm
:platform: unix
Dependencies
------------
- :mod:`NAPALM proxy minion <salt.proxy.napalm>`
- :mod:`Users configuration management module <salt.modules.napalm_users>`
.. versionadded:: 2016.11.0
"""
import copy
import logging
import salt.utils.json
import salt.utils.napalm
log = logging.getLogger(__name__)
# ----------------------------------------------------------------------------------------------------------------------
# state properties
# ----------------------------------------------------------------------------------------------------------------------
__virtualname__ = "netusers"
# ----------------------------------------------------------------------------------------------------------------------
# global variables
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# property functions
# ----------------------------------------------------------------------------------------------------------------------
def __virtual__():
    """
    NAPALM library must be installed for this module to work and run in a (proxy) minion.
    """
    # Delegate to the shared NAPALM helper, which checks both library
    # availability and the (proxy) minion context.
    return salt.utils.napalm.virtual(__opts__, __virtualname__, __file__)
# ----------------------------------------------------------------------------------------------------------------------
# helper functions -- will not be exported
# ----------------------------------------------------------------------------------------------------------------------
def _retrieve_users():
    """Retrieves configured users"""
    # Execution-module call; returns the usual NAPALM result dict with
    # 'result', 'comment' and 'out' keys.
    return __salt__["users.config"]()
def _ordered_dict_to_dict(probes):
    """Converts nested OrderedDicts (as produced by the SLS renderer) to plain dicts.

    Implemented as a JSON round-trip; note that ``None`` input stays ``None``.
    """
    return salt.utils.json.loads(salt.utils.json.dumps(probes))
def _expand_users(device_users, common_users):
"""Creates a longer list of accepted users on the device."""
expected_users = copy.deepcopy(common_users)
expected_users.update(device_users)
return expected_users
def _check_users(users):
"""Checks if the input dictionary of users is valid."""
messg = ""
valid = True
for user, user_details in users.items():
if not user_details:
valid = False
messg += "Please provide details for username {user}.\n".format(user=user)
continue
if not (
isinstance(user_details.get("level"), int)
or 0 <= user_details.get("level") <= 15
):
# warn!
messg += (
"Level must be a integer between 0 and 15 for username {user}. Will"
" assume 0.\n".format(user=user)
)
return valid, messg
def _compute_diff(configured, expected):
"""Computes the differences between the actual config and the expected config"""
diff = {"add": {}, "update": {}, "remove": {}}
configured_users = set(configured.keys())
expected_users = set(expected.keys())
add_usernames = expected_users - configured_users
remove_usernames = configured_users - expected_users
common_usernames = expected_users & configured_users
add = {username: expected.get(username) for username in add_usernames}
remove = {username: configured.get(username) for username in remove_usernames}
update = {}
for username in common_usernames:
user_configuration = configured.get(username)
user_expected = expected.get(username)
if user_configuration == user_expected:
continue
update[username] = {}
for field, field_value in user_expected.items():
if user_configuration.get(field) != field_value:
update[username][field] = field_value
diff.update({"add": add, "update": update, "remove": remove})
return diff
def _set_users(users):
    """Calls users.set_users."""
    # commit=False: the commit happens once, at the end of the state run,
    # through net.config_control.
    return __salt__["users.set_users"](users, commit=False)
def _update_users(users):
    """Calls users.set_users."""
    # Updates are partial set operations, hence the same execution function
    # as _set_users; commit deferred to net.config_control.
    return __salt__["users.set_users"](users, commit=False)
def _delete_users(users):
    """Calls users.delete_users."""
    # commit=False: deferred to the single commit in net.config_control.
    return __salt__["users.delete_users"](users, commit=False)
# ----------------------------------------------------------------------------------------------------------------------
# callable functions
# ----------------------------------------------------------------------------------------------------------------------
def managed(name, users=None, defaults=None):
    """
    Manages the configuration of the users on the device, as specified in the state SLS file. Users not defined in that
    file will be removed whilst users not configured on the device, will be added.

    name
        The name of the state.

    users
        Dictionary of users specific to this device, keyed by username; each
        value is a dictionary of user details (e.g. ``level``, ``password``,
        ``sshkeys``). Entries here override the ``defaults``.

    defaults
        Dictionary of users shared between several devices, in the same
        format as ``users``.

    SLS Example:

    .. code-block:: yaml

        netusers_example:
            netusers.managed:
                - users:
                    admin:
                        level: 15
                        password: $1$knmhgPPv$g8745biu4rb.Zf.IT.F/U1
                        sshkeys: []
                    restricted:
                        level: 1
                        password: $1$j34j5k4b$4d5SVjTiz1l.Zf.IT.F/K7

    CLI Example:

    .. code-block:: bash

        salt 'edge01.kix01' state.sls router.users

    The returned ``changes`` dictionary groups the computed differences under
    the ``added``, ``updated`` and ``removed`` keys.
    """
    result = False
    comment = ""
    changes = {}
    ret = {"name": name, "changes": changes, "result": result, "comment": comment}
    # BUG FIX: calling this state without `users` and `defaults` used to crash
    # with AttributeError inside _expand_users (None has no .update); fail
    # cleanly instead.
    if users is None and defaults is None:
        ret["comment"] = (
            "Please provide a valid configuration: "
            "at least one of `users` or `defaults` is required"
        )
        return ret
    # Normalise the SLS-rendered OrderedDicts to plain dicts; either argument
    # may be None when not supplied, in which case treat it as empty.
    users = _ordered_dict_to_dict(users) or {}
    defaults = _ordered_dict_to_dict(defaults) or {}
    expected_users = _expand_users(users, defaults)
    valid, message = _check_users(expected_users)
    if not valid:  # check and clean
        ret["comment"] = "Please provide a valid configuration: {error}".format(
            error=message
        )
        return ret
    # ----- Retrieve existing users configuration and determine differences ------------------------------------------->
    users_output = _retrieve_users()
    if not users_output.get("result"):
        ret["comment"] = "Cannot retrieve users from the device: {reason}".format(
            reason=users_output.get("comment")
        )
        return ret
    configured_users = users_output.get("out", {})
    if configured_users == expected_users:
        # Nothing to do: device already matches the desired state.
        ret.update({"comment": "Users already configured as needed.", "result": True})
        return ret
    diff = _compute_diff(configured_users, expected_users)
    users_to_add = diff.get("add", {})
    users_to_update = diff.get("update", {})
    users_to_remove = diff.get("remove", {})
    changes = {
        "added": users_to_add,
        "updated": users_to_update,
        "removed": users_to_remove,
    }
    ret.update({"changes": changes})
    if __opts__["test"] is True:
        # Test mode: report the would-be changes without touching the device.
        ret.update(
            {"result": None, "comment": "Testing mode: configuration was not changed!"}
        )
        return ret
    # <---- Retrieve existing users configuration and determine differences ------------------------------------------->
    # ----- Call _set_users and _delete_users as needed --------------------------------------------------------------->
    expected_config_change = False
    successfully_changed = True
    if users_to_add:
        _set = _set_users(users_to_add)
        if _set.get("result"):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += "Cannot configure new users: {reason}".format(
                reason=_set.get("comment")
            )
    if users_to_update:
        _update = _update_users(users_to_update)
        if _update.get("result"):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += "Cannot update the users configuration: {reason}".format(
                reason=_update.get("comment")
            )
    if users_to_remove:
        _delete = _delete_users(users_to_remove)
        if _delete.get("result"):
            expected_config_change = True
        else:  # something went wrong...
            successfully_changed = False
            comment += "Cannot remove users: {reason}".format(
                reason=_delete.get("comment")
            )
    # <---- Call _set_users and _delete_users as needed ----------------------------------------------------------------
    # ----- Try to commit changes ------------------------------------------------------------------------------------->
    if expected_config_change and successfully_changed:
        # Only commit when every staged operation succeeded.
        config_result, config_comment = __salt__["net.config_control"]()
        result = config_result
        comment += config_comment
    # <---- Try to commit changes --------------------------------------------------------------------------------------
    if expected_config_change and result and not comment:
        comment = "Configuration updated!"
    ret.update({"result": result, "comment": comment})
    return ret
| |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""MB - the Meta-Build wrapper around GYP and GN
MB is a wrapper script for GYP and GN that can be used to generate build files
for sets of canned configurations and analyze them.
"""
from __future__ import print_function
import argparse
import ast
import errno
import json
import os
import pipes
import pprint
import shlex
import shutil
import sys
import subprocess
import tempfile
def main(args):
  """Entry point: parse the command line and dispatch to the chosen subcommand."""
  wrapper = MetaBuildWrapper()
  wrapper.ParseArgs(args)
  # ParseArgs stores the selected subcommand handler in args.func.
  return wrapper.args.func()
class MetaBuildWrapper(object):
  def __init__(self):
    # Resolve the chromium src/ root from this file's location (three levels up).
    p = os.path
    d = os.path.dirname
    self.chromium_src_dir = p.normpath(d(d(d(p.abspath(__file__)))))
    # Default location of the master/builder -> config mapping (.pyl) file.
    self.default_config = p.join(self.chromium_src_dir, 'tools', 'mb',
                                 'mb_config.pyl')
    self.platform = sys.platform
    # Populated by ParseArgs().
    self.args = argparse.Namespace()
    # The remaining tables are populated by ReadConfigFile().
    self.configs = {}
    self.masters = {}
    self.mixins = {}
    self.private_configs = []
    self.common_dev_configs = []
    self.unsupported_configs = []
  def ParseArgs(self, argv):
    """Defines the mb command-line interface and parses argv into self.args.

    Each subcommand stores its handler in args.func, which main() invokes.
    """
    def AddCommonOptions(subp):
      # Flags shared by the analyze/gen/lookup subcommands.
      subp.add_argument('-b', '--builder',
                        help='builder name to look up config from')
      subp.add_argument('-m', '--master',
                        help='master name to look up config from')
      subp.add_argument('-c', '--config',
                        help='configuration to analyze')
      subp.add_argument('-f', '--config-file', metavar='PATH',
                        default=self.default_config,
                        help='path to config file '
                             '(default is //tools/mb/mb_config.pyl)')
      subp.add_argument('-g', '--goma-dir', default=self.ExpandUser('~/goma'),
                        help='path to goma directory (default is %(default)s).')
      subp.add_argument('-n', '--dryrun', action='store_true',
                        help='Do a dry run (i.e., do nothing, just print '
                             'the commands that will run)')
      subp.add_argument('-v', '--verbose', action='store_true',
                        help='verbose logging')
    parser = argparse.ArgumentParser(prog='mb')
    subps = parser.add_subparsers()
    subp = subps.add_parser('analyze',
                            help='analyze whether changes to a set of files '
                                 'will cause a set of binaries to be rebuilt.')
    AddCommonOptions(subp)
    subp.add_argument('--swarming-targets-file',
                      help='save runtime dependencies for targets listed '
                           'in file.')
    subp.add_argument('path', nargs=1,
                      help='path build was generated into.')
    subp.add_argument('input_path', nargs=1,
                      help='path to a file containing the input arguments '
                           'as a JSON object.')
    subp.add_argument('output_path', nargs=1,
                      help='path to a file containing the output arguments '
                           'as a JSON object.')
    subp.set_defaults(func=self.CmdAnalyze)
    subp = subps.add_parser('gen',
                            help='generate a new set of build files')
    AddCommonOptions(subp)
    subp.add_argument('--swarming-targets-file',
                      help='save runtime dependencies for targets listed '
                           'in file.')
    subp.add_argument('path', nargs=1,
                      help='path to generate build into')
    subp.set_defaults(func=self.CmdGen)
    subp = subps.add_parser('lookup',
                            help='look up the command for a given config or '
                                 'builder')
    AddCommonOptions(subp)
    subp.set_defaults(func=self.CmdLookup)
    # 'validate' deliberately takes only the config-file flag, not the
    # common options.
    subp = subps.add_parser('validate',
                            help='validate the config file')
    subp.add_argument('-f', '--config-file', metavar='PATH',
                      default=self.default_config,
                      help='path to config file '
                           '(default is //tools/mb/mb_config.pyl)')
    subp.set_defaults(func=self.CmdValidate)
    subp = subps.add_parser('help',
                            help='Get help on a subcommand.')
    subp.add_argument(nargs='?', action='store', dest='subcommand',
                      help='The command to get help for.')
    subp.set_defaults(func=self.CmdHelp)
    self.args = parser.parse_args(argv)
def CmdAnalyze(self):
vals = self.GetConfig()
if vals['type'] == 'gn':
return self.RunGNAnalyze(vals)
elif vals['type'] == 'gyp':
return self.RunGYPAnalyze(vals)
else:
raise MBErr('Unknown meta-build type "%s"' % vals['type'])
def CmdGen(self):
vals = self.GetConfig()
self.ClobberIfNeeded(vals)
if vals['type'] == 'gn':
return self.RunGNGen(vals)
if vals['type'] == 'gyp':
return self.RunGYPGen(vals)
raise MBErr('Unknown meta-build type "%s"' % vals['type'])
  def CmdLookup(self):
    """Prints (but does not run) the generation command for the resolved config."""
    vals = self.GetConfig()
    if vals['type'] == 'gn':
      # '<path>' is a placeholder the user substitutes with a real build dir.
      cmd = self.GNCmd('gen', '<path>', vals['gn_args'])
    elif vals['type'] == 'gyp':
      if vals['gyp_crosscompile']:
        self.Print('GYP_CROSSCOMPILE=1')
      cmd = self.GYPCmd('<path>', vals['gyp_defines'], vals['gyp_config'])
    else:
      raise MBErr('Unknown meta-build type "%s"' % vals['type'])
    self.PrintCmd(cmd)
    return 0
def CmdHelp(self):
if self.args.subcommand:
self.ParseArgs([self.args.subcommand, '--help'])
else:
self.ParseArgs(['--help'])
def CmdValidate(self):
errs = []
# Read the file to make sure it parses.
self.ReadConfigFile()
# Figure out the whole list of configs and ensure that no config is
# listed in more than one category.
all_configs = {}
for config in self.common_dev_configs:
all_configs[config] = 'common_dev_configs'
for config in self.private_configs:
if config in all_configs:
errs.append('config "%s" listed in "private_configs" also '
'listed in "%s"' % (config, all_configs['config']))
else:
all_configs[config] = 'private_configs'
for config in self.unsupported_configs:
if config in all_configs:
errs.append('config "%s" listed in "unsupported_configs" also '
'listed in "%s"' % (config, all_configs['config']))
else:
all_configs[config] = 'unsupported_configs'
for master in self.masters:
for builder in self.masters[master]:
config = self.masters[master][builder]
if config in all_configs and all_configs[config] not in self.masters:
errs.append('Config "%s" used by a bot is also listed in "%s".' %
(config, all_configs[config]))
else:
all_configs[config] = master
# Check that every referenced config actually exists.
for config, loc in all_configs.items():
if not config in self.configs:
errs.append('Unknown config "%s" referenced from "%s".' %
(config, loc))
# Check that every actual config is actually referenced.
for config in self.configs:
if not config in all_configs:
errs.append('Unused config "%s".' % config)
# Figure out the whole list of mixins, and check that every mixin
# listed by a config or another mixin actually exists.
referenced_mixins = set()
for config, mixins in self.configs.items():
for mixin in mixins:
if not mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by config "%s".' %
(mixin, config))
referenced_mixins.add(mixin)
for mixin in self.mixins:
for sub_mixin in self.mixins[mixin].get('mixins', []):
if not sub_mixin in self.mixins:
errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
(sub_mixin, mixin))
referenced_mixins.add(sub_mixin)
# Check that every mixin defined is actually referenced somewhere.
for mixin in self.mixins:
if not mixin in referenced_mixins:
errs.append('Unreferenced mixin "%s".' % mixin)
if errs:
raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
'\n ' + '\n '.join(errs))
self.Print('mb config file %s looks ok.' % self.args.config_file)
return 0
  def GetConfig(self):
    """Resolves the requested config (from -c or -m/-b) and flattens its mixins."""
    self.ReadConfigFile()
    config = self.ConfigFromArgs()
    if not config in self.configs:
      raise MBErr('Config "%s" not found in %s' %
                  (config, self.args.config_file))
    return self.FlattenConfig(config)
  def ReadConfigFile(self):
    """Parses the .pyl config file into the instance's lookup tables."""
    if not self.Exists(self.args.config_file):
      raise MBErr('config file not found at %s' % self.args.config_file)
    try:
      # The config is a Python literal (.pyl); literal_eval is safe to use.
      contents = ast.literal_eval(self.ReadFile(self.args.config_file))
    except SyntaxError as e:
      raise MBErr('Failed to parse config file "%s": %s' %
                  (self.args.config_file, e))
    self.common_dev_configs = contents['common_dev_configs']
    self.configs = contents['configs']
    self.masters = contents['masters']
    self.mixins = contents['mixins']
    self.private_configs = contents['private_configs']
    self.unsupported_configs = contents['unsupported_configs']
def ConfigFromArgs(self):
if self.args.config:
if self.args.master or self.args.builder:
raise MBErr('Can not specific both -c/--config and -m/--master or '
'-b/--builder')
return self.args.config
if not self.args.master or not self.args.builder:
raise MBErr('Must specify either -c/--config or '
'(-m/--master and -b/--builder)')
if not self.args.master in self.masters:
raise MBErr('Master name "%s" not found in "%s"' %
(self.args.master, self.args.config_file))
if not self.args.builder in self.masters[self.args.master]:
raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
(self.args.builder, self.args.master, self.args.config_file))
return self.masters[self.args.master][self.args.builder]
  def FlattenConfig(self, config):
    """Expands a config name into its full value dict by applying its mixins."""
    mixins = self.configs[config]
    vals = {
      'type': None,
      'gn_args': [],
      # NOTE(review): gyp_config defaults to a list here but mixins assign a
      # string to it -- confirm which type downstream code expects.
      'gyp_config': [],
      'gyp_defines': '',
      'gyp_crosscompile': False,
    }
    visited = []
    self.FlattenMixins(mixins, vals, visited)
    return vals
  def FlattenMixins(self, mixins, vals, visited):
    """Recursively merges each mixin's values into `vals` (in place).

    String-valued gn_args/gyp_defines accumulate space-separated across
    mixins; scalar fields (type, gyp_config, gyp_crosscompile) are
    overwritten by later mixins. `visited` records traversal order.
    """
    for m in mixins:
      if m not in self.mixins:
        raise MBErr('Unknown mixin "%s"' % m)
      # TODO: check for cycles in mixins.
      visited.append(m)
      mixin_vals = self.mixins[m]
      if 'type' in mixin_vals:
        vals['type'] = mixin_vals['type']
      if 'gn_args' in mixin_vals:
        if vals['gn_args']:
          vals['gn_args'] += ' ' + mixin_vals['gn_args']
        else:
          vals['gn_args'] = mixin_vals['gn_args']
      if 'gyp_config' in mixin_vals:
        vals['gyp_config'] = mixin_vals['gyp_config']
      if 'gyp_crosscompile' in mixin_vals:
        vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
      if 'gyp_defines' in mixin_vals:
        if vals['gyp_defines']:
          vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
        else:
          vals['gyp_defines'] = mixin_vals['gyp_defines']
      if 'mixins' in mixin_vals:
        # Depth-first expansion of nested mixins.
        self.FlattenMixins(mixin_vals['mixins'], vals, visited)
    return vals
  def ClobberIfNeeded(self, vals):
    """Wipes the build directory when the meta-build type (gn/gyp) changed."""
    path = self.args.path[0]
    build_dir = self.ToAbsPath(path)
    mb_type_path = os.path.join(build_dir, 'mb_type')
    needs_clobber = False
    new_mb_type = vals['type']
    if self.Exists(build_dir):
      if self.Exists(mb_type_path):
        old_mb_type = self.ReadFile(mb_type_path)
        if old_mb_type != new_mb_type:
          self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
                     (old_mb_type, new_mb_type, path))
          needs_clobber = True
      else:
        # There is no 'mb_type' file in the build directory, so this probably
        # means that the prior build(s) were not done through mb, and we
        # have no idea if this was a GYP build or a GN build. Clobber it
        # to be safe.
        self.Print("%s/mb_type missing, clobbering to be safe" % path)
        needs_clobber = True
    if needs_clobber:
      self.RemoveDirectory(build_dir)
    self.MaybeMakeDirectory(build_dir)
    # Record the type used for this build so the next run can compare.
    self.WriteFile(mb_type_path, new_mb_type)
  def RunGNGen(self, vals):
    """Runs 'gn gen' and, when requested, writes .isolate files for swarming."""
    path = self.args.path[0]
    cmd = self.GNCmd('gen', path, vals['gn_args'])
    swarming_targets = []
    if self.args.swarming_targets_file:
      # We need GN to generate the list of runtime dependencies for
      # the compile targets listed (one per line) in the file so
      # we can run them via swarming. We use ninja_to_gn.pyl to convert
      # the compile targets to the matching GN labels.
      contents = self.ReadFile(self.args.swarming_targets_file)
      swarming_targets = contents.splitlines()
      gn_isolate_map = ast.literal_eval(self.ReadFile(os.path.join(
          self.chromium_src_dir, 'testing', 'buildbot', 'gn_isolate_map.pyl')))
      gn_labels = []
      for target in swarming_targets:
        if not target in gn_isolate_map:
          raise MBErr('test target "%s" not found in %s' %
                      (target, '//testing/buildbot/gn_isolate_map.pyl'))
        gn_labels.append(gn_isolate_map[target]['label'])
      gn_runtime_deps_path = self.ToAbsPath(path, 'runtime_deps')
      # Since GN hasn't run yet, the build directory may not even exist.
      self.MaybeMakeDirectory(self.ToAbsPath(path))
      self.WriteFile(gn_runtime_deps_path, '\n'.join(gn_labels) + '\n')
      cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
    ret, _, _ = self.Run(cmd)
    if ret:
      # If `gn gen` failed, we should exit early rather than trying to
      # generate isolates. Run() will have already logged any error output.
      self.Print('GN gen failed: %d' % ret)
      return ret
    # For each swarming target, locate the runtime_deps file GN produced,
    # then emit the matching .isolate and .isolated.gen.json files.
    for target in swarming_targets:
      if gn_isolate_map[target]['type'] == 'gpu_browser_test':
        runtime_deps_target = 'browser_tests'
      elif gn_isolate_map[target]['type'] == 'script':
        # For script targets, the build target is usually a group,
        # for which gn generates the runtime_deps next to the stamp file
        # for the label, which lives under the obj/ directory.
        label = gn_isolate_map[target]['label']
        runtime_deps_target = 'obj/%s.stamp' % label.replace(':', '/')
      else:
        runtime_deps_target = target
      if sys.platform == 'win32':
        deps_path = self.ToAbsPath(path,
                                   runtime_deps_target + '.exe.runtime_deps')
      else:
        deps_path = self.ToAbsPath(path,
                                   runtime_deps_target + '.runtime_deps')
      if not self.Exists(deps_path):
        raise MBErr('did not generate %s' % deps_path)
      command, extra_files = self.GetIsolateCommand(target, vals,
                                                    gn_isolate_map)
      runtime_deps = self.ReadFile(deps_path).splitlines()
      isolate_path = self.ToAbsPath(path, target + '.isolate')
      self.WriteFile(isolate_path,
                     pprint.pformat({
                       'variables': {
                         'command': command,
                         'files': sorted(runtime_deps + extra_files),
                       }
                     }) + '\n')
      self.WriteJSON(
        {
          'args': [
            '--isolated',
            self.ToSrcRelPath('%s%s%s.isolated' % (path, os.sep, target)),
            '--isolate',
            self.ToSrcRelPath('%s%s%s.isolate' % (path, os.sep, target)),
          ],
          'dir': self.chromium_src_dir,
          'version': 1,
        },
        isolate_path + 'd.gen.json',
      )
    return ret
def GNCmd(self, subcommand, path, gn_args=''):
if self.platform == 'linux2':
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'linux64',
'gn')
elif self.platform == 'darwin':
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'mac',
'gn')
else:
gn_path = os.path.join(self.chromium_src_dir, 'buildtools', 'win',
'gn.exe')
cmd = [gn_path, subcommand, path]
gn_args = gn_args.replace("$(goma_dir)", self.args.goma_dir)
if gn_args:
cmd.append('--args=%s' % gn_args)
return cmd
  def RunGYPGen(self, vals):
    """Runs gyp_chromium to generate build files into the configured path."""
    path = self.args.path[0]
    output_dir, gyp_config = self.ParseGYPConfigPath(path)
    if gyp_config != vals['gyp_config']:
      raise MBErr('The last component of the path (%s) must match the '
                  'GYP configuration specified in the config (%s), and '
                  'it does not.' % (gyp_config, vals['gyp_config']))
    cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
    env = None
    if vals['gyp_crosscompile']:
      # GYP reads GYP_CROSSCOMPILE from the environment, not the command line.
      if self.args.verbose:
        self.Print('Setting GYP_CROSSCOMPILE=1 in the environment')
      env = os.environ.copy()
      env['GYP_CROSSCOMPILE'] = '1'
    ret, _, _ = self.Run(cmd, env=env)
    return ret
  def RunGYPAnalyze(self, vals):
    """Runs GYP's analyzer generator over the input/output JSON files."""
    output_dir, gyp_config = self.ParseGYPConfigPath(self.args.path[0])
    if gyp_config != vals['gyp_config']:
      raise MBErr('The last component of the path (%s) must match the '
                  'GYP configuration specified in the config (%s), and '
                  'it does not.' % (gyp_config, vals['gyp_config']))
    if self.args.verbose:
      # NOTE(review): the input JSON is read here only for logging; the
      # analyzer itself reads it via config_path below -- confirm intended.
      inp = self.ReadInputJSON(['files', 'targets'])
      self.Print()
      self.Print('analyze input:')
      self.PrintJSON(inp)
      self.Print()
    cmd = self.GYPCmd(output_dir, vals['gyp_defines'], config=gyp_config)
    cmd.extend(['-f', 'analyzer',
                '-G', 'config_path=%s' % self.args.input_path[0],
                '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
    ret, _, _ = self.Run(cmd)
    if not ret and self.args.verbose:
      outp = json.loads(self.ReadFile(self.args.output_path[0]))
      self.Print()
      self.Print('analyze output:')
      self.PrintJSON(outp)
      self.Print()
    return ret
def GetIsolateCommand(self, target, vals, gn_isolate_map):
# This needs to mirror the settings in //build/config/ui.gni:
# use_x11 = is_linux && !use_ozone.
# TODO(dpranke): Figure out how to keep this in sync better.
use_x11 = (sys.platform == 'linux2' and
not 'target_os="android"' in vals['gn_args'] and
not 'use_ozone=true' in vals['gn_args'])
asan = 'is_asan=true' in vals['gn_args']
msan = 'is_msan=true' in vals['gn_args']
tsan = 'is_tsan=true' in vals['gn_args']
executable_suffix = '.exe' if sys.platform == 'win32' else ''
test_type = gn_isolate_map[target]['type']
cmdline = []
extra_files = []
if use_x11 and test_type == 'windowed_test_launcher':
extra_files = [
'xdisplaycheck',
'../../testing/test_env.py',
'../../testing/xvfb.py',
]
cmdline = [
'../../testing/xvfb.py',
'.',
'./' + str(target),
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type in ('windowed_test_launcher', 'console_test_launcher'):
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
'./' + str(target) + executable_suffix,
'--brave-new-test-launcher',
'--test-launcher-bot-mode',
'--asan=%d' % asan,
'--msan=%d' % msan,
'--tsan=%d' % tsan,
]
elif test_type == 'gpu_browser_test':
extra_files = [
'../../testing/test_env.py'
]
gtest_filter = gn_isolate_map[target]['gtest_filter']
cmdline = [
'../../testing/test_env.py',
'./browser_tests' + executable_suffix,
'--test-launcher-bot-mode',
'--enable-gpu',
'--test-launcher-jobs=1',
'--gtest_filter=%s' % gtest_filter,
]
elif test_type == 'script':
extra_files = [
'../../testing/test_env.py'
]
cmdline = [
'../../testing/test_env.py',
] + ['../../' + self.ToSrcRelPath(gn_isolate_map[target]['script'])]
elif test_type in ('raw'):
extra_files = []
cmdline = [
'./' + str(target) + executable_suffix,
] + gn_isolate_map[target].get('args')
else:
self.WriteFailureAndRaise('No command line for %s found (test type %s).'
% (target, test_type), output_path=None)
return cmdline, extra_files
  def ToAbsPath(self, build_path, *comps):
    """Converts a source-absolute (//...) build path to an absolute path."""
    return os.path.join(self.chromium_src_dir,
                        self.ToSrcRelPath(build_path),
                        *comps)
def ToSrcRelPath(self, path):
"""Returns a relative path from the top of the repo."""
# TODO: Support normal paths in addition to source-absolute paths.
assert(path.startswith('//'))
return path[2:].replace('/', os.sep)
  def ParseGYPConfigPath(self, path):
    """Splits a //-prefixed build path into (output_dir, config) and validates it."""
    rpath = self.ToSrcRelPath(path)
    # NOTE(review): ToSrcRelPath replaces '/' with os.sep, so this '/'-based
    # rpartition looks unreliable on Windows -- confirm.
    output_dir, _, config = rpath.rpartition('/')
    self.CheckGYPConfigIsSupported(config, path)
    return output_dir, config
def CheckGYPConfigIsSupported(self, config, path):
if config not in ('Debug', 'Release'):
if (sys.platform in ('win32', 'cygwin') and
config not in ('Debug_x64', 'Release_x64')):
raise MBErr('Unknown or unsupported config type "%s" in "%s"' %
config, path)
def GYPCmd(self, output_dir, gyp_defines, config):
gyp_defines = gyp_defines.replace("$(goma_dir)", self.args.goma_dir)
cmd = [
sys.executable,
os.path.join('build', 'gyp_chromium'),
'-G',
'output_dir=' + output_dir,
'-G',
'config=' + config,
]
for d in shlex.split(gyp_defines):
cmd += ['-D', d]
return cmd
  def RunGNAnalyze(self, vals):
    """Implement the 'analyze' step for GN builds.

    Reads a JSON file listing modified files and requested targets, uses
    'gn refs' to determine which of the requested targets are affected by
    the modified files, and writes the result JSON to the output path.
    Returns 0 on success (including the "no dependency" outcome).
    """
    # analyze runs before 'gn gen' now, so we need to run gn gen
    # in order to ensure that we have a build directory.
    ret = self.RunGNGen(vals)
    if ret:
      return ret
    inp = self.ReadInputJSON(['files', 'targets'])
    if self.args.verbose:
      self.Print()
      self.Print('analyze input:')
      self.PrintJSON(inp)
      self.Print()
    output_path = self.args.output_path[0]
    # Bail out early if a GN file was modified, since 'gn refs' won't know
    # what to do about it.
    if any(f.endswith('.gn') or f.endswith('.gni') for f in inp['files']):
      self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
      return 0
    # Bail out early if 'all' was asked for, since 'gn refs' won't recognize it.
    if 'all' in inp['targets']:
      self.WriteJSON({'status': 'Found dependency (all)'}, output_path)
      return 0
    # This shouldn't normally happen, but could due to unusual race conditions,
    # like a try job that gets scheduled before a patch lands but runs after
    # the patch has landed.
    if not inp['files']:
      self.Print('Warning: No files modified in patch, bailing out early.')
      self.WriteJSON({'targets': [],
                      'build_targets': [],
                      'status': 'No dependency'}, output_path)
      return 0
    ret = 0
    # 'gn refs' reads the modified-files list from a response file.
    response_file = self.TempFile()
    response_file.write('\n'.join(inp['files']) + '\n')
    response_file.close()
    matching_targets = []
    try:
      # Pass 1: resolve refs as *output* names and keep those the input
      # explicitly asked about.
      cmd = self.GNCmd('refs', self.args.path[0]) + [
          '@%s' % response_file.name, '--all', '--as=output']
      ret, out, _ = self.Run(cmd, force_verbose=False)
      if ret and not 'The input matches no targets' in out:
        self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                  output_path)
      build_dir = self.ToSrcRelPath(self.args.path[0]) + os.sep
      for output in out.splitlines():
        build_output = output.replace(build_dir, '')
        if build_output in inp['targets']:
          matching_targets.append(build_output)
      # Pass 2: same query by GN label, so bare target names also match.
      cmd = self.GNCmd('refs', self.args.path[0]) + [
          '@%s' % response_file.name, '--all']
      ret, out, _ = self.Run(cmd, force_verbose=False)
      if ret and not 'The input matches no targets' in out:
        self.WriteFailureAndRaise('gn refs returned %d: %s' % (ret, out),
                                  output_path)
      for label in out.splitlines():
        build_target = label[2:]
        # We want to accept 'chrome/android:chrome_public_apk' and
        # just 'chrome_public_apk'. This may result in too many targets
        # getting built, but we can adjust that later if need be.
        for input_target in inp['targets']:
          if (input_target == build_target or
              build_target.endswith(':' + input_target)):
            matching_targets.append(input_target)
    finally:
      self.RemoveFile(response_file.name)
    if matching_targets:
      # TODO: it could be that a target X might depend on a target Y
      # and both would be listed in the input, but we would only need
      # to specify target X as a build_target (whereas both X and Y are
      # targets). I'm not sure if that optimization is generally worth it.
      self.WriteJSON({'targets': sorted(set(matching_targets)),
                      'build_targets': sorted(set(matching_targets)),
                      'status': 'Found dependency'}, output_path)
    else:
      self.WriteJSON({'targets': [],
                      'build_targets': [],
                      'status': 'No dependency'}, output_path)
    if self.args.verbose:
      outp = json.loads(self.ReadFile(output_path))
      self.Print()
      self.Print('analyze output:')
      self.PrintJSON(outp)
      self.Print()
    return 0
def ReadInputJSON(self, required_keys):
path = self.args.input_path[0]
output_path = self.args.output_path[0]
if not self.Exists(path):
self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
try:
inp = json.loads(self.ReadFile(path))
except Exception as e:
self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
(path, e), output_path)
for k in required_keys:
if not k in inp:
self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
output_path)
return inp
def WriteFailureAndRaise(self, msg, output_path):
if output_path:
self.WriteJSON({'error': msg}, output_path, force_verbose=True)
raise MBErr(msg)
def WriteJSON(self, obj, path, force_verbose=False):
try:
self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
force_verbose=force_verbose)
except Exception as e:
raise MBErr('Error %s writing to the output path "%s"' %
(e, path))
  def PrintCmd(self, cmd):
    """Echo *cmd* in copy-pasteable shell form."""
    # The interpreter path is shortened to 'python' for readability; each
    # argument is shell-quoted so the printed line can be re-run as-is.
    if cmd[0] == sys.executable:
      cmd = ['python'] + cmd[1:]
    self.Print(*[pipes.quote(c) for c in cmd])
def PrintJSON(self, obj):
self.Print(json.dumps(obj, indent=2, sort_keys=True))
  def Print(self, *args, **kwargs):
    """Thin wrapper over print() — the indirection point tests override."""
    # This function largely exists so it can be overridden for testing.
    print(*args, **kwargs)
def Run(self, cmd, env=None, force_verbose=True):
# This function largely exists so it can be overridden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.PrintCmd(cmd)
if self.args.dryrun:
return 0, '', ''
ret, out, err = self.Call(cmd, env=env)
if self.args.verbose or force_verbose:
if out:
self.Print(out, end='')
if err:
self.Print(err, end='', file=sys.stderr)
return ret, out, err
def Call(self, cmd, env=None):
p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
return p.returncode, out, err
def ExpandUser(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.expanduser(path)
def Exists(self, path):
# This function largely exists so it can be overridden for testing.
return os.path.exists(path)
def MaybeMakeDirectory(self, path):
try:
os.makedirs(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def ReadFile(self, path):
# This function largely exists so it can be overriden for testing.
with open(path) as fp:
return fp.read()
def RemoveFile(self, path):
# This function largely exists so it can be overriden for testing.
os.remove(path)
def RemoveDirectory(self, abs_path):
if sys.platform == 'win32':
# In other places in chromium, we often have to retry this command
# because we're worried about other processes still holding on to
# file handles, but when MB is invoked, it will be early enough in the
# build that their should be no other processes to interfere. We
# can change this if need be.
self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
else:
shutil.rmtree(abs_path, ignore_errors=True)
def TempFile(self, mode='w'):
# This function largely exists so it can be overriden for testing.
return tempfile.NamedTemporaryFile(mode=mode, delete=False)
def WriteFile(self, path, contents, force_verbose=False):
# This function largely exists so it can be overriden for testing.
if self.args.dryrun or self.args.verbose or force_verbose:
self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
with open(path, 'w') as fp:
return fp.write(contents)
class MBErr(Exception):
  """Fatal MB error; raised after the message has been reported."""
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except MBErr as e:
    print(e)
    sys.exit(1)
  except KeyboardInterrupt:
    # BUG FIX: print() takes `file=`, not `stream=`; the old keyword raised
    # a TypeError on Ctrl-C and masked the intended 130 exit code.
    print("interrupted, exiting", file=sys.stderr)
    sys.exit(130)
| |
# Python Profiler v3
# Copyright (c) 2015-2017 David R Walker
# TODO:
# [x] Record only functions in StackLines
# [ ] Handle per-line hotspots as separate structure (not nested) - ?
# [ ] Handle timeline as separate structure
# [x] Use unique stack IDs to dedupe stack tuples
# [ ] Merge profile data method
# [ ] add custom metadata values to profile data (e.g. url, op, user id) for filtering / grouping
# [ ] filter/merge profile data by metadata
# [x] Expose randomize parameter for stochastic sampling
# [x] Add rate control (remove interval)
# - is this more or less misleading if we don't adjust for profiler overhead to achieve rate?
# - not adjusting for drift might be handy for estimating profiler performance/overheads
# [x] Finish linux platform driver (get thread CPU times seems to be unfinished!!)
# [ ] Windows platform driver
# [ ] Tidy up platform drivers and make a nice platform choosing function
# [ ] Convert into proper Python module + split into submodules
# [ ] Basic (temp) dump function (flat) - replace with proper collated version from stack tree
# [ ] Filter out long tail option (collate items with low ticks as 'Other') to remove noise
# [ ] Post process to build stack/call graph (have exporters work from this graph instead of raw data) - ?
# [ ] Record process ID in addition to thread?
# [ ] Option to merge processes
# [ ] Option to merge threads
# [ ] Test performance / optimize on various platforms
# [ ] Serialize (+append?) to file (lock file?)
# [ ] Load from file
# [ ] HTML5 exporter with drill-down
# [ ] Import/exporter framework
# [ ] Export to standard profiler formats (e.g. python, callgrind, firefox ThreadProfile json)
# [ ] Make Python 3 compatible
# [ ] Decorator to wrap a function with profiler
# [ ] Function to watch a function in profiler? (e.g. store code object in dict and check)
# [ ] Option to filter out standard (and custom) libraries? (path prefixes?)
# [ ] Figure out how to play nicely with time.sleep(), etc. - do we need to patch it?
# - EINTR / silent signal interrupts
# - breaks sleep/timeout behaviour in programs - provide optional monkey patches?
# - or just accept that signals break waits, and is fixed eventually by PEP475
# ('serious' code should be handling EINTR anyway?)
# [ ] Figure out how to avoid having to patch thread, wherever possible
# - maybe spawn a test thread on module import to detect if thread IDs match ?
# [x] Make interval private on profiler (or don't store)
# [x] Move all running time stats etc. into _profile_data - already done
import os
import time
import random
from contextlib import contextmanager
# - Scheduler ------------------------------------------------------------------
# Base class for repeated periodic function call
class IntervalScheduler(object):
    """Base class for calling a function repeatedly every `interval` seconds.

    Sub-classes supply the actual timing mechanism (thread, signal, ...)
    by overriding _init/_start/_stop.
    """

    default_rate = 1

    def __init__(self, interval_func, interval=0.01, stochastic=False, func_args=(), func_kwargs=None):
        # BUG FIX: func_kwargs previously defaulted to a shared mutable {},
        # which would alias across instances; default to None instead.
        self.interval = interval
        self._random = None
        if stochastic:
            # Our own Random to avoid side effects on shared PRNG
            self._random = random.Random()
        self._running = False
        self._interval_func = interval_func
        self._func_args = func_args
        self._func_kwargs = func_kwargs if func_kwargs is not None else {}
        self._init()

    def start(self):
        """Begin periodic ticking (no-op if already running)."""
        if not self.is_running():
            self._start()
            self._running = True

    def stop(self):
        """Stop periodic ticking (no-op if not running)."""
        if self.is_running():
            self._stop()
            self._running = False

    def is_running(self):
        return self._running

    def get_next_interval(self):
        # Stochastic mode draws uniformly from [0, 2*interval), whose mean
        # equals the configured interval.
        if self._random:
            return (2.0 * self._random.random() * self.interval)
        else:
            return self.interval

    def tick(self, frame):
        self._interval_func(*self._func_args, _interrupted_frame=frame, **self._func_kwargs)

    # Sub-classes should override the following methods to implement a scheduler
    # that will call self.tick() every self.interval seconds.
    # If the scheduler interupts a Python frame, it should pass the frame that was
    # interrupted to tick(), otherwise it should pass in None.
    def _init(self):
        pass

    def _start(self):
        raise NotImplementedError()

    def _stop(self):
        raise NotImplementedError()
# Uses a separate sleeping thread, which wakes periodically and calls self.tick()
class ThreadIntervalScheduler(IntervalScheduler):
    """Scheduler driven by a daemon thread that sleeps between ticks."""

    default_rate = 100

    def _init(self):
        import threading
        self._thread = None
        self._stopping = False
        self._event = threading.Event()

    def _start(self):
        import threading
        self._event.clear()

        def run():
            # NOTE: one final tick is still delivered after the stop event is
            # set, because wait/tick execute before the loop re-checks.
            while not self._event.is_set():
                self._event.wait(timeout=self.get_next_interval())
                self.tick(None)

        self._thread = threading.Thread(target=run, name='profiler')
        self._thread.daemon = True
        self._thread.start()

    def _stop(self):
        self._event.set()
        self._thread.join()
        self._stopping = False
import signal
# Signals the main thread every interval, which calls the tick() method when
# the timer event is triggered.
# Note that signal handlers are blocked during system calls, library calls, etc.
# in the main thread.
# We compensate for this by keeping track of real, user cpu, and system cpu
# usage between ticks on each thread.
# We prefer ITIMER_REAL, because that will be triggered immediately upon
# returning from a long-blocking system call, so we can add the ticks to the
# most appropriate function.
# However, if the main thread is blocked for a significant period, this will
# reduce the accuracy of samples in other threads, because only the main
# thread handles signals. In such situations, the ThreadIntervalScheduler might
# be more accurate.
# We don't specify an interval and reschedule the next tick ourselves. This
# allows us to dynamically change the sample interval to avoid aliasing, and
# prevents the signal interrupting itself, which can lead to stack errors,
# some strange behaviour when threads are being join()ed, and polluting the
# profile data with stack data from the profiler.
class SignalIntervalScheduler(IntervalScheduler):
    """Scheduler driven by ITIMER_REAL/SIGALRM delivered to the main thread."""
    default_rate = 1000
    # One-shot real-time timer; the handler re-arms it manually so a tick
    # can never interrupt itself (see module comment above the class).
    timer = signal.ITIMER_REAL
    signal = signal.SIGALRM
    def _start(self):
        def signal_handler(signum, frame):
            self.tick(frame)
            # Only re-arm while running; _stop() clears _run to break the chain.
            if self._run:
                signal.setitimer(self.timer, self.get_next_interval(), 0)
        signal.signal(self.signal, signal_handler)
        # Don't let the signal interrupt (EINTR) slow system calls.
        signal.siginterrupt(self.signal, False)
        self._run = True
        signal.setitimer(self.timer, self.get_next_interval(), 0)
    def _stop(self):
        self._run = False
        signal.setitimer(self.timer, 0, 0)
# - Platform-specific stuff ----------------------------------------------------
import thread
import threading
class ThreadPlatform(object):
    """Abstract helper mapping Python thread idents to OS thread IDs and
    reading per-thread CPU times.

    Concrete subclasses provide the platform specifics by overriding
    platform_init() and the get_* hooks below.
    """

    def __init__(self):
        self.name = ''
        self.lock = threading.Lock()
        self._registered_threads = {}  # Python ident -> platform thread id
        self._original_start_new_thread = thread.start_new_thread
        self.platform_init()

    def _patch_thread(self):
        # Must run on the main thread before workers are spawned, so every
        # subsequently created thread registers its platform thread ID.
        assert threading.current_thread().name == 'MainThread'
        with self.lock:
            self._registered_threads[threading.current_thread().ident] = self.get_current_thread_id()
        def start_new_thread_wrapper(func, args, kwargs={}):
            def thread_func(func, args, kwargs):
                system_tid = self.get_current_thread_id()
                with self.lock:
                    self._registered_threads[threading.current_thread().ident] = system_tid
                return func(*args, **kwargs)
            return self._original_start_new_thread(thread_func, (func, args, kwargs))
        thread.start_new_thread = start_new_thread_wrapper
        threading._start_new_thread = start_new_thread_wrapper

    def _unpatch_thread(self):
        with self.lock:
            self._registered_threads = {}
        # BUG FIX: these previously referenced a bare (undefined)
        # _original_start_new_thread name, raising NameError on unpatch.
        thread.start_new_thread = self._original_start_new_thread
        threading._start_new_thread = self._original_start_new_thread

    def _get_patched_thread_id(self, python_ident):
        #with self.lock:
        return self._registered_threads.get(python_ident)

    # --- platform hooks, implemented by subclasses ----------------------
    def platform_init(self):
        raise NotImplementedError()

    def get_thread_id_from_python_ident(self, python_ident):
        raise NotImplementedError()

    def get_current_thread_id(self):
        raise NotImplementedError()

    def get_thread_cpu_time(self, thread_id=None):
        raise NotImplementedError()
# Single-threaded CPU times using os.times(),
# which actually gives CPU times for the whole
# process.
# Will give bad results if there are actually
# other threads running!
class SingleThreadedPlatform(ThreadPlatform):
    """Fallback driver that treats the whole process as one thread."""

    def platform_init(self):
        pass

    def get_thread_id_from_python_ident(self, python_ident=None):
        # BUG FIX: previously took no python_ident argument, so any call
        # matching the base-class signature raised TypeError. The default
        # keeps old zero-argument call sites working.
        return 0

    def get_current_thread_id(self):
        return 0

    def get_thread_cpu_time(self, thread_id=None):
        # os.times()[0] + [1] is process user + system CPU time.
        time_info = os.times()
        return time_info[0] + time_info[1]
class MacPThreadPlatform(ThreadPlatform):
    """macOS driver: reads per-thread CPU times via libc thread_info()
    (THREAD_BASIC_INFO) on the Mach thread port."""
    def platform_init(self):
        import ctypes
        import ctypes.util
        libc = ctypes.CDLL(ctypes.util.find_library('libc'))
        # mach_thread_self() returns the Mach port of the calling thread.
        self._mach_thread_self = libc.mach_thread_self
        self._mach_thread_self.restype = ctypes.c_uint
        # TODO: check these field definitions
        class time_value_t(ctypes.Structure):
            _fields_ = [
                ("seconds", ctypes.c_int),
                ("microseconds",ctypes.c_int)
            ]
        class thread_basic_info(ctypes.Structure):
            _fields_ = [
                ("user_time", time_value_t),
                ("system_time",time_value_t),
                ("cpu_usage",ctypes.c_int),
                ("policy",ctypes.c_int),
                ("run_state",ctypes.c_int),
                ("flags",ctypes.c_int),
                ("suspend_count",ctypes.c_int),
                ("sleep_time",ctypes.c_int)
            ]
        thread_info = libc.thread_info
        thread_info.restype = ctypes.c_int
        thread_info.argtypes = [
            ctypes.c_uint,
            ctypes.c_int,
            ctypes.POINTER(thread_basic_info),
            ctypes.POINTER(ctypes.c_uint)
        ]
        self._thread_info = thread_info
        self._THREAD_BASIC_INFO = 3
        # Reusable out-parameters, to avoid per-tick allocations.
        self._out_info = thread_basic_info()
        self._count = ctypes.c_uint(ctypes.sizeof(self._out_info) / ctypes.sizeof(ctypes.c_uint))
        self._patch_thread()
    def get_thread_id_from_python_ident(self, python_ident):
        return self._get_patched_thread_id(python_ident)
    def get_current_thread_id(self):
        return self._mach_thread_self()
    def get_thread_cpu_time(self, python_ident=None):
        """Return user+system CPU seconds for the thread (0.0 on error)."""
        import ctypes
        # TODO: Optimize with shared structs, sizes, to minimize allocs per tick
        if python_ident is None:
            thread_id = self.get_current_thread_id()
        else:
            thread_id = self.get_thread_id_from_python_ident(python_ident)
        out_info = self._out_info
        result = self._thread_info(
            thread_id,
            self._THREAD_BASIC_INFO,
            ctypes.byref(out_info),
            ctypes.byref(self._count),
        )
        if result != 0:
            # thread_info failed (e.g. unknown/stale port): report zero.
            return 0.0
        user_time = out_info.user_time.seconds + out_info.user_time.microseconds / 1000000.0
        system_time = out_info.system_time.seconds + out_info.system_time.microseconds / 1000000.0
        return user_time + system_time
class LinuxPThreadPlatform(ThreadPlatform):
    """Linux driver: per-thread CPU times via pthread_getcpuclockid() and
    clock_gettime(), wrapped with ctypes."""

    def platform_init(self):
        import ctypes
        import ctypes.util
        pthread = ctypes.CDLL(ctypes.util.find_library('pthread'))
        libc = ctypes.CDLL(ctypes.util.find_library('c'))
        pthread_t = ctypes.c_ulong
        clockid_t = ctypes.c_long
        time_t = ctypes.c_long
        NANOSEC = 1.0 / 1e9
        CLOCK_THREAD_CPUTIME_ID = 3 # from linux/time.h
        class timespec(ctypes.Structure):
            _fields_ = [
                ('tv_sec', time_t),
                ('tv_nsec', ctypes.c_long),
            ]
        # wrap pthread_self()
        pthread_self = pthread.pthread_self
        pthread.argtypes = []
        pthread_self.restype = pthread_t
        # wrap pthread_getcpuclockid()
        pthread_getcpuclockid = pthread.pthread_getcpuclockid
        pthread_getcpuclockid.argtypes = [pthread_t, ctypes.POINTER(clockid_t)]
        pthread_getcpuclockid.restype = clockid_t
        # wrap clock_gettime()
        clock_gettime = libc.clock_gettime
        clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
        clock_gettime.restype = ctypes.c_int
        def get_current_thread_id():
            return pthread_self()
        def get_thread_cpu_time(thread_id=None):
            if thread_id is None:
                thread_id = pthread_self()
            # First, get the thread's CPU clock ID
            clock_id = clockid_t()
            error = pthread_getcpuclockid(thread_id, ctypes.pointer(clock_id))
            if error:
                return None
            # Now get time from clock...
            result = timespec()
            error = clock_gettime(clock_id, ctypes.pointer(result))
            if error:
                return None
            cpu_time = result.tv_sec + result.tv_nsec * NANOSEC
            return cpu_time
        # Closures keep the ctypes wrappers alive without storing the libs.
        self._get_current_thread_id = get_current_thread_id
        self._get_thread_cpu_time = get_thread_cpu_time

    def get_current_thread_id(self):
        return self._get_current_thread_id()

    def get_thread_cpu_time(self, thread_id=None):
        # BUG FIX: this method was previously defined without `self`, so an
        # instance call bound the instance to thread_id and broke lookups.
        return self._get_thread_cpu_time(thread_id)
import sys
# Pick the per-thread CPU time driver for this platform.
if sys.platform == 'darwin':
    thread_platform = MacPThreadPlatform()
elif sys.platform.startswith('linux'):
    # BUG FIX: Python 2 reports 'linux2'/'linux3', so an exact match on
    # 'linux' never selected the Linux driver; match the prefix instead
    # (also correct on Python 3, which reports 'linux').
    thread_platform = LinuxPThreadPlatform()
# TODO: Windows support
else:
    try:
        import thread
    except ImportError:
        pass
    else:
        import warnings
        warnings.warn('Multi-threaded CPU times not supported on this platform!')
    thread_platform = SingleThreadedPlatform()
# - Sample data ----------------------------------------------------------------
import collections
# One entry of a recorded call stack: the kind of entry ('func', 'thread',
# ...), plus the identifying name/file/line and optional extra data.
StackLine = collections.namedtuple('StackLine', ['type', 'name', 'file', 'line', 'data'])

def stack_line_from_frame(frame, stype='func', data=None):
    """Build a StackLine describing the function that owns *frame*.

    Uses co_firstlineno, so the entry identifies the function, not the
    currently-executing line.
    """
    code = frame.f_code
    return StackLine(
        stype,
        code.co_name,
        code.co_filename,
        code.co_firstlineno,
        data,
    )
class SampleData(object):
    """Accumulated sample counters for one unique stack."""

    __slots__ = ['rtime', 'cputime', 'ticks']

    def __init__(self):
        self.rtime = 0.0    # Real / wall-clock time
        self.cputime = 0.0  # User CPU time (single thread)
        self.ticks = 0      # Actual number of samples

    def __str__(self):
        return 'SampleData<r=%.3f, cpu=%.3f, t=%d>' % (
            self.rtime,
            self.cputime,
            self.ticks,
        )

    def __repr__(self):
        return str(self)
class RawProfileData(object):
    """Deduplicated raw sample storage.

    StackLines and stack tuples are interned to small integer IDs so each
    unique call stack is stored once; per-stack counters live in SampleData.
    """
    def __init__(self):
        self.stack_line_id_map = {} # Maps StackLines to IDs
        self.stack_tuple_id_map = {} # Map tuples of StackLine IDs to IDs
        self.stack_data = {} # Maps stack ID tuples to SampleData
        self.time_running = 0.0 # Total amount of time sampling has been active
        self.total_ticks = 0 # Total number of samples we've taken
    def add_sample_data(self, stack_list, rtime, cputime, ticks):
        """Accumulate one sample for *stack_list* (a list of StackLines)."""
        sm = self.stack_line_id_map
        sd = self.stack_line_id_map.setdefault
        # Intern each StackLine: unseen lines get the next sequential ID.
        stack_tuple = tuple(
            sd(stack_line, len(sm))
            for stack_line in stack_list
        )
        # Intern the whole ID tuple the same way.
        stack_tuple_id = self.stack_tuple_id_map.setdefault(
            stack_tuple,
            len(self.stack_tuple_id_map),
        )
        if stack_tuple_id in self.stack_data:
            sample_data = self.stack_data[stack_tuple_id]
        else:
            sample_data = self.stack_data[stack_tuple_id] = SampleData()
        sample_data.rtime += rtime
        sample_data.cputime += cputime
        sample_data.ticks += ticks
        self.total_ticks += ticks
    def dump(self, sort='rtime'):
        """Print a flat summary, ordered descending by *sort* (a SampleData field)."""
        assert sort in SampleData.__slots__
        # Quick util function to dump raw data in a vaguely-useful format
        # TODO: replace with proper text exporter with sort parameters, etc.
        print '%s:\n\n %d samples taken in %.3fs:\n' % (
            self.__class__.__name__,
            self.total_ticks,
            self.time_running,
        )
        print ' Ordered by: %s\n' % sort
        # Invert stack -> ID map
        stack_line_map = dict([
            (v, k)
            for k, v
            in self.stack_line_id_map.items()
        ])
        stack_map = dict([
            (v, k)
            for k, v
            in self.stack_tuple_id_map.items()
        ])
        lines = [
            (getattr(sample_data, sort), stack_id, sample_data)
            for stack_id, sample_data
            in self.stack_data.items()
        ]
        lines.sort()
        lines.reverse()
        print ' ticks rtime cputime filename:lineno(function)'
        for _, stack_id, sample_data in lines:
            stack = stack_map[stack_id]
            # The leaf (innermost) frame is the first element of the tuple.
            stack_line = stack_line_map[stack[0]]
            print ' %7d % 8.3f % 8.3f %s:%d(%s) : %r' % (
                sample_data.ticks,
                sample_data.rtime,
                sample_data.cputime,
                os.path.basename(stack_line.file),
                stack_line.line,
                stack_line.name,
                stack,
            )
        print
class ThreadClock(object):
    """Last-observed wall-clock and CPU-clock readings for one thread."""

    __slots__ = ['rtime', 'cputime']

    def __init__(self):
        self.rtime = 0.0
        self.cputime = 0.0
class Profiler(object):
    """Statistical sampling profiler.

    A scheduler periodically calls sample(), which records the current
    stack of every thread together with the wall-clock and per-thread CPU
    time elapsed since the previous tick.
    """

    _scheduler_map = {
        'signal':SignalIntervalScheduler,
        'thread':ThreadIntervalScheduler
    }

    def __init__(
        self,
        scheduler_type='signal', # Which scheduler to use
        collect_stacks=True, # Collect full call-tree data?
        rate=None,           # Target samples/sec (scheduler default if None)
        stochastic=False,    # Randomize intervals to avoid aliasing
    ):
        self.collect_stacks = collect_stacks
        assert (
            scheduler_type in self._scheduler_map
            or isinstance(scheduler_type, IntervalScheduler)
        ), 'Unknown scheduler type'
        self.scheduler_type = scheduler_type
        if isinstance(scheduler_type, str):
            scheduler_type = self._scheduler_map[scheduler_type]
        if rate is None:
            rate = scheduler_type.default_rate
        self._scheduler = scheduler_type(
            self.sample,
            interval=1.0/rate,
            stochastic=stochastic,
        )
        self.reset()

    def reset(self):
        """Discard all collected data and zero the clocks."""
        self._profile_data = RawProfileData()
        self._thread_clocks = {} # Maps from thread ID to ThreadClock
        self._last_tick = 0
        self.total_samples = 0
        self.sampling_time = 0.0
        self._empty_stack = [StackLine(None, 'null', '', 0, None)]
        self._start_time = 0.0

    def sample(self, _interrupted_frame=None):
        """Record one sample for every live thread (called by the scheduler)."""
        sample_time = time.time()
        current_frames = sys._current_frames()
        current_thread = thread.get_ident()
        for thread_ident, frame in current_frames.items():
            if thread_ident == current_thread:
                # Don't profile the profiler itself: attribute this tick to
                # the frame the scheduler interrupted (None if nothing was).
                frame = _interrupted_frame
            if frame is not None:
                stack = [stack_line_from_frame(frame)]
                if self.collect_stacks:
                    frame = frame.f_back
                    while frame is not None:
                        stack.append(stack_line_from_frame(frame))
                        frame = frame.f_back
                stack.append(StackLine('thread', str(thread_ident), '', 0, None)) # todo: include thread name?
                # todo: include PID?
                # todo: include custom metadata/labels?
                if thread_ident in self._thread_clocks:
                    thread_clock = self._thread_clocks[thread_ident]
                else:
                    thread_clock = self._thread_clocks[thread_ident] = ThreadClock()
                cputime = thread_platform.get_thread_cpu_time(thread_ident)
                # BUG FIX: reset() initializes self._last_tick, but this method
                # (and start()) previously used self.last_tick — a different
                # attribute — so reset() never actually reset the tick clock.
                # Unified on the private name throughout.
                self._profile_data.add_sample_data(
                    stack,
                    sample_time - self._last_tick,
                    cputime - thread_clock.cputime,
                    1
                )
                thread_clock.cputime = cputime
            else:
                self._profile_data.add_sample_data(
                    self._empty_stack, sample_time - self._last_tick, 0.0, 1
                )
        self._last_tick = sample_time
        self.total_samples += 1
        self.sampling_time += time.time() - sample_time

    def start(self):
        """Start sampling, snapshotting per-thread CPU clocks first."""
        import threading
        # reset thread clocks... (loop var renamed so it no longer shadows
        # the module-level `thread` module used by sample())
        self._thread_clocks = {}
        for t in threading.enumerate():
            thread_clock = ThreadClock()
            self._thread_clocks[t.ident] = thread_clock
            cputime = thread_platform.get_thread_cpu_time(t.ident)
            thread_clock.cputime = cputime
        self._start_time = self._last_tick = time.time()
        self._scheduler.start()

    @contextmanager
    def activated(self):
        """Context manager: the profiler runs for the duration of the block."""
        try:
            self.start()
            yield self
        finally:
            self.stop()

    def stop(self):
        """Stop sampling and fold the elapsed time into time_running."""
        self._scheduler.stop()
        self._profile_data.time_running += time.time() - self._start_time
        self._start_time = 0.0
def busy(rate=100):
    """Profile an idle sleep loop at *rate* Hz until Ctrl-C; returns the
    stopped Profiler so its data can be inspected."""
    import time
    prof = Profiler(rate=rate)
    with prof.activated():
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            pass
    return prof
| |
"""Support for Hass.io."""
from datetime import timedelta
import logging
import os
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.homeassistant import SERVICE_CHECK_CONFIG
import homeassistant.config as conf_util
from homeassistant.const import (
ATTR_NAME,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_HOMEASSISTANT_STOP,
EVENT_CORE_CONFIG_UPDATE,
)
from homeassistant.core import DOMAIN as HASS_DOMAIN, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from homeassistant.util.dt import utcnow
from .auth import async_setup_auth_view
from .addon_panel import async_setup_addon_panel
from .discovery import async_setup_discovery_view
from .handler import HassIO, HassioAPIError
from .http import HassIOView
from .ingress import async_setup_ingress_view
_LOGGER = logging.getLogger(__name__)
DOMAIN = "hassio"
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
CONF_FRONTEND_REPO = "development_repo"
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): vol.Schema({vol.Optional(CONF_FRONTEND_REPO): cv.isdir})},
extra=vol.ALLOW_EXTRA,
)
DATA_HOMEASSISTANT_VERSION = "hassio_hass_version"
HASSIO_UPDATE_INTERVAL = timedelta(minutes=55)
SERVICE_ADDON_START = "addon_start"
SERVICE_ADDON_STOP = "addon_stop"
SERVICE_ADDON_RESTART = "addon_restart"
SERVICE_ADDON_STDIN = "addon_stdin"
SERVICE_HOST_SHUTDOWN = "host_shutdown"
SERVICE_HOST_REBOOT = "host_reboot"
SERVICE_SNAPSHOT_FULL = "snapshot_full"
SERVICE_SNAPSHOT_PARTIAL = "snapshot_partial"
SERVICE_RESTORE_FULL = "restore_full"
SERVICE_RESTORE_PARTIAL = "restore_partial"
ATTR_ADDON = "addon"
ATTR_INPUT = "input"
ATTR_SNAPSHOT = "snapshot"
ATTR_ADDONS = "addons"
ATTR_FOLDERS = "folders"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_PASSWORD = "password"
SCHEMA_NO_DATA = vol.Schema({})
SCHEMA_ADDON = vol.Schema({vol.Required(ATTR_ADDON): cv.slug})
SCHEMA_ADDON_STDIN = SCHEMA_ADDON.extend(
{vol.Required(ATTR_INPUT): vol.Any(dict, cv.string)}
)
SCHEMA_SNAPSHOT_FULL = vol.Schema(
{vol.Optional(ATTR_NAME): cv.string, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_SNAPSHOT_PARTIAL = SCHEMA_SNAPSHOT_FULL.extend(
{
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
SCHEMA_RESTORE_FULL = vol.Schema(
{vol.Required(ATTR_SNAPSHOT): cv.slug, vol.Optional(ATTR_PASSWORD): cv.string}
)
SCHEMA_RESTORE_PARTIAL = SCHEMA_RESTORE_FULL.extend(
{
vol.Optional(ATTR_HOMEASSISTANT): cv.boolean,
vol.Optional(ATTR_FOLDERS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_ADDONS): vol.All(cv.ensure_list, [cv.string]),
}
)
# Maps each hassio service name to a tuple of:
#   (supervisor API endpoint template, service-call schema,
#    request timeout in seconds, whether the validated service data
#    is forwarded as the API payload).
MAP_SERVICE_API = {
    SERVICE_ADDON_START: ("/addons/{addon}/start", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_STOP: ("/addons/{addon}/stop", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_RESTART: ("/addons/{addon}/restart", SCHEMA_ADDON, 60, False),
    SERVICE_ADDON_STDIN: ("/addons/{addon}/stdin", SCHEMA_ADDON_STDIN, 60, False),
    SERVICE_HOST_SHUTDOWN: ("/host/shutdown", SCHEMA_NO_DATA, 60, False),
    SERVICE_HOST_REBOOT: ("/host/reboot", SCHEMA_NO_DATA, 60, False),
    SERVICE_SNAPSHOT_FULL: ("/snapshots/new/full", SCHEMA_SNAPSHOT_FULL, 300, True),
    SERVICE_SNAPSHOT_PARTIAL: (
        "/snapshots/new/partial",
        SCHEMA_SNAPSHOT_PARTIAL,
        300,
        True,
    ),
    SERVICE_RESTORE_FULL: (
        "/snapshots/{snapshot}/restore/full",
        SCHEMA_RESTORE_FULL,
        300,
        True,
    ),
    SERVICE_RESTORE_PARTIAL: (
        "/snapshots/{snapshot}/restore/partial",
        SCHEMA_RESTORE_PARTIAL,
        300,
        True,
    ),
}
@callback
@bind_hass
def get_homeassistant_version(hass):
    """Return the latest available Home Assistant version, if known.

    Async friendly. Returns None until the periodic supervisor poll has
    stored a version in hass.data.
    """
    return hass.data.get(DATA_HOMEASSISTANT_VERSION)
@callback
@bind_hass
def is_hassio(hass):
    """Return True when the hassio integration has been set up.

    Async friendly.
    """
    return DOMAIN in hass.config.components
async def async_setup(hass, config):
"""Set up the Hass.io component."""
# Check local setup
for env in ("HASSIO", "HASSIO_TOKEN"):
if os.environ.get(env):
continue
_LOGGER.error("Missing %s environment variable.", env)
return False
host = os.environ["HASSIO"]
websession = hass.helpers.aiohttp_client.async_get_clientsession()
hass.data[DOMAIN] = hassio = HassIO(hass.loop, websession, host)
if not await hassio.is_connected():
_LOGGER.warning("Not connected with Hass.io / system to busy!")
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
data = await store.async_load()
if data is None:
data = {}
refresh_token = None
if "hassio_user" in data:
user = await hass.auth.async_get_user(data["hassio_user"])
if user and user.refresh_tokens:
refresh_token = list(user.refresh_tokens.values())[0]
# Migrate old hass.io users to be admin.
if not user.is_admin:
await hass.auth.async_update_user(user, group_ids=[GROUP_ID_ADMIN])
if refresh_token is None:
user = await hass.auth.async_create_system_user("Hass.io", [GROUP_ID_ADMIN])
refresh_token = await hass.auth.async_create_refresh_token(user)
data["hassio_user"] = user.id
await store.async_save(data)
# This overrides the normal API call that would be forwarded
development_repo = config.get(DOMAIN, {}).get(CONF_FRONTEND_REPO)
if development_repo is not None:
hass.http.register_static_path(
"/api/hassio/app", os.path.join(development_repo, "hassio/build"), False
)
hass.http.register_view(HassIOView(host, websession))
if "frontend" in hass.config.components:
await hass.components.panel_custom.async_register_panel(
frontend_url_path="hassio",
webcomponent_name="hassio-main",
sidebar_title="Hass.io",
sidebar_icon="hass:home-assistant",
js_url="/api/hassio/app/entrypoint.js",
embed_iframe=True,
require_admin=True,
)
await hassio.update_hass_api(config.get("http", {}), refresh_token.token)
async def push_config(_):
"""Push core config to Hass.io."""
await hassio.update_hass_timezone(str(hass.config.time_zone))
hass.bus.async_listen(EVENT_CORE_CONFIG_UPDATE, push_config)
await push_config(None)
async def async_service_handler(service):
"""Handle service calls for Hass.io."""
api_command = MAP_SERVICE_API[service.service][0]
data = service.data.copy()
addon = data.pop(ATTR_ADDON, None)
snapshot = data.pop(ATTR_SNAPSHOT, None)
payload = None
# Pass data to hass.io API
if service.service == SERVICE_ADDON_STDIN:
payload = data[ATTR_INPUT]
elif MAP_SERVICE_API[service.service][3]:
payload = data
# Call API
try:
await hassio.send_command(
api_command.format(addon=addon, snapshot=snapshot),
payload=payload,
timeout=MAP_SERVICE_API[service.service][2],
)
except HassioAPIError as err:
_LOGGER.error("Error on Hass.io API: %s", err)
for service, settings in MAP_SERVICE_API.items():
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=settings[1]
)
async def update_homeassistant_version(now):
"""Update last available Home Assistant version."""
try:
data = await hassio.get_homeassistant_info()
hass.data[DATA_HOMEASSISTANT_VERSION] = data["last_version"]
except HassioAPIError as err:
_LOGGER.warning("Can't read last version: %s", err)
hass.helpers.event.async_track_point_in_utc_time(
update_homeassistant_version, utcnow() + HASSIO_UPDATE_INTERVAL
)
# Fetch last version
await update_homeassistant_version(None)
    async def async_handle_core_service(call):
        """Service handler for handling core services.

        Routes homeassistant.stop/restart/check_config to the supervisor.
        A restart is only issued after the config check passes; stop is
        immediate.
        """
        if call.service == SERVICE_HOMEASSISTANT_STOP:
            await hassio.stop_homeassistant()
            return
        # Validate configuration before a (re)start. If the check itself
        # errors out we silently abort rather than risk a broken restart.
        try:
            errors = await conf_util.async_check_ha_config_file(hass)
        except HomeAssistantError:
            return
        if errors:
            _LOGGER.error(errors)
            hass.components.persistent_notification.async_create(
                "Config error. See [the logs](/developer-tools/logs) for details.",
                "Config validating",
                f"{HASS_DOMAIN}.check_config",
            )
            return
        if call.service == SERVICE_HOMEASSISTANT_RESTART:
            await hassio.restart_homeassistant()
# Mock core services
for service in (
SERVICE_HOMEASSISTANT_STOP,
SERVICE_HOMEASSISTANT_RESTART,
SERVICE_CHECK_CONFIG,
):
hass.services.async_register(HASS_DOMAIN, service, async_handle_core_service)
# Init discovery Hass.io feature
async_setup_discovery_view(hass, hassio)
# Init auth Hass.io feature
async_setup_auth_view(hass)
# Init ingress Hass.io feature
async_setup_ingress_view(hass, host)
# Init add-on ingress panels
await async_setup_addon_panel(hass, hassio)
return True
| |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Bigtable HappyBase connection module."""
import warnings
import six
from gcloud.bigtable.client import Client
from gcloud.bigtable.happybase.table import Table
from gcloud.bigtable.table import Table as _LowLevelTable
# Constants reproduced here for HappyBase compatibility, though values
# are all null. Callers that inspect these HappyBase module attributes
# will find them defined, but they have no effect on Cloud Bigtable.
COMPAT_MODES = None
THRIFT_TRANSPORTS = None
THRIFT_PROTOCOLS = None
DEFAULT_HOST = None
DEFAULT_PORT = None
DEFAULT_TRANSPORT = None
DEFAULT_COMPAT = None
DEFAULT_PROTOCOL = None
# Legacy HappyBase constructor keywords that are accepted but unused;
# Connection._handle_legacy_args warns when any of these are passed.
_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol'))
# Module-level alias for warnings.warn; presumably kept as an indirection
# so tests can stub the warning — TODO confirm.
_WARN = warnings.warn
def _get_cluster(timeout=None):
    """Return the single cluster owned by the default project.

    A throwaway admin :class:`.Client` is built from the implicit
    environment credentials/project and used to list clusters. The lookup
    only succeeds when exactly one cluster is visible and no zones failed.

    :type timeout: int
    :param timeout: (Optional) The socket timeout in milliseconds.

    :rtype: :class:`gcloud.bigtable.cluster.Cluster`
    :returns: The unique cluster owned by the project inferred from
              the environment.
    :raises: :class:`ValueError <exceptions.ValueError>` if there is a failed
             zone or any number of clusters other than one.
    """
    constructor_kwargs = {'admin': True}
    if timeout is not None:
        # Client expects seconds; the HappyBase API hands us milliseconds.
        constructor_kwargs['timeout_seconds'] = timeout / 1000.0
    client = Client(**constructor_kwargs)
    try:
        client.start()
        clusters, failed_zones = client.list_clusters()
    finally:
        # Always tear the temporary client down, even on failure.
        client.stop()

    if failed_zones:
        raise ValueError('Determining cluster via ListClusters encountered '
                         'failed zones.')
    num_clusters = len(clusters)
    if num_clusters == 0:
        raise ValueError('This client doesn\'t have access to any clusters.')
    if num_clusters > 1:
        raise ValueError('This client has access to more than one cluster. '
                         'Please directly pass the cluster you\'d '
                         'like to use.')
    return clusters[0]
class Connection(object):
    """Connection to Cloud Bigtable backend.

    .. note::

        If you pass a ``cluster``, it will be :meth:`.Cluster.copy`-ed before
        being stored on the new connection. This also copies the
        :class:`.Client` that created the :class:`.Cluster` instance and the
        :class:`Credentials <oauth2client.client.Credentials>` stored on the
        client.

    The arguments ``host``, ``port``, ``compat``, ``transport`` and
    ``protocol`` are allowed (as keyword arguments) for compatibility with
    HappyBase. However, they will not be used in anyway, and will cause a
    warning if passed.

    :type timeout: int
    :param timeout: (Optional) The socket timeout in milliseconds.

    :type autoconnect: bool
    :param autoconnect: (Optional) Whether the connection should be
                        :meth:`open`-ed during construction.

    :type table_prefix: str
    :param table_prefix: (Optional) Prefix used to construct table names.

    :type table_prefix_separator: str
    :param table_prefix_separator: (Optional) Separator used with
                                   ``table_prefix``. Defaults to ``_``.

    :type cluster: :class:`gcloud.bigtable.cluster.Cluster`
    :param cluster: (Optional) A Cloud Bigtable cluster. The instance also
                    owns a client for making gRPC requests to the Cloud
                    Bigtable API. If not passed in, defaults to creating client
                    with ``admin=True`` and using the ``timeout`` here for the
                    ``timeout_seconds`` argument to the :class:`.Client``
                    constructor. The credentials for the client
                    will be the implicit ones loaded from the environment.
                    Then that client is used to retrieve all the clusters
                    owned by the client's project.

    :type kwargs: dict
    :param kwargs: Remaining keyword arguments. Provided for HappyBase
                   compatibility.

    :raises: :class:`ValueError <exceptions.ValueError>` if any of the unused
             parameters are specified with a value other than the defaults.
    """

    # Class-level default so __del__ can safely test the attribute even if
    # __init__ raised before assigning the instance copy.
    _cluster = None

    def __init__(self, timeout=None, autoconnect=True, table_prefix=None,
                 table_prefix_separator='_', cluster=None, **kwargs):
        self._handle_legacy_args(kwargs)
        if table_prefix is not None:
            if not isinstance(table_prefix, six.string_types):
                raise TypeError('table_prefix must be a string', 'received',
                                table_prefix, type(table_prefix))
            # NOTE(review): the separator is only type-checked when a prefix
            # is given; a bad separator with no prefix passes silently.
            if not isinstance(table_prefix_separator, six.string_types):
                raise TypeError('table_prefix_separator must be a string',
                                'received', table_prefix_separator,
                                type(table_prefix_separator))
        self.table_prefix = table_prefix
        self.table_prefix_separator = table_prefix_separator
        if cluster is None:
            # Infer the sole cluster for the environment's default project.
            self._cluster = _get_cluster(timeout=timeout)
        else:
            if timeout is not None:
                raise ValueError('Timeout cannot be used when an existing '
                                 'cluster is passed')
            # Copy so this connection owns its cluster/client/credentials.
            self._cluster = cluster.copy()
        if autoconnect:
            self.open()
        # Marker that construction completed; not read within this class.
        self._initialized = True

    @staticmethod
    def _handle_legacy_args(arguments_dict):
        """Check legacy HappyBase arguments and warn if set.

        Recognized legacy keywords are warned about and removed (mutating
        ``arguments_dict`` in place); anything left over is an error.

        :type arguments_dict: dict
        :param arguments_dict: Unused keyword arguments.

        :raises: :class:`TypeError <exceptions.TypeError>` if a keyword other
                 than ``host``, ``port``, ``compat``, ``transport`` or
                 ``protocol`` is used.
        """
        common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict))
        if common_args:
            all_args = ', '.join(common_args)
            message = ('The HappyBase legacy arguments %s were used. These '
                       'arguments are unused by gcloud.' % (all_args,))
            _WARN(message)
        for arg_name in common_args:
            arguments_dict.pop(arg_name)
        if arguments_dict:
            unexpected_names = arguments_dict.keys()
            raise TypeError('Received unexpected arguments', unexpected_names)

    def open(self):
        """Open the underlying transport to Cloud Bigtable.

        This method opens the underlying HTTP/2 gRPC connection using a
        :class:`.Client` bound to the :class:`.Cluster` owned by
        this connection.
        """
        # Reaches through the cluster to its private client handle.
        self._cluster._client.start()

    def close(self):
        """Close the underlying transport to Cloud Bigtable.

        This method closes the underlying HTTP/2 gRPC connection using a
        :class:`.Client` bound to the :class:`.Cluster` owned by
        this connection.
        """
        self._cluster._client.stop()

    def __del__(self):
        # Best-effort cleanup at garbage collection; guarded because
        # _cluster may still be the class-level None if __init__ failed.
        if self._cluster is not None:
            self.close()

    def _table_name(self, name):
        """Construct a table name by optionally adding a table name prefix.

        :type name: str
        :param name: The name to have a prefix added to it.

        :rtype: str
        :returns: The prefixed name, if the current connection has a table
                  prefix set.
        """
        if self.table_prefix is None:
            return name
        return self.table_prefix + self.table_prefix_separator + name

    def table(self, name, use_prefix=True):
        """Table factory.

        :type name: str
        :param name: The name of the table to be created.

        :type use_prefix: bool
        :param use_prefix: Whether to use the table prefix (if any).

        :rtype: `Table <gcloud.bigtable.happybase.table.Table>`
        :returns: Table instance owned by this connection.
        """
        if use_prefix:
            name = self._table_name(name)
        return Table(name, self)

    def tables(self):
        """Return a list of table names available to this connection.

        .. note::

            This lists every table in the cluster owned by this connection,
            **not** every table that a given user may have access to.

        .. note::

            If ``table_prefix`` is set on this connection, only returns the
            table names which match that prefix.

        :rtype: list
        :returns: List of string table names.
        """
        low_level_table_instances = self._cluster.list_tables()
        table_names = [table_instance.table_id
                       for table_instance in low_level_table_instances]
        # Filter using prefix, and strip prefix from names
        if self.table_prefix is not None:
            prefix = self._table_name('')
            offset = len(prefix)
            table_names = [name[offset:] for name in table_names
                           if name.startswith(prefix)]
        return table_names

    def delete_table(self, name, disable=False):
        """Delete the specified table.

        :type name: str
        :param name: The name of the table to be deleted. If ``table_prefix``
                     is set, a prefix will be added to the ``name``.

        :type disable: bool
        :param disable: Whether to first disable the table if needed. This
                        is provided for compatibility with HappyBase, but is
                        not relevant for Cloud Bigtable since it has no concept
                        of enabled / disabled tables.

        :raises: :class:`ValueError <exceptions.ValueError>`
                 if ``disable=True``.
        """
        if disable:
            raise ValueError('The disable argument should not be used in '
                             'delete_table(). Cloud Bigtable has no concept '
                             'of enabled / disabled tables.')
        name = self._table_name(name)
        _LowLevelTable(name, self._cluster).delete()

    def enable_table(self, name):
        """Enable the specified table.

        Cloud Bigtable has no concept of enabled / disabled tables so this
        method does not work. It is provided simply for compatibility.

        :raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
                 always
        """
        raise NotImplementedError('The Cloud Bigtable API has no concept of '
                                  'enabled or disabled tables.')

    def disable_table(self, name):
        """Disable the specified table.

        Cloud Bigtable has no concept of enabled / disabled tables so this
        method does not work. It is provided simply for compatibility.

        :raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
                 always
        """
        raise NotImplementedError('The Cloud Bigtable API has no concept of '
                                  'enabled or disabled tables.')

    def is_table_enabled(self, name):
        """Return whether the specified table is enabled.

        Cloud Bigtable has no concept of enabled / disabled tables so this
        method does not work. It is provided simply for compatibility.

        :raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
                 always
        """
        raise NotImplementedError('The Cloud Bigtable API has no concept of '
                                  'enabled or disabled tables.')

    def compact_table(self, name, major=False):
        """Compact the specified table.

        Cloud Bigtable does not support compacting a table, so this
        method does not work. It is provided simply for compatibility.

        :raises: :class:`NotImplementedError <exceptions.NotImplementedError>`
                 always
        """
        raise NotImplementedError('The Cloud Bigtable API does not support '
                                  'compacting a table.')
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SimpleRNN layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training.rmsprop import RMSPropOptimizer
@keras_parameterized.run_all_keras_modes
class SimpleRNNLayerTest(keras_parameterized.TestCase):
  """Tests for keras.layers.SimpleRNN, run across all Keras execution modes."""

  def test_return_sequences_SimpleRNN(self):
    # layer_test builds/runs the layer and checks output shape & serialization.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.SimpleRNN,
        kwargs={'units': units,
                'return_sequences': True},
        input_shape=(num_samples, timesteps, embedding_dim))

  def test_dynamic_behavior_SimpleRNN(self):
    # Input with unknown time dimension (None) must still train.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer = keras.layers.SimpleRNN(units, input_shape=(None, embedding_dim))
    model = keras.models.Sequential()
    model.add(layer)
    model.compile(RMSPropOptimizer(0.01), 'mse')
    x = np.random.random((num_samples, timesteps, embedding_dim))
    y = np.random.random((num_samples, units))
    model.train_on_batch(x, y)

  def test_dropout_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    testing_utils.layer_test(
        keras.layers.SimpleRNN,
        kwargs={'units': units,
                'dropout': 0.1,
                'recurrent_dropout': 0.1},
        input_shape=(num_samples, timesteps, embedding_dim))

  def test_implementation_mode_SimpleRNN(self):
    # All implementation modes must produce a working layer.
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    for mode in [0, 1, 2]:
      testing_utils.layer_test(
          keras.layers.SimpleRNN,
          kwargs={'units': units,
                  'implementation': mode},
          input_shape=(num_samples, timesteps, embedding_dim))

  def test_constraints_SimpleRNN(self):
    # Constraints passed to the layer must land on the cell's weights.
    embedding_dim = 4
    layer_class = keras.layers.SimpleRNN
    k_constraint = keras.constraints.max_norm(0.01)
    r_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_constraint=k_constraint,
        recurrent_constraint=r_constraint,
        bias_constraint=b_constraint)
    layer.build((None, None, embedding_dim))
    self.assertEqual(layer.cell.kernel.constraint, k_constraint)
    self.assertEqual(layer.cell.recurrent_kernel.constraint, r_constraint)
    self.assertEqual(layer.cell.bias.constraint, b_constraint)

  def test_with_masking_layer_SimpleRNN(self):
    # SimpleRNN must train when stacked after a Masking layer.
    layer_class = keras.layers.SimpleRNN
    inputs = np.random.random((2, 3, 4))
    targets = np.abs(np.random.random((2, 3, 5)))
    targets /= targets.sum(axis=-1, keepdims=True)
    model = keras.models.Sequential()
    model.add(keras.layers.Masking(input_shape=(3, 4)))
    model.add(layer_class(units=5, return_sequences=True, unroll=False))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(0.01))
    model.fit(inputs, targets, epochs=1, batch_size=2, verbose=1)

  def test_from_config_SimpleRNN(self):
    # Round-trip through get_config/from_config must preserve the config.
    layer_class = keras.layers.SimpleRNN
    for stateful in (False, True):
      l1 = layer_class(units=1, stateful=stateful)
      l2 = layer_class.from_config(l1.get_config())
      assert l1.get_config() == l2.get_config()

  def test_statefulness_SimpleRNN(self):
    num_samples = 2
    timesteps = 3
    embedding_dim = 4
    units = 2
    layer_class = keras.layers.SimpleRNN
    model = keras.models.Sequential()
    model.add(
        keras.layers.Embedding(
            4,
            embedding_dim,
            mask_zero=True,
            input_length=timesteps,
            batch_input_shape=(num_samples, timesteps)))
    layer = layer_class(
        units, return_sequences=False, stateful=True, weights=None)
    model.add(layer)
    model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
                  loss='mse')
    out1 = model.predict(np.ones((num_samples, timesteps)))
    self.assertEqual(out1.shape, (num_samples, units))
    # train once so that the states change
    model.train_on_batch(
        np.ones((num_samples, timesteps)), np.ones((num_samples, units)))
    out2 = model.predict(np.ones((num_samples, timesteps)))
    # if the state is not reset, output should be different
    self.assertNotEqual(out1.max(), out2.max())
    # check that output changes after states are reset
    # (even though the model itself didn't change)
    layer.reset_states()
    out3 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out2.max(), out3.max())
    # check that container-level reset_states() works
    model.reset_states()
    out4 = model.predict(np.ones((num_samples, timesteps)))
    np.testing.assert_allclose(out3, out4, atol=1e-5)
    # check that the call to `predict` updated the states
    out5 = model.predict(np.ones((num_samples, timesteps)))
    self.assertNotEqual(out4.max(), out5.max())
    # Check masking: zeros are masked by the Embedding layer (mask_zero=True),
    # so left- and right-padded inputs must yield the same final output.
    layer.reset_states()
    left_padded_input = np.ones((num_samples, timesteps))
    left_padded_input[0, :1] = 0
    left_padded_input[1, :2] = 0
    out6 = model.predict(left_padded_input)
    layer.reset_states()
    right_padded_input = np.ones((num_samples, timesteps))
    right_padded_input[0, -1:] = 0
    right_padded_input[1, -2:] = 0
    out7 = model.predict(right_padded_input)
    np.testing.assert_allclose(out7, out6, atol=1e-5)
class SimpleRNNLayerGraphOnlyTest(test.TestCase):
  """SimpleRNN tests that only run in graph (TF1) mode."""

  # b/120919032
  @tf_test_util.run_deprecated_v1
  def test_regularizers_SimpleRNN(self):
    embedding_dim = 4
    layer_class = keras.layers.SimpleRNN
    layer = layer_class(
        5,
        return_sequences=False,
        weights=None,
        input_shape=(None, embedding_dim),
        kernel_regularizer=keras.regularizers.l1(0.01),
        recurrent_regularizer=keras.regularizers.l1(0.01),
        bias_regularizer='l2',
        activity_regularizer='l1')
    # NOTE(review): build/input use feature size 2 while embedding_dim is 4;
    # internally consistent, but the unused embedding_dim is misleading.
    layer.build((None, None, 2))
    # Three weight regularizers (kernel, recurrent, bias) -> three losses.
    self.assertEqual(len(layer.losses), 3)
    x = keras.backend.variable(np.ones((2, 3, 2)))
    layer(x)
    # The activity regularizer adds one loss tied to this input.
    self.assertEqual(len(layer.get_losses_for(x)), 1)
# Standard TF test entry point.
if __name__ == '__main__':
  test.main()
| |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a backup service that uses Swift as the backend
**Related Flags**
:backup_swift_url: The URL of the Swift endpoint (default: None, use catalog).
:swift_catalog_info: Info to match when looking for swift in the service
                     catalog.
:backup_swift_object_size: The size in bytes of the Swift objects used
for volume backups (default: 52428800).
:backup_swift_retry_attempts: The number of retries to make for Swift
                              operations (default: 3).
:backup_swift_retry_backoff: The backoff time in seconds between retrying
                             failed Swift operations (default: 2).
:backup_compression_algorithm: Compression algorithm to use for volume
backups. Supported options are:
None (to disable), zlib and bz2 (default: zlib)
"""
import hashlib
import json
import os
import socket
import eventlet
from oslo.utils import excutils
from oslo.utils import timeutils
from oslo.utils import units
from oslo_config import cfg
import six
from swiftclient import client as swift
from cinder.backup.driver import BackupDriver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
swiftbackup_service_opts = [
cfg.StrOpt('backup_swift_url',
default=None,
help='The URL of the Swift endpoint'),
cfg.StrOpt('swift_catalog_info',
default='object-store:swift:publicURL',
help='Info to match when looking for swift in the service '
'catalog. Format is: separated values of the form: '
'<service_type>:<service_name>:<endpoint_type> - '
'Only used if backup_swift_url is unset'),
cfg.StrOpt('backup_swift_auth',
default='per_user',
help='Swift authentication mechanism'),
cfg.StrOpt('backup_swift_auth_version',
default='1',
help='Swift authentication version. Specify "1" for auth 1.0'
', or "2" for auth 2.0'),
cfg.StrOpt('backup_swift_tenant',
default=None,
help='Swift tenant/account name. Required when connecting'
' to an auth 2.0 system'),
cfg.StrOpt('backup_swift_user',
default=None,
help='Swift user name'),
cfg.StrOpt('backup_swift_key',
default=None,
help='Swift key for authentication'),
cfg.StrOpt('backup_swift_container',
default='volumebackups',
help='The default Swift container to use'),
cfg.IntOpt('backup_swift_object_size',
default=52428800,
help='The size in bytes of Swift backup objects'),
cfg.IntOpt('backup_swift_retry_attempts',
default=3,
help='The number of retries to make for Swift operations'),
cfg.IntOpt('backup_swift_retry_backoff',
default=2,
help='The backoff time in seconds between Swift retries'),
cfg.StrOpt('backup_compression_algorithm',
default='zlib',
help='Compression algorithm (None to disable)'),
cfg.BoolOpt('backup_swift_enable_progress_timer',
default=True,
help='Enable or Disable the timer to send the periodic '
'progress notifications to Ceilometer when backing '
'up the volume to the Swift backend storage. The '
'default value is True to enable the timer.'),
]
CONF = cfg.CONF
CONF.register_opts(swiftbackup_service_opts)
class SwiftBackupDriver(BackupDriver):
"""Provides backup, restore and delete of backup objects within Swift."""
DRIVER_VERSION = '1.0.0'
DRIVER_VERSION_MAPPING = {'1.0.0': '_restore_v1'}
def _get_compressor(self, algorithm):
try:
if algorithm.lower() in ('none', 'off', 'no'):
return None
elif algorithm.lower() in ('zlib', 'gzip'):
import zlib as compressor
return compressor
elif algorithm.lower() in ('bz2', 'bzip2'):
import bz2 as compressor
return compressor
except ImportError:
pass
err = _('unsupported compression algorithm: %s') % algorithm
raise ValueError(unicode(err))
    def __init__(self, context, db_driver=None):
        """Initialize the driver: resolve the Swift endpoint and connect.

        The endpoint comes either from the 'backup_swift_url' option
        (suffixed with the project id) or, when unset, from the request
        context's service catalog matched against 'swift_catalog_info'.
        """
        super(SwiftBackupDriver, self).__init__(context, db_driver)
        if CONF.backup_swift_url is None:
            self.swift_url = None
            info = CONF.swift_catalog_info
            try:
                service_type, service_name, endpoint_type = info.split(':')
            except ValueError:
                raise exception.BackupDriverException(_(
                    "Failed to parse the configuration option "
                    "'swift_catalog_info', must be in the form "
                    "<service_type>:<service_name>:<endpoint_type>"))
            # NOTE(review): no `break` here, so with multiple matching
            # catalog entries the last one wins; service_name is never
            # compared — confirm whether that is intended.
            for entry in context.service_catalog:
                if entry.get('type') == service_type:
                    self.swift_url = entry.get(
                        'endpoints')[0].get(endpoint_type)
        else:
            self.swift_url = '%s%s' % (CONF.backup_swift_url,
                                       context.project_id)
        if self.swift_url is None:
            raise exception.BackupDriverException(_(
                "Could not determine which Swift endpoint to use. This can "
                " either be set in the service catalog or with the "
                " cinder.conf config option 'backup_swift_url'."))
        LOG.debug("Using swift URL %s", self.swift_url)
        # Cache configuration used throughout the backup/restore paths.
        self.az = CONF.storage_availability_zone
        self.data_block_size_bytes = CONF.backup_swift_object_size
        self.backup_timer_interval = CONF.backup_timer_interval
        self.data_block_num = CONF.backup_object_number_per_notification
        self.enable_progress_timer = CONF.backup_swift_enable_progress_timer
        self.swift_attempts = CONF.backup_swift_retry_attempts
        self.swift_backoff = CONF.backup_swift_retry_backoff
        self.compressor = \
            self._get_compressor(CONF.backup_compression_algorithm)
        LOG.debug('Connect to %s in "%s" mode' % (CONF.backup_swift_url,
                                                  CONF.backup_swift_auth))
        if CONF.backup_swift_auth == 'single_user':
            # Single shared service account: authenticate with explicit
            # credentials from configuration.
            if CONF.backup_swift_user is None:
                LOG.error(_LE("single_user auth mode enabled, "
                              "but %(param)s not set")
                          % {'param': 'backup_swift_user'})
                raise exception.ParameterNotFound(param='backup_swift_user')
            self.conn = swift.Connection(
                authurl=CONF.backup_swift_url,
                auth_version=CONF.backup_swift_auth_version,
                tenant_name=CONF.backup_swift_tenant,
                user=CONF.backup_swift_user,
                key=CONF.backup_swift_key,
                retries=self.swift_attempts,
                starting_backoff=self.swift_backoff)
        else:
            # per_user mode: reuse the caller's token against the resolved
            # endpoint.
            self.conn = swift.Connection(retries=self.swift_attempts,
                                         preauthurl=self.swift_url,
                                         preauthtoken=self.context.auth_token,
                                         starting_backoff=self.swift_backoff)
def _create_container(self, context, backup):
backup_id = backup['id']
container = backup['container']
LOG.debug('_create_container started, container: %(container)s,'
'backup: %(backup_id)s' %
{'container': container, 'backup_id': backup_id})
if container is None:
container = CONF.backup_swift_container
self.db.backup_update(context, backup_id, {'container': container})
# NOTE(gfidente): accordingly to the Object Storage API reference, we
# do not need to check if a container already exists, container PUT
# requests are idempotent and a code of 202 (Accepted) is returned when
# the container already existed.
self.conn.put_container(container)
return container
def _generate_swift_object_name_prefix(self, backup):
az = 'az_%s' % self.az
backup_name = '%s_backup_%s' % (az, backup['id'])
volume = 'volume_%s' % (backup['volume_id'])
timestamp = timeutils.strtime(fmt="%Y%m%d%H%M%S")
prefix = volume + '/' + timestamp + '/' + backup_name
LOG.debug('_generate_swift_object_name_prefix: %s' % prefix)
return prefix
def _generate_object_names(self, backup):
prefix = backup['service_metadata']
swift_objects = self.conn.get_container(backup['container'],
prefix=prefix,
full_listing=True)[1]
swift_object_names = [swift_obj['name'] for swift_obj in swift_objects]
LOG.debug('generated object list: %s' % swift_object_names)
return swift_object_names
def _metadata_filename(self, backup):
swift_object_name = backup['service_metadata']
filename = '%s_metadata' % swift_object_name
return filename
def _write_metadata(self, backup, volume_id, container, object_list,
volume_meta):
filename = self._metadata_filename(backup)
LOG.debug('_write_metadata started, container name: %(container)s,'
' metadata filename: %(filename)s' %
{'container': container, 'filename': filename})
metadata = {}
metadata['version'] = self.DRIVER_VERSION
metadata['backup_id'] = backup['id']
metadata['volume_id'] = volume_id
metadata['backup_name'] = backup['display_name']
metadata['backup_description'] = backup['display_description']
metadata['created_at'] = str(backup['created_at'])
metadata['objects'] = object_list
metadata['volume_meta'] = volume_meta
metadata_json = json.dumps(metadata, sort_keys=True, indent=2)
reader = six.StringIO(metadata_json)
etag = self.conn.put_object(container, filename, reader,
content_length=reader.len)
md5 = hashlib.md5(metadata_json).hexdigest()
if etag != md5:
err = _('error writing metadata file to swift, MD5 of metadata'
' file in swift [%(etag)s] is not the same as MD5 of '
'metadata file sent to swift [%(md5)s]') % {'etag': etag,
'md5': md5}
raise exception.InvalidBackup(reason=err)
LOG.debug('_write_metadata finished')
def _read_metadata(self, backup):
container = backup['container']
filename = self._metadata_filename(backup)
LOG.debug('_read_metadata started, container name: %(container)s, '
'metadata filename: %(filename)s' %
{'container': container, 'filename': filename})
(_resp, body) = self.conn.get_object(container, filename)
metadata = json.loads(body)
LOG.debug('_read_metadata finished (%s)' % metadata)
return metadata
    def _prepare_backup(self, backup):
        """Prepare the backup process and return the backup metadata.

        Validates the source volume, ensures the target container exists,
        generates and persists the object-name prefix, and returns the
        initial bookkeeping tuple ``(object_meta, container, size_bytes)``.

        :raises: InvalidVolume for a non-positive volume size,
                 SwiftConnectionFailed on socket errors.
        """
        backup_id = backup['id']
        volume_id = backup['volume_id']
        volume = self.db.volume_get(self.context, volume_id)
        if volume['size'] <= 0:
            err = _('volume size %d is invalid.') % volume['size']
            raise exception.InvalidVolume(reason=err)
        try:
            container = self._create_container(self.context, backup)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        object_prefix = self._generate_swift_object_name_prefix(backup)
        # Persist the prefix both on the in-memory record and in the DB so
        # later stages (and restores) can find the objects.
        backup['service_metadata'] = object_prefix
        self.db.backup_update(self.context, backup_id, {'service_metadata':
                                                        object_prefix})
        volume_size_bytes = volume['size'] * units.Gi
        availability_zone = self.az
        LOG.debug('starting backup of volume: %(volume_id)s to swift,'
                  ' volume size: %(volume_size_bytes)d, swift object names'
                  ' prefix %(object_prefix)s, availability zone:'
                  ' %(availability_zone)s' %
                  {
                      'volume_id': volume_id,
                      'volume_size_bytes': volume_size_bytes,
                      'object_prefix': object_prefix,
                      'availability_zone': availability_zone,
                  })
        # 'id' is the next object sequence number; 'list' accumulates
        # per-object descriptors; 'volume_meta' is filled in later.
        object_meta = {'id': 1, 'list': [], 'prefix': object_prefix,
                       'volume_meta': None}
        return object_meta, container, volume_size_bytes
    def _backup_chunk(self, backup, container, data, data_offset, object_meta):
        """Backup data chunk based on the object metadata and offset.

        Optionally compresses the chunk, uploads it as the next numbered
        object, verifies the upload via MD5/ETag, and appends the object's
        descriptor to ``object_meta`` (mutated in place).

        :raises: SwiftConnectionFailed on socket errors,
                 InvalidBackup when the checksums disagree.
        """
        object_prefix = object_meta['prefix']
        object_list = object_meta['list']
        object_id = object_meta['id']
        # Objects are named <prefix>-00001, <prefix>-00002, ...
        object_name = '%s-%05d' % (object_prefix, object_id)
        obj = {}
        obj[object_name] = {}
        obj[object_name]['offset'] = data_offset
        # Length recorded before compression: the original (volume) extent.
        obj[object_name]['length'] = len(data)
        LOG.debug('reading chunk of data from volume')
        if self.compressor is not None:
            algorithm = CONF.backup_compression_algorithm.lower()
            obj[object_name]['compression'] = algorithm
            data_size_bytes = len(data)
            data = self.compressor.compress(data)
            comp_size_bytes = len(data)
            LOG.debug('compressed %(data_size_bytes)d bytes of data '
                      'to %(comp_size_bytes)d bytes using '
                      '%(algorithm)s' %
                      {
                          'data_size_bytes': data_size_bytes,
                          'comp_size_bytes': comp_size_bytes,
                          'algorithm': algorithm,
                      })
        else:
            LOG.debug('not compressing data')
            obj[object_name]['compression'] = 'none'
        reader = six.StringIO(data)
        LOG.debug('About to put_object')
        try:
            etag = self.conn.put_object(container, object_name, reader,
                                        content_length=len(data))
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        LOG.debug('swift MD5 for %(object_name)s: %(etag)s' %
                  {'object_name': object_name, 'etag': etag, })
        # MD5 of the (possibly compressed) payload must match Swift's ETag.
        md5 = hashlib.md5(data).hexdigest()
        obj[object_name]['md5'] = md5
        LOG.debug('backup MD5 for %(object_name)s: %(md5)s' %
                  {'object_name': object_name, 'md5': md5})
        if etag != md5:
            err = _('error writing object to swift, MD5 of object in '
                    'swift %(etag)s is not the same as MD5 of object sent '
                    'to swift %(md5)s') % {'etag': etag, 'md5': md5}
            raise exception.InvalidBackup(reason=err)
        object_list.append(obj)
        object_id += 1
        object_meta['list'] = object_list
        object_meta['id'] = object_id
        LOG.debug('Calling eventlet.sleep(0)')
        # Yield to other greenthreads between chunks.
        eventlet.sleep(0)
def _finalize_backup(self, backup, container, object_meta):
"""Finalize the backup by updating its metadata on Swift."""
object_list = object_meta['list']
object_id = object_meta['id']
volume_meta = object_meta['volume_meta']
try:
self._write_metadata(backup,
backup['volume_id'],
container,
object_list,
volume_meta)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
self.db.backup_update(self.context, backup['id'],
{'object_count': object_id})
LOG.debug('backup %s finished.' % backup['id'])
def _backup_metadata(self, backup, object_meta):
"""Backup volume metadata.
NOTE(dosaboy): the metadata we are backing up is obtained from a
versioned api so we should not alter it in any way here.
We must also be sure that the service that will perform
the restore is compatible with version used.
"""
json_meta = self.get_metadata(backup['volume_id'])
if not json_meta:
LOG.debug("No volume metadata to backup")
return
object_meta["volume_meta"] = json_meta
def _send_progress_end(self, context, backup, object_meta):
object_meta['backup_percent'] = 100
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def _send_progress_notification(self, context, backup, object_meta,
total_block_sent_num, total_volume_size):
backup_percent = total_block_sent_num * 100 / total_volume_size
object_meta['backup_percent'] = backup_percent
volume_utils.notify_about_backup_usage(context,
backup,
"createprogress",
extra_usage_info=
object_meta)
def backup(self, backup, volume_file, backup_metadata=True):
"""Backup the given volume to Swift."""
(object_meta, container,
volume_size_bytes) = self._prepare_backup(backup)
counter = 0
total_block_sent_num = 0
# There are two mechanisms to send the progress notification.
# 1. The notifications are periodically sent in a certain interval.
# 2. The notifications are sent after a certain number of chunks.
# Both of them are working simultaneously during the volume backup,
# when swift is taken as the backup backend.
def _notify_progress():
self._send_progress_notification(self.context, backup,
object_meta,
total_block_sent_num,
volume_size_bytes)
timer = loopingcall.FixedIntervalLoopingCall(
_notify_progress)
if self.enable_progress_timer:
timer.start(interval=self.backup_timer_interval)
while True:
data = volume_file.read(self.data_block_size_bytes)
data_offset = volume_file.tell()
if data == '':
break
self._backup_chunk(backup, container, data,
data_offset, object_meta)
total_block_sent_num += self.data_block_num
counter += 1
if counter == self.data_block_num:
# Send the notification to Ceilometer when the chunk
# number reaches the data_block_num. The backup percentage
# is put in the metadata as the extra information.
self._send_progress_notification(self.context, backup,
object_meta,
total_block_sent_num,
volume_size_bytes)
# reset the counter
counter = 0
# Stop the timer.
timer.stop()
# All the data have been sent, the backup_percent reaches 100.
self._send_progress_end(self.context, backup, object_meta)
if backup_metadata:
try:
self._backup_metadata(backup, object_meta)
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.exception(
_LE("Backup volume metadata to swift failed: %s") %
six.text_type(err))
self.delete(backup)
self._finalize_backup(backup, container, object_meta)
def _restore_v1(self, backup, volume_id, metadata, volume_file):
"""Restore a v1 swift volume backup from swift."""
backup_id = backup['id']
LOG.debug('v1 swift volume backup restore of %s started', backup_id)
container = backup['container']
metadata_objects = metadata['objects']
metadata_object_names = sum((obj.keys() for obj in metadata_objects),
[])
LOG.debug('metadata_object_names = %s' % metadata_object_names)
prune_list = [self._metadata_filename(backup)]
swift_object_names = [swift_object_name for swift_object_name in
self._generate_object_names(backup)
if swift_object_name not in prune_list]
if sorted(swift_object_names) != sorted(metadata_object_names):
err = _('restore_backup aborted, actual swift object list in '
'swift does not match object list stored in metadata')
raise exception.InvalidBackup(reason=err)
for metadata_object in metadata_objects:
object_name = metadata_object.keys()[0]
LOG.debug('restoring object from swift. backup: %(backup_id)s, '
'container: %(container)s, swift object name: '
'%(object_name)s, volume: %(volume_id)s' %
{
'backup_id': backup_id,
'container': container,
'object_name': object_name,
'volume_id': volume_id,
})
try:
(_resp, body) = self.conn.get_object(container, object_name)
except socket.error as err:
raise exception.SwiftConnectionFailed(reason=err)
compression_algorithm = metadata_object[object_name]['compression']
decompressor = self._get_compressor(compression_algorithm)
if decompressor is not None:
LOG.debug('decompressing data using %s algorithm' %
compression_algorithm)
decompressed = decompressor.decompress(body)
volume_file.write(decompressed)
else:
volume_file.write(body)
# force flush every write to avoid long blocking write on close
volume_file.flush()
# Be tolerant to IO implementations that do not support fileno()
try:
fileno = volume_file.fileno()
except IOError:
LOG.info(_LI("volume_file does not support "
"fileno() so skipping"
"fsync()"))
else:
os.fsync(fileno)
# Restoring a backup to a volume can take some time. Yield so other
# threads can run, allowing for among other things the service
# status to be updated
eventlet.sleep(0)
LOG.debug('v1 swift volume backup restore of %s finished',
backup_id)
    def restore(self, backup, volume_id, volume_file):
        """Restore the given volume backup from swift.

        Reads the backup's metadata object, dispatches to the
        version-specific restore routine, then re-applies any saved
        volume metadata.

        :param backup: backup record (dict-like) to restore from
        :param volume_id: id of the destination volume
        :param volume_file: file-like object opened on the destination volume
        :raises SwiftConnectionFailed: on socket errors talking to swift
        :raises InvalidBackup: if the metadata version is unsupported
        :raises BackupOperationError: if volume metadata restore fails
        """
        backup_id = backup['id']
        container = backup['container']
        object_prefix = backup['service_metadata']
        LOG.debug('starting restore of backup %(object_prefix)s from swift'
                  ' container: %(container)s, to volume %(volume_id)s, '
                  'backup: %(backup_id)s' %
                  {
                      'object_prefix': object_prefix,
                      'container': container,
                      'volume_id': volume_id,
                      'backup_id': backup_id,
                  })
        try:
            metadata = self._read_metadata(backup)
        except socket.error as err:
            raise exception.SwiftConnectionFailed(reason=err)
        metadata_version = metadata['version']
        LOG.debug('Restoring swift backup version %s', metadata_version)
        # An unknown version maps to None in DRIVER_VERSION_MAPPING, and
        # getattr(self, None) raises TypeError — caught below.
        try:
            restore_func = getattr(self, self.DRIVER_VERSION_MAPPING.get(
                metadata_version))
        except TypeError:
            err = (_('No support to restore swift backup version %s')
                   % metadata_version)
            raise exception.InvalidBackup(reason=err)
        restore_func(backup, volume_id, metadata, volume_file)
        # Volume metadata is optional in the backup; restore it if present.
        volume_meta = metadata.get('volume_meta', None)
        try:
            if volume_meta:
                self.put_metadata(volume_id, volume_meta)
            else:
                LOG.debug("No volume metadata in this backup")
        except exception.BackupMetadataUnsupportedVersion:
            msg = _("Metadata restore failed due to incompatible version")
            LOG.error(msg)
            raise exception.BackupOperationError(msg)
        LOG.debug('restore %(backup_id)s to %(volume_id)s finished.' %
                  {'backup_id': backup_id, 'volume_id': volume_id})
    def delete(self, backup):
        """Delete the given backup from swift.

        Best-effort: listing failures and per-object swift errors are
        logged and skipped, but connection-level (socket) errors abort.

        :param backup: backup record (dict-like) whose objects to remove
        :raises SwiftConnectionFailed: on socket errors talking to swift
        """
        container = backup['container']
        LOG.debug('delete started, backup: %s, container: %s, prefix: %s',
                  backup['id'], container, backup['service_metadata'])
        if container is not None:
            swift_object_names = []
            try:
                swift_object_names = self._generate_object_names(backup)
            except Exception:
                # Listing failed: proceed with an empty list so the backup
                # record can still be deleted.
                LOG.warn(_LW('swift error while listing objects, continuing'
                             ' with delete'))
            for swift_object_name in swift_object_names:
                try:
                    self.conn.delete_object(container, swift_object_name)
                except socket.error as err:
                    raise exception.SwiftConnectionFailed(reason=err)
                except Exception:
                    LOG.warn(_LW('swift error while deleting object %s, '
                                 'continuing with delete')
                             % swift_object_name)
                else:
                    # try/except/else: only logged when the delete succeeded.
                    LOG.debug('deleted swift object: %(swift_object_name)s'
                              ' in container: %(container)s' %
                              {
                                  'swift_object_name': swift_object_name,
                                  'container': container
                              })
                # Deleting a backup's objects from swift can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)
        LOG.debug('delete %s finished' % backup['id'])
def get_backup_driver(context):
    """Return a Swift backup driver instance for the given request context."""
    return SwiftBackupDriver(context)
| |
import matplotlib
matplotlib.use('Agg')
import os
import sys
import numpy as np
import json
import matplotlib.pyplot as plt
import caffe
from caffe import layers as L
from caffe import params as P
from vqa_data_provider_layer import VQADataProvider
from visualize_tools import exec_validation, drawgraph
import config
def qlstm(mode, batchsize, T, question_vocab_size):
    """Build the VQA network (2-layer LSTM question encoder + compact
    bilinear pooling with a 2-glimpse attention) as a caffe NetSpec.

    :param mode: data-provider mode string (train split name or 'val')
    :param batchsize: batch size N passed to the python data layer
    :param T: number of question time steps (max words per question)
    :param question_vocab_size: input dimension of the word embedding
    :return: the serialized network (caffe proto message)
    """
    n = caffe.NetSpec()
    mode_str = json.dumps({'mode':mode, 'batchsize':batchsize})
    # Python data layer: question token ids, LSTM continuation markers,
    # raw image features, answer labels and GloVe word vectors.
    n.data, n.cont, n.img_feature_raw, n.label, n.glove = L.Python(\
        module='vqa_data_provider_layer', layer='VQADataProviderLayer', param_str=mode_str, ntop=5 )
    # 1x1 convolution projecting the raw image features to 2048 channels.
    n.img_feature = L.Convolution(n.img_feature_raw, kernel_size=1, stride=1,
                                  num_output=2048, pad=0, weight_filler=dict(type='xavier'))
    n.embed_ba = L.Embed(n.data, input_dim=question_vocab_size, num_output=300, \
        weight_filler=dict(type='uniform',min=-0.08,max=0.08))
    n.embed = L.TanH(n.embed_ba)
    # Learned embedding concatenated with pretrained GloVe vectors.
    concat_word_embed = [n.embed, n.glove]
    n.concat_embed = L.Concat(*concat_word_embed, concat_param={'axis': 2}) # T x N x 600

    # LSTM1
    n.lstm1 = L.LSTM(\
        n.concat_embed, n.cont,\
        recurrent_param=dict(\
            num_output=1024,\
            weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
            bias_filler=dict(type='constant',value=0)))
    # Keep only the last time step of the sequence; Silence the other
    # slices so caffe does not complain about unconsumed blobs.
    tops1 = L.Slice(n.lstm1, ntop=T, slice_param={'axis':0})
    for i in range(T-1):
        n.__setattr__('slice_first'+str(i), tops1[int(i)])
        n.__setattr__('silence_data_first'+str(i), L.Silence(tops1[int(i)],ntop=0))
    n.lstm1_out = tops1[T-1]
    n.lstm1_reshaped = L.Reshape(n.lstm1_out,\
        reshape_param=dict(\
            shape=dict(dim=[-1,1024])))
    n.lstm1_reshaped_droped = L.Dropout(n.lstm1_reshaped,dropout_param={'dropout_ratio':0.3})
    n.lstm1_droped = L.Dropout(n.lstm1,dropout_param={'dropout_ratio':0.3})
    # LSTM2
    n.lstm2 = L.LSTM(\
        n.lstm1_droped, n.cont,\
        recurrent_param=dict(\
            num_output=1024,\
            weight_filler=dict(type='uniform',min=-0.08,max=0.08),\
            bias_filler=dict(type='constant',value=0)))
    tops2 = L.Slice(n.lstm2, ntop=T, slice_param={'axis':0})
    for i in range(T-1):
        n.__setattr__('slice_second'+str(i), tops2[int(i)])
        n.__setattr__('silence_data_second'+str(i), L.Silence(tops2[int(i)],ntop=0))
    n.lstm2_out = tops2[T-1]
    n.lstm2_reshaped = L.Reshape(n.lstm2_out,\
        reshape_param=dict(\
            shape=dict(dim=[-1,1024])))
    n.lstm2_reshaped_droped = L.Dropout(n.lstm2_reshaped,dropout_param={'dropout_ratio':0.3})
    # Question representation: concatenation of both LSTM layers' last steps.
    concat_botom = [n.lstm1_reshaped_droped, n.lstm2_reshaped_droped]
    n.lstm_12 = L.Concat(*concat_botom)

    # Tile the 2048-d question vector over the 14x14 spatial feature grid.
    n.q_emb_tanh_droped_resh = L.Reshape(n.lstm_12,reshape_param=dict(shape=dict(dim=[-1,2048,1,1])))
    n.q_emb_tanh_droped_resh_tiled_1 = L.Tile(n.q_emb_tanh_droped_resh, axis=2, tiles=14)
    n.q_emb_tanh_droped_resh_tiled = L.Tile(n.q_emb_tanh_droped_resh_tiled_1, axis=3, tiles=14)
    n.i_emb_tanh_droped_resh = L.Reshape(n.img_feature,reshape_param=dict(shape=dict(dim=[-1,2048,14,14])))
    # Compact bilinear fusion of question and image features per location.
    n.blcf = L.CompactBilinear(n.q_emb_tanh_droped_resh_tiled, n.i_emb_tanh_droped_resh, compact_bilinear_param=dict(num_output=16000,sum_pool=False))
    n.blcf_sign_sqrt = L.SignedSqrt(n.blcf)
    n.blcf_sign_sqrt_l2 = L.L2Normalize(n.blcf_sign_sqrt)
    n.blcf_droped = L.Dropout(n.blcf_sign_sqrt_l2,dropout_param={'dropout_ratio':0.1})
    # multi-channel attention: two softmax-normalized 14x14 glimpse maps.
    n.att_conv1 = L.Convolution(n.blcf_droped, kernel_size=1, stride=1, num_output=512, pad=0, weight_filler=dict(type='xavier'))
    n.att_conv1_relu = L.ReLU(n.att_conv1)
    n.att_conv2 = L.Convolution(n.att_conv1_relu, kernel_size=1, stride=1, num_output=2, pad=0, weight_filler=dict(type='xavier'))
    n.att_reshaped = L.Reshape(n.att_conv2,reshape_param=dict(shape=dict(dim=[-1,2,14*14])))
    n.att_softmax = L.Softmax(n.att_reshaped, axis=2)
    n.att = L.Reshape(n.att_softmax,reshape_param=dict(shape=dict(dim=[-1,2,14,14])))
    att_maps = L.Slice(n.att, ntop=2, slice_param={'axis':1})
    n.att_map0 = att_maps[0]
    n.att_map1 = att_maps[1]
    dummy = L.DummyData(shape=dict(dim=[batchsize, 1]), data_filler=dict(type='constant', value=1), ntop=1)
    # Attention-weighted sums of the image features for each glimpse.
    n.att_feature0 = L.SoftAttention(n.i_emb_tanh_droped_resh, n.att_map0, dummy)
    n.att_feature1 = L.SoftAttention(n.i_emb_tanh_droped_resh, n.att_map1, dummy)
    n.att_feature0_resh = L.Reshape(n.att_feature0, reshape_param=dict(shape=dict(dim=[-1,2048])))
    n.att_feature1_resh = L.Reshape(n.att_feature1, reshape_param=dict(shape=dict(dim=[-1,2048])))
    n.att_feature = L.Concat(n.att_feature0_resh, n.att_feature1_resh)
    # merge attention and lstm with compact bilinear pooling
    n.att_feature_resh = L.Reshape(n.att_feature, reshape_param=dict(shape=dict(dim=[-1,4096,1,1])))
    n.lstm_12_resh = L.Reshape(n.lstm_12, reshape_param=dict(shape=dict(dim=[-1,2048,1,1])))
    n.bc_att_lstm = L.CompactBilinear(n.att_feature_resh, n.lstm_12_resh,
                                      compact_bilinear_param=dict(num_output=16000,sum_pool=False))
    n.bc_sign_sqrt = L.SignedSqrt(n.bc_att_lstm)
    n.bc_sign_sqrt_l2 = L.L2Normalize(n.bc_sign_sqrt)
    n.bc_dropped = L.Dropout(n.bc_sign_sqrt_l2, dropout_param={'dropout_ratio':0.1})
    n.bc_dropped_resh = L.Reshape(n.bc_dropped, reshape_param=dict(shape=dict(dim=[-1, 16000])))
    # Final classifier over the 3000 most frequent answers.
    n.prediction = L.InnerProduct(n.bc_dropped_resh, num_output=3000, weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.prediction, n.label)
    return n.to_proto()
def make_answer_vocab(adic, vocab_size):
    """
    Build an answer vocabulary of at most `vocab_size` entries.

    Counts how often each answer string occurs across all questions, then
    keeps the `vocab_size` most frequent answers, mapping each to an index
    in [0, vocab_size). The empty string is seeded with a huge count so it
    always survives the cut.
    """
    # Frequency table; '' gets a sentinel count so it is always kept.
    counts = {'': 1000000}
    for answer_obj in adic.values():
        for ans in answer_obj:
            word = ans['answer']
            counts[word] = counts.get(word, 0) + 1

    # Ascending sort by frequency (stable, so insertion order breaks ties).
    ranked = sorted(counts.items(), key=lambda kv: kv[1])

    # Keep only the tail (most frequent `vocab_size` answers) and number
    # them in that order.
    return {word: idx for idx, (word, _cnt) in enumerate(ranked[-vocab_size:])}
def make_question_vocab(qdic):
    """
    Build a word-to-index vocabulary from all question strings.

    Index 0 is reserved for the empty string; all other words are numbered
    in first-encounter order.
    """
    vdict = {'': 0}
    next_id = 1
    for entry in qdic.values():
        # Tokenize the question string with the provider's tokenizer.
        for word in VQADataProvider.seq_to_list(entry['qstr']):
            if word not in vdict:
                vdict[word] = next_id
                next_id += 1
    return vdict
def make_vocab_files():
    """
    Produce the question and answer vocabularies.

    Returns a (question_vocab, answer_vocab) pair built from the configured
    vocabulary data splits.
    """
    print(('making question vocab...', config.QUESTION_VOCAB_SPACE))
    q_data, _ = VQADataProvider.load_data(config.QUESTION_VOCAB_SPACE)
    question_vocab = make_question_vocab(q_data)
    print(('making answer vocab...', config.ANSWER_VOCAB_SPACE))
    _, a_data = VQADataProvider.load_data(config.ANSWER_VOCAB_SPACE)
    answer_vocab = make_answer_vocab(a_data, config.NUM_OUTPUT_UNITS)
    return question_vocab, answer_vocab
def main():
    """Train the VQA model: build/restore vocabs, write prototxts, run solver."""
    if not os.path.exists('./result'):
        os.makedirs('./result')
    question_vocab, answer_vocab = {}, {}
    # Reuse cached vocabularies when both json files are already present.
    if os.path.exists('./result/vdict.json') and os.path.exists('./result/adict.json'):
        print('restoring vocab')
        with open('./result/vdict.json','r') as f:
            question_vocab = json.load(f)
        with open('./result/adict.json','r') as f:
            answer_vocab = json.load(f)
    else:
        question_vocab, answer_vocab = make_vocab_files()
        with open('./result/vdict.json','w') as f:
            json.dump(question_vocab, f)
        with open('./result/adict.json','w') as f:
            json.dump(answer_vocab, f)
    print(('question vocab size:', len(question_vocab)))
    print(('answer vocab size:', len(answer_vocab)))
    # Emit train/val network definitions for the solver to load.
    with open('./result/proto_train.prototxt', 'w') as f:
        f.write(str(qlstm(config.TRAIN_DATA_SPLITS, config.BATCH_SIZE, \
            config.MAX_WORDS_IN_QUESTION, len(question_vocab))))
    with open('./result/proto_test.prototxt', 'w') as f:
        f.write(str(qlstm('val', config.VAL_BATCH_SIZE, \
            config.MAX_WORDS_IN_QUESTION, len(question_vocab))))
    caffe.set_device(config.GPU_ID)
    caffe.set_mode_gpu()
    solver = caffe.get_solver('./qlstm_solver.prototxt')
    train_loss = np.zeros(config.MAX_ITERATIONS)
    results = []
    for it in range(config.MAX_ITERATIONS):
        solver.step(1)
        # store the train loss
        train_loss[it] = solver.net.blobs['loss'].data
        if it % config.PRINT_INTERVAL == 0:
            print(('Iteration:', it))
            # NOTE(review): at it == 0 this slice is empty and .mean()
            # yields nan (with a RuntimeWarning) — confirm intended.
            c_mean_loss = train_loss[it-config.PRINT_INTERVAL:it].mean()
            print(('Train loss:', c_mean_loss))
        if it != 0 and it % config.VALIDATE_INTERVAL == 0:
            # NOTE(review): c_mean_loss is only bound in the print branch
            # above; if VALIDATE_INTERVAL is not a multiple of
            # PRINT_INTERVAL this raises NameError — verify config.
            solver.test_nets[0].save('./result/tmp.caffemodel')
            print('Validating...')
            test_loss, acc_overall, acc_per_ques, acc_per_ans = exec_validation(config.GPU_ID, 'val', it=it)
            print(('Test loss:', test_loss))
            print(('Accuracy:', acc_overall))
            results.append([it, c_mean_loss, test_loss, acc_overall, acc_per_ques, acc_per_ans])
            # Track the iteration with the best overall accuracy so far.
            best_result_idx = np.array([x[3] for x in results]).argmax()
            print(('Best accuracy of', results[best_result_idx][3], 'was at iteration', results[best_result_idx][0]))
            drawgraph(results)
# Script entry point.
if __name__ == '__main__':
    main()
| |
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
from nnabla.testing import assert_allclose
ctxs = list_context('Dropout')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("p", [-0.01, 1])
def test_dropout_p_boundaries(p, ctx, func_name):
    """Dropout must reject drop probabilities outside [0, 1)."""
    with nn.context_scope(ctx):
        x = nn.Variable((2, 3))
        with pytest.raises(RuntimeError):
            # Dropout cannot take p < 0 or 1 <= p.
            y = F.dropout(x, p)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("p", [p / 10. for p in range(1, 9)] + [0])
def test_dropout_forward_backward(p, seed, ctx, func_name):
    """Forward, backward, accumulation and need_grad checks for F.dropout."""
    from nbla_test_utils import cap_ignore_region
    # Note: each backward execution requires a forward execution in NNabla.
    with nn.context_scope(ctx):
        # Create inputs
        rng = np.random.RandomState(seed)
        inputs = [
            cap_ignore_region(
                rng.randn(2, 3, 4).astype(np.float32) * 2,
                (-1e-3, 1e-3))]  # Ensure there is no zero.
        x = nn.Variable(inputs[0].shape, need_grad=True)
        x.d = inputs[0]
        init_dx = rng.randn(*x.shape).astype(x.data.dtype)
        init_dy = rng.randn(*x.shape).astype(x.data.dtype)
        # Construct graph
        y = F.dropout(x, p)
        # Reference parameter: kept units are rescaled by 1/(1-p).
        scale = 1. / (1. - p)
        # Test forward. The mask is recovered from the zeros of y; inputs
        # were capped away from zero above, so y == 0 iff dropped.
        y.forward(clear_buffer=True)
        mask = (y.d != 0)
        ref_y = x.d * mask * scale
        assert_allclose(y.d, ref_y)
        assert y.parent.name == func_name
        # Test backward (gradients accumulate onto init_dx).
        x.g[...] = init_dx
        y.backward(init_dy, clear_buffer=True)
        ref_dx = init_dy * mask * scale
        assert_allclose(x.g, init_dx + ref_dx)
        # Test accumulation (accum=False overwrites the gradient).
        y.forward(clear_no_need_grad=True)
        mask = (y.d != 0)
        x.g[...] = 1
        y.g = init_dy
        y.parent.backward([x], [y], [False])
        ref_dx = init_dy * mask * scale
        assert_allclose(x.g, ref_dx)
        # Test accum=False with NaN gradient: pre-existing NaN must not leak.
        y.forward(clear_no_need_grad=True)
        x.g = np.float32('nan')
        y.parent.backward([x], [y], [False])
        assert not np.any(np.isnan(x.g))
        # Test need_grad: no gradient should be written when disabled.
        y.forward(clear_no_need_grad=True)
        x.g[...] = 0
        x.need_grad = False
        y.backward(init_dy)
        assert np.all(x.g == 0)
def ref_dropout_backward(dy, mask, p):
    """Reference backward of Dropout: masked upstream grad rescaled by 1/(1-p)."""
    kept = dy * mask
    return kept / (1 - p)
def ref_dropout_double_backward(dy, mask, p):
    '''Reference result of the derivative of y + dy/dx where y = F.Dropout.

    The second derivative of Dropout is 0, so this is just the first
    derivative reference (y' + 0); the tiny helper is inlined here.
    '''
    return dy * mask / (1 - p)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("p", [p / 10. for p in range(1, 9)] + [0])
def test_dropout_double_backward(p, seed, ctx, func_name):
    """Tests dropout_backward: mask passing (A), its unit behavior (B), and
    its forward (C) / backward (D) through nn.grad; see section comments."""
    from nnabla.backward_functions import registry
    from nnabla._dropout_workaround import _get_dropout_mask
    # dropout_backward depends on Dropout. The dependency must be kept by the
    # the execution order.
    #   1. Dropout::forward (A mask of dropout is calculated.)
    #   2. The forward of dropout_backward (The mask is used.)
    #   3. The backward of dropout_backward (The mask is used.)
    #   4. Dropout::backward (The mask is used, and then cleared.)
    # This order must be kept when using nnabla.grad. In the current
    # implementation, GradEndFunction keeps this order.
    atol_f = 1e-4
    with nn.context_scope(ctx):
        rng = np.random.RandomState(seed)
        init_x = rng.randn(2, 3, 4).astype(np.float32) * 2
        init_dy = rng.randn(*init_x.shape).astype(init_x.dtype)
        init_dy_for_grad = rng.randn(*init_x.shape).astype(init_x.dtype)
        init_dx = rng.randn(*init_x.shape).astype(init_x.dtype)
        init_for_dx2 = rng.randn(*init_x.shape).astype(init_x.dtype)

        #
        # A. Test mask passing
        #
        # Skip p=0 because, in the case, dropout does not happen. mask does not
        # change the results.
        if p != 0:
            with pytest.raises(RuntimeError):
                x = nn.Variable.from_numpy_array(init_x).apply(need_grad=True)
                dy = nn.Variable.from_numpy_array(
                    init_dy).apply(need_grad=True)
                # y = F.dropout(x, p, seed) # Dropout is required to compute mask.
                dx = registry['Dropout']([dy, x], p, seed)
                # Note: y.forward() is required for dx.forward(). However this test
                #       is skipped because the random results are randomly matched
                #       between dx.forward() with and without y.forward(). Therefore
                #       The test result is not reproduced.

        #
        # B. Unit test of dropout_backward
        #
        # Graph construction
        x = nn.Variable.from_numpy_array(init_x).apply(need_grad=True)
        dy = nn.Variable.from_numpy_array(init_dy).apply(need_grad=True)
        y = F.dropout(x, p, seed)  # Dropout is required to compute mask.
        dx = registry['Dropout']([dy, x], p, seed)
        # Execution
        y.forward()  # Dropout is required to compute mask.
        # (y!=0) cannot be used when x includes 0.
        mask = _get_dropout_mask(x).d
        dx.forward()
        # Note: dropout_backward is a composite function. dx.parent is just
        #       a just composing function like MulScalar. Unit tests using
        #       dx.parent.forward and dx.parent.backward are meaningless.
        #       By the same reason, test of accumulation is nonsense.
        # Reference
        ref_dx = ref_dropout_backward(init_dy, mask, p)
        # Test
        assert_allclose(dx.d, ref_dx, atol=atol_f,
                        err_msg="Wrong output values of dropout_backward.")

        #
        # C. Test the forward of dropout_backward by using nnabla.grad
        #
        # Graph construction
        x = nn.Variable.from_numpy_array(init_x).apply(need_grad=True)
        y = F.dropout(x, p, seed)
        dx = nn.grad(y, x, grad_outputs=[init_dy_for_grad])[0]
        # Note: In NNabla 1.22.0, if use grad_outputs=X, nn.grad separate
        #       np.ndarray X into small arrays by self._force_list.
        #       For example, X = np.array([[5, 6], [7, 8]]) is separated
        #       into [np.array([5, 6]), np.array(7, 8)]. Then Mul2 inserted by
        #       nn.grad uses np.array([5, 6]) as dy, and broadcasts it to
        #       the np.array([[5, 6], [5, 6]]). Finally, the forward execution
        #       is finished, but the result values are wrong.
        # Execution
        dx.forward(clear_buffer=True)
        # Reference
        mask = _get_dropout_mask(x).d
        ref_dx = ref_dropout_backward(init_dy_for_grad, mask, p)
        # Test
        assert_allclose(dx.d, ref_dx, atol=atol_f,
                        err_msg="Wrong output values of Dropout of nn.grad.")

        #
        # D. Test the backward of dropout_backward by using nnabla.grad
        #
        # The numerical grad by using scipy.approx_fprime cannot be performed
        # because Dropout has randomness and changes the results during
        # the repeated forward computation.
        # Graph construction
        x = nn.Variable.from_numpy_array(init_x).apply(need_grad=True)
        y = F.dropout(x, p, seed)
        dx = nn.grad(y, x, grad_outputs=[init_dy_for_grad])[0]
        y_dx = y + dx  # replaceable with F.sink(y, dx, one_input_grad=False)
        # Execution
        x.g = init_dx  # Accumulation
        y_dx.forward(clear_no_need_grad=True)
        mask = _get_dropout_mask(x).d  # Store mask before the clear
        y_dx.backward(init_for_dx2, clear_buffer=True)
        # Reference
        ref_dx = ref_dropout_double_backward(init_for_dx2, mask, p) + init_dx
        # Test
        assert_allclose(x.g, ref_dx, atol=atol_f,
                        err_msg="Wrong output values of double backward of "
                                "Dropout by nn.grad.")

        #
        # E. Test the backward with and without accumulation
        #
        # Let dx = dropout_backward(dy). Because dropout_backward is implemented
        # as a composite function, dx.parent cannot determine the backward
        # function of dropout_backward. Therefore the tests of accumulation
        # by using dx.parent(..., accum=[False]) cannot be performed here.
        # It is not problem because the accumulation of each composing function
        # is expected to be tested independently.
        #
        # Note: Under the depth-first search of NNabla graph engine,
        #       GradEndFunction determines the order of accumulation such that
        #       x.g += (the backward propagation from y.g)
        #       x.g += (the backward propagation from dx.g).
        #       So the test D could fail when accumulation of the
        #       double-backward path fails.
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("p", [0.5])
def test_dropout_grad_dependency(p, seed, ctx, func_name):
    """Double-backward through nn.grad with Dropout's inputs/outputs made
    intermediate, to exercise buffer clearance by grad dependencies."""
    from nnabla._dropout_workaround import _get_dropout_mask
    # Test whether the memory clearance by grad_depends_on_inputs/outputs does
    # something bad during graph execution such as the clearance values which
    # is planned to be used. This test is performed by changing the
    # inputs/outputs of Dropout to intermediate variables in the same manner of
    # nbla_test_utils.py.
    atol_f = 1e-4
    with nn.context_scope(ctx):
        rng = np.random.RandomState(seed)
        init_x = rng.randn(2, 3, 4).astype(np.float32) * 2
        init_dy_for_grad = rng.randn(*init_x.shape).astype(init_x.dtype)
        init_dx = rng.randn(*init_x.shape).astype(init_x.dtype)
        init_for_dx2 = rng.randn(*init_x.shape).astype(init_x.dtype)
        # Graph construction: identities sandwich Dropout so its direct
        # input/output are intermediate buffers.
        x = nn.Variable.from_numpy_array(init_x).apply(need_grad=True)
        x_interm = F.identity(x)
        y_interm = F.dropout(x_interm, p, seed)
        y = F.identity(y_interm)
        dx_interm = nn.grad(y, x, grad_outputs=[init_dy_for_grad])[0]
        dx = F.identity(dx_interm)
        y_dx = y + dx  # replaceable with F.sink(y, dx, one_input_grad=False)
        # Execution
        x.g = init_dx  # Accumulation
        y_dx.forward(clear_no_need_grad=True)
        mask = _get_dropout_mask(x_interm).d  # Store mask before the clear
        y_dx.backward(init_for_dx2, clear_buffer=True)
        # Reference
        ref_dx = ref_dropout_double_backward(init_for_dx2, mask, p) + init_dx
        # Test
        assert_allclose(x.g, ref_dx, atol=atol_f,
                        err_msg="Wrong output values of double backward of "
                                "Dropout by nn.grad.")
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [-1, 313])
@pytest.mark.parametrize("p", [0.5] + [0])
def test_dropout_recompute(p, seed, ctx, func_name):
    """Dropout must reproduce identical output when recomputed."""
    from nbla_test_utils import recomputation_test
    rng = np.random.RandomState(0)
    v_in = nn.Variable((2, 3, 4))
    recomputation_test(rng=rng, func=F.dropout, vinputs=[v_in],
                       func_args=[p, seed], func_kwargs={}, ctx=ctx)
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import unittest
import warnings
from numbers import Number
from pathlib import Path
from collections import OrderedDict
import numpy as np
from pymatgen.analysis.phase_diagram import (
CompoundPhaseDiagram,
GrandPotentialPhaseDiagram,
GrandPotPDEntry,
PDEntry,
PDPlotter,
PhaseDiagram,
ReactionDiagram,
TransformedPDEntry,
tet_coord,
triangular_coord,
uniquelines,
BasePhaseDiagram,
)
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import DummySpecies, Element
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.entries.entry_tools import EntrySet
module_dir = Path(__file__).absolute().parent
class PDEntryTest(unittest.TestCase):
    """Unit tests for PDEntry and GrandPotPDEntry built on a LiFeO2 fixture."""

    def setUp(self):
        # LiFeO2 (4 atoms) with total energy 53; the grand-potential entry
        # opens O with mu = 1.5, which the energy tests below show removes
        # the O contribution (53 - 2*1.5 = 50 over the remaining LiFe).
        comp = Composition("LiFeO2")
        self.entry = PDEntry(comp, 53)
        self.gpentry = GrandPotPDEntry(self.entry, {Element("O"): 1.5})

    def test_get_energy(self):
        self.assertEqual(self.entry.energy, 53, "Wrong energy!")
        self.assertEqual(self.gpentry.energy, 50, "Wrong energy!")

    def test_get_chemical_energy(self):
        # 2 O atoms * mu 1.5 = 3.
        self.assertEqual(self.gpentry.chemical_energy, 3, "Wrong energy!")

    def test_get_energy_per_atom(self):
        self.assertEqual(self.entry.energy_per_atom, 53.0 / 4, "Wrong energy per atom!")
        self.assertEqual(self.gpentry.energy_per_atom, 50.0 / 2, "Wrong energy per atom!")

    def test_get_name(self):
        self.assertEqual(self.entry.name, "LiFeO2", "Wrong name!")
        self.assertEqual(self.gpentry.name, "LiFeO2", "Wrong name!")

    def test_get_composition(self):
        comp = self.entry.composition
        expected_comp = Composition("LiFeO2")
        self.assertEqual(comp, expected_comp, "Wrong composition!")
        # The grand-potential entry's composition excludes the open element.
        comp = self.gpentry.composition
        expected_comp = Composition("LiFe")
        self.assertEqual(comp, expected_comp, "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.entry.is_element)
        self.assertFalse(self.gpentry.is_element)

    def test_to_from_dict(self):
        """Round-trip both entry types through as_dict/from_dict."""
        d = self.entry.as_dict()
        gpd = self.gpentry.as_dict()
        entry = PDEntry.from_dict(d)
        self.assertEqual(entry.name, "LiFeO2", "Wrong name!")
        self.assertEqual(entry.energy_per_atom, 53.0 / 4)
        gpentry = GrandPotPDEntry.from_dict(gpd)
        self.assertEqual(gpentry.name, "LiFeO2", "Wrong name!")
        self.assertEqual(gpentry.energy_per_atom, 50.0 / 2)
        # 'name' must be optional in the dict representation.
        d_anon = d.copy()
        del d_anon["name"]
        try:
            entry = PDEntry.from_dict(d_anon)
        except KeyError:
            self.fail("Should not need to supply name!")

    def test_str(self):
        self.assertIsNotNone(str(self.entry))

    def test_read_csv(self):
        entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.assertEqual(entries.chemsys, {"Li", "Fe", "O"}, "Wrong elements!")
        self.assertEqual(len(entries), 490, "Wrong number of entries!")
class TransformedPDEntryTest(unittest.TestCase):
    """Unit tests for TransformedPDEntry using dummy-species terminal comps."""

    def setUp(self):
        comp = Composition("LiFeO2")
        entry = PDEntry(comp, 53)
        terminal_compositions = ["Li2O", "FeO", "LiO8"]
        terminal_compositions = [Composition(c) for c in terminal_compositions]
        # Map each terminal composition to a dummy species Xf, Xg, Xh
        # (chr(102) == 'f').
        sp_mapping = OrderedDict()
        for i, comp in enumerate(terminal_compositions):
            sp_mapping[comp] = DummySpecies("X" + chr(102 + i))
        self.transformed_entry = TransformedPDEntry(entry, sp_mapping)

    def test_get_energy(self):
        # The transformation must not change the total energy.
        self.assertEqual(self.transformed_entry.energy, 53, "Wrong energy!")
        self.assertAlmostEqual(self.transformed_entry.original_entry.energy, 53.0, 11)

    def test_get_energy_per_atom(self):
        # 23/15 is the atom count of the transformed composition.
        self.assertAlmostEqual(self.transformed_entry.energy_per_atom, 53.0 / (23 / 15), 11)

    def test_get_name(self):
        self.assertEqual(self.transformed_entry.name, "LiFeO2", "Wrong name!")

    def test_get_composition(self):
        comp = self.transformed_entry.composition
        expected_comp = Composition({DummySpecies("Xf"): 14 / 30, DummySpecies("Xg"): 1.0, DummySpecies("Xh"): 2 / 30})
        self.assertEqual(comp, expected_comp, "Wrong composition!")

    def test_is_element(self):
        self.assertFalse(self.transformed_entry.is_element)

    def test_to_from_dict(self):
        d = self.transformed_entry.as_dict()
        entry = TransformedPDEntry.from_dict(d)
        self.assertEqual(entry.name, "LiFeO2", "Wrong name!")
        self.assertAlmostEqual(entry.energy_per_atom, 53.0 / (23 / 15), 11)

    def test_str(self):
        self.assertIsNotNone(str(self.transformed_entry))

    def test_normalize(self):
        # Per-atom normalization rescales the dummy-species fractions.
        norm_entry = self.transformed_entry.normalize(mode="atom")
        expected_comp = Composition(
            {DummySpecies("Xf"): 7 / 23, DummySpecies("Xg"): 15 / 23, DummySpecies("Xh"): 1 / 23}
        )
        self.assertEqual(norm_entry.composition, expected_comp, "Wrong composition!")
class PhaseDiagramTest(unittest.TestCase):
    def setUp(self):
        """Build a PhaseDiagram from the Li-Fe-O CSV fixture; mute warnings."""
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.pd = PhaseDiagram(self.entries)
        warnings.simplefilter("ignore")
    def tearDown(self):
        """Restore the default warning filter changed in setUp."""
        warnings.simplefilter("default")
    def test_init(self):
        """A chemically incomplete entry set must raise ValueError."""
        # Ensure that a bad set of entries raises a PD error. Remove all Li
        # from self.entries.
        entries = filter(
            lambda e: (not e.composition.is_element) or e.composition.elements[0] != Element("Li"),
            self.entries,
        )
        self.assertRaises(ValueError, PhaseDiagram, entries)
    def test_dim1(self):
        # Ensure that dim 1 PDs can be generated.
        for el in ["Li", "Fe", "O2"]:
            entries = [e for e in self.entries if e.composition.reduced_formula == el]
            pd = PhaseDiagram(entries)
            # A single-element PD has exactly one stable entry.
            self.assertEqual(len(pd.stable_entries), 1)
            for e in entries:
                decomp, ehull = pd.get_decomp_and_e_above_hull(e)
                self.assertGreaterEqual(ehull, 0)
            plotter = PDPlotter(pd)
            lines, stable_entries, unstable_entries = plotter.pd_plot_data
            self.assertEqual(lines[0][1], [0, 0])
    def test_ordering(self):
        # Test sorting of elements
        entries = [ComputedEntry(Composition(formula), 0) for formula in ["O", "N", "Fe"]]
        pd = PhaseDiagram(entries)
        sorted_elements = (Element("Fe"), Element("N"), Element("O"))
        self.assertEqual(tuple(pd.elements), sorted_elements)
        # Input order must not matter for the default ordering.
        entries.reverse()
        pd = PhaseDiagram(entries)
        self.assertEqual(tuple(pd.elements), sorted_elements)
        # Test manual specification of order
        ordering = [Element(elt_string) for elt_string in ["O", "N", "Fe"]]
        pd = PhaseDiagram(entries, elements=ordering)
        self.assertEqual(tuple(pd.elements), tuple(ordering))
    def test_stable_entries(self):
        """All known stable Li-Fe-O phases must appear on the hull."""
        stable_formulas = [ent.composition.reduced_formula for ent in self.pd.stable_entries]
        expected_stable = [
            "Fe2O3",
            "Li5FeO4",
            "LiFeO2",
            "Fe3O4",
            "Li",
            "Fe",
            "Li2O",
            "O2",
            "FeO",
        ]
        for formula in expected_stable:
            self.assertTrue(formula in stable_formulas, formula + " not in stable entries!")
def test_get_formation_energy(self):
stable_formation_energies = {
ent.composition.reduced_formula: self.pd.get_form_energy(ent) for ent in self.pd.stable_entries
}
expected_formation_energies = {
"Li5FeO4": -164.8117344866667,
"Li2O2": -14.119232793333332,
"Fe2O3": -16.574164339999996,
"FeO": -5.7141519966666685,
"Li": 0.0,
"LiFeO2": -7.732752316666666,
"Li2O": -6.229303868333332,
"Fe": 0.0,
"Fe3O4": -22.565714456666683,
"Li2FeO3": -45.67166036000002,
"O2": 0.0,
}
for formula, energy in expected_formation_energies.items():
self.assertAlmostEqual(energy, stable_formation_energies[formula], 7)
def test_all_entries_hulldata(self):
self.assertEqual(len(self.pd.all_entries_hulldata), 490)
def test_planar_inputs(self):
e1 = PDEntry("H", 0)
e2 = PDEntry("He", 0)
e3 = PDEntry("Li", 0)
e4 = PDEntry("Be", 0)
e5 = PDEntry("B", 0)
e6 = PDEntry("Rb", 0)
pd = PhaseDiagram([e1, e2, e3, e4, e5, e6], map(Element, ["Rb", "He", "B", "Be", "Li", "H"]))
self.assertEqual(len(pd.facets), 1)
def test_str(self):
self.assertIsNotNone(str(self.pd))
def test_get_e_above_hull(self):
for entry in self.pd.stable_entries:
self.assertLess(
self.pd.get_e_above_hull(entry),
1e-11,
"Stable entries should have e above hull of zero!",
)
for entry in self.pd.all_entries:
if entry not in self.pd.stable_entries:
e_ah = self.pd.get_e_above_hull(entry)
self.assertTrue(isinstance(e_ah, Number))
self.assertGreaterEqual(e_ah, 0)
def test_get_equilibrium_reaction_energy(self):
for entry in self.pd.stable_entries:
self.assertLessEqual(
self.pd.get_equilibrium_reaction_energy(entry),
0,
"Stable entries should have negative equilibrium reaction energy!",
)
def test_get_phase_separation_energy(self):
for entry in self.pd.unstable_entries:
if entry.composition.fractional_composition not in [
e.composition.fractional_composition for e in self.pd.stable_entries
]:
self.assertGreaterEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Unstable entries should have positive decomposition energy!",
)
else:
if entry.is_element:
el_ref = self.pd.el_refs[entry.composition.elements[0]]
e_d = entry.energy_per_atom - el_ref.energy_per_atom
self.assertAlmostEqual(self.pd.get_phase_separation_energy(entry), e_d, 7)
# NOTE the remaining materials would require explicit tests as they
# could be either positive or negative
pass
for entry in self.pd.stable_entries:
if entry.composition.is_element:
self.assertEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Stable elemental entries should have decomposition energy of zero!",
)
else:
self.assertLessEqual(
self.pd.get_phase_separation_energy(entry),
0,
"Stable entries should have negative decomposition energy!",
)
self.assertAlmostEqual(
self.pd.get_phase_separation_energy(entry, stable_only=True),
self.pd.get_equilibrium_reaction_energy(entry),
7,
(
"Using `stable_only=True` should give decomposition energy equal to "
"equilibrium reaction energy!"
),
)
# Test that we get correct behaviour with a polymorph
toy_entries = {
"Li": 0.0,
"Li2O": -5,
"LiO2": -4,
"O2": 0.0,
}
toy_pd = PhaseDiagram([PDEntry(c, e) for c, e in toy_entries.items()])
# stable entry
self.assertAlmostEqual(
toy_pd.get_phase_separation_energy(PDEntry("Li2O", -5)),
-1.0,
7,
)
# polymorph
self.assertAlmostEqual(
toy_pd.get_phase_separation_energy(PDEntry("Li2O", -4)),
-2.0 / 3.0,
7,
)
# Test that the method works for novel entries
novel_stable_entry = PDEntry("Li5FeO4", -999)
self.assertLess(
self.pd.get_phase_separation_energy(novel_stable_entry),
0,
"Novel stable entries should have negative decomposition energy!",
)
novel_unstable_entry = PDEntry("Li5FeO4", 999)
self.assertGreater(
self.pd.get_phase_separation_energy(novel_unstable_entry),
0,
"Novel unstable entries should have positive decomposition energy!",
)
duplicate_entry = PDEntry("Li2O", -14.31361175)
scaled_dup_entry = PDEntry("Li4O2", -14.31361175 * 2)
stable_entry = [e for e in self.pd.stable_entries if e.name == "Li2O"][0]
self.assertEqual(
self.pd.get_phase_separation_energy(duplicate_entry),
self.pd.get_phase_separation_energy(stable_entry),
"Novel duplicates of stable entries should have same decomposition energy!",
)
self.assertEqual(
self.pd.get_phase_separation_energy(scaled_dup_entry),
self.pd.get_phase_separation_energy(stable_entry),
"Novel scaled duplicates of stable entries should have same decomposition energy!",
)
def test_get_decomposition(self):
for entry in self.pd.stable_entries:
self.assertEqual(
len(self.pd.get_decomposition(entry.composition)),
1,
"Stable composition should have only 1 decomposition!",
)
dim = len(self.pd.elements)
for entry in self.pd.all_entries:
ndecomp = len(self.pd.get_decomposition(entry.composition))
self.assertTrue(
ndecomp > 0 and ndecomp <= dim,
"The number of decomposition phases can at most be equal to the number of components.",
)
# Just to test decomp for a ficitious composition
ansdict = {
entry.composition.formula: amt for entry, amt in self.pd.get_decomposition(Composition("Li3Fe7O11")).items()
}
expected_ans = {
"Fe2 O2": 0.0952380952380949,
"Li1 Fe1 O2": 0.5714285714285714,
"Fe6 O8": 0.33333333333333393,
}
for k, v in expected_ans.items():
self.assertAlmostEqual(ansdict[k], v, 7)
def test_get_transition_chempots(self):
for el in self.pd.elements:
self.assertLessEqual(len(self.pd.get_transition_chempots(el)), len(self.pd.facets))
def test_get_element_profile(self):
for el in self.pd.elements:
for entry in self.pd.stable_entries:
if not (entry.composition.is_element):
self.assertLessEqual(
len(self.pd.get_element_profile(el, entry.composition)),
len(self.pd.facets),
)
expected = [
{
"evolution": 1.0,
"chempot": -4.2582781416666666,
"reaction": "Li2O + 0.5 O2 -> Li2O2",
},
{
"evolution": 0,
"chempot": -5.0885906699999968,
"reaction": "Li2O -> Li2O",
},
{
"evolution": -1.0,
"chempot": -10.487582010000001,
"reaction": "Li2O -> 2 Li + 0.5 O2",
},
]
result = self.pd.get_element_profile(Element("O"), Composition("Li2O"))
for d1, d2 in zip(expected, result):
self.assertAlmostEqual(d1["evolution"], d2["evolution"])
self.assertAlmostEqual(d1["chempot"], d2["chempot"])
self.assertEqual(d1["reaction"], str(d2["reaction"]))
def test_get_get_chempot_range_map(self):
elements = [el for el in self.pd.elements if el.symbol != "Fe"]
self.assertEqual(len(self.pd.get_chempot_range_map(elements)), 10)
def test_getmu_vertices_stability_phase(self):
results = self.pd.getmu_vertices_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(len(results), 6)
test_equality = False
for c in results:
if (
abs(c[Element("O")] + 7.115) < 1e-2
and abs(c[Element("Fe")] + 6.596) < 1e-2
and abs(c[Element("Li")] + 3.931) < 1e-2
):
test_equality = True
self.assertTrue(test_equality, "there is an expected vertex missing in the list")
def test_getmu_range_stability_phase(self):
results = self.pd.get_chempot_range_stability_phase(Composition("LiFeO2"), Element("O"))
self.assertAlmostEqual(results[Element("O")][1], -4.4501812249999997)
self.assertAlmostEqual(results[Element("Fe")][0], -6.5961470999999996)
self.assertAlmostEqual(results[Element("Li")][0], -3.6250022625000007)
def test_get_hull_energy(self):
for entry in self.pd.stable_entries:
h_e = self.pd.get_hull_energy(entry.composition)
self.assertAlmostEqual(h_e, entry.energy)
n_h_e = self.pd.get_hull_energy(entry.composition.fractional_composition)
self.assertAlmostEqual(n_h_e, entry.energy_per_atom)
def test_1d_pd(self):
entry = PDEntry("H", 0)
pd = PhaseDiagram([entry])
decomp, e = pd.get_decomp_and_e_above_hull(PDEntry("H", 1))
self.assertAlmostEqual(e, 1)
self.assertAlmostEqual(decomp[entry], 1.0)
def test_get_critical_compositions_fractional(self):
c1 = Composition("Fe2O3").fractional_composition
c2 = Composition("Li3FeO4").fractional_composition
c3 = Composition("Li2O").fractional_composition
comps = self.pd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3").fractional_composition,
Composition("Li0.3243244Fe0.1621621O0.51351349"),
Composition("Li3FeO4").fractional_composition,
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [
Composition("Fe0.4O0.6"),
Composition("LiFeO2").fractional_composition,
Composition("Li5FeO4").fractional_composition,
Composition("Li2O").fractional_composition,
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
def test_get_critical_compositions(self):
c1 = Composition("Fe2O3")
c2 = Composition("Li3FeO4")
c3 = Composition("Li2O")
comps = self.pd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3"),
Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
Composition("Li3FeO4"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
comps = self.pd.get_critical_compositions(c1, c3)
expected = [
Composition("Fe2O3"),
Composition("LiFeO2"),
Composition("Li5FeO4") / 3,
Composition("Li2O"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# Don't fail silently if input compositions aren't in phase diagram
# Can be very confusing if you're working with a GrandPotentialPD
self.assertRaises(
ValueError,
self.pd.get_critical_compositions,
Composition("Xe"),
Composition("Mn"),
)
# For the moment, should also fail even if compositions are in the gppd
# because it isn't handled properly
gppd = GrandPotentialPhaseDiagram(self.pd.all_entries, {"Xe": 1}, self.pd.elements + [Element("Xe")])
self.assertRaises(
ValueError,
gppd.get_critical_compositions,
Composition("Fe2O3"),
Composition("Li3FeO4Xe"),
)
# check that the function still works though
comps = gppd.get_critical_compositions(c1, c2)
expected = [
Composition("Fe2O3"),
Composition("Li0.3243244Fe0.1621621O0.51351349") * 7.4,
Composition("Li3FeO4"),
]
for crit, exp in zip(comps, expected):
self.assertTrue(crit.almost_equals(exp, rtol=0, atol=1e-5))
# case where the endpoints are identical
self.assertEqual(self.pd.get_critical_compositions(c1, c1 * 2), [c1, c1 * 2])
def test_get_composition_chempots(self):
c1 = Composition("Fe3.1O4")
c2 = Composition("Fe3.2O4.1Li0.01")
e1 = self.pd.get_hull_energy(c1)
e2 = self.pd.get_hull_energy(c2)
cp = self.pd.get_composition_chempots(c1)
calc_e2 = e1 + sum(cp[k] * v for k, v in (c2 - c1).items())
self.assertAlmostEqual(e2, calc_e2)
def test_get_all_chempots(self):
c1 = Composition("Fe3.1O4")
c2 = Composition("FeO")
cp1 = self.pd.get_all_chempots(c1)
cpresult = {
Element("Li"): -4.077061954999998,
Element("Fe"): -6.741593864999999,
Element("O"): -6.969907375000003,
}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp1["Fe3O4-FeO-LiFeO2"][elem], energy)
cp2 = self.pd.get_all_chempots(c2)
cpresult = {
Element("O"): -7.115354140000001,
Element("Fe"): -6.5961471,
Element("Li"): -3.9316151899999987,
}
for elem, energy in cpresult.items():
self.assertAlmostEqual(cp2["FeO-LiFeO2-Fe"][elem], energy)
def test_to_from_dict(self):
# test round-trip for other entry types such as ComputedEntry
entry = ComputedEntry("H", 0.0, 0.0, entry_id="test")
pd = PhaseDiagram([entry])
d = pd.as_dict()
pd_roundtrip = PhaseDiagram.from_dict(d)
self.assertEqual(pd.all_entries[0].entry_id, pd_roundtrip.all_entries[0].entry_id)
class GrandPotentialPhaseDiagramTest(unittest.TestCase):
    """Tests for GrandPotentialPhaseDiagram built from the CSV test entries."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        # Two diagrams, open with respect to oxygen at different chemical potentials.
        self.pd = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -5})
        self.pd6 = GrandPotentialPhaseDiagram(self.entries, {Element("O"): -6})

    def test_stable_entries(self):
        """Expected phases sit on the mu_O = -5 hull; the mu_O = -6 hull has 4."""
        observed = [e.original_entry.composition.reduced_formula for e in self.pd.stable_entries]
        for expected in ["Li5FeO4", "Li2FeO3", "LiFeO2", "Fe2O3", "Li2O2"]:
            self.assertTrue(expected in observed, "{} not in stable entries!".format(expected))
        self.assertEqual(len(self.pd6.stable_entries), 4)

    def test_get_formation_energy(self):
        """Grand-potential formation energies of stable phases match references."""
        computed = {}
        for ent in self.pd.stable_entries:
            computed[ent.original_entry.composition.reduced_formula] = self.pd.get_form_energy(ent)
        reference = {
            "Fe2O3": 0.0,
            "Li5FeO4": -5.305515040000046,
            "Li2FeO3": -2.3424741500000152,
            "LiFeO2": -0.43026396250000154,
            "Li2O2": 0.0,
        }
        for formula, energy in reference.items():
            self.assertAlmostEqual(
                energy,
                computed[formula],
                7,
                "Calculated formation for {} is not correct!".format(formula),
            )

    def test_str(self):
        """The diagram exposes a usable string representation."""
        self.assertIsNotNone(str(self.pd))
class BasePhaseDiagramTest(PhaseDiagramTest):
    """Re-runs the whole PhaseDiagramTest suite against BasePhaseDiagram."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.pd = BasePhaseDiagram.from_entries(self.entries)
        warnings.simplefilter("ignore")

    def tearDown(self):
        warnings.simplefilter("default")

    def test_init(self):
        # The inherited test constructs PhaseDiagram directly; skip it here.
        pass

    def test_as_dict_from_dict(self):
        """as_dict/from_dict round-trips to an identical dict."""
        dd = self.pd.as_dict()
        rebuilt = BasePhaseDiagram.from_dict(dd)
        rebuilt_dd = rebuilt.as_dict()
        self.assertEqual(rebuilt_dd, dd)
class CompoundPhaseDiagramTest(unittest.TestCase):
    """Tests for CompoundPhaseDiagram with Li2O and Fe2O3 terminal compositions."""

    def setUp(self):
        self.entries = EntrySet.from_csv(str(module_dir / "pdentries_test.csv"))
        self.pd = CompoundPhaseDiagram(self.entries, [Composition("Li2O"), Composition("Fe2O3")])

    def test_stable_entries(self):
        """Every expected phase lies on the compound hull."""
        observed = [e.name for e in self.pd.stable_entries]
        for formula in ["Fe2O3", "Li5FeO4", "LiFeO2", "Li2O"]:
            self.assertTrue(formula in observed)

    def test_get_formation_energy(self):
        """Formation energies relative to the terminal compositions match references."""
        computed = {e.name: self.pd.get_form_energy(e) for e in self.pd.stable_entries}
        reference = {
            "Li5FeO4": -7.0773284399999739,
            "Fe2O3": 0,
            "LiFeO2": -0.47455929750000081,
            "Li2O": 0,
        }
        for name, energy in reference.items():
            self.assertAlmostEqual(energy, computed[name], 7)

    def test_str(self):
        """The diagram exposes a usable string representation."""
        self.assertIsNotNone(str(self.pd))
class ReactionDiagramTest(unittest.TestCase):
    """Tests for ReactionDiagram between VPO5 and H4(CO)3."""

    def setUp(self):
        module_dir = os.path.dirname(os.path.abspath(__file__))
        self.entries = list(EntrySet.from_csv(os.path.join(module_dir, "reaction_entries_test.csv")).entries)
        # Locate the two reactant entries explicitly. The original code left
        # entry1/entry2 unbound when a formula was absent from the fixture,
        # producing a confusing NameError; fail fast with a clear message instead.
        entry1 = None
        entry2 = None
        for e in self.entries:
            if e.composition.reduced_formula == "VPO5":
                entry1 = e
            elif e.composition.reduced_formula == "H4(CO)3":
                entry2 = e
        if entry1 is None or entry2 is None:
            raise ValueError("reaction_entries_test.csv is missing the VPO5 and/or H4(CO)3 entries")
        self.rd = ReactionDiagram(entry1=entry1, entry2=entry2, all_entries=self.entries[2:])

    def test_get_compound_pd(self):
        # Smoke test: a compound phase diagram can be constructed from the diagram.
        self.rd.get_compound_pd()

    def test_formula(self):
        # Every reaction entry must contain all five constituent elements.
        for e in self.rd.rxn_entries:
            self.assertIn(Element.V, e.composition)
            self.assertIn(Element.O, e.composition)
            self.assertIn(Element.C, e.composition)
            self.assertIn(Element.P, e.composition)
            self.assertIn(Element.H, e.composition)
class PDPlotterTest(unittest.TestCase):
    """Tests for PDPlotter across binary, ternary and quaternary systems."""

    def setUp(self):
        all_entries = list(EntrySet.from_csv(os.path.join(module_dir, "pdentries_test.csv")))
        self.pd_ternary = PhaseDiagram(all_entries)
        self.plotter_ternary_mpl = PDPlotter(self.pd_ternary, backend="matplotlib")
        self.plotter_ternary_plotly = PDPlotter(self.pd_ternary, backend="plotly")
        # Removing every Fe-containing entry leaves the Li-O binary subsystem.
        binary_entries = [e for e in all_entries if "Fe" not in e.composition]
        self.pd_binary = PhaseDiagram(binary_entries)
        self.plotter_binary_mpl = PDPlotter(self.pd_binary, backend="matplotlib")
        self.plotter_binary_plotly = PDPlotter(self.pd_binary, backend="plotly")
        # Appending elemental carbon turns the ternary set into a quaternary one.
        all_entries.append(PDEntry("C", 0))
        self.pd_quaternary = PhaseDiagram(all_entries)
        self.plotter_quaternary_mpl = PDPlotter(self.pd_quaternary, backend="matplotlib")
        self.plotter_quaternary_plotly = PDPlotter(self.pd_quaternary, backend="plotly")

    def test_pd_plot_data(self):
        """Plot data contains the expected counts of lines/labels/unstable entries."""
        lines, labels, unstable = self.plotter_ternary_mpl.pd_plot_data
        self.assertEqual(len(lines), 22)
        self.assertEqual(
            len(labels),
            len(self.pd_ternary.stable_entries),
            "Incorrect number of lines generated!",
        )
        self.assertEqual(
            len(unstable),
            len(self.pd_ternary.all_entries) - len(self.pd_ternary.stable_entries),
            "Incorrect number of lines generated!",
        )
        lines, labels, unstable = self.plotter_quaternary_mpl.pd_plot_data
        self.assertEqual(len(lines), 33)
        self.assertEqual(len(labels), len(self.pd_quaternary.stable_entries))
        self.assertEqual(
            len(unstable),
            len(self.pd_quaternary.all_entries) - len(self.pd_quaternary.stable_entries),
        )
        lines, labels, unstable = self.plotter_binary_mpl.pd_plot_data
        self.assertEqual(len(lines), 3)
        self.assertEqual(len(labels), len(self.pd_binary.stable_entries))

    def test_mpl_plots(self):
        # Smoke tests only: verify the matplotlib plotting methods are callable.
        self.plotter_binary_mpl.get_plot().close()
        self.plotter_ternary_mpl.get_plot().close()
        self.plotter_quaternary_mpl.get_plot().close()
        self.plotter_ternary_mpl.get_contour_pd_plot().close()
        self.plotter_ternary_mpl.get_chempot_range_map_plot([Element("Li"), Element("O")]).close()
        self.plotter_ternary_mpl.plot_element_profile(Element("O"), Composition("Li2O")).close()

    def test_plotly_plots(self):
        # Smoke tests: the plotly backend can produce figures for 2-4 components.
        self.plotter_binary_plotly.get_plot()
        self.plotter_ternary_plotly.get_plot()
        self.plotter_quaternary_plotly.get_plot()
class UtilityFunctionTest(unittest.TestCase):
    """Tests for the module-level geometry helper functions."""

    def test_unique_lines(self):
        """uniquelines reduces a facet list to its set of unique vertex pairs."""
        facets = [
            [5, 53, 353],
            [399, 20, 52],
            [399, 400, 20],
            [13, 399, 52],
            [21, 400, 353],
            [393, 5, 353],
            [400, 393, 353],
            [393, 400, 399],
            [393, 13, 5],
            [13, 393, 399],
            [400, 17, 20],
            [21, 17, 400],
        ]
        expected_pairs = {
            (5, 393),
            (21, 353),
            (353, 400),
            (5, 13),
            (17, 20),
            (21, 400),
            (17, 400),
            (52, 399),
            (393, 399),
            (20, 52),
            (353, 393),
            (5, 353),
            (5, 53),
            (13, 399),
            (393, 400),
            (13, 52),
            (53, 353),
            (17, 21),
            (13, 393),
            (20, 399),
            (399, 400),
            (20, 400),
        }
        self.assertEqual(uniquelines(facets), expected_pairs)

    def test_triangular_coord(self):
        """Barycentric (0.5, 0.5) maps to the known 2-D triangle point."""
        mapped = triangular_coord([0.5, 0.5])
        self.assertTrue(np.allclose(mapped, [0.75, 0.4330127]))

    def test_tet_coord(self):
        """Barycentric (0.5, 0.5, 0.5) maps to the known 3-D tetrahedron point."""
        mapped = tet_coord([0.5, 0.5, 0.5])
        self.assertTrue(np.allclose(mapped, [1.0, 0.57735027, 0.40824829]))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| |
from unittest.mock import ANY
from uuid import uuid4
import graphene
import pytest
from graphene.utils.str_converters import to_camel_case
from saleor.product.error_codes import ProductErrorCode
from saleor.product.models import ProductVariant
from saleor.product.utils.attributes import associate_attribute_values_to_instance
from tests.api.utils import get_graphql_content
def test_fetch_variant(staff_api_client, product, permission_manage_products):
    """A staff user with manage-products permission can query a variant by ID."""
    query = """
    query ProductVariantDetails($id: ID!) {
        productVariant(id: $id) {
            id
            attributes {
                attribute {
                    id
                    name
                    slug
                    values {
                        id
                        name
                        slug
                    }
                }
                values {
                    id
                    name
                    slug
                }
            }
            costPrice {
                currency
                amount
            }
            images {
                id
            }
            name
            priceOverride {
                currency
                amount
            }
            product {
                id
            }
        }
    }
    """
    variant = product.variants.first()
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    variables = {"id": variant_id}
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(query, variables)
    content = get_graphql_content(response)
    data = content["data"]["productVariant"]
    # Only the name is asserted; the query above mainly checks the schema resolves.
    assert data["name"] == variant.name
def test_create_variant(
    staff_api_client, product, product_type, permission_manage_products
):
    """Happy path: productVariantCreate persists every submitted field."""
    query = """
        mutation createVariant (
            $productId: ID!,
            $sku: String!,
            $priceOverride: Decimal,
            $costPrice: Decimal,
            $quantity: Int!,
            $attributes: [AttributeValueInput]!,
            $weight: WeightScalar,
            $trackInventory: Boolean!) {
                productVariantCreate(
                    input: {
                        product: $productId,
                        sku: $sku,
                        priceOverride: $priceOverride,
                        costPrice: $costPrice,
                        quantity: $quantity,
                        attributes: $attributes,
                        trackInventory: $trackInventory,
                        weight: $weight
                    }) {
                    productErrors {
                      field
                      message
                    }
                    productVariant {
                        name
                        sku
                        attributes {
                            attribute {
                                slug
                            }
                            values {
                                slug
                            }
                        }
                        quantity
                        priceOverride {
                            currency
                            amount
                            localized
                        }
                        costPrice {
                            currency
                            amount
                            localized
                        }
                        weight {
                            value
                            unit
                        }
                    }
                }
            }

    """
    product_id = graphene.Node.to_global_id("Product", product.pk)
    sku = "1"
    price_override = 1.32
    cost_price = 3.22
    quantity = 10
    weight = 10.22
    variant_slug = product_type.variant_attributes.first().slug
    variant_id = graphene.Node.to_global_id(
        "Attribute", product_type.variant_attributes.first().pk
    )
    variant_value = "test-value"
    variables = {
        "productId": product_id,
        "sku": sku,
        "quantity": quantity,
        "costPrice": cost_price,
        "priceOverride": price_override,
        "weight": weight,
        "attributes": [{"id": variant_id, "values": [variant_value]}],
        "trackInventory": True,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)["data"]["productVariantCreate"]
    assert not content["productErrors"]
    data = content["productVariant"]
    # The variant name is derived from the single attribute value supplied.
    assert data["name"] == variant_value
    assert data["quantity"] == quantity
    assert data["costPrice"]["amount"] == cost_price
    assert data["priceOverride"]["amount"] == price_override
    assert data["sku"] == sku
    assert data["attributes"][0]["attribute"]["slug"] == variant_slug
    assert data["attributes"][0]["values"][0]["slug"] == variant_value
    assert data["weight"]["unit"] == "kg"
    assert data["weight"]["value"] == weight
def test_create_product_variant_not_all_attributes(
    staff_api_client, product, product_type, color_attribute, permission_manage_products
):
    """Creating a variant without supplying every variant attribute must fail
    with a REQUIRED error and no variant should be created."""
    query = """
            mutation createVariant (
                $productId: ID!,
                $sku: String!,
                $attributes: [AttributeValueInput]!) {
                    productVariantCreate(
                        input: {
                            product: $productId,
                            sku: $sku,
                            attributes: $attributes
                        }) {
                        productErrors {
                            field
                            code
                            message
                        }
                    }
                }

    """
    product_id = graphene.Node.to_global_id("Product", product.pk)
    sku = "1"
    variant_id = graphene.Node.to_global_id(
        "Attribute", product_type.variant_attributes.first().pk
    )
    variant_value = "test-value"
    # Add a second required variant attribute that the mutation input omits.
    product_type.variant_attributes.add(color_attribute)
    variables = {
        "productId": product_id,
        "sku": sku,
        "attributes": [{"id": variant_id, "values": [variant_value]}],
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    assert content["data"]["productVariantCreate"]["productErrors"]
    assert content["data"]["productVariantCreate"]["productErrors"][0] == {
        "field": "attributes",
        "code": ProductErrorCode.REQUIRED.name,
        "message": ANY,
    }
    assert not product.variants.filter(sku=sku).exists()
def test_create_product_variant_duplicated_attributes(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Creating a variant whose attribute combination already exists must fail
    with a UNIQUE error and no variant should be created."""
    query = """
        mutation createVariant (
            $productId: ID!,
            $sku: String!,
            $attributes: [AttributeValueInput]!
        ) {
            productVariantCreate(
                input: {
                    product: $productId,
                    sku: $sku,
                    attributes: $attributes
                }) {
                productErrors {
                    field
                    code
                    message
                }
            }
        }
    """
    product = product_with_variant_with_two_attributes
    product_id = graphene.Node.to_global_id("Product", product.pk)
    color_attribute_id = graphene.Node.to_global_id("Attribute", color_attribute.id)
    size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.id)
    sku = str(uuid4())[:12]
    # red/small duplicates the attribute values of the fixture's existing variant.
    variables = {
        "productId": product_id,
        "sku": sku,
        "attributes": [
            {"id": color_attribute_id, "values": ["red"]},
            {"id": size_attribute_id, "values": ["small"]},
        ],
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    content = get_graphql_content(response)
    assert content["data"]["productVariantCreate"]["productErrors"]
    assert content["data"]["productVariantCreate"]["productErrors"][0] == {
        "field": "attributes",
        "code": ProductErrorCode.UNIQUE.name,
        "message": ANY,
    }
    assert not product.variants.filter(sku=sku).exists()
def test_create_product_variant_update_with_new_attributes(
    staff_api_client, permission_manage_products, product, size_attribute
):
    """Updating a variant with a new attribute assignment replaces its
    attribute set with the submitted one (here: a single size value)."""
    query = """
        mutation VariantUpdate(
          $id: ID!
          $attributes: [AttributeValueInput]
          $costPrice: Decimal
          $priceOverride: Decimal
          $sku: String
          $quantity: Int
          $trackInventory: Boolean!
        ) {
          productVariantUpdate(
            id: $id
            input: {
              attributes: $attributes
              costPrice: $costPrice
              priceOverride: $priceOverride
              sku: $sku
              quantity: $quantity
              trackInventory: $trackInventory
            }
          ) {
            errors {
              field
              message
            }
            productVariant {
              id
              attributes {
                attribute {
                  id
                  name
                  slug
                  values {
                    id
                    name
                    slug
                    __typename
                  }
                  __typename
                }
                value {
                  id
                  name
                  slug
                  __typename
                }
                __typename
              }
            }
          }
        }
    """
    size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    variant_id = graphene.Node.to_global_id(
        "ProductVariant", product.variants.first().pk
    )
    variables = {
        "attributes": [{"id": size_attribute_id, "values": ["XXXL"]}],
        "costPrice": 10,
        "id": variant_id,
        "priceOverride": 0,
        "quantity": 4,
        "sku": "21599567",
        "trackInventory": True,
    }
    data = get_graphql_content(
        staff_api_client.post_graphql(
            query, variables, permissions=[permission_manage_products]
        )
    )["data"]["productVariantUpdate"]
    assert not data["errors"]
    assert data["productVariant"]["id"] == variant_id
    attributes = data["productVariant"]["attributes"]
    assert len(attributes) == 1
    assert attributes[0]["attribute"]["id"] == size_attribute_id
    # The new value "XXXL" is created on the fly and slugified.
    assert attributes[0]["value"]["name"] == "XXXL"
    assert attributes[0]["value"]["slug"] == "xxxl"
def test_update_product_variant(staff_api_client, product, permission_manage_products):
    """Happy path: productVariantUpdate changes sku, quantity and cost price."""
    query = """
        mutation updateVariant (
            $id: ID!,
            $sku: String!,
            $costPrice: Decimal,
            $quantity: Int!,
            $trackInventory: Boolean!) {
                productVariantUpdate(
                    id: $id,
                    input: {
                        sku: $sku,
                        costPrice: $costPrice,
                        quantity: $quantity,
                        trackInventory: $trackInventory
                    }) {
                    productVariant {
                        name
                        sku
                        quantity
                        costPrice {
                            currency
                            amount
                            localized
                        }
                    }
                }
            }

    """
    variant = product.variants.first()
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    sku = "test sku"
    cost_price = 3.3
    quantity = 123
    variables = {
        "id": variant_id,
        "sku": sku,
        "quantity": quantity,
        "costPrice": cost_price,
        "trackInventory": True,
    }
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    variant.refresh_from_db()
    content = get_graphql_content(response)
    data = content["data"]["productVariantUpdate"]["productVariant"]
    assert data["name"] == variant.name
    assert data["quantity"] == quantity
    assert data["costPrice"]["amount"] == cost_price
    assert data["sku"] == sku
@pytest.mark.parametrize("field", ("cost_price", "price_override"))
def test_update_product_variant_unset_amounts(
    staff_api_client, product, permission_manage_products, field
):
    """Ensure setting nullable amounts to null is properly handled
    (setting the amount to none) and doesn't override the currency.
    """
    query = """
        mutation updateVariant (
            $id: ID!,
            $sku: String!,
            $costPrice: Decimal,
            $priceOverride: Decimal) {
                productVariantUpdate(
                    id: $id,
                    input: {
                        sku: $sku,
                        costPrice: $costPrice,
                        priceOverride: $priceOverride
                    }) {
                    productVariant {
                        name
                        sku
                        quantity
                        costPrice {
                            currency
                            amount
                            localized
                        }
                        priceOverride {
                            currency
                            amount
                            localized
                        }
                    }
                }
            }

    """
    variant = product.variants.first()
    variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
    sku = variant.sku
    # The GraphQL input uses camelCase; translate the parametrized field name.
    camel_case_field_name = to_camel_case(field)
    variables = {"id": variant_id, "sku": sku, camel_case_field_name: None}
    response = staff_api_client.post_graphql(
        query, variables, permissions=[permission_manage_products]
    )
    variant.refresh_from_db()
    # Nulling the amount must not clear the variant's currency.
    assert variant.currency is not None
    assert getattr(variant, field) is None
    content = get_graphql_content(response)
    data = content["data"]["productVariantUpdate"]["productVariant"]
    assert data[camel_case_field_name] is None
QUERY_UPDATE_VARIANT_ATTRIBUTES = """
mutation updateVariant (
$id: ID!,
$sku: String,
$attributes: [AttributeValueInput]!) {
productVariantUpdate(
id: $id,
input: {
sku: $sku,
attributes: $attributes
}) {
errors {
field
message
}
productErrors {
field
code
}
}
}
"""
def test_update_product_variant_not_all_attributes(
    staff_api_client, product, product_type, color_attribute, permission_manage_products
):
    """Updating a variant without providing every variant attribute must fail.

    The color attribute is added to the product type but omitted from the
    mutation input, so the API reports a single "attributes" error and the
    variant keeps its old SKU.
    """
    variant = product.variants.first()
    new_sku = "test sku"
    # Resolve the already-assigned attribute BEFORE adding the extra one, so the
    # mutation input deliberately misses the newly added color attribute.
    first_attr_id = graphene.Node.to_global_id(
        "Attribute", product_type.variant_attributes.first().id
    )
    product_type.variant_attributes.add(color_attribute)
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk),
        "sku": new_sku,
        "attributes": [{"id": first_attr_id, "values": ["test-value"]}],
    }
    response = staff_api_client.post_graphql(
        QUERY_UPDATE_VARIANT_ATTRIBUTES,
        variables,
        permissions=[permission_manage_products],
    )
    variant.refresh_from_db()
    errors = get_graphql_content(response)["data"]["productVariantUpdate"]["errors"]
    assert len(errors) == 1
    assert errors[0] == {
        "field": "attributes",
        "message": "All attributes must take a value",
    }
    assert not product.variants.filter(sku=new_sku).exists()
def test_update_product_variant_with_current_attribut(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Re-submitting the attribute values a variant already has succeeds.

    NOTE(review): "attribut" in the name looks like a typo for "attribute";
    renaming would change the collected test id, so it is kept as-is.
    """
    product = product_with_variant_with_two_attributes
    variant = product.variants.first()
    new_sku = str(uuid4())[:12]
    assert variant.sku != new_sku
    # Precondition: the variant currently holds red / small.
    assert variant.attributes.first().values.first().slug == "red"
    assert variant.attributes.last().values.first().slug == "small"
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk),
        "sku": new_sku,
        "attributes": [
            {
                "id": graphene.Node.to_global_id("Attribute", color_attribute.pk),
                "values": ["red"],
            },
            {
                "id": graphene.Node.to_global_id("Attribute", size_attribute.pk),
                "values": ["small"],
            },
        ],
    }
    response = staff_api_client.post_graphql(
        QUERY_UPDATE_VARIANT_ATTRIBUTES,
        variables,
        permissions=[permission_manage_products],
    )
    payload = get_graphql_content(response)["data"]["productVariantUpdate"]
    assert not payload["errors"]
    variant.refresh_from_db()
    # The SKU changed while the attribute values remained the same.
    assert variant.sku == new_sku
    assert variant.attributes.first().values.first().slug == "red"
    assert variant.attributes.last().values.first().slug == "small"
def test_update_product_variant_with_new_attribute(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Changing one attribute value (small -> big) on an existing variant succeeds."""
    product = product_with_variant_with_two_attributes
    variant = product.variants.first()
    new_sku = str(uuid4())[:12]
    assert variant.sku != new_sku
    # Precondition: the variant currently holds red / small.
    assert variant.attributes.first().values.first().slug == "red"
    assert variant.attributes.last().values.first().slug == "small"
    variables = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk),
        "sku": new_sku,
        "attributes": [
            {
                "id": graphene.Node.to_global_id("Attribute", color_attribute.pk),
                "values": ["red"],
            },
            {
                "id": graphene.Node.to_global_id("Attribute", size_attribute.pk),
                "values": ["big"],
            },
        ],
    }
    response = staff_api_client.post_graphql(
        QUERY_UPDATE_VARIANT_ATTRIBUTES,
        variables,
        permissions=[permission_manage_products],
    )
    payload = get_graphql_content(response)["data"]["productVariantUpdate"]
    assert not payload["errors"]
    variant.refresh_from_db()
    # Both the SKU and the size value were updated.
    assert variant.sku == new_sku
    assert variant.attributes.first().values.first().slug == "red"
    assert variant.attributes.last().values.first().slug == "big"
def test_update_product_variant_with_duplicated_attribute(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Updating a variant onto another variant's attribute combo is rejected."""
    product = product_with_variant_with_two_attributes
    variant = product.variants.first()

    # Clone the variant (pk=None forces an INSERT on save) and give the
    # clone the "blue"/"big" combination.
    duplicate = product.variants.first()
    duplicate.pk = None
    duplicate.sku = str(uuid4())[:12]
    duplicate.save()
    associate_attribute_values_to_instance(
        duplicate, color_attribute, color_attribute.values.last()
    )
    associate_attribute_values_to_instance(
        duplicate, size_attribute, size_attribute.values.last()
    )

    assert variant.attributes.first().values.first().slug == "red"
    assert variant.attributes.last().values.first().slug == "small"
    assert duplicate.attributes.first().values.first().slug == "blue"
    assert duplicate.attributes.last().values.first().slug == "big"

    # Try to move the original variant onto the clone's combination.
    payload = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk),
        "attributes": [
            {
                "id": graphene.Node.to_global_id("Attribute", color_attribute.pk),
                "values": ["blue"],
            },
            {
                "id": graphene.Node.to_global_id("Attribute", size_attribute.pk),
                "values": ["big"],
            },
        ],
    }
    response = staff_api_client.post_graphql(
        QUERY_UPDATE_VARIANT_ATTRIBUTES,
        payload,
        permissions=[permission_manage_products],
    )

    data = get_graphql_content(response)["data"]["productVariantUpdate"]
    assert data["productErrors"][0] == {
        "field": "attributes",
        "code": ProductErrorCode.UNIQUE.name,
    }
@pytest.mark.parametrize(
    "values, message",
    (
        ([], "size expects a value but none were given"),
        (["one", "two"], "A variant attribute cannot take more than one value"),
        ([" "], "Attribute values cannot be blank"),
    ),
)
def test_update_product_variant_requires_values(
    staff_api_client, variant, product_type, permission_manage_products, values, message
):
    """Ensures updating a variant with invalid values raise an error.

    - No values
    - Blank value
    - More than one value
    """
    new_sku = "updated"
    attribute = product_type.variant_attributes.first()
    payload = {
        "id": graphene.Node.to_global_id("ProductVariant", variant.pk),
        "attributes": [
            {
                "id": graphene.Node.to_global_id("Attribute", attribute.id),
                "values": values,
            }
        ],
        "sku": new_sku,
    }
    response = staff_api_client.post_graphql(
        QUERY_UPDATE_VARIANT_ATTRIBUTES,
        payload,
        permissions=[permission_manage_products],
    )
    variant.refresh_from_db()
    errors = get_graphql_content(response)["data"]["productVariantUpdate"]["errors"]
    assert len(errors) == 1, f"expected: {message}"
    assert errors[0] == {"field": "attributes", "message": message}
    # The invalid update must not have been persisted.
    assert not variant.product.variants.filter(sku=new_sku).exists()
def test_delete_variant(staff_api_client, product, permission_manage_products):
    """Deleting a variant returns its data and removes the database row."""
    query = """
        mutation variantDelete($id: ID!) {
            productVariantDelete(id: $id) {
                productVariant {
                    sku
                    id
                }
            }
        }
    """
    variant = product.variants.first()
    payload = {"id": graphene.Node.to_global_id("ProductVariant", variant.pk)}
    response = staff_api_client.post_graphql(
        query, payload, permissions=[permission_manage_products]
    )
    data = get_graphql_content(response)["data"]["productVariantDelete"]
    assert data["productVariant"]["sku"] == variant.sku
    # The row must be gone from the database afterwards.
    with pytest.raises(variant._meta.model.DoesNotExist):
        variant.refresh_from_db()
def _fetch_all_variants(client, permissions=None):
    """Run the productVariants query as *client* and return its payload."""
    query = """
        query fetchAllVariants {
            productVariants(first: 10) {
                totalCount
                edges {
                    node {
                        id
                    }
                }
            }
        }
    """
    response = client.post_graphql(
        query, {}, permissions=permissions, check_no_permissions=False
    )
    return get_graphql_content(response)["data"]["productVariants"]
def test_fetch_all_variants_staff_user(
    staff_api_client, unavailable_product_with_variant, permission_manage_products
):
    """Staff with manage-products can list variants of unpublished products."""
    payload = _fetch_all_variants(
        staff_api_client, permissions=[permission_manage_products]
    )
    expected_id = graphene.Node.to_global_id(
        "ProductVariant", unavailable_product_with_variant.variants.first().pk
    )
    assert payload["totalCount"] == 1
    assert payload["edges"][0]["node"]["id"] == expected_id
def test_fetch_all_variants_customer(user_api_client, unavailable_product_with_variant):
    """Logged-in customers must not see variants of unpublished products."""
    assert _fetch_all_variants(user_api_client)["totalCount"] == 0
def test_fetch_all_variants_anonymous_user(
    api_client, unavailable_product_with_variant
):
    """Anonymous visitors must not see variants of unpublished products."""
    assert _fetch_all_variants(api_client)["totalCount"] == 0
def _fetch_variant(client, variant, permissions=None):
    """Query a single variant by global ID as *client*; return its node."""
    query = """
        query ProductVariantDetails($variantId: ID!) {
            productVariant(id: $variantId) {
                id
                product {
                    id
                }
            }
        }
    """
    payload = {"variantId": graphene.Node.to_global_id("ProductVariant", variant.id)}
    response = client.post_graphql(
        query, payload, permissions=permissions, check_no_permissions=False
    )
    return get_graphql_content(response)["data"]["productVariant"]
def test_fetch_unpublished_variant_staff_user(
    staff_api_client, unavailable_product_with_variant, permission_manage_products
):
    """Staff users can read a variant of an unpublished product."""
    variant = unavailable_product_with_variant.variants.first()
    node = _fetch_variant(
        staff_api_client, variant, permissions=[permission_manage_products]
    )
    assert node["id"] == graphene.Node.to_global_id("ProductVariant", variant.pk)
    assert node["product"]["id"] == graphene.Node.to_global_id(
        "Product", unavailable_product_with_variant.pk
    )
def test_fetch_unpublished_variant_customer(
    user_api_client, unavailable_product_with_variant
):
    """Customers get no node back for a variant of an unpublished product."""
    variant = unavailable_product_with_variant.variants.first()
    assert _fetch_variant(user_api_client, variant) is None
def test_fetch_unpublished_variant_anonymous_user(
    api_client, unavailable_product_with_variant
):
    """Anonymous users get no node back for an unpublished product's variant."""
    variant = unavailable_product_with_variant.variants.first()
    assert _fetch_variant(api_client, variant) is None
PRODUCT_VARIANT_BULK_CREATE_MUTATION = """
mutation ProductVariantBulkCreate(
$variants: [ProductVariantBulkCreateInput]!, $productId: ID!
) {
productVariantBulkCreate(variants: $variants, product: $productId) {
bulkProductErrors {
field
message
code
index
}
productVariants{
id
sku
}
count
}
}
"""
def test_product_variant_bulk_create_by_attribute_id(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """Bulk create accepts attributes referenced by global attribute ID."""
    variants_before = ProductVariant.objects.count()
    values_before = size_attribute.values.count()
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    attribute_value = size_attribute.values.last()
    sku = str(uuid4())[:12]
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": sku,
                "quantity": 1000,
                "costPrice": None,
                "priceOverride": None,
                "weight": 2.5,
                "trackInventory": True,
                "attributes": [
                    {"id": attribute_id, "values": [attribute_value.name]}
                ],
            }
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert not data["bulkProductErrors"]
    assert data["count"] == 1
    # Exactly one variant was created and no new attribute value appeared.
    assert ProductVariant.objects.count() == variants_before + 1
    assert size_attribute.values.count() == values_before
    created = ProductVariant.objects.get(sku=sku)
    assert not created.cost_price
    assert not created.price_override
def test_product_variant_bulk_create_empty_attribute(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """A variant row with an empty attribute list is still created."""
    variants_before = ProductVariant.objects.count()
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [{"sku": str(uuid4())[:12], "attributes": []}],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert not data["bulkProductErrors"]
    assert data["count"] == 1
    assert ProductVariant.objects.count() == variants_before + 1
def test_product_variant_bulk_create_with_new_attribute_value(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """Unknown attribute values given by name are created on the fly."""
    variants_before = ProductVariant.objects.count()
    values_before = size_attribute.values.count()
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    existing_value = size_attribute.values.last()
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": str(uuid4())[:12],
                "attributes": [{"id": attribute_id, "values": [existing_value.name]}],
            },
            {
                "sku": str(uuid4())[:12],
                # "Test-attribute" does not exist yet -> must be created.
                "attributes": [{"id": attribute_id, "values": ["Test-attribute"]}],
            },
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert not data["bulkProductErrors"]
    assert data["count"] == 2
    assert ProductVariant.objects.count() == variants_before + 2
    assert size_attribute.values.count() == values_before + 1
def test_product_variant_bulk_create_negative_quantity(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """A negative quantity fails the whole batch with an indexed error."""
    variants_before = ProductVariant.objects.count()
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": str(uuid4())[:12],
                "quantity": -1000,
                "attributes": [{"id": attribute_id, "values": ["Test-value"]}],
            },
            {
                "sku": str(uuid4())[:12],
                "quantity": 100,
                "attributes": [{"id": attribute_id, "values": ["Test-value2"]}],
            },
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert len(data["bulkProductErrors"]) == 1
    error = data["bulkProductErrors"][0]
    assert error["field"] == "quantity"
    assert error["code"] == ProductErrorCode.INVALID.name
    assert error["index"] == 0
    # Nothing was created - the batch is rejected as a whole.
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_duplicated_sku(
    staff_api_client,
    product,
    product_with_default_variant,
    size_attribute,
    permission_manage_products,
):
    """SKUs colliding with existing variants yield one UNIQUE error per row."""
    variants_before = ProductVariant.objects.count()
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    taken_sku = product.variants.first().sku
    other_taken_sku = product_with_default_variant.variants.first().sku
    assert taken_sku != other_taken_sku
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": taken_sku,
                "attributes": [{"id": attribute_id, "values": ["Test-value"]}],
            },
            {
                "sku": other_taken_sku,
                "attributes": [{"id": attribute_id, "values": ["Test-valuee"]}],
            },
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    errors = data["bulkProductErrors"]
    assert len(errors) == 2
    for index, error in enumerate(errors):
        assert error["field"] == "sku"
        assert error["code"] == ProductErrorCode.UNIQUE.name
        assert error["index"] == index
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_duplicated_sku_in_input(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """The same SKU twice in one batch errors on the second occurrence."""
    variants_before = ProductVariant.objects.count()
    attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    repeated_sku = str(uuid4())[:12]
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": repeated_sku,
                "attributes": [{"id": attribute_id, "values": ["Test-value"]}],
            },
            {
                "sku": repeated_sku,
                "attributes": [{"id": attribute_id, "values": ["Test-value2"]}],
            },
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert len(data["bulkProductErrors"]) == 1
    error = data["bulkProductErrors"][0]
    assert error["field"] == "sku"
    assert error["code"] == ProductErrorCode.UNIQUE.name
    # The error points at the second (duplicate) row.
    assert error["index"] == 1
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_many_errors(
    staff_api_client, product, size_attribute, permission_manage_products
):
    """Errors from several bad rows are reported together, each with its index."""
    variants_before = ProductVariant.objects.count()
    size_attribute_id = graphene.Node.to_global_id("Attribute", size_attribute.pk)
    # Global ID pointing at a primary key that cannot exist.
    invalid_attribute_id = graphene.Node.to_global_id("Attribute", 0)
    existing_sku = product.variants.first().sku
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {  # index 0: negative quantity
                "sku": str(uuid4())[:12],
                "quantity": -1000,
                "attributes": [{"id": size_attribute_id, "values": ["Test-value1"]}],
            },
            {  # index 1: valid row
                "sku": str(uuid4())[:12],
                "quantity": 100,
                "attributes": [{"id": size_attribute_id, "values": ["Test-value4"]}],
            },
            {  # index 2: SKU already taken
                "sku": existing_sku,
                "quantity": 100,
                "attributes": [{"id": size_attribute_id, "values": ["Test-value2"]}],
            },
            {  # index 3: unknown attribute
                "sku": str(uuid4())[:12],
                "quantity": 100,
                "attributes": [{"id": invalid_attribute_id, "values": ["Test-value3"]}],
            },
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    errors = data["bulkProductErrors"]
    assert len(errors) == 3
    expected_errors = [
        {
            "field": "quantity",
            "index": 0,
            "code": ProductErrorCode.INVALID.name,
            "message": ANY,
        },
        {
            "field": "sku",
            "index": 2,
            "code": ProductErrorCode.UNIQUE.name,
            "message": ANY,
        },
        {
            "field": "attributes",
            "index": 3,
            "code": ProductErrorCode.NOT_FOUND.name,
            "message": ANY,
        },
    ]
    for expected_error in expected_errors:
        assert expected_error in errors
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_two_variants_duplicated_attribute_value(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Creating a variant with the already-used red/small combo is rejected."""
    product = product_with_variant_with_two_attributes
    variants_before = ProductVariant.objects.count()
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": str(uuid4())[:12],
                "attributes": [
                    {
                        "id": graphene.Node.to_global_id(
                            "Attribute", color_attribute.id
                        ),
                        "values": ["red"],
                    },
                    {
                        "id": graphene.Node.to_global_id(
                            "Attribute", size_attribute.id
                        ),
                        "values": ["small"],
                    },
                ],
            }
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert len(data["bulkProductErrors"]) == 1
    error = data["bulkProductErrors"][0]
    assert error["field"] == "attributes"
    assert error["code"] == ProductErrorCode.UNIQUE.name
    assert error["index"] == 0
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_two_variants_duplicated_attribute_value_in_input(
    staff_api_client,
    product_with_variant_with_two_attributes,
    permission_manage_products,
    color_attribute,
    size_attribute,
):
    """Two input rows sharing one attribute combo fail on the second row."""
    product = product_with_variant_with_two_attributes
    variants_before = ProductVariant.objects.count()
    shared_attributes = [
        {
            "id": graphene.Node.to_global_id("Attribute", color_attribute.id),
            "values": [color_attribute.values.last().slug],
        },
        {
            "id": graphene.Node.to_global_id("Attribute", size_attribute.id),
            "values": [size_attribute.values.last().slug],
        },
    ]
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {"sku": str(uuid4())[:12], "attributes": shared_attributes},
            {"sku": str(uuid4())[:12], "attributes": shared_attributes},
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert len(data["bulkProductErrors"]) == 1
    error = data["bulkProductErrors"][0]
    assert error["field"] == "attributes"
    assert error["code"] == ProductErrorCode.UNIQUE.name
    # The second row duplicates the first one's combination.
    assert error["index"] == 1
    assert ProductVariant.objects.count() == variants_before
def test_product_variant_bulk_create_two_variants_duplicated_one_attribute_value(
    staff_api_client,
    product_with_variant_with_two_attributes,
    color_attribute,
    size_attribute,
    permission_manage_products,
):
    """Reusing just one attribute value ("red"/"big") is a new combo - OK."""
    product = product_with_variant_with_two_attributes
    variants_before = ProductVariant.objects.count()
    payload = {
        "productId": graphene.Node.to_global_id("Product", product.pk),
        "variants": [
            {
                "sku": str(uuid4())[:12],
                "attributes": [
                    {
                        "id": graphene.Node.to_global_id(
                            "Attribute", color_attribute.id
                        ),
                        "values": ["red"],
                    },
                    {
                        "id": graphene.Node.to_global_id(
                            "Attribute", size_attribute.id
                        ),
                        "values": ["big"],
                    },
                ],
            }
        ],
    }
    staff_api_client.user.user_permissions.add(permission_manage_products)
    response = staff_api_client.post_graphql(
        PRODUCT_VARIANT_BULK_CREATE_MUTATION, payload
    )
    data = get_graphql_content(response)["data"]["productVariantBulkCreate"]
    assert not data["bulkProductErrors"]
    assert data["count"] == 1
    assert ProductVariant.objects.count() == variants_before + 1
| |
"""Support for HomematicIP Cloud cover devices."""
from __future__ import annotations
from homematicip.aio.device import (
AsyncBlindModule,
AsyncDinRailBlind4,
AsyncFullFlushBlind,
AsyncFullFlushShutter,
AsyncGarageDoorModuleTormatic,
AsyncHoermannDrivesModule,
)
from homematicip.aio.group import AsyncExtendedLinkedShutterGroup
from homematicip.base.enums import DoorCommand, DoorState
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
# HomematicIP level scale: 0 means fully open, 1 means fully closed —
# the inverse of Home Assistant's 0-100 "percent open" scale (the
# entities below convert with ``int((1 - level) * 100)``).
HMIP_COVER_OPEN = 0
HMIP_COVER_CLOSED = 1
HMIP_SLATS_OPEN = 0
HMIP_SLATS_CLOSED = 1
async def async_setup_entry(
    hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the HomematicIP cover from a config entry."""
    hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
    entities = []

    for device in hap.home.devices:
        if isinstance(device, AsyncBlindModule):
            entities.append(HomematicipBlindModule(hap, device))
        elif isinstance(device, AsyncDinRailBlind4):
            # One cover entity per output channel (channels 1..4).
            entities.extend(
                HomematicipMultiCoverSlats(hap, device, channel=channel)
                for channel in range(1, 5)
            )
        elif isinstance(device, AsyncFullFlushBlind):
            entities.append(HomematicipCoverSlats(hap, device))
        elif isinstance(device, AsyncFullFlushShutter):
            entities.append(HomematicipCoverShutter(hap, device))
        elif isinstance(
            device, (AsyncHoermannDrivesModule, AsyncGarageDoorModuleTormatic)
        ):
            entities.append(HomematicipGarageDoorModule(hap, device))

    entities.extend(
        HomematicipCoverShutterGroup(hap, group)
        for group in hap.home.groups
        if isinstance(group, AsyncExtendedLinkedShutterGroup)
    )

    if entities:
        async_add_entities(entities)
class HomematicipBlindModule(HomematicipGenericEntity, CoverEntity):
    """Representation of the HomematicIP blind module.

    HomematicIP reports shading levels in the 0..1 range (0 = open,
    1 = closed) while Home Assistant uses 0..100 percent (100 = open),
    so positions are converted with ``1 - level`` scaling.
    """

    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover (0..100), or None if unknown."""
        # Fix: the annotation was `-> int`, but this property explicitly
        # returns None when the device has not reported a level yet.
        if self._device.primaryShadingLevel is not None:
            return int((1 - self._device.primaryShadingLevel) * 100)
        return None

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return current tilt position of cover (0..100), or None if unknown."""
        if self._device.secondaryShadingLevel is not None:
            return int((1 - self._device.secondaryShadingLevel) * 100)
        return None

    async def async_set_cover_position(self, **kwargs) -> None:
        """Move the cover to a specific position."""
        position = kwargs[ATTR_POSITION]
        # HmIP cover is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_primary_shading_level(primaryShadingLevel=level)

    async def async_set_cover_tilt_position(self, **kwargs) -> None:
        """Move the cover to a specific tilt position."""
        position = kwargs[ATTR_TILT_POSITION]
        # HmIP slats is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_secondary_shading_level(
            primaryShadingLevel=self._device.primaryShadingLevel,
            secondaryShadingLevel=level,
        )

    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed, or None if the level is unknown."""
        if self._device.primaryShadingLevel is not None:
            return self._device.primaryShadingLevel == HMIP_COVER_CLOSED
        return None

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.set_primary_shading_level(
            primaryShadingLevel=HMIP_COVER_OPEN
        )

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.set_primary_shading_level(
            primaryShadingLevel=HMIP_COVER_CLOSED
        )

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the device if in motion."""
        await self._device.stop()

    async def async_open_cover_tilt(self, **kwargs) -> None:
        """Open the slats."""
        await self._device.set_secondary_shading_level(
            primaryShadingLevel=self._device.primaryShadingLevel,
            secondaryShadingLevel=HMIP_SLATS_OPEN,
        )

    async def async_close_cover_tilt(self, **kwargs) -> None:
        """Close the slats."""
        await self._device.set_secondary_shading_level(
            primaryShadingLevel=self._device.primaryShadingLevel,
            secondaryShadingLevel=HMIP_SLATS_CLOSED,
        )

    async def async_stop_cover_tilt(self, **kwargs) -> None:
        """Stop the device if in motion."""
        await self._device.stop()
class HomematicipMultiCoverShutter(HomematicipGenericEntity, CoverEntity):
    """Representation of the HomematicIP cover shutter.

    Reads and writes the shutter level of one functional channel of a
    (possibly multi-channel) device.
    """

    def __init__(
        self,
        hap: HomematicipHAP,
        device,
        channel=1,
        is_multi_channel=True,
    ) -> None:
        """Initialize the multi cover entity for the given channel."""
        super().__init__(
            hap, device, channel=channel, is_multi_channel=is_multi_channel
        )

    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover (0..100), or None if unknown."""
        # Fix: the annotation was `-> int`, but this property explicitly
        # returns None when the channel has not reported a level yet.
        if self._device.functionalChannels[self._channel].shutterLevel is not None:
            return int(
                (1 - self._device.functionalChannels[self._channel].shutterLevel) * 100
            )
        return None

    async def async_set_cover_position(self, **kwargs) -> None:
        """Move the cover to a specific position."""
        position = kwargs[ATTR_POSITION]
        # HmIP cover is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_shutter_level(level, self._channel)

    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed, or None if the level is unknown."""
        if self._device.functionalChannels[self._channel].shutterLevel is not None:
            return (
                self._device.functionalChannels[self._channel].shutterLevel
                == HMIP_COVER_CLOSED
            )
        return None

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.set_shutter_level(HMIP_COVER_OPEN, self._channel)

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.set_shutter_level(HMIP_COVER_CLOSED, self._channel)

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the device if in motion."""
        await self._device.set_shutter_stop(self._channel)
class HomematicipCoverShutter(HomematicipMultiCoverShutter, CoverEntity):
    """Representation of a single-channel HomematicIP cover shutter."""

    def __init__(self, hap: HomematicipHAP, device) -> None:
        """Initialize the cover entity with ``is_multi_channel=False``.

        Uses the base class's default channel (1); only the multi-channel
        flag differs from HomematicipMultiCoverShutter.
        """
        super().__init__(hap, device, is_multi_channel=False)
class HomematicipMultiCoverSlats(HomematicipMultiCoverShutter, CoverEntity):
    """Representation of the HomematicIP multi cover slats.

    Extends the shutter entity with tilt (slats) support on the same
    functional channel.
    """

    def __init__(
        self,
        hap: HomematicipHAP,
        device,
        channel=1,
        is_multi_channel=True,
    ) -> None:
        """Initialize the multi slats entity for the given channel."""
        super().__init__(
            hap, device, channel=channel, is_multi_channel=is_multi_channel
        )

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return current tilt position of cover (0..100), or None if unknown."""
        # Fix: the annotation was `-> int`, but this property explicitly
        # returns None when the channel has not reported a slats level yet.
        if self._device.functionalChannels[self._channel].slatsLevel is not None:
            return int(
                (1 - self._device.functionalChannels[self._channel].slatsLevel) * 100
            )
        return None

    async def async_set_cover_tilt_position(self, **kwargs) -> None:
        """Move the cover to a specific tilt position."""
        position = kwargs[ATTR_TILT_POSITION]
        # HmIP slats is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_slats_level(level, self._channel)

    async def async_open_cover_tilt(self, **kwargs) -> None:
        """Open the slats."""
        await self._device.set_slats_level(HMIP_SLATS_OPEN, self._channel)

    async def async_close_cover_tilt(self, **kwargs) -> None:
        """Close the slats."""
        await self._device.set_slats_level(HMIP_SLATS_CLOSED, self._channel)

    async def async_stop_cover_tilt(self, **kwargs) -> None:
        """Stop the device if in motion."""
        await self._device.set_shutter_stop(self._channel)
class HomematicipCoverSlats(HomematicipMultiCoverSlats, CoverEntity):
    """Representation of a single-channel HomematicIP cover with slats."""

    def __init__(self, hap: HomematicipHAP, device) -> None:
        """Initialize the slats entity with ``is_multi_channel=False``.

        Uses the base class's default channel (1); only the multi-channel
        flag differs from HomematicipMultiCoverSlats.
        """
        super().__init__(hap, device, is_multi_channel=False)
class HomematicipGarageDoorModule(HomematicipGenericEntity, CoverEntity):
    """Representation of the HomematicIP Garage Door Module."""

    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover (0..100), or None if unknown."""
        # Fix: the annotation was `-> int`, but dict.get yields None for
        # DoorState.POSITION_UNKNOWN. The discrete door states are mapped
        # onto Home Assistant's percentage scale.
        door_state_to_position = {
            DoorState.CLOSED: 0,
            DoorState.OPEN: 100,
            DoorState.VENTILATION_POSITION: 10,
            DoorState.POSITION_UNKNOWN: None,
        }
        return door_state_to_position.get(self._device.doorState)

    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed."""
        return self._device.doorState == DoorState.CLOSED

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.send_door_command(DoorCommand.OPEN)

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.send_door_command(DoorCommand.CLOSE)

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the cover."""
        await self._device.send_door_command(DoorCommand.STOP)
class HomematicipCoverShutterGroup(HomematicipGenericEntity, CoverEntity):
    """Representation of the HomematicIP cover shutter group."""

    def __init__(self, hap: HomematicipHAP, device, post: str = "ShutterGroup") -> None:
        """Initialize switching group."""
        device.modelType = f"HmIP-{post}"
        super().__init__(hap, device, post, is_multi_channel=False)

    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover (0..100), or None if unknown."""
        # Fix: the annotation was `-> int`, but this property explicitly
        # returns None when the group has not reported a level yet.
        if self._device.shutterLevel is not None:
            return int((1 - self._device.shutterLevel) * 100)
        return None

    @property
    def current_cover_tilt_position(self) -> int | None:
        """Return current tilt position of cover (0..100), or None if unknown."""
        if self._device.slatsLevel is not None:
            return int((1 - self._device.slatsLevel) * 100)
        return None

    @property
    def is_closed(self) -> bool | None:
        """Return if the cover is closed, or None if the level is unknown."""
        if self._device.shutterLevel is not None:
            return self._device.shutterLevel == HMIP_COVER_CLOSED
        return None

    async def async_set_cover_position(self, **kwargs) -> None:
        """Move the cover to a specific position."""
        position = kwargs[ATTR_POSITION]
        # HmIP cover is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_shutter_level(level)

    async def async_set_cover_tilt_position(self, **kwargs) -> None:
        """Move the cover to a specific tilt position."""
        position = kwargs[ATTR_TILT_POSITION]
        # HmIP slats is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_slats_level(level)

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.set_shutter_level(HMIP_COVER_OPEN)

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.set_shutter_level(HMIP_COVER_CLOSED)

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the group if in motion."""
        await self._device.set_shutter_stop()

    async def async_open_cover_tilt(self, **kwargs) -> None:
        """Open the slats."""
        await self._device.set_slats_level(HMIP_SLATS_OPEN)

    async def async_close_cover_tilt(self, **kwargs) -> None:
        """Close the slats."""
        await self._device.set_slats_level(HMIP_SLATS_CLOSED)

    async def async_stop_cover_tilt(self, **kwargs) -> None:
        """Stop the group if in motion."""
        await self._device.set_shutter_stop()
| |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder cli module.
Contains the PyBuilder command-line entrypoint.
"""
import datetime
import optparse
import re
import sys
import traceback
from os.path import sep, normcase as nc
from pybuilder import __version__
from pybuilder import extern
from pybuilder.core import Logger
from pybuilder.errors import PyBuilderException
from pybuilder.execution import ExecutionManager
from pybuilder.python_utils import IS_WIN
from pybuilder.reactor import Reactor
from pybuilder.scaffolding import start_project, update_project
from pybuilder.terminal import (BOLD, BROWN, RED, GREEN, bold, styled_text,
fg, italic, print_text, print_text_line,
print_error, print_error_line, draw_line)
from pybuilder.utils import format_timestamp, get_dist_version_string
# Shape of a "-P" command-line property override: "<identifier>=<anything>".
PROPERTY_OVERRIDE_PATTERN = re.compile(r'^[a-zA-Z0-9_]+=.*')
# NOTE(review): module-level alias; presumably kept so the vendored
# ``extern`` bootstrap module stays referenced/imported — confirm.
_extern = extern
class CommandLineUsageException(PyBuilderException):
    """Signals an invalid command line; carries the usage text to display."""

    def __init__(self, usage, message):
        # `usage` is the help/usage text shown to the user alongside `message`.
        self.usage = usage
        super(CommandLineUsageException, self).__init__(message)
class StdOutLogger(Logger):
    """Logger implementation that prints records to standard output."""

    def _level_to_string(self, level):
        """Map a numeric log level to its fixed-width bracketed label."""
        if level == Logger.DEBUG:
            return "[DEBUG]"
        if level == Logger.INFO:
            return "[INFO] "
        if level == Logger.WARN:
            return "[WARN] "
        return "[ERROR]"

    def _do_log(self, level, message, *arguments):
        """Format the record and emit it prefixed with its level label."""
        prefix = self._level_to_string(level)
        body = self._format_message(message, *arguments)
        print_text_line("{0} {1}".format(prefix, body))
class ColoredStdOutLogger(StdOutLogger):
    """StdOutLogger variant that renders level labels with terminal styles."""

    def _level_to_string(self, level):
        """Map a numeric log level to its ANSI-styled bracketed label."""
        if level == Logger.DEBUG:
            return italic("[DEBUG]")
        if level == Logger.INFO:
            return bold("[INFO] ")
        if level == Logger.WARN:
            return styled_text("[WARN] ", BOLD, fg(BROWN))
        return styled_text("[ERROR]", BOLD, fg(RED))
def parse_options(args):
    """Parse command-line *args* into ``(options, arguments)``.

    Returns the optparse options object — with ``property_overrides``
    converted from a list of ``KEY=VALUE`` strings into a dict — plus the
    positional task arguments.  Raises CommandLineUsageException on any
    usage error instead of exiting, so callers can render usage themselves.
    """
    parser = optparse.OptionParser(usage="%prog [options] [+|^]task1 [[[+|^]task2] ...]",
                                   version="%prog " + __version__)
    def error(msg):
        raise CommandLineUsageException(
            parser.get_usage() + parser.format_option_help(), msg)
    # Replace optparse's sys.exit()-ing error handler with an exception.
    parser.error = error
    list_tasks_option = parser.add_option("-t", "--list-tasks",
                                          action="store_true",
                                          dest="list_tasks",
                                          default=False,
                                          help="List all tasks that can be run in the current build configuration")
    list_plan_tasks_option = parser.add_option("-T", "--list-plan-tasks",
                                               action="store_true",
                                               dest="list_plan_tasks",
                                               default=False,
                                               help="List tasks that will be run with current execution plan")
    start_project_option = parser.add_option("--start-project",
                                             action="store_true",
                                             dest="start_project",
                                             default=False,
                                             help="Initialize build descriptors and Python project structure")
    update_project_option = parser.add_option("--update-project",
                                              action="store_true",
                                              dest="update_project",
                                              default=False,
                                              help="Update build descriptors and Python project structure")
    # Options that shape WHAT gets built.
    project_group = optparse.OptionGroup(
        parser, "Project Options", "Customizes the project to build.")
    project_group.add_option("-D", "--project-directory",
                             dest="project_directory",
                             help="Root directory to execute in",
                             metavar="<project directory>",
                             default=".")
    project_group.add_option("-O", "--offline",
                             dest="offline",
                             help="Attempt to execute the build without network connectivity (may cause build failure)",
                             default=False,
                             action="store_true")
    project_group.add_option("-E", "--environment",
                             dest="environments",
                             help="Activate the given environment for this build. Can be used multiple times",
                             metavar="<environment>",
                             action="append",
                             default=[])
    project_group.add_option("-P",
                             action="append",
                             dest="property_overrides",
                             default=[],
                             metavar="<property>=<value>",
                             help="Set/ override a property value")
    project_group.add_option("-x", "--exclude",
                             action="append",
                             dest="exclude_optional_tasks",
                             default=[],
                             metavar="<task>",
                             help="Exclude optional task dependencies")
    project_group.add_option("-o", "--exclude-all-optional",
                             action="store_true",
                             dest="exclude_all_optional",
                             default=False,
                             help="Exclude all optional task dependencies")
    project_group.add_option("--force-exclude",
                             action="append",
                             dest="exclude_tasks",
                             default=[],
                             metavar="<task>",
                             help="Exclude any task dependencies "
                                  "(dangerous, may break the build in unexpected ways)")
    project_group.add_option("--reset-plugins",
                             action="store_true",
                             dest="reset_plugins",
                             default=False,
                             help="Reset plugins directory prior to running the build")
    project_group.add_option("--no-venvs",
                             action="store_true",
                             dest="no_venvs",
                             default=False,
                             help="Disables the use of Python Virtual Environments")
    parser.add_option_group(project_group)
    # Options that shape what gets PRINTED.
    output_group = optparse.OptionGroup(
        parser, "Output Options", "Modifies the messages printed during a build.")
    output_group.add_option("-X", "--debug",
                            action="store_true",
                            dest="debug",
                            default=False,
                            help="Print debug messages")
    output_group.add_option("-v", "--verbose",
                            action="store_true",
                            dest="verbose",
                            default=False,
                            help="Enable verbose output")
    output_group.add_option("-q", "--quiet",
                            action="store_true",
                            dest="quiet",
                            default=False,
                            help="Quiet mode; print only warnings and errors")
    output_group.add_option("-Q", "--very-quiet",
                            action="store_true",
                            dest="very_quiet",
                            default=False,
                            help="Very quiet mode; print only errors")
    output_group.add_option("-c", "--color",
                            action="store_true",
                            dest="force_color",
                            default=False,
                            help="Force colored output")
    output_group.add_option("-C", "--no-color",
                            action="store_true",
                            dest="no_color",
                            default=False,
                            help="Disable colored output")
    parser.add_option_group(output_group)
    options, arguments = parser.parse_args(args=list(args))
    # Mutually exclusive pairs are rejected up front.
    if options.list_tasks and options.list_plan_tasks:
        parser.error("%s and %s are mutually exclusive" % (list_tasks_option, list_plan_tasks_option))
    if options.start_project and options.update_project:
        parser.error("%s and %s are mutually exclusive" % (start_project_option, update_project_option))
    # Convert the raw "-P key=value" strings into a dict.
    property_overrides = {}
    for pair in options.property_overrides:
        if not PROPERTY_OVERRIDE_PATTERN.match(pair):
            parser.error("%s is not a property definition." % pair)
        key, val = pair.split("=", 1)
        property_overrides[key] = val
    options.property_overrides = property_overrides
    # Very-quiet implies quiet.
    if options.very_quiet:
        options.quiet = True
    return options, arguments
def init_reactor(logger):
    """Wire up a Reactor with its ExecutionManager, both sharing *logger*."""
    return Reactor(logger, ExecutionManager(logger))
def should_colorize(options):
    """Decide whether to color output: forced on, or stdout is a TTY and
    --no-color was not given."""
    if options.force_color:
        return True
    return sys.stdout.isatty() and not options.no_color
def init_logger(options):
    """Build the logger implied by the CLI flags (level threshold + coloring)."""
    if options.debug:
        threshold = Logger.DEBUG
    elif options.quiet:
        threshold = Logger.WARN
    else:
        threshold = Logger.INFO
    if should_colorize(options):
        if IS_WIN:
            # ANSI escape sequences need colorama's shim on Windows consoles.
            import colorama
            colorama.init()
        return ColoredStdOutLogger(threshold)
    return StdOutLogger(threshold)
def print_build_summary(options, summary):
    """Print project facts and per-task timings collected during the build."""
    print_text_line("Build Summary")
    print_text_line("%20s: %s" % ("Project", summary.project.name))
    print_text_line("%20s: %s%s" % ("Version", summary.project.version, get_dist_version_string(summary.project)))
    print_text_line("%20s: %s" % ("Base directory", summary.project.basedir))
    print_text_line("%20s: %s" % ("Environments", ", ".join(options.environments)))
    # One " name [N ms]" fragment per executed task, all on one line.
    tasks_rendered = "".join(" %s [%d ms]" % (t.task, t.execution_time)
                             for t in summary.task_summaries)
    print_text_line("%20s:%s" % ("Tasks", tasks_rendered))
def print_styled_text(text, options, *style_attributes):
    """Print *text*, applying *style_attributes* when coloring is enabled.

    A trailing newline is kept outside the styled region so the ANSI reset
    code does not span the line break.
    """
    if should_colorize(options):
        # str.endswith is safe for text == ""; the previous text[-1] check
        # raised IndexError on an empty string.
        add_trailing_nl = text.endswith('\n')
        if add_trailing_nl:
            text = text[:-1]
        text = styled_text(text, *style_attributes)
        if add_trailing_nl:
            text += '\n'
    print_text(text)
def print_styled_text_line(text, options, *style_attributes):
    """Like print_styled_text, but always terminate the output with a newline."""
    print_styled_text("%s\n" % text, options, *style_attributes)
def print_build_status(failure_message, options, successful):
    """Print the framed BUILD SUCCESSFUL / BUILD FAILED banner."""
    draw_line()
    if not successful:
        print_styled_text_line(
            "BUILD FAILED - {0}".format(failure_message), options, BOLD, fg(RED))
    else:
        print_styled_text_line("BUILD SUCCESSFUL", options, BOLD, fg(GREEN))
    draw_line()
def print_elapsed_time_summary(start, end):
    """Print when the build finished and how long it took (seconds and ms)."""
    elapsed = end - start
    # Fold days into seconds, then add sub-second precision in milliseconds.
    millis = ((elapsed.days * 24 * 60 * 60) + elapsed.seconds) * 1000 + elapsed.microseconds / 1000
    print_text_line("Build finished at %s" % format_timestamp(end))
    print_text_line("Build took %d seconds (%d ms)" % (elapsed.seconds, millis))
def print_summary(successful, summary, start, end, options, failure_message):
    """Print the final banner, the task summary (success only), and timings."""
    print_build_status(failure_message, options, successful)
    if summary and successful:
        print_build_summary(options, summary)
    print_elapsed_time_summary(start, end)
def length_of_longest_string(list_of_strings):
    """Return the length of the longest string in *list_of_strings*.

    Returns 0 for an empty input.  Uses the builtin max() over a generator
    instead of the previous manual scan loop.
    """
    if not list_of_strings:
        return 0
    return max(len(string) for string in list_of_strings)
def task_description(task):
    """Join the task's description fragments; placeholder text when empty."""
    joined = " ".join(task.description)
    return joined if joined else "<no description available>"
def print_task_list(tasks, quiet=False):
    """Print tasks with their descriptions.

    Quiet mode prints one compact "name:description" line per task; the
    normal mode right-aligns the names and lists task dependencies indented
    underneath.
    """
    if quiet:
        compact = [task.name + ":" + task_description(task) for task in tasks]
        print_text_line("\n".join(compact))
        return
    # Column width: longest task name plus padding.
    column_length = length_of_longest_string([task.name for task in tasks]) + 4
    for task in tasks:
        print_text_line("{0} - {1}".format(task.name.rjust(column_length),
                                           task_description(task)))
        if task.dependencies:
            depends_on_message = "depends on tasks: %s" % " ".join(
                str(dependency) for dependency in task.dependencies)
            print_text_line(" " * (column_length + 3) + depends_on_message)
def print_list_of_tasks(reactor, quiet=False):
    """List every task known to the reactor's project, sorted by name."""
    if not quiet:
        print_text_line('Tasks found for project "%s":' % reactor.project.name)
    print_task_list(sorted(reactor.get_tasks()), quiet)
def print_plan_list_of_tasks(options, arguments, reactor, quiet=False):
    """List only the tasks in the execution plan for the requested arguments."""
    plan = reactor.create_execution_plan(arguments, options.environments)
    if not quiet:
        print_text_line('Tasks that will be executed for project "%s":' % reactor.project.name)
    print_task_list(plan, quiet)
def get_failure_message():
    """Describe the exception currently being handled, for the FAILED banner.

    Walks to the innermost traceback frame to report the file and line where
    the failure originated, stripping any matching sys.path prefix for
    brevity.  PyBuilderExceptions are rendered without their class name;
    other exception types are prefixed with it.  Must be called from inside
    an ``except`` block (relies on sys.exc_info()).
    """
    exc_type, exc_obj, exc_tb = sys.exc_info()
    filename = None
    lineno = None
    # Walk to the innermost frame - that is where the error was raised.
    while exc_tb.tb_next:
        exc_tb = exc_tb.tb_next
    frame = exc_tb.tb_frame
    if hasattr(frame, "f_code"):
        code = frame.f_code
        filename = code.co_filename
        lineno = exc_tb.tb_lineno
        # nc() normalizes case so the prefix match works on Windows too.
        filename = nc(filename)
        for path in sys.path:
            path = nc(path)
            # Strip the sys.path entry plus its trailing separator.
            if filename.startswith(path) and len(filename) > len(path) and filename[len(path)] == sep:
                filename = filename[len(path) + 1:]
                break
    return "%s%s%s" % ("%s: " % exc_type.__name__ if not isinstance(exc_obj, PyBuilderException) else "",
                       exc_obj,
                       " (%s:%d)" % (filename, lineno) if filename else "")
def main(*args):
    """PyBuilder command-line entry point.

    Parses options, then dispatches to project scaffolding, task listing, or
    a full build.  Returns a process exit code (0 success, 1 failure).
    """
    if not args:
        args = sys.argv[1:]
    try:
        options, arguments = parse_options(args)
    except CommandLineUsageException as e:
        print_error_line("Usage error: %s\n" % e)
        print_error(e.usage)
        return 1
    start = datetime.datetime.now()
    logger = init_logger(options)
    reactor = init_reactor(logger)
    # Scaffolding modes short-circuit before any build is prepared.
    if options.start_project:
        return start_project()
    if options.update_project:
        return update_project()
    # Listing modes prepare the build (to discover tasks) but do not run it.
    if options.list_tasks or options.list_plan_tasks:
        try:
            reactor.prepare_build(property_overrides=options.property_overrides,
                                  project_directory=options.project_directory,
                                  exclude_optional_tasks=options.exclude_optional_tasks,
                                  exclude_tasks=options.exclude_tasks,
                                  exclude_all_optional=options.exclude_all_optional,
                                  offline=options.offline,
                                  no_venvs=options.no_venvs
                                  )
            if options.list_tasks:
                print_list_of_tasks(reactor, quiet=options.very_quiet)
            if options.list_plan_tasks:
                print_plan_list_of_tasks(options, arguments, reactor, quiet=options.very_quiet)
            return 0
        except PyBuilderException:
            print_build_status(get_failure_message(), options, successful=False)
            return 1
    if not options.very_quiet:
        print_styled_text_line(
            "PyBuilder version {0}".format(__version__), options, BOLD)
        print_text_line("Build started at %s" % format_timestamp(start))
        draw_line()
    successful = True
    failure_message = None
    summary = None
    try:
        try:
            reactor.prepare_build(property_overrides=options.property_overrides,
                                  project_directory=options.project_directory,
                                  exclude_optional_tasks=options.exclude_optional_tasks,
                                  exclude_tasks=options.exclude_tasks,
                                  exclude_all_optional=options.exclude_all_optional,
                                  reset_plugins=options.reset_plugins,
                                  offline=options.offline,
                                  no_venvs=options.no_venvs
                                  )
            if options.verbose or options.debug:
                logger.debug("Verbose output enabled.\n")
                reactor.project.set_property("verbose", True)
            summary = reactor.build(
                environments=options.environments, tasks=arguments)
        # Ctrl-C is converted into a regular build failure.
        except KeyboardInterrupt:
            raise PyBuilderException("Build aborted")
    except (Exception, SystemExit):
        successful = False
        failure_message = get_failure_message()
        if options.debug:
            traceback.print_exc(file=sys.stderr)
    finally:
        end = datetime.datetime.now()
        if not options.very_quiet:
            print_summary(
                successful, summary, start, end, options, failure_message)
        # NOTE(review): returning from a finally block swallows any exception
        # raised by the summary printing above — presumably intentional so the
        # CLI always exits with a plain status code; confirm before changing.
        if not successful:
            return 1
        return 0
| |
###############################################################################
# ntplib - Python NTP library.
# Copyright (C) 2009 Charles-Francois Natali <neologix@free.fr>
#
# ntplib is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place, Suite 330, Boston, MA 02111-1307 USA
###############################################################################
"""Pyton NTP library.
Implementation of client-side NTP (RFC-1305), and useful NTP-related
functions.
"""
import socket
import struct
import time
import datetime
# compute delta between system epoch and NTP epoch
SYSTEM_EPOCH = datetime.date(*time.gmtime(0)[0:3])
NTP_EPOCH = datetime.date(1900, 1, 1)
NTP_DELTA = (SYSTEM_EPOCH - NTP_EPOCH).days * 24 * 3600
class NTPException(Exception):
    """Raised for any NTP-related failure (bad packet, invalid field values)."""
    pass
class NTPPacket(object):
    """NTP packet class.

    This class abstracts the structure of a NTP packet, mapping each header
    field to an attribute and converting to/from the wire representation."""
    # packet format to pack/unpack: leap/version/mode byte, stratum, poll,
    # precision, then eleven 32-bit words (delay, dispersion, ref id and
    # four 64-bit timestamps split into int/frac halves)
    ntp_packet_format = '!B B B b 11I'
    def __init__(self, version=2, mode=3, tx_timestamp=0):
        # mode 3 = client; tx_timestamp is in NTP time-scale seconds
        self.leap = 0  # leap second indicator (2 bits)
        self.version = version  # protocol version (3 bits)
        self.mode = mode  # association mode (3 bits)
        self.stratum = 0  # stratum level of the clock
        self.poll = 0  # poll interval
        self.precision = 0  # clock precision
        self.root_delay = 0  # delay to the primary reference source
        self.root_dispersion = 0  # dispersion relative to the primary source
        self.ref_id = 0  # reference clock identifier
        self.ref_timestamp = 0  # time the clock was last set/corrected
        self.orig_timestamp = 0  # request departure time (client clock)
        self.recv_timestamp = 0  # request arrival time (server clock)
        self.tx_timestamp = tx_timestamp  # reply departure time (server clock)
    def to_data(self):
        """convert a NTPPacket to a NTP packet that can be sent over network
        raise a NTPException in case of invalid field"""
        try:
            # delay/dispersion use 16.16 fixed point; timestamps 32.32
            packed = struct.pack(NTPPacket.ntp_packet_format,
                (self.leap << 6 | self.version << 3 | self.mode),
                self.stratum,
                self.poll,
                self.precision,
                to_int(self.root_delay) << 16 | to_frac(self.root_delay, 16),
                to_int(self.root_dispersion) << 16 |
                to_frac(self.root_dispersion, 16),
                self.ref_id,
                to_int(self.ref_timestamp),
                to_frac(self.ref_timestamp),
                to_int(self.orig_timestamp),
                to_frac(self.orig_timestamp),
                to_int(self.recv_timestamp),
                to_frac(self.recv_timestamp),
                to_int(self.tx_timestamp),
                to_frac(self.tx_timestamp))
        except struct.error:
            raise NTPException('Invalid NTP packet fields')
        return packed
    def from_data(self, data):
        """build a NTPPacket from a NTP packet received from network
        raise an NTPException in case of invalid packet format"""
        try:
            unpacked = struct.unpack(NTPPacket.ntp_packet_format,
                    data[0:struct.calcsize(NTPPacket.ntp_packet_format)])
        except struct.error:
            raise NTPException('Invalid NTP packet')
        # first byte holds leap (2 bits), version (3 bits) and mode (3 bits)
        self.leap = unpacked[0] >> 6 & 0x3
        self.version = unpacked[0] >> 3 & 0x7
        self.mode = unpacked[0] & 0x7
        self.stratum = unpacked[1]
        self.poll = unpacked[2]
        self.precision = unpacked[3]
        # 16.16 fixed-point fields are converted back to floats
        self.root_delay = float(unpacked[4])/2**16
        self.root_dispersion = float(unpacked[5])/2**16
        self.ref_id = unpacked[6]
        # 32.32 timestamps: consecutive (int, frac) word pairs
        self.ref_timestamp = to_time(unpacked[7], unpacked[8])
        self.orig_timestamp = to_time(unpacked[9], unpacked[10])
        self.recv_timestamp = to_time(unpacked[11], unpacked[12])
        self.tx_timestamp = to_time(unpacked[13], unpacked[14])
class NTPStats(NTPPacket):
    """wrapper for NTPPacket, offering additional statistics like offset and
    delay, and timestamps converted to local time"""
    def __init__(self, dest_timestamp):
        NTPPacket.__init__(self)
        # time (NTP scale) at which the response arrived at this client
        self.dest_timestamp = dest_timestamp
    @property
    def offset(self):
        """NTP offset: client clock error versus the server,
        ((recv - orig) + (tx - dest)) / 2."""
        return ((self.recv_timestamp - self.orig_timestamp) +
                (self.tx_timestamp - self.dest_timestamp))/2
    @property
    def delay(self):
        """NTP delay: round-trip time excluding server processing time."""
        return ((self.dest_timestamp - self.orig_timestamp) -
                (self.tx_timestamp - self.recv_timestamp))
    @property
    def tx_time(self):
        """tx_timestamp converted to local (system-epoch) time"""
        return ntp_to_system_time(self.tx_timestamp)
    @property
    def recv_time(self):
        """recv_timestamp converted to local (system-epoch) time"""
        return ntp_to_system_time(self.recv_timestamp)
    @property
    def orig_time(self):
        """orig_timestamp converted to local (system-epoch) time"""
        return ntp_to_system_time(self.orig_timestamp)
    @property
    def ref_time(self):
        """ref_timestamp converted to local (system-epoch) time"""
        return ntp_to_system_time(self.ref_timestamp)
    @property
    def dest_time(self):
        """dest_timestamp converted to local (system-epoch) time"""
        return ntp_to_system_time(self.dest_timestamp)
class NTPClient(object):
    """Client session - for now, a mere wrapper for NTP requests"""
    def request(self, host, version=2, port='ntp'):
        """make a NTP request to a server - return a NTPStats object

        host: server hostname or address; port: service name or number.
        Uses a 5 second socket timeout, so socket.timeout may propagate."""
        # lookup server address
        addrinfo = socket.getaddrinfo(host, port)[0]
        family, sockaddr = addrinfo[0], addrinfo[4]
        # create the socket
        s = socket.socket(family, socket.SOCK_DGRAM)
        s.settimeout(5)
        # create the request packet - mode 3 is client
        query = NTPPacket(mode=3, version=version,
                          tx_timestamp=system_to_ntp_time(time.time()))
        query_packet = query.to_data()
        try:
            # send the request
            s.sendto(query_packet, sockaddr)
            # wait for the response - check the source address, dropping
            # datagrams that arrive from any other peer
            src_addr = (None, None)
            while src_addr[0] != sockaddr[0]:
                (response_packet, src_addr) = s.recvfrom(256)
            # build the destination timestamp
            dest_timestamp = system_to_ntp_time(time.time())
        finally:
            # no matter what happens, we must close the socket
            # if an exception was raised, let the application handle it
            s.close()
        # construct corresponding statistics
        response = NTPStats(dest_timestamp)
        response.from_data(response_packet)
        return response
def to_int(date):
    """Truncate a timestamp to its integral part (toward zero)."""
    return int(date)
def to_frac(date, n=32):
    """Return the fractional part of a timestamp, scaled to an n-bit integer."""
    fractional = abs(date - int(date))
    return int(fractional * (1 << n))
def to_time(integ, frac, n=32):
    """Rebuild a float timestamp from an integral part and an n-bit
    fractional part."""
    return integ + frac / float(1 << n)
def ntp_to_system_time(date):
    """convert a NTP time (epoch 1900) to system time (system epoch)"""
    return date - NTP_DELTA
def system_to_ntp_time(date):
    """convert a system time (system epoch) to a NTP time (epoch 1900)"""
    return date + NTP_DELTA
def leap_to_text(leap):
    """Translate a 2-bit leap indicator value into its textual description."""
    try:
        return {
            0: 'no warning',
            1: 'last minute has 61 seconds',
            2: 'last minute has 59 seconds',
            3: 'alarm condition (clock not synchronized)',
        }[leap]
    except KeyError:
        raise NTPException('Invalid leap indicator')
def mode_to_text(mode):
    """Translate a 3-bit NTP association mode value into text."""
    descriptions = {
        0: 'unspecified',
        1: 'symmetric active',
        2: 'symmetric passive',
        3: 'client',
        4: 'server',
        5: 'broadcast',
        6: 'reserved for NTP control messages',
        7: 'reserved for private use',
    }
    try:
        return descriptions[mode]
    except KeyError:
        raise NTPException('Invalid mode')
def stratum_to_text(stratum):
    """Translate a stratum value into text; 2..254 are secondary references."""
    if stratum == 0:
        return 'unspecified'
    if stratum == 1:
        return 'primary reference'
    if 1 < stratum < 255:
        return 'secondary reference (NTP)'
    raise NTPException('Invalid stratum')
def ref_id_to_text(ref_id, stratum=2):
    """Render a 32-bit reference identifier; meaning depends on stratum.

    Stratum 0-1: a 4-character ASCII code, looked up in a table of known
    reference sources when possible.  Stratum 2-254: the dotted IPv4 address
    of the synchronization source.  Anything else raises NTPException.
    """
    # NOTE(review): 'radionavidation' looks like a typo for 'radionavigation'
    # in the original table; kept byte-identical here.
    ref_id_table = {
        'DNC': 'DNC routing protocol',
        'NIST': 'NIST public modem',
        'TSP': 'TSP time protocol',
        'DTS': 'Digital Time Service',
        'ATOM': 'Atomic clock (calibrated)',
        'VLF': 'VLF radio (OMEGA, etc)',
        'callsign': 'Generic radio',
        'LORC': 'LORAN-C radionavidation',
        'GOES': 'GOES UHF environment satellite',
        'GPS': 'GPS UHF satellite positioning',
    }
    octets = ((ref_id >> 24) & 0xff, (ref_id >> 16) & 0xff,
              (ref_id >> 8) & 0xff, ref_id & 0xff)
    if 0 <= stratum <= 1:
        code = '%c%c%c%c' % octets
        return ref_id_table.get(code, code)
    if 2 <= stratum < 255:
        return '%d.%d.%d.%d' % octets
    raise NTPException('Invalid reference clock identifier')
| |
#!/usr/bin/env python2
import unittest
from pyvap import Store, UnsolvableError, UndeterminedError, Maybe
from pyvap.constraints import BitwiseAnd
import pyvap.types
class Common(unittest.TestCase):
    """Shared fixture: a Store with two IntVars joined by a BitwiseAnd.

    Subclasses set ``flip = True`` to swap which underlying variable the
    ``a``/``b`` properties resolve to, re-running the same tests with the
    operands reversed.
    """
    # When True, the a/b properties swap the two underlying variables.
    flip = False
    def setUp(self):
        self.store = Store()
        self._var1 = self.store.IntVar()
        self._var2 = self.store.IntVar()
        self.a.name = 'a'
        self.b.name = 'b'
        self.constraint = BitwiseAnd(self.a, self.b)
        self.out = self.constraint.result
        self.out.name = 'out'
    # a/b indirect through `flip` so flipped subclasses reuse these tests.
    a = property(lambda self: self._var2 if self.flip else self._var1)
    b = property(lambda self: self._var1 if self.flip else self._var2)
class TestBasics(Common):
    """Core behavior of BitwiseAnd: types, operands, limits, and exactness
    over a small exhaustive range of values (both assignment orders)."""
    def test_type(self):
        self.assertIs(type(self.out), pyvap.types.IntVar)
    def test_attributes(self):
        self.assertEqual(self.constraint.operands, (self.a, self.b))
        self.assertIs(self.constraint.operands[0], self.a)
        self.assertIs(self.constraint.operands[1], self.b)
    def test_limits(self):
        # All three variables keep the store's default bounds initially.
        default = self.store.IntVar()
        self.assertEqual(self.a.mini, default.mini)
        self.assertEqual(self.a.maxi, default.maxi)
        self.assertEqual(self.b.mini, default.mini)
        self.assertEqual(self.b.maxi, default.maxi)
        self.assertEqual(self.out.mini, default.mini)
        self.assertEqual(self.out.maxi, default.maxi)
    def test_different_stores_raises(self):
        extra = Store().IntVar('extra')
        with self.assertRaisesRegexp(ValueError, 'Different stores'):
            BitwiseAnd(self.a, extra)
        with self.assertRaisesRegexp(ValueError, 'Different stores'):
            BitwiseAnd(extra, self.a)
    def test_operator(self):
        out = self.a & self.b
        self.a.value = 10
        self.b.value = 7
        self.assertEqual(out.value, 2)
    def test_exact(self):
        for aval in xrange(-5, 5):
            for bval in xrange(-5, 5):
                outval = aval & bval
                # first one way...
                self.store._push()
                self.a.value = aval
                self.b.value = bval
                self.assertEqual(self.out.value, outval)
                self.store._pop()
                # then another.
                self.store._push()
                self.b.value = bval
                self.a.value = aval
                self.assertEqual(self.out.value, outval)
                self.store._pop()
    def test_raises(self):
        # an UnsolvableError must ALWAYS be raised when all values are
        # determined without fulfilling the constraint.
        for aval in xrange(-3, 3):
            for bval in xrange(-3, 3):
                for oval in (x for x in xrange(-3, 3) if x != aval & bval):
                    self.store._push()
                    self.a.value = aval
                    self.b.value = bval
                    with self.assertRaises(UnsolvableError):
                        self.out.value = oval
                    self.store._pop()
                    self.store._push()
                    self.out.value = oval
                    with self.assertRaises(UnsolvableError):
                        self.b.value = bval
                        self.a.value = aval
                    self.store._pop()
                    self.store._push()
                    self.out.value = oval
                    with self.assertRaises(UnsolvableError):
                        self.a.value = aval
                        self.b.value = bval
                    self.store._pop()
class TestNegativeOneA(Common):
    """With a == -1 (all bits set), `out` must mirror `b` exactly, so any
    domain restriction on one of b/out must propagate to the other."""
    def setUp(self):
        Common.setUp(self)
        self.a.value = -1
    def check_non_negative(self, to_set, to_check):
        # Excluding everything below 0 on one side must clamp the other too.
        to_set.exclude(None, -1)
        self.assertEqual(to_check.mini, 0)
        self.assertEqual(to_check.maxi, 1000000)
        self.assertEqual(len(to_check), len(to_set))
        self.assertEqual(self.a.value, -1, 'a was modified')
        self.assertEqual(to_set.mini, 0)
        self.assertEqual(to_set.maxi, 1000000)
        self.assertEqual(len(to_set), 1000001)
    def test_non_negative_b(self):
        self.check_non_negative(self.b, self.out)
    def test_non_negative_out(self):
        self.check_non_negative(self.out, self.b)
    def check_negative(self, to_set, to_check):
        # Excluding everything >= 0 must propagate symmetrically.
        to_set.exclude(0, None)
        self.assertEqual(to_check.mini, -1000000)
        self.assertEqual(to_check.maxi, -1)
        self.assertEqual(len(to_check), len(to_set))
        self.assertEqual(self.a.value, -1, 'a was modified')
        self.assertEqual(to_set.mini, -1000000)
        self.assertEqual(to_set.maxi, -1)
        self.assertEqual(len(to_set), 1000000)
    def test_negative_b(self):
        self.check_negative(self.b, self.out)
    def test_negative_out(self):
        self.check_negative(self.out, self.b)
    def check_any(self, to_set, to_check):
        to_set.shrink(-10, 10)
        for v in xrange(-10, 11):
            self.assertIn(v, to_check)
        self.assertEqual(self.a.value, -1, 'a was modified')
        # TODO: would be nice if this worked:
        #self.assertEqual(to_check.mini, -10)
        #self.assertEqual(to_check.maxi, 10)
        #self.assertEqual(len(to_check), 21)
    def test_any_b(self):
        self.check_any(self.b, self.out)
    def test_any_out(self):
        self.check_any(self.out, self.b)
    def check_5_10(self, to_set, to_check):
        to_set.shrink(5, 10)
        self.assertEqual(to_check.mini, 5)
        self.assertEqual(to_check.maxi, 10)
        self.assertEqual(len(to_check), 6)
        self.assertEqual(self.a.value, -1, 'a was modified')
        self.assertEqual(to_set.mini, 5)
        self.assertEqual(to_set.maxi, 10)
        self.assertEqual(len(to_set), 6)
    def test_b_5_10(self):
        self.check_5_10(self.b, self.out)
    def test_out_5_10(self):
        self.check_5_10(self.out, self.b)
class TestFlippedNegativeOneA(TestNegativeOneA):
    """Re-run TestNegativeOneA with the a/b operands swapped (flip=True)."""
    def setUp(self):
        self.flip = True
        TestNegativeOneA.setUp(self)
class TestOneA(Common):
    """With a == 1, `out` can only be b's lowest bit (0 or 1), which in turn
    constrains b's parity once `out` is fixed."""
    def setUp(self):
        Common.setUp(self)
        self.a.value = 1
    def test_any_b(self):
        self.assertEqual(self.out.mini, 0)
        self.assertEqual(self.out.maxi, 1)
    def test_b_even(self):
        # Remove all odd candidates from b; out's upper bound stays 1.
        self.b.shrink(-4, 4)
        self.b.exclude_value(-3)
        self.b.exclude_value(-1)
        self.b.exclude_value( 1)
        self.b.exclude_value( 3)
        self.assertEqual(self.out.maxi, 1)
        # TODO this would be nice:
        #self.assertEqual(self.out.value, 1)
        #with self.assertRaises(UnsolvableError):
        #self.out.value = 1
    def test_out_one(self):
        # out == 1 forces b odd: every odd value stays, even values unsolvable.
        self.b.shrink(-10, 10)
        self.out.value = 1
        for v in xrange(-9, 11, 2):
            self.assertIn(v, self.b)
        # TODO this would be nice:
        #for v in xrange(-10, 11, 2):
        #    self.assertNotIn(v, self.b)
        with self.assertRaises(UnsolvableError):
            self.b.value = 4
    def test_out_zero(self):
        # out == 0 forces b even: every even value stays, odd values unsolvable.
        self.b.shrink(-10, 10)
        self.out.value = 0
        for v in xrange(-10, 11, 2):
            self.assertIn(v, self.b)
        # TODO this would be nice:
        #for v in xrange(-9, 11, 2):
        #    self.assertNotIn(v, self.b)
        with self.assertRaises(UnsolvableError):
            self.b.value = 5
class TestFlippedOneA(TestOneA):
    """Re-run TestOneA with the a/b operands swapped (flip=True)."""
    def setUp(self):
        self.flip = True
        TestOneA.setUp(self)
class TestABetween5And40(Common):
    """A bounded non-negative `a` (5..40, i.e. at most 6 bits) must bound
    `out` into [0, 64)."""
    def setUp(self):
        Common.setUp(self)
        self.a.shrink(5, 40)
    def test_a_limited_out(self):
        self.assertGreaterEqual(self.out.mini, 0)
        self.assertLess(self.out.maxi, 64)
class TestFlippedABetween5And40(TestABetween5And40):
    """Re-run TestABetween5And40 with the a/b operands swapped (flip=True)."""
    def setUp(self):
        self.flip = True
        TestABetween5And40.setUp(self)
# Run this module's tests when executed directly.
if __name__ == '__main__':
    unittest.main()
| |
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run tests in parallel."""
from __future__ import print_function
import argparse
import ast
import collections
import glob
import itertools
import json
import multiprocessing
import os
import os.path
import pipes
import platform
import random
import re
import socket
import subprocess
import sys
import tempfile
import traceback
import time
from six.moves import urllib
import uuid
import jobset
import report_utils
import watch_dirs
# Repository root, derived from this script's location (tools/run_tests/..).
_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
# NOTE: import-time side effect — all relative paths below assume the repo root.
os.chdir(_ROOT)
# Environment forced onto language-wrapper test runs.
_FORCE_ENVIRON_FOR_WRAPPERS = {
  'GRPC_VERBOSITY': 'DEBUG',
}
# Per-platform polling engines to exercise; platforms not listed use 'all'.
_POLLING_STRATEGIES = {
  'linux': ['epoll', 'poll', 'poll-cv', 'legacy']
}
def platform_string():
    """Return the current platform name as normalized by jobset (e.g. 'linux')."""
    return jobset.platform_string()
# Default per-test timeout, before the per-config multiplier is applied.
_DEFAULT_TIMEOUT_SECONDS = 5 * 60
# SimpleConfig: just compile with CONFIG=config, and run the binary to test
class Config(object):
    """A build configuration: compile with CONFIG=<config>, run the binaries.

    ``environ`` always carries a ``CONFIG`` entry naming the build config;
    ``tool_prefix`` is prepended to every test command line.
    """

    def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=None):
        # None-sentinels instead of the previous mutable defaults ({} / []):
        # a shared default dict/list would leak state across Config instances.
        if environ is None:
            environ = {}
        self.build_config = config
        self.environ = environ
        self.environ['CONFIG'] = config
        self.tool_prefix = tool_prefix if tool_prefix is not None else []
        self.timeout_multiplier = timeout_multiplier

    def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
                 shortname=None, environ=None, cpu_cost=1.0, flaky=False):
        """Construct a jobset.JobSpec for a test under this config

        Args:
          cmdline: a list of strings specifying the command line the test
                   would like to run
          environ: optional per-test environment overrides (take precedence
                   over this config's environment)
        """
        actual_environ = self.environ.copy()
        for k, v in (environ or {}).items():
            actual_environ[k] = v
        # NOTE(review): `args` is the module-level parsed-arguments global —
        # this method assumes run_tests has already parsed the command line.
        return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
                              shortname=shortname,
                              environ=actual_environ,
                              cpu_cost=cpu_cost,
                              timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
                              flake_retries=5 if flaky or args.allow_flakes else 0,
                              timeout_retries=3 if args.allow_flakes else 0)
def get_c_tests(travis, test_lang):
    """Load tests.json and return the test targets runnable here.

    Filters on target language, on the platform list appropriate for CI
    (travis) vs local runs, and drops flaky targets when running under
    travis.  Removed the dead `out = []` local from the original.
    """
    platforms_str = 'ci_platforms' if travis else 'platforms'
    with open('tools/run_tests/tests.json') as f:
        js = json.load(f)
    return [tgt
            for tgt in js
            if tgt['language'] == test_lang and
            platform_string() in tgt[platforms_str] and
            not (travis and tgt['flaky'])]
def _check_compiler(compiler, supported_compilers):
if compiler not in supported_compilers:
raise Exception('Compiler %s not supported (on this platform).' % compiler)
def _check_arch(arch, supported_archs):
if arch not in supported_archs:
raise Exception('Architecture %s not supported.' % arch)
def _is_use_docker_child():
"""Returns True if running running as a --use_docker child."""
return True if os.getenv('RUN_TESTS_COMMAND') else False
# Container for the knobs that vary between Python interpreter configs.
# NOTE(review): the tuple's typename '_ConfigVars' does not match the
# variable name '_PythonConfigVars'; repr/pickling use the typename —
# confirm nothing relies on it before renaming.
_PythonConfigVars = collections.namedtuple(
    '_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
                    'venv_relative_python', 'toolchain', 'runner'])
def _python_config_generator(name, major, minor, bits, config_vars):
    """Build a PythonConfig for a CPython version: venv build + test runner."""
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments +
                 [_python_pattern_function(major=major, minor=minor, bits=bits)] +
                 [name] + config_vars.venv_relative_python + config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0])]
    return PythonConfig(name, build_cmd, run_cmd)
def _pypy_config_generator(name, major, config_vars):
    """Build a PythonConfig for a PyPy version: venv build + test runner."""
    build_cmd = (config_vars.shell + config_vars.builder +
                 config_vars.builder_prefix_arguments +
                 [_pypy_pattern_function(major=major)] +
                 [name] + config_vars.venv_relative_python + config_vars.toolchain)
    run_cmd = config_vars.shell + config_vars.runner + [
        os.path.join(name, config_vars.venv_relative_python[0])]
    return PythonConfig(name, build_cmd, run_cmd)
def _python_pattern_function(major, minor, bits):
# Bit-ness is handled by the test machine's environment
if os.name == "nt":
if bits == "64":
return '/c/Python{major}{minor}/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
major=major, minor=minor, bits=bits)
else:
return 'python{major}.{minor}'.format(major=major, minor=minor)
def _pypy_pattern_function(major):
if major == '2':
return 'pypy'
elif major == '3':
return 'pypy3'
else:
raise ValueError("Unknown PyPy major version")
class CLanguage(object):
  """Test-runner language definition for the C/C++ core test binaries.

  `make_target` selects the Makefile targets to build; `test_lang` selects
  which tests from the generated build metadata to run.
  """

  def __init__(self, make_target, test_lang):
    self.make_target = make_target
    self.platform = platform_string()
    self.test_lang = test_lang

  def configure(self, config, args):
    """Store run config/args and derive platform-specific make options."""
    self.config = config
    self.args = args
    if self.platform == 'windows':
      self._make_options = [_windows_toolset_option(self.args.compiler),
                            _windows_arch_option(self.args.arch)]
    else:
      self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
                                                                       self.args.compiler)

  def test_specs(self):
    """Expand the C/C++ test binaries into a sorted list of job specs.

    Each binary runs once per applicable polling strategy; gtest binaries
    are further expanded into one job per individual test.
    """
    out = []
    binaries = get_c_tests(self.args.travis, self.test_lang)
    for target in binaries:
      polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
                            if target.get('uses_polling', True)
                            else ['all'])
      for polling_strategy in polling_strategies:
        env = {'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
                   _ROOT + '/src/core/lib/tsi/test_creds/ca.pem',
               'GRPC_POLL_STRATEGY': polling_strategy,
               'GRPC_VERBOSITY': 'DEBUG'}
        shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
        if self.config.build_config in target['exclude_configs']:
          continue
        if self.platform == 'windows':
          binary = 'vsprojects/%s%s/%s.exe' % (
              'x64/' if self.args.arch == 'x64' else '',
              _MSBUILD_CONFIG[self.config.build_config],
              target['name'])
        else:
          binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
        if os.path.isfile(binary):
          if 'gtest' in target and target['gtest']:
            # here we parse the output of --gtest_list_tests to build up a
            # complete list of the tests contained in a binary
            # for each test, we then add a job to run, filtering for just that
            # test
            with open(os.devnull, 'w') as fnull:
              tests = subprocess.check_output([binary, '--gtest_list_tests'],
                                              stderr=fnull)
            # BUG FIX: check_output returns bytes under Python 3; decode
            # before treating the output as text.
            tests = tests.decode()
            base = None
            for line in tests.split('\n'):
              # strip trailing '# ...' annotations emitted by gtest
              i = line.find('#')
              if i >= 0: line = line[:i]
              if not line: continue
              # unindented lines are test-case prefixes; indented lines
              # are the individual test names within the current case
              if line[0] != ' ':
                base = line.strip()
              else:
                assert base is not None
                assert line[1] == ' '
                test = base + line.strip()
                cmdline = [binary] + ['--gtest_filter=%s' % test]
                out.append(self.config.job_spec(cmdline,
                                                shortname='%s --gtest_filter=%s %s' % (binary, test, shortname_ext),
                                                cpu_cost=target['cpu_cost'],
                                                environ=env))
          else:
            cmdline = [binary] + target['args']
            out.append(self.config.job_spec(cmdline,
                                            shortname=' '.join(
                                                pipes.quote(arg)
                                                for arg in cmdline) +
                                            shortname_ext,
                                            cpu_cost=target['cpu_cost'],
                                            flaky=target.get('flaky', False),
                                            timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS),
                                            environ=env))
        elif self.args.regex == '.*' or self.platform == 'windows':
          print('\nWARNING: binary not found, skipping', binary)
    return sorted(out)

  def make_targets(self):
    if self.platform == 'windows':
      # don't build tools on windows just yet
      return ['buildtests_%s' % self.make_target]
    return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target]

  def make_options(self):
    return self._make_options

  def pre_build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\pre_build_c.bat']]
    else:
      return []

  def build_steps(self):
    return []

  def post_tests_steps(self):
    if self.platform == 'windows':
      return []
    else:
      return [['tools/run_tests/post_tests_c.sh']]

  def makefile_name(self):
    return 'Makefile'

  def _clang_make_options(self, version_suffix=''):
    """Make variables selecting a (possibly versioned) clang toolchain."""
    return ['CC=clang%s' % version_suffix,
            'CXX=clang++%s' % version_suffix,
            'LD=clang%s' % version_suffix,
            'LDXX=clang++%s' % version_suffix]

  def _gcc_make_options(self, version_suffix):
    """Make variables selecting a gcc toolchain of the given version."""
    return ['CC=gcc%s' % version_suffix,
            'CXX=g++%s' % version_suffix,
            'LD=gcc%s' % version_suffix,
            'LDXX=g++%s' % version_suffix]

  def _compiler_options(self, use_docker, compiler):
    """Returns docker distro and make options to use for given compiler."""
    if not use_docker and not _is_use_docker_child():
      _check_compiler(compiler, ['default'])
    if compiler == 'gcc4.9' or compiler == 'default':
      return ('jessie', [])
    elif compiler == 'gcc4.4':
      return ('wheezy', self._gcc_make_options(version_suffix='-4.4'))
    elif compiler == 'gcc4.6':
      return ('wheezy', self._gcc_make_options(version_suffix='-4.6'))
    elif compiler == 'gcc5.3':
      return ('ubuntu1604', [])
    elif compiler == 'clang3.4':
      # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
      return ('ubuntu1404', self._clang_make_options())
    elif compiler == 'clang3.5':
      return ('jessie', self._clang_make_options(version_suffix='-3.5'))
    elif compiler == 'clang3.6':
      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
    elif compiler == 'clang3.7':
      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
    else:
      raise Exception('Compiler %s not supported.' % compiler)

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
                                                _docker_arch_suffix(self.args.arch))

  def __str__(self):
    return self.make_target
class NodeLanguage(object):
  """Test-runner language definition for Node.js."""

  def __init__(self):
    self.platform = platform_string()

  def configure(self, config, args):
    self.config = config
    self.args = args
    supported = ['default', 'node0.12', 'node4', 'node5', 'node6']
    _check_compiler(self.args.compiler, supported)
    compiler = self.args.compiler
    # 'node<version>' compiler names carry the version after the 'node'
    # prefix; 'default' pins node 4.
    self.node_version = '4' if compiler == 'default' else compiler[4:]

  def test_specs(self):
    if self.platform == 'windows':
      return [self.config.job_spec(['tools\\run_tests\\run_node.bat'], None)]
    return [self.config.job_spec(['tools/run_tests/run_node.sh', self.node_version],
                                 None,
                                 environ=_FORCE_ENVIRON_FOR_WRAPPERS)]

  def pre_build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\pre_build_node.bat']]
    return [['tools/run_tests/pre_build_node.sh', self.node_version]]

  def make_targets(self):
    return []

  def make_options(self):
    return []

  def build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\build_node.bat']]
    return [['tools/run_tests/build_node.sh', self.node_version]]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'node'
class PhpLanguage(object):
  """Test-runner language definition for PHP (PHP 5)."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    spec = self.config.job_spec(['src/php/bin/run_tests.sh'], None,
                                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
    return [spec]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    # The PHP extension links against the C core libraries.
    return ['static_c', 'shared_c']

  def make_options(self):
    return []

  def build_steps(self):
    return [['tools/run_tests/build_php.sh']]

  def post_tests_steps(self):
    return [['tools/run_tests/post_tests_php.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'php'
class Php7Language(object):
  """Test-runner language definition for PHP 7."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    spec = self.config.job_spec(['src/php/bin/run_tests.sh'], None,
                                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
    return [spec]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    # The PHP extension links against the C core libraries.
    return ['static_c', 'shared_c']

  def make_options(self):
    return []

  def build_steps(self):
    return [['tools/run_tests/build_php.sh']]

  def post_tests_steps(self):
    return [['tools/run_tests/post_tests_php.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'php7'
class PythonConfig(collections.namedtuple('PythonConfig', [
    'name', 'build', 'run'])):
  """One Python runtime under test.

  Fields:
    name: identifier of the runtime/virtualenv (e.g. 'py27').
    build: command list that creates the virtualenv and builds the package.
    run: command list that executes the test runner inside that virtualenv.
  """
class PythonLanguage(object):
  """Test-runner language definition for Python (CPython and PyPy runtimes)."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    # One PythonConfig (build + run command pair) per interpreter under test.
    self.pythons = self._get_pythons(self.args)

  def test_specs(self):
    """Return one job spec per (runtime, test suite) combination."""
    # load list of known test suites
    with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
      tests_json = json.load(tests_json_file)
    environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
    # The suite to run is selected via GRPC_PYTHON_TESTRUNNER_FILTER in
    # the job's environment.
    return [self.config.job_spec(
        config.run,
        timeout_seconds=5*60,
        environ=dict(list(environment.items()) +
                     [('GRPC_PYTHON_TESTRUNNER_FILTER', suite_name)]),
        shortname='%s.test.%s' % (config.name, suite_name),)
        for suite_name in tests_json
        for config in self.pythons]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    return []

  def make_options(self):
    return []

  def build_steps(self):
    # Each runtime's virtualenv is built by its own build command.
    return [config.build for config in self.pythons]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))

  def python_manager_name(self):
    # Newer CPythons come via pyenv images; everything else uses jessie.
    return 'pyenv' if self.args.compiler in ['python3.5', 'python3.6'] else 'jessie'

  def _get_pythons(self, args):
    """Build the tuple of PythonConfigs selected by --arch/--compiler."""
    if args.arch == 'x86':
      bits = '32'
    else:
      bits = '64'
    if os.name == 'nt':
      # On Windows the build runs under MSYS2's bash with a MinGW toolchain.
      shell = ['bash']
      builder = [os.path.abspath('tools/run_tests/build_python_msys2.sh')]
      builder_prefix_arguments = ['MINGW{}'.format(bits)]
      venv_relative_python = ['Scripts/python.exe']
      toolchain = ['mingw32']
    else:
      shell = []
      builder = [os.path.abspath('tools/run_tests/build_python.sh')]
      builder_prefix_arguments = []
      venv_relative_python = ['bin/python']
      toolchain = ['unix']
    runner = [os.path.abspath('tools/run_tests/run_python.sh')]
    config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
                                    venv_relative_python, toolchain, runner)
    python27_config = _python_config_generator(name='py27', major='2',
                                               minor='7', bits=bits,
                                               config_vars=config_vars)
    python34_config = _python_config_generator(name='py34', major='3',
                                               minor='4', bits=bits,
                                               config_vars=config_vars)
    python35_config = _python_config_generator(name='py35', major='3',
                                               minor='5', bits=bits,
                                               config_vars=config_vars)
    python36_config = _python_config_generator(name='py36', major='3',
                                               minor='6', bits=bits,
                                               config_vars=config_vars)
    pypy27_config = _pypy_config_generator(name='pypy', major='2',
                                           config_vars=config_vars)
    pypy32_config = _pypy_config_generator(name='pypy3', major='3',
                                           config_vars=config_vars)
    # 'default' tests 2.7 (plus 3.4 off Windows); otherwise the compiler
    # flag names the single runtime to test.
    if args.compiler == 'default':
      if os.name == 'nt':
        return (python27_config,)
      else:
        return (python27_config, python34_config,)
    elif args.compiler == 'python2.7':
      return (python27_config,)
    elif args.compiler == 'python3.4':
      return (python34_config,)
    elif args.compiler == 'python3.5':
      return (python35_config,)
    elif args.compiler == 'python3.6':
      return (python36_config,)
    elif args.compiler == 'pypy':
      return (pypy27_config,)
    elif args.compiler == 'pypy3':
      return (pypy32_config,)
    else:
      raise Exception('Compiler %s not supported.' % args.compiler)

  def __str__(self):
    return 'python'
class RubyLanguage(object):
  """Test-runner language definition for Ruby."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    spec = self.config.job_spec(['tools/run_tests/run_ruby.sh'],
                                timeout_seconds=10*60,
                                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
    return [spec]

  def pre_build_steps(self):
    return [['tools/run_tests/pre_build_ruby.sh']]

  def make_targets(self):
    return []

  def make_options(self):
    return []

  def build_steps(self):
    return [['tools/run_tests/build_ruby.sh']]

  def post_tests_steps(self):
    return [['tools/run_tests/post_tests_ruby.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)

  def __str__(self):
    return 'ruby'
class CSharpLanguage(object):
  """Test-runner language definition for C# (mono and CoreCLR runtimes)."""

  def __init__(self):
    self.platform = platform_string()

  def configure(self, config, args):
    self.config = config
    self.args = args
    if self.platform == 'windows':
      # Explicitly choosing between x86 and x64 arch doesn't work yet
      _check_arch(self.args.arch, ['default'])
      # CoreCLR use 64bit runtime by default.
      arch_option = 'x64' if self.args.compiler == 'coreclr' else self.args.arch
      self._make_options = [_windows_toolset_option(self.args.compiler),
                            _windows_arch_option(arch_option)]
    else:
      _check_compiler(self.args.compiler, ['default', 'coreclr'])
      if self.platform == 'linux' and self.args.compiler == 'coreclr':
        self._docker_distro = 'coreclr'
      else:
        self._docker_distro = 'jessie'
      if self.platform == 'mac':
        # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
        self._make_options = ['EMBED_OPENSSL=true']
        if self.args.compiler != 'coreclr':
          # On Mac, official distribution of mono is 32bit.
          self._make_options += ['CFLAGS=-m32', 'LDFLAGS=-m32']
      else:
        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']

  def test_specs(self):
    """One job per test (or per assembly when measuring coverage on Windows)."""
    with open('src/csharp/tests.json') as f:
      tests_by_assembly = json.load(f)
    msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
    nunit_args = ['--labels=All']
    assembly_subdir = 'bin/%s' % msbuild_config
    assembly_extension = '.exe'
    if self.args.compiler == 'coreclr':
      assembly_subdir += '/netcoreapp1.0'
      runtime_cmd = ['dotnet', 'exec']
      assembly_extension = '.dll'
    else:
      nunit_args += ['--noresult', '--workers=1']
      if self.platform == 'windows':
        runtime_cmd = []
      else:
        runtime_cmd = ['mono']
    specs = []
    # BUG FIX: iterate the dict directly instead of the Python-2-only
    # dict.iterkeys(), which does not exist under Python 3.
    for assembly in tests_by_assembly:
      assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
                                                 assembly_subdir,
                                                 assembly,
                                                 assembly_extension)
      if self.config.build_config != 'gcov' or self.platform != 'windows':
        # normally, run each test as a separate process
        for test in tests_by_assembly[assembly]:
          cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
          specs.append(self.config.job_spec(cmdline,
                                            None,
                                            shortname='csharp.%s' % test,
                                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
      else:
        # For C# test coverage, run all tests from the same assembly at once
        # using OpenCover.Console (only works on Windows).
        cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
                   '-target:%s' % assembly_file,
                   '-targetdir:src\\csharp',
                   '-targetargs:%s' % ' '.join(nunit_args),
                   '-filter:+[Grpc.Core]*',
                   '-register:user',
                   '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
        # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
        # to prevent problems with registering the profiler.
        run_exclusive = 1000000
        specs.append(self.config.job_spec(cmdline,
                                          None,
                                          shortname='csharp.coverage.%s' % assembly,
                                          cpu_cost=run_exclusive,
                                          environ=_FORCE_ENVIRON_FOR_WRAPPERS))
    return specs

  def pre_build_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\pre_build_csharp.bat']]
    else:
      return [['tools/run_tests/pre_build_csharp.sh']]

  def make_targets(self):
    return ['grpc_csharp_ext']

  def make_options(self):
    return self._make_options

  def build_steps(self):
    if self.args.compiler == 'coreclr':
      if self.platform == 'windows':
        return [['tools\\run_tests\\build_csharp_coreclr.bat']]
      else:
        return [['tools/run_tests/build_csharp_coreclr.sh']]
    else:
      if self.platform == 'windows':
        return [[_windows_build_bat(self.args.compiler),
                 'src/csharp/Grpc.sln',
                 '/p:Configuration=%s' % _MSBUILD_CONFIG[self.config.build_config]]]
      else:
        return [['tools/run_tests/build_csharp.sh']]

  def post_tests_steps(self):
    if self.platform == 'windows':
      return [['tools\\run_tests\\post_tests_csharp.bat']]
    else:
      return [['tools/run_tests/post_tests_csharp.sh']]

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
                                                   _docker_arch_suffix(self.args.arch))

  def __str__(self):
    return 'csharp'
class ObjCLanguage(object):
  """Test-runner language definition for Objective-C."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    run_tests = self.config.job_spec(
        ['src/objective-c/tests/run_tests.sh'],
        timeout_seconds=None,
        shortname='objc-tests',
        environ=_FORCE_ENVIRON_FOR_WRAPPERS)
    build_examples = self.config.job_spec(
        ['src/objective-c/tests/build_example_test.sh'],
        timeout_seconds=30*60,
        shortname='objc-examples-build',
        environ=_FORCE_ENVIRON_FOR_WRAPPERS)
    return [run_tests, build_examples]

  def pre_build_steps(self):
    return []

  def make_targets(self):
    return ['interop_server']

  def make_options(self):
    return []

  def build_steps(self):
    return [['src/objective-c/tests/build_tests.sh']]

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    # Objective-C tests only run natively on mac; no docker image exists.
    return None

  def __str__(self):
    return 'objc'
class Sanity(object):
  """Pseudo-language that runs the repository sanity checks."""

  def configure(self, config, args):
    self.config = config
    self.args = args
    _check_compiler(self.args.compiler, ['default'])

  def test_specs(self):
    import yaml
    specs = []
    with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
      # NOTE(review): yaml.load without an explicit Loader parses a trusted
      # in-repo file here; yaml.safe_load would be preferable if the file
      # never relies on custom tags -- confirm before changing.
      for cmd in yaml.load(f):
        specs.append(self.config.job_spec(cmd['script'].split(),
                                          timeout_seconds=None,
                                          environ={'TEST': 'true'},
                                          cpu_cost=cmd.get('cpu_cost', 1)))
    return specs

  def pre_build_steps(self):
    return []

  def make_targets(self):
    return ['run_dep_checks']

  def make_options(self):
    return []

  def build_steps(self):
    return []

  def post_tests_steps(self):
    return []

  def makefile_name(self):
    return 'Makefile'

  def dockerfile_dir(self):
    return 'tools/dockerfile/test/sanity'

  def __str__(self):
    return 'sanity'
# different configurations we can run under
with open('tools/run_tests/configs.json') as f:
  # NOTE(review): parsed with ast.literal_eval rather than json.load --
  # presumably to tolerate Python-literal syntax in the file; confirm
  # before switching to a JSON parser.
  _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))

# All runnable language definitions, keyed by their command-line name.
_LANGUAGES = {
    'c++': CLanguage('cxx', 'c++'),
    'c': CLanguage('c', 'c'),
    'node': NodeLanguage(),
    'php': PhpLanguage(),
    'php7': Php7Language(),
    'python': PythonLanguage(),
    'ruby': RubyLanguage(),
    'csharp': CSharpLanguage(),
    'objc' : ObjCLanguage(),
    'sanity': Sanity()
    }

# Maps our build config names to the corresponding msbuild configuration.
_MSBUILD_CONFIG = {
    'dbg': 'Debug',
    'opt': 'Release',
    'gcov': 'Debug',
    }
def _windows_arch_option(arch):
"""Returns msbuild cmdline option for selected architecture."""
if arch == 'default' or arch == 'x86':
return '/p:Platform=Win32'
elif arch == 'x64':
return '/p:Platform=x64'
else:
print('Architecture %s not supported.' % arch)
sys.exit(1)
def _check_arch_option(arch):
  """Checks that architecture option is valid; exits the process if not.

  On Windows this validates the msbuild mapping; on Linux it verifies the
  requested arch matches the interpreter's runtime architecture (we are
  expected to run under a docker image of the right bit-ness); on other
  platforms only 'default' is accepted.
  """
  if platform_string() == 'windows':
    _windows_arch_option(arch)
  elif platform_string() == 'linux':
    # On linux, we need to be running under docker with the right architecture.
    runtime_arch = platform.architecture()[0]
    if arch == 'default':
      return
    elif runtime_arch == '64bit' and arch == 'x64':
      return
    elif runtime_arch == '32bit' and arch == 'x86':
      return
    else:
      print('Architecture %s does not match current runtime architecture.' % arch)
      sys.exit(1)
  else:
    # BUG FIX: this branch previously read the global 'args.arch' instead
    # of the 'arch' parameter, silently coupling the function to module
    # state; use the parameter.
    if arch != 'default':
      print('Architecture %s not supported on current platform.' % arch)
      sys.exit(1)
def _windows_build_bat(compiler):
"""Returns name of build.bat for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return 'vsprojects\\build_vs2013.bat'
elif compiler == 'vs2015':
return 'vsprojects\\build_vs2015.bat'
elif compiler == 'vs2010':
return 'vsprojects\\build_vs2010.bat'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _windows_toolset_option(compiler):
"""Returns msbuild PlatformToolset for selected compiler."""
# For CoreCLR, fall back to the default compiler for C core
if compiler == 'default' or compiler == 'vs2013' or compiler == 'coreclr':
return '/p:PlatformToolset=v120'
elif compiler == 'vs2015':
return '/p:PlatformToolset=v140'
elif compiler == 'vs2010':
return '/p:PlatformToolset=v100'
else:
print('Compiler %s not supported.' % compiler)
sys.exit(1)
def _docker_arch_suffix(arch):
"""Returns suffix to dockerfile dir to use."""
if arch == 'default' or arch == 'x64':
return 'x64'
elif arch == 'x86':
return 'x86'
else:
print('Architecture %s not supported with current settings.' % arch)
sys.exit(1)
def runs_per_test_type(arg_str):
  """Auxiliary function to parse the "runs_per_test" flag.

  Returns:
    A positive integer or 0, the latter indicating an infinite number of
    runs.

  Raises:
    argparse.ArgumentTypeError: Upon invalid input.
  """
  if arg_str == 'inf':
    return 0
  try:
    n = int(arg_str)
    if n <= 0:
      raise ValueError
    return n
  except ValueError:
    # Narrowed from a bare 'except' so only parse/range errors are turned
    # into an argparse error; anything else propagates.
    msg = '\'{}\' is not a positive integer or \'inf\''.format(arg_str)
    raise argparse.ArgumentTypeError(msg)
# parse command line
argp = argparse.ArgumentParser(description='Run grpc tests.')
# -- test selection / repetition --
argp.add_argument('-c', '--config',
                  choices=sorted(_CONFIGS.keys()),
                  default='opt')
argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
        help='A positive integer or "inf". If "inf", all tests will run in an '
             'infinite loop. Especially useful in combination with "-f"')
argp.add_argument('-r', '--regex', default='.*', type=str)
argp.add_argument('--regex_exclude', default='', type=str)
# -- parallelism / pacing --
argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
argp.add_argument('-s', '--slowdown', default=1.0, type=float)
argp.add_argument('-f', '--forever',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-t', '--travis',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('--newline_on_success',
                  default=False,
                  action='store_const',
                  const=True)
argp.add_argument('-l', '--language',
                  choices=['all'] + sorted(_LANGUAGES.keys()),
                  nargs='+',
                  default=['all'])
argp.add_argument('-S', '--stop_on_failure',
                  default=False,
                  action='store_const',
                  const=True)
# -- environment / toolchain --
argp.add_argument('--use_docker',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Run all the tests under docker. That provides ' +
                  'additional isolation and prevents the need to install ' +
                  'language specific prerequisites. Only available on Linux.')
argp.add_argument('--allow_flakes',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
argp.add_argument('--arch',
                  choices=['default', 'x86', 'x64'],
                  default='default',
                  help='Selects architecture to target. For some platforms "default" is the only supported choice.')
argp.add_argument('--compiler',
                  choices=['default',
                           'gcc4.4', 'gcc4.6', 'gcc4.9', 'gcc5.3',
                           'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
                           'vs2010', 'vs2013', 'vs2015',
                           'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3',
                           'coreclr'],
                  default='default',
                  help='Selects compiler to use. Allowed values depend on the platform and language.')
argp.add_argument('--build_only',
                  default=False,
                  action='store_const',
                  const=True,
                  help='Perform all the build steps but dont run any tests.')
argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
                  help='Measure the cpu costs of tests')
argp.add_argument('--update_submodules', default=[], nargs='*',
                  help='Update some submodules before building. If any are updated, also run generate_projects. ' +
                       'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
# -- reporting --
argp.add_argument('-a', '--antagonists', default=0, type=int)
argp.add_argument('-x', '--xml_report', default=None, type=str,
        help='Generates a JUnit-compatible XML report')
argp.add_argument('--report_suite_name', default='tests', type=str,
        help='Test suite name to use in generated JUnit XML report')
argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
                  help='Dont try to iterate over many polling strategies when they exist')
args = argp.parse_args()
if args.force_default_poller:
  # Emptying the map makes every platform fall back to the 'all' strategy.
  _POLLING_STRATEGIES = {}

jobset.measure_cpu_costs = args.measure_cpu_costs

# update submodules if necessary
need_to_regenerate_projects = False
for spec in args.update_submodules:
  # Each spec is SUBMODULE_NAME[:BRANCH]; branch defaults to master.
  spec = spec.split(':', 1)
  if len(spec) == 1:
    submodule = spec[0]
    branch = 'master'
  elif len(spec) == 2:
    submodule = spec[0]
    branch = spec[1]
  cwd = 'third_party/%s' % submodule
  def git(cmd, cwd=cwd):
    # cwd is bound as a default argument so each loop iteration keeps its
    # own submodule directory (avoids the late-binding closure pitfall).
    print('in %s: git %s' % (cwd, cmd))
    subprocess.check_call('git %s' % cmd, cwd=cwd, shell=True)
  git('fetch')
  git('checkout %s' % branch)
  git('pull origin %s' % branch)
  if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
    need_to_regenerate_projects = True
if need_to_regenerate_projects:
  if jobset.platform_string() == 'linux':
    subprocess.check_call('tools/buildgen/generate_projects.sh', shell=True)
  else:
    print('WARNING: may need to regenerate projects, but since we are not on')
    print('         Linux this step is being skipped. Compilation MAY fail.')
# grab config
run_config = _CONFIGS[args.config]
build_config = run_config.build_config

if args.travis:
  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}

# Resolve the set of languages to test.
if 'all' in args.language:
  # BUG FIX: materialize a list -- under Python 3, dict.keys() returns a
  # view which has no .remove(), breaking the gcov filtering below.
  lang_list = list(_LANGUAGES.keys())
else:
  lang_list = args.language
# We don't support code coverage on some languages
if 'gcov' in args.config:
  for bad in ['objc', 'sanity']:
    if bad in lang_list:
      lang_list.remove(bad)

languages = set(_LANGUAGES[l] for l in lang_list)
for l in languages:
  l.configure(run_config, args)

# Languages that pass custom options to make cannot share one build with
# other languages (except under gcov, where a multilang image is used).
language_make_options = []
if any(language.make_options() for language in languages):
  if not 'gcov' in args.config and len(languages) != 1:
    print('languages with custom make options cannot be built simultaneously with other languages')
    sys.exit(1)
  else:
    language_make_options = next(iter(languages)).make_options()
if args.use_docker:
  if not args.travis:
    print('Seen --use_docker flag, will run tests under docker.')
    print('')
    print('IMPORTANT: The changes you are testing need to be locally committed')
    print('because only the committed changes in the current branch will be')
    print('copied to the docker environment.')
    time.sleep(5)

  # All selected languages must agree on a single docker image (except
  # under gcov, which has a dedicated multi-language image).
  dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
  if len(dockerfile_dirs) > 1:
    if 'gcov' in args.config:
      dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
      print ('Using multilang_jessie_x64 docker image for code coverage for '
             'all languages.')
    else:
      print ('Languages to be tested require running under different docker '
             'images.')
      sys.exit(1)
  else:
    dockerfile_dir = next(iter(dockerfile_dirs))

  # Re-invoke this script inside the container, minus the --use_docker flag.
  child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
  run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])

  env = os.environ.copy()
  env['RUN_TESTS_COMMAND'] = run_tests_cmd
  env['DOCKERFILE_DIR'] = dockerfile_dir
  env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
  if args.xml_report:
    env['XML_REPORT'] = args.xml_report
  if not args.travis:
    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.

  subprocess.check_call(['tools/run_tests/dockerize/build_docker_and_run_tests.sh'],
                        shell=True,
                        env=env)
  # The dockerized child run did all the work; nothing further to do here.
  sys.exit(0)

_check_arch_option(args.arch)
def make_jobspec(cfg, targets, makefile='Makefile'):
  """Build the JobSpecs that compile `targets` for build config `cfg`."""
  if platform_string() != 'windows':
    if not targets:
      return []
    cmd = [os.getenv('MAKE', 'make'),
           '-f', makefile,
           '-j', '%d' % args.jobs,
           'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
           'CONFIG=%s' % cfg]
    cmd += language_make_options
    if args.travis:
      cmd += ['JENKINS_BUILD=1']
    cmd += targets
    return [jobset.JobSpec(cmd, timeout_seconds=None)]
  # Windows: one msbuild invocation per target solution.
  extra_args = []
  # better do parallel compilation
  # empirically /m:2 gives the best performance/price and should prevent
  # overloading the windows workers.
  extra_args.extend(['/m:2'])
  # disable PDB generation: it's broken, and we don't need it during CI
  extra_args.extend(['/p:Jenkins=true'])
  return [
    jobset.JobSpec([_windows_build_bat(args.compiler),
                    'vsprojects\\%s.sln' % target,
                    '/p:Configuration=%s' % _MSBUILD_CONFIG[cfg]] +
                   extra_args +
                   language_make_options,
                   shell=True, timeout_seconds=None)
    for target in targets]
# Collect the union of make targets per makefile across all languages.
make_targets = {}
for l in languages:
  makefile = l.makefile_name()
  make_targets[makefile] = make_targets.get(makefile, set()).union(
      set(l.make_targets()))
def build_step_environ(cfg):
  """Environment variables shared by all build steps for config `cfg`."""
  env = {'CONFIG': cfg}
  msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
  if msbuild_cfg:
    env['MSBUILD_CONFIG'] = msbuild_cfg
  return env
# Pre-build steps run first, sequentially, with retries for flakiness.
build_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=5)
    for l in languages
    for cmdline in l.pre_build_steps()))
if make_targets:
  # One make invocation per makefile, compiling all collected targets.
  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
  build_steps.extend(set(make_commands))
build_steps.extend(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
    for l in languages
    for cmdline in l.build_steps()))

# Cleanup/reporting steps that run after all tests have finished.
post_tests_steps = list(set(
    jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
    for l in languages
    for cmdline in l.post_tests_steps()))
runs_per_test = args.runs_per_test
forever = args.forever
def _shut_down_legacy_server(legacy_server_port):
  """Ask a legacy port server on `legacy_server_port` to shut down.

  Best-effort: if no server responds on that port, silently do nothing.
  """
  try:
    version = int(urllib.request.urlopen(
        'http://localhost:%d/version_number' % legacy_server_port,
        timeout=10).read())
  except Exception:
    # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit still
    # propagate; any server/parse error means there is nothing to stop.
    pass
  else:
    urllib.request.urlopen(
        'http://localhost:%d/quitquitquit' % legacy_server_port).read()
def _start_port_server(port_server_port):
  """Ensure a compatible port server is listening on `port_server_port`.

  Kills and restarts the server if its reported version is older than the
  local copy; exits the process if a fresh server cannot be contacted.
  """
  # check if a compatible port server is running
  # if incompatible (version mismatch) ==> start a new one
  # if not running ==> start a new one
  # otherwise, leave it up
  try:
    version = int(urllib.request.urlopen(
        'http://localhost:%d/version_number' % port_server_port,
        timeout=10).read())
    print('detected port server running version %d' % version)
    running = True
  except Exception as e:
    print('failed to detect port server: %s' % sys.exc_info()[0])
    print(e.strerror)
    running = False
  if running:
    # Compare against the version of the local port_server.py script.
    current_version = int(subprocess.check_output(
        [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
         'dump_version']))
    print('my port server is version %d' % current_version)
    running = (version >= current_version)
    if not running:
      print('port_server version mismatch: killing the old one')
      urllib.request.urlopen('http://localhost:%d/quitquitquit' % port_server_port).read()
      time.sleep(1)
  if not running:
    fd, logfile = tempfile.mkstemp()
    os.close(fd)
    print('starting port_server, with log file %s' % logfile)
    args = [sys.executable, os.path.abspath('tools/run_tests/port_server.py'),
            '-p', '%d' % port_server_port, '-l', logfile]
    env = dict(os.environ)
    env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
    if platform_string() == 'windows':
      # Working directory of port server needs to be outside of Jenkins
      # workspace to prevent file lock issues.
      tempdir = tempfile.mkdtemp()
      port_server = subprocess.Popen(
          args,
          env=env,
          cwd=tempdir,
          creationflags = 0x00000008, # detached process
          close_fds=True)
    else:
      # Detach into its own session so it survives this script's exit.
      port_server = subprocess.Popen(
          args,
          env=env,
          preexec_fn=os.setsid,
          close_fds=True)
    time.sleep(1)
    # ensure port server is up
    waits = 0
    while True:
      # NOTE(review): after killing here the loop keeps polling; the kill
      # makes the poll() branch below handle the final failure path.
      if waits > 10:
        print('killing port server due to excessive start up waits')
        port_server.kill()
      if port_server.poll() is not None:
        print('port_server failed to start')
        # try one final time: maybe another build managed to start one
        time.sleep(1)
        try:
          urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
                                 timeout=1).read()
          print('last ditch attempt to contact port server succeeded')
          break
        except:
          traceback.print_exc()
          port_log = open(logfile, 'r').read()
          print(port_log)
          sys.exit(1)
      try:
        urllib.request.urlopen('http://localhost:%d/get' % port_server_port,
                               timeout=1).read()
        print('port server is up and ready')
        break
      except socket.timeout:
        print('waiting for port_server: timeout')
        traceback.print_exc();
        time.sleep(1)
        waits += 1
      except urllib.error.URLError:
        print('waiting for port_server: urlerror')
        traceback.print_exc();
        time.sleep(1)
        waits += 1
      except:
        # Unexpected failure: tear the server down and surface the error.
        traceback.print_exc()
        port_server.kill()
        raise
def _calculate_num_runs_failures(list_of_results):
"""Caculate number of runs and failures for a particular test.
Args:
list_of_results: (List) of JobResult object.
Returns:
A tuple of total number of runs and failures.
"""
num_runs = len(list_of_results) # By default, there is 1 run per JobResult.
num_failures = 0
for jobresult in list_of_results:
if jobresult.retries > 0:
num_runs += jobresult.retries
if jobresult.num_failures > 0:
num_failures += jobresult.num_failures
return num_runs, num_failures
# _build_and_run results
class BuildAndRunError(object):
  """Identity-compared sentinels naming which phase of a run failed."""
  BUILD = object()      # a build step failed
  TEST = object()       # at least one test run failed
  POST_TEST = object()  # a post-tests step failed
# returns a list of things that failed (or an empty list on success)
def _build_and_run(
        check_cancelled, newline_on_success, xml_report=None, build_only=False):
    """Do one pass of building & running tests.

    Args:
        check_cancelled: callable handed to jobset.run for the test phase;
            presumably polled to abort the run early — TODO confirm against
            jobset's API.
        newline_on_success: forwarded to jobset.run output formatting.
        xml_report: optional path; when set, a JUnit XML report is rendered.
        build_only: when True, stop after the build phase.

    Returns:
        List of BuildAndRunError sentinels for the phases that failed
        (empty list on success).
    """
    # build latest sequentially
    num_failures, resultset = jobset.run(
        build_steps, maxjobs=1, stop_on_failure=True,
        newline_on_success=newline_on_success, travis=args.travis)
    if num_failures:
        # A build failure is terminal: no tests or post-test steps run.
        return [BuildAndRunError.BUILD]
    if build_only:
        if xml_report:
            report_utils.render_junit_xml_report(resultset, xml_report,
                                                 suite_name=args.report_suite_name)
        return []
    # start antagonists (background load processes, count set by --antagonists)
    antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                   for _ in range(0, args.antagonists)]
    port_server_port = 32766
    _start_port_server(port_server_port)
    resultset = None
    num_test_failures = 0
    try:
        infinite_runs = runs_per_test == 0
        # Collect every test spec matching --regex and not matching
        # --regex_exclude, de-duplicated via a set.
        one_run = set(
            spec
            for language in languages
            for spec in language.test_specs()
            if (re.search(args.regex, spec.shortname) and
                (args.regex_exclude == '' or
                 not re.search(args.regex_exclude, spec.shortname))))
        # When running on travis, we want out test runs to be as similar as possible
        # for reproducibility purposes.
        if args.travis:
            massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
            random.shuffle(massaged_one_run)  # which it modifies in-place.
        if infinite_runs:
            assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
        # Repeat the full test list either forever (-n inf) or runs_per_test times.
        runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                         else itertools.repeat(massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)
        num_test_failures, resultset = jobset.run(
            all_runs, check_cancelled, newline_on_success=newline_on_success,
            travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
            stop_on_failure=args.stop_on_failure,
            add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
        if resultset:
            for k, v in sorted(resultset.items()):
                num_runs, num_failures = _calculate_num_runs_failures(v)
                # A test that failed on every run is FAILED; partial failures
                # across runs are reported as FLAKE.
                if num_failures == num_runs:  # what about infinite_runs???
                    jobset.message('FAILED', k, do_newline=True)
                elif num_failures > 0:
                    jobset.message(
                        'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
                        do_newline=True)
    finally:
        # Always reap antagonist processes, even if the test phase raised.
        for antagonist in antagonists:
            antagonist.kill()
        if xml_report and resultset:
            report_utils.render_junit_xml_report(resultset, xml_report,
                                                 suite_name=args.report_suite_name)
    # Post-test steps run after cleanup; their failures are reported separately.
    number_failures, _ = jobset.run(
        post_tests_steps, maxjobs=1, stop_on_failure=True,
        newline_on_success=newline_on_success, travis=args.travis)
    out = []
    if number_failures:
        out.append(BuildAndRunError.POST_TEST)
    if num_test_failures:
        out.append(BuildAndRunError.TEST)
    return out
# Script entry logic: either loop forever rebuilding on file changes, or do a
# single build-and-run pass and exit with a bitmask of failed phases.
if forever:
    success = True
    while True:
        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
        initial_time = dw.most_recent_change()

        def have_files_changed():
            # True once anything under the watched dirs changes.
            return dw.most_recent_change() != initial_time

        previous_success = success
        # BUG FIX: the original compared the returned *list* of errors with
        # `== 0`, which is always False, so `errors` was always falsy and
        # `success` was never updated. Keep the list and derive success from it.
        errors = _build_and_run(check_cancelled=have_files_changed,
                                newline_on_success=False,
                                build_only=args.build_only)
        success = not errors
        if not previous_success and success:
            # Only announce when we transition from failing to passing.
            jobset.message('SUCCESS',
                           'All tests are now passing properly',
                           do_newline=True)
        jobset.message('IDLE', 'No change detected')
        while not have_files_changed():
            time.sleep(1)
else:
    errors = _build_and_run(check_cancelled=lambda: False,
                            newline_on_success=args.newline_on_success,
                            xml_report=args.xml_report,
                            build_only=args.build_only)
    if not errors:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
    else:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    # Exit code encodes which phases failed as a bitmask.
    exit_code = 0
    if BuildAndRunError.BUILD in errors:
        exit_code |= 1
    if BuildAndRunError.TEST in errors:
        exit_code |= 2
    if BuildAndRunError.POST_TEST in errors:
        exit_code |= 4
    sys.exit(exit_code)
| |
import os
import unittest
from copy import deepcopy
from gos.configuration import Configuration
class ConfigurationTestCase(unittest.TestCase):
    """Exercises default-value fallback logic in gos.configuration.Configuration.

    Pattern shared by the ``test_update_with_default_*`` methods: seed a
    section of ``self.init_config`` with empty / partial / full values, call
    ``update_with_default_values()``, then assert which defaults were applied
    and which user-provided values survived.
    """

    def setUp(self):
        # Fresh Configuration for every test; individual tests mutate it freely.
        self.init_config = Configuration()

    def test_initialization_top_level(self):
        """ in simple initialization the top level section must be properly configured """
        config = Configuration()
        self.assertIn(config.DIR, config)
        self.assertIn(config.LOGGER, config)
        self.assertIn(config.IOSF, config)
        self.assertIn(config.INPUT, config)
        self.assertIn(config.ALGORITHM, config)
        self.assertIn(config.OUTPUT, config)
        self.assertIsInstance(config[config.LOGGER], dict)
        self.assertIsInstance(config[config.INPUT], dict)
        self.assertIsInstance(config[config.ALGORITHM], dict)
        self.assertIsInstance(config[config.OUTPUT], dict)

    def test_initialization_input_section(self):
        """ input section of the overall configuration must have some default init values and is predefined with them """
        config = Configuration()
        input_section = config[config.INPUT]
        self.assertIn(config.DIR, input_section)
        self.assertIn(config.LOGGER, input_section)
        self.assertIn(config.IOSF, input_section)
        self.assertIn(config.SOURCE, input_section)
        self.assertIsInstance(input_section[config.SOURCE], list)
        self.assertIsInstance(input_section[config.LOGGER], dict)

    def test_initialization_logger_section(self):
        """ logger section is a top level configuration for GOS wide logger """
        config = Configuration()
        logger_section = config[config.LOGGER]
        self.assertIn(config.NAME, logger_section)
        self.assertIn(config.LEVEL, logger_section)
        self.assertIn(config.FORMAT, logger_section)
        self.assertIn(config.DESTINATION, logger_section)

    def test_initialization_output_section(self):
        """ output section configuration for GOS results to be put in"""
        config = Configuration()
        output_section = config[config.OUTPUT]
        self.assertIn(config.DIR, output_section)
        self.assertIn(config.LOGGER, output_section)
        self.assertIn(config.IOSF, output_section)
        self.assertIn(config.ASSEMBLY_POINTS, output_section)
        self.assertIn(config.GENOMES, output_section)
        self.assertIn(config.STATS, output_section)
        self.assertIsInstance(output_section[config.STATS], dict)
        self.assertIsInstance(output_section[config.ASSEMBLY_POINTS], dict)
        self.assertIsInstance(output_section[config.GENOMES], dict)

    def test_initialization_algorithm_section_executable_containers(self):
        config = Configuration()
        algorithm_section = config[config.ALGORITHM]
        self.assertIn(config.EXECUTABLE_CONTAINERS, algorithm_section)

    def test_initialization_algorithm_section(self):
        """ algorithm section configuration for GOS workflow """
        config = Configuration()
        algorithm_section = config[config.ALGORITHM]
        self.assertIn(config.IOSF, algorithm_section)
        self.assertIn(config.LOGGER, algorithm_section)
        self.assertIn(config.TASKS, algorithm_section)
        self.assertIn(config.PIPELINE, algorithm_section)
        self.assertIsInstance(algorithm_section[config.TASKS], dict)
        self.assertIsInstance(algorithm_section[config.PIPELINE], dict)

    def test_update_with_default_top_level_dir_empty(self):
        """ top level configuration field "dir" default fallback when it is not specified """
        self.init_config[self.init_config.DIR] = None
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.DIR], os.getcwd())
        self.init_config[self.init_config.DIR] = ""
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.DIR], os.getcwd())

    # NOTE(review): renamed from "to_level" to "top_level" to match the
    # naming of the sibling tests; behavior is unchanged.
    def test_update_with_default_top_level_dir_predefined(self):
        """ top level configuration field "dir" default fallback when it is specified """
        self.init_config[self.init_config.DIR] = os.path.join("dir1", "dir2")
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.DIR], os.path.join("dir1", "dir2"))

    def test_update_with_default_top_level_io_silent_fail_empty(self):
        """ top level configuration field "io_silent_fail" default fallback when its not specified """
        self.init_config[self.init_config.IOSF] = None
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.IOSF], self.init_config.DEFAULT_IOSF)
        self.init_config[self.init_config.IOSF] = ""
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.IOSF], self.init_config.DEFAULT_IOSF)

    def test_update_with_default_top_level_io_silent_fail_predefined(self):
        """ top level configuration field "io_silent_fail" default fallback when its specified """
        self.init_config[self.init_config.IOSF] = True
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.IOSF], True)
        self.init_config[self.init_config.IOSF] = False
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.IOSF], False)
        self.init_config[self.init_config.IOSF] = "CustomValue"  # anything that works for if
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.IOSF], "CustomValue")

    def test_update_with_default_logger_name_empty(self):
        self.init_config[self.init_config.LOGGER][self.init_config.NAME] = ""
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.NAME],
                         self.init_config.DEFAULT_LOGGER_NAME)
        self.init_config[self.init_config.LOGGER][self.init_config.NAME] = None
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.NAME],
                         self.init_config.DEFAULT_LOGGER_NAME)

    def test_update_with_default_logger_name_predefined(self):
        self.init_config[self.init_config.LOGGER][self.init_config.NAME] = True
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.NAME],
                         str(True))
        self.init_config[self.init_config.LOGGER][self.init_config.NAME] = "MyName"
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.NAME],
                         "MyName")

    def test_update_with_default_logger_level_empty(self):
        self.init_config[self.init_config.LOGGER][self.init_config.LEVEL] = ""
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.LEVEL],
                         self.init_config.DEFAULT_LOGGER_LEVEL)
        self.init_config[self.init_config.LOGGER][self.init_config.LEVEL] = None
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.LEVEL],
                         self.init_config.DEFAULT_LOGGER_LEVEL)

    def test_update_with_default_logger_level_predefined(self):
        self.init_config[self.init_config.LOGGER][self.init_config.LEVEL] = "MyLevel"
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.LEVEL],
                         "MyLevel")
        self.init_config[self.init_config.LOGGER][self.init_config.LEVEL] = True
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.LEVEL],
                         str(True))

    def test_update_with_default_logger_format_empty(self):
        self.init_config[self.init_config.LOGGER][self.init_config.FORMAT] = ""
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.FORMAT],
                         self.init_config.DEFAULT_LOGGER_FORMAT)
        self.init_config[self.init_config.LOGGER][self.init_config.FORMAT] = None
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.FORMAT],
                         self.init_config.DEFAULT_LOGGER_FORMAT)

    def test_update_with_default_logger_format_predefined(self):
        self.init_config[self.init_config.LOGGER][self.init_config.FORMAT] = "MyFormat"
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.FORMAT],
                         "MyFormat")
        self.init_config[self.init_config.LOGGER][self.init_config.FORMAT] = True
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.LOGGER][self.init_config.FORMAT],
                         str(True))

    def test_update_with_default_input_source_empty(self):
        for empty_value in (None, ""):
            self.init_config[self.init_config.INPUT][self.init_config.SOURCE] = empty_value
            self.init_config.update_with_default_values()
            self.assertListEqual(self.init_config[self.init_config.INPUT][self.init_config.SOURCE],
                                 [])

    def test_update_with_default_input_source_predefined(self):
        for source_value in [["path1", "path2"], ["path3", "path4", "path5"]]:
            self.init_config[self.init_config.INPUT][self.init_config.SOURCE] = source_value
            self.init_config.update_with_default_values()
            self.assertListEqual(source_value,
                                 self.init_config[self.init_config.INPUT][self.init_config.SOURCE])

    def test_update_with_default_input_dir_empty(self):
        for empty_value in (None, ""):
            self.init_config[self.init_config.INPUT][self.init_config.DIR] = empty_value
            self.init_config.update_with_default_values()
            self.assertEqual(self.init_config[self.init_config.INPUT][self.init_config.DIR],
                             self.init_config.DEFAULT_INPUT_DIR)

    def test_update_with_default_input_io_silent_fail_empty(self):
        # An unset section-level IOSF must inherit the top-level IOSF value.
        for empty_value in (None, ""):
            for top_level_iosf_value in (True, False):
                self.init_config[self.init_config.INPUT][self.init_config.IOSF] = empty_value
                self.init_config[self.init_config.IOSF] = top_level_iosf_value
                self.init_config.update_with_default_values()
                self.assertEqual(self.init_config[self.init_config.INPUT][self.init_config.IOSF],
                                 top_level_iosf_value)

    def get_list_of_logger_configurations(self):
        """Two distinct, fully-populated logger configs used as fixtures."""
        return [{
            self.init_config.NAME: "Logger Name 1",
            self.init_config.LEVEL: "info 1",
            self.init_config.FORMAT: "format 1",
            self.init_config.DESTINATION: "destination 1"
        }, {
            self.init_config.NAME: "Logger Name 2",
            self.init_config.LEVEL: "info 2",
            self.init_config.FORMAT: "format 2",
            self.init_config.DESTINATION: "destination 2"
        }]

    def test_update_with_default_input_logger_empty(self):
        top_level_loggers = self.get_list_of_logger_configurations()
        for logger_config in top_level_loggers:
            self.init_config = Configuration()
            self.init_config[self.init_config.INPUT][self.init_config.LOGGER] = {}
            self.init_config[self.init_config.LOGGER] = logger_config
            self.init_config.update_with_default_values()
            self.assertDictEqual(self.init_config[self.init_config.INPUT][self.init_config.LOGGER],
                                 logger_config)

    def test_update_with_default_input_logger_partially_predefined(self):
        # Keys present in the partial config survive; missing keys are filled
        # from the top-level logger config.
        partial_logger_configs = [
            {self.init_config.NAME: "My name",
             self.init_config.LEVEL: "My level"},
            {self.init_config.LEVEL: "My level 2"},
            {self.init_config.FORMAT: "My format",
             self.init_config.DESTINATION: "My destination"}
        ]
        for partial_logger_config in partial_logger_configs:
            for full_logger_config in self.get_list_of_logger_configurations():
                self.init_config[self.init_config.INPUT][self.init_config.LOGGER] = deepcopy(partial_logger_config)
                self.init_config[self.init_config.LOGGER] = full_logger_config
                self.init_config.update_with_default_values()
                for key, value in full_logger_config.items():
                    if key not in partial_logger_config:
                        self.assertEqual(full_logger_config[key],
                                         self.init_config[self.init_config.INPUT][self.init_config.LOGGER][key])
                    else:
                        self.assertEqual(partial_logger_config[key],
                                         self.init_config[self.init_config.INPUT][self.init_config.LOGGER][key])

    def test_update_with_default_input_logger_specified(self):
        for full_logger_spec in self.get_list_of_logger_configurations():
            self.init_config[self.init_config.INPUT][self.init_config.LOGGER] = deepcopy(full_logger_spec)
            self.init_config.update_with_default_values()
            self.assertDictEqual(full_logger_spec,
                                 self.init_config[self.init_config.INPUT][self.init_config.LOGGER])

    def test_update_with_default_output_dir_empty(self):
        for empty_value in (None, ""):
            self.init_config[self.init_config.OUTPUT][self.init_config.DIR] = empty_value
            self.init_config.update_with_default_values()
            self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.DIR],
                             os.path.join(self.init_config[self.init_config.DIR],
                                          self.init_config.DEFAULT_OUTPUT_DIR))

    def test_update_with_default_output_io_silent_fail_empty(self):
        for empty_value in (None, ""):
            for top_level_iosf_value in (True, False):
                self.init_config[self.init_config.OUTPUT][self.init_config.IOSF] = empty_value
                self.init_config[self.init_config.IOSF] = top_level_iosf_value
                self.init_config.update_with_default_values()
                self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.IOSF],
                                 top_level_iosf_value)

    # BUG FIX: this method was named "tet_update_..." so unittest discovery
    # silently never ran it; renamed to restore the "test_" prefix.
    def test_update_with_default_output_logger_empty(self):
        for empty_value in (None, "", {}):
            top_level_loggers = self.get_list_of_logger_configurations()
            for logger_config in top_level_loggers:
                self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER] = empty_value
                self.init_config[self.init_config.LOGGER] = logger_config
                self.init_config.update_with_default_values()
                self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER],
                                     logger_config)

    def test_update_with_default_output_logger_partially_predefined(self):
        partial_logger_configs = [
            {self.init_config.NAME: "My name",
             self.init_config.LEVEL: "My level"},
            {self.init_config.LEVEL: "My level 2"},
            {self.init_config.FORMAT: "My format",
             self.init_config.DESTINATION: "My destination"}
        ]
        for partial_logger_config in partial_logger_configs:
            for full_logger_config in self.get_list_of_logger_configurations():
                self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER] = deepcopy(partial_logger_config)
                self.init_config[self.init_config.LOGGER] = full_logger_config
                self.init_config.update_with_default_values()
                for key, value in full_logger_config.items():
                    if key not in partial_logger_config:
                        self.assertEqual(full_logger_config[key],
                                         self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER][key])
                    else:
                        self.assertEqual(partial_logger_config[key],
                                         self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER][key])

    def test_update_with_default_output_logger_specified(self):
        for full_logger_spec in self.get_list_of_logger_configurations():
            self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER] = deepcopy(full_logger_spec)
            self.init_config.update_with_default_values()
            self.assertDictEqual(full_logger_spec,
                                 self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER])

    def test_update_with_default_output_stats_empty(self):
        # An empty stats section must inherit dir/iosf/logger from the output
        # section and fall back to the stats-specific defaults for the rest.
        for dir_name in ("output_dir1", "output_dir2", "output_dir3"):
            for iosf_value in (True, False):
                for logger_value in self.get_list_of_logger_configurations():
                    self.init_config[self.init_config.OUTPUT][self.init_config.STATS] = {}
                    self.init_config[self.init_config.OUTPUT][self.init_config.DIR] = dir_name
                    self.init_config[self.init_config.OUTPUT][self.init_config.IOSF] = iosf_value
                    self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER] = logger_value
                    self.init_config.update_with_default_values()
                    self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS][self.init_config.FILE],
                                     self.init_config.DEFAULT_OUTPUT_STATS_FILE)
                    self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS][self.init_config.DIR],
                                     self.init_config.DEFAULT_OUTPUT_STATS_DIR)
                    self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS][self.init_config.IOSF],
                                     iosf_value)
                    self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS][self.init_config.LOGGER],
                                         logger_value)
                    self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS][self.init_config.FILE],
                                     self.init_config.DEFAULT_OUTPUT_STATS_FILE)

    def get_full_stats_configs(self):
        """Two fully-populated output/stats configs used as fixtures."""
        return [
            {self.init_config.DIR: "stat_dir_predefined_1",
             self.init_config.FILE: "file_predefined_1.txt",
             self.init_config.LOGGER: self.get_list_of_logger_configurations()[0],
             self.init_config.IOSF: True},
            {self.init_config.DIR: "stat_dir_predefined_2",
             self.init_config.FILE: "file_predefined_2.txt",
             self.init_config.LOGGER: self.get_list_of_logger_configurations()[0],
             self.init_config.IOSF: False},
        ]

    def test_update_with_default_output_stats_partially_predefined(self):
        partial_stats_configs = [
            {self.init_config.DIR: "stats_dir",
             self.init_config.FILE: "my_file_name.txt"},
            {self.init_config.IOSF: True},
            {self.init_config.DIR: "my_dir",
             self.init_config.FILE: "my_file_name2.txt",
             self.init_config.LOGGER: self.get_list_of_logger_configurations()[0]}
        ]
        for partial_stats_config in partial_stats_configs:
            for full_stats_config in self.get_full_stats_configs():
                self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER] = full_stats_config[self.init_config.LOGGER]
                self.init_config[self.init_config.OUTPUT][self.init_config.STATS] = deepcopy(partial_stats_config)
                self.init_config[self.init_config.OUTPUT][self.init_config.IOSF] = full_stats_config[self.init_config.IOSF]
                self.init_config.update_with_default_values()
                for key, value in partial_stats_config.items():
                    self.assertEqual(partial_stats_config[key],
                                     self.init_config[self.init_config.OUTPUT][self.init_config.STATS][key])

    def test_update_with_default_output_stats_predefined(self):
        for full_stats_config in self.get_full_stats_configs():
            self.init_config[self.init_config.OUTPUT][self.init_config.STATS] = deepcopy(full_stats_config)
            self.init_config.update_with_default_values()
            self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.STATS],
                                 full_stats_config)

    def test_update_with_default_output_assembly_points_empty(self):
        self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS] = {}
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.FILE],
                         self.init_config.DEFAULT_OUTPUT_AP_FILE)
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.DIR],
                         self.init_config.DEFAULT_OUTPUT_AP_DIR)
        self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.LOGGER],
                             self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.GENOME_SPECIFIC],
                         self.init_config.DEFAULT_OUTPUT_AP_GENOME_SPECIFIC)
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.GENOME_SPECIFIC_FNP],
                         self.init_config.DEFAULT_OUTPUT_AP_GSFNP)
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.IOSF],
                         self.init_config[self.init_config.OUTPUT][self.init_config.IOSF])

    def test_update_with_default_output_assembly_points_partially_predefined(self):
        partial_ap_config = {
            self.init_config.DIR: "my_ap_dir",
            self.init_config.GENOME_SPECIFIC: True,
            self.init_config.IOSF: False
        }
        self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS] = partial_ap_config
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.FILE],
                         self.init_config.DEFAULT_OUTPUT_AP_FILE)
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.DIR],
                         partial_ap_config[self.init_config.DIR])
        self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.LOGGER],
                             self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.GENOME_SPECIFIC],
                         partial_ap_config[self.init_config.GENOME_SPECIFIC])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.GENOME_SPECIFIC_FNP],
                         self.init_config.DEFAULT_OUTPUT_AP_GSFNP)
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][self.init_config.IOSF],
                         partial_ap_config[self.init_config.IOSF])

    def test_update_with_default_output_assembly_points_predefined(self):
        full_ap_config = {
            self.init_config.FILE: "my_ap_file.txt",
            self.init_config.DIR: "my_ap_dir",
            self.init_config.IOSF: True,
            self.init_config.GENOME_SPECIFIC: True,
            self.init_config.GENOME_SPECIFIC_FNP: "my_patter_string_{genome_name}.txt",
            self.init_config.LOGGER: self.get_list_of_logger_configurations()[0]
        }
        self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS] = full_ap_config
        self.init_config.update_with_default_values()
        for key, value in full_ap_config.items():
            self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.ASSEMBLY_POINTS][key],
                             full_ap_config[key])

    def test_update_with_default_output_genomes_empty(self):
        self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES] = {}
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.DIR],
                         self.init_config.DEFAULT_OUTPUT_GENOMES_DIR)
        self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.LOGGER],
                             self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.IOSF],
                         self.init_config[self.init_config.OUTPUT][self.init_config.IOSF])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.OUTPUT_NG_FRAGMENTS],
                         self.init_config.DEFAULT_OUTPUT_GENOMES_ONGF)

    def test_update_with_default_output_genomes_partially_predefined(self):
        partial_genomes_config = {
            self.init_config.OUTPUT_NG_FRAGMENTS: True,
            self.init_config.IOSF: False
        }
        self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES] = partial_genomes_config
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.DIR],
                         self.init_config.DEFAULT_OUTPUT_GENOMES_DIR)
        self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.LOGGER],
                             self.init_config[self.init_config.OUTPUT][self.init_config.LOGGER])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.IOSF],
                         partial_genomes_config[self.init_config.IOSF])
        self.assertEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES][self.init_config.OUTPUT_NG_FRAGMENTS],
                         partial_genomes_config[self.init_config.OUTPUT_NG_FRAGMENTS])

    def test_update_with_default_output_genomes_predefined(self):
        predefined_genome_config = {
            self.init_config.OUTPUT_NG_FRAGMENTS: True,
            self.init_config.IOSF: False,
            self.init_config.LOGGER: self.get_list_of_logger_configurations()[0],
            self.init_config.DIR: "my_genome_dir"
        }
        self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES] = predefined_genome_config
        self.init_config.update_with_default_values()
        self.assertDictEqual(self.init_config[self.init_config.OUTPUT][self.init_config.GENOMES],
                             predefined_genome_config)

    def test_update_with_default_algorithm_empty(self):
        self.init_config[self.init_config.ALGORITHM] = {}
        self.init_config.update_with_default_values()
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.LOGGER],
                             self.init_config[self.init_config.LOGGER])
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.TASKS], {
            self.init_config.PATHS: [self.init_config.DEFAULT_ALGORITHM_TASKS_PATH]})
        self.assertEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.EXECUTABLE_CONTAINERS], [])
        expected_pipeline_config = {
            self.init_config.LOGGER: self.init_config[self.init_config.ALGORITHM][self.init_config.LOGGER],
            self.init_config.SELF_LOOP: self.init_config.DEFAULT_ALGORITHM_PIPELINE_SELF_LOOP,
            self.init_config.ENTRIES: [],
            self.init_config.IOSF: self.init_config[self.init_config.ALGORITHM][self.init_config.IOSF]
        }
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.PIPELINE],
                             expected_pipeline_config)

    def test_update_with_default_algorithm_predefined_tasks_paths(self):
        # User-supplied task paths are kept and the default path is appended.
        my_path_list = ["my_path1", "my_path2"]
        self.init_config[self.init_config.ALGORITHM] = {
            self.init_config.TASKS: {
                self.init_config.PATHS: deepcopy(my_path_list)
            }
        }
        self.init_config.update_with_default_values()
        self.assertIn(self.init_config.DEFAULT_ALGORITHM_TASKS_PATH,
                      self.init_config[self.init_config.ALGORITHM][self.init_config.TASKS][self.init_config.PATHS])
        for my_path in my_path_list:
            self.assertIn(my_path,
                          self.init_config[self.init_config.ALGORITHM][self.init_config.TASKS][self.init_config.PATHS])

    def test_update_with_default_algorithm_pipeline_logger(self):
        self.init_config[self.init_config.ALGORITHM] = {
            self.init_config.PIPELINE: {
                self.init_config.ENTRIES: []
            }
        }
        self.init_config.update_with_default_values()
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.PIPELINE][self.init_config.LOGGER],
                             self.init_config[self.init_config.ALGORITHM][self.init_config.LOGGER])

    def test_update_with_default_algorithm_predefined(self):
        predefined_algorithm_config = {
            self.init_config.IOSF: False,
            self.init_config.LOGGER: self.get_list_of_logger_configurations()[0],
            self.init_config.TASKS: {
                self.init_config.PATHS: ["my_path_1", "my_path_2"]
            },
            self.init_config.PIPELINE: {
                self.init_config.LOGGER: self.get_list_of_logger_configurations()[1],
                self.init_config.SELF_LOOP: False,
                self.init_config.ENTRIES: ["round1", "round2"]
            }
        }
        self.init_config[self.init_config.ALGORITHM] = deepcopy(predefined_algorithm_config)
        self.init_config.update_with_default_values()
        self.assertEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.IOSF],
                         predefined_algorithm_config[self.init_config.IOSF])
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.LOGGER],
                             predefined_algorithm_config[self.init_config.LOGGER])
        self.assertListEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.TASKS][self.init_config.PATHS],
                             [self.init_config.DEFAULT_ALGORITHM_TASKS_PATH] +
                             predefined_algorithm_config[self.init_config.TASKS][self.init_config.PATHS])
        # Pipeline IOSF is inherited from the algorithm section, so mirror
        # that into the expected dict before comparing.
        predefined_algorithm_config[self.init_config.PIPELINE][self.init_config.IOSF] = self.init_config[self.init_config.ALGORITHM][self.init_config.IOSF]
        self.assertDictEqual(self.init_config[self.init_config.ALGORITHM][self.init_config.PIPELINE],
                             predefined_algorithm_config[self.init_config.PIPELINE])

    def test_update_with_default_algorithm_specified_executable_container_instantiation(self):
        self.set_up_executable_containers_for_algorithm_section()
        self.init_config.update_with_default_values()
        self.assertIsInstance(self.init_config[self.init_config.ALGORITHM]["stages"], list)

    def set_up_executable_containers_for_algorithm_section(self):
        """Seed the algorithm section with a single 'stage' container spec."""
        ecs = [
            {
                "name": "stage",
                "reference": "stages",
                "entry_type_name": "task"
            }
        ]
        self.init_config[self.init_config.ALGORITHM][self.init_config.EXECUTABLE_CONTAINERS] = ecs

    def test_update_with_default_algorithm_automatically_generated_reference_for_executable_container(self):
        # When "reference" is omitted it is derived by pluralizing "name".
        self.init_config[self.init_config.ALGORITHM][self.init_config.EXECUTABLE_CONTAINERS] = [{
            "name": "stage",
            "entry_type_name": "task"
        }]
        self.init_config.update_with_default_values()
        ecs = self.init_config[self.init_config.ALGORITHM][self.init_config.EXECUTABLE_CONTAINERS]
        self.assertEqual(ecs[0]["reference"], "stages")

    def test_update_with_default_algorithm_specified_executable_container_partially_specification(self):
        self.set_up_executable_containers_for_algorithm_section()
        self.init_config[self.init_config.ALGORITHM]["stages"] = [
            {
                self.init_config.NAME: "stage1",
            },
            {
                self.init_config.NAME: "stage2",
                self.init_config.ENTRIES: ["task1", "task2"]
            },
            {
                self.init_config.NAME: "stage3",
                self.init_config.SELF_LOOP: False,
                self.init_config.ENTRIES: ["task1", "task2", "task3"]
            }
        ]
        self.init_config.update_with_default_values()
        stages = self.init_config[self.init_config.ALGORITHM]["stages"]
        self.assertIsInstance(stages, list)
        self.assertEqual(len(stages), 3)
        stage1, stage2, stage3 = stages
        self.assertEqual(stage1[self.init_config.NAME], "stage1")
        self.assertEqual(stage1[self.init_config.SELF_LOOP], self.init_config.DEFAULT_ALGORITHM_EC_SELF_LOOP)
        self.assertListEqual(stage1[self.init_config.ENTRIES], [])
        self.assertEqual(stage2[self.init_config.NAME], "stage2")
        self.assertEqual(stage2[self.init_config.SELF_LOOP], self.init_config.DEFAULT_ALGORITHM_EC_SELF_LOOP)
        self.assertListEqual(stage2[self.init_config.ENTRIES], ["task1", "task2"])
        self.assertEqual(stage3[self.init_config.NAME], "stage3")
        self.assertEqual(stage3[self.init_config.SELF_LOOP], False)
        self.assertListEqual(stage3[self.init_config.ENTRIES], ["task1", "task2", "task3"])
# Allow running this test module directly (discovers and runs all test_* methods).
if __name__ == '__main__':
    unittest.main()
| |
"""Colormaps."""
# --- import --------------------------------------------------------------------------------------
import copy
import numpy as np
from numpy import r_
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as mplcolors
import matplotlib.gridspec as grd
from ._turbo import turbo
# --- define -------------------------------------------------------------------------------------
__all__ = [
"colormaps",
"get_color_cycle",
"grayify_cmap",
"overline_colors",
"plot_colormap_components",
]
# --- functions ----------------------------------------------------------------------------------
def make_cubehelix(name="WrightTools", gamma=0.5, s=0.25, r=-1, h=1.3, reverse=False, darkest=0.7):
    """Create a cubehelix-type colormap.

    Look `here`__ for more information.

    __ http://arxiv.org/abs/1108.5083

    Parameters
    ----------
    name : string (optional)
        Name of the new cmap. Default is WrightTools.
    gamma : number (optional)
        Intensity factor. Default is 0.5.
    s : number (optional)
        Start color factor. Default is 0.25.
    r : number (optional)
        Number and direction of rotations. Default is -1.
    h : number (optional)
        Hue factor. Default is 1.3.
    reverse : boolean (optional)
        Reverse the output colormap. By default (False) the map runs
        from light to dark.
    darkest : number (optional)
        Default is 0.7.

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    See Also
    --------
    plot_colormap_components
        Displays RGB components of colormaps.
    """
    # Per-channel luminance rebalancing ratios.
    weights = {"red": 0.213 / 0.30, "green": 0.715 / 0.99, "blue": 0.072 / 0.11}
    # Standard cubehelix deviation coefficients (cos, sin) per channel.
    coefficients = {
        "red": (-0.14861, 1.78277),
        "green": (-0.29227, -0.90649),
        "blue": (1.97294, 0.0),
    }

    def channel(p0, p1):
        def evaluate(x):
            # Amplitude and angle of the deviation from the black-to-white
            # diagonal in the plane of constant perceived intensity.
            intensity = darkest * x ** gamma
            lightness = 1 - intensity  # starts at 1
            if reverse:
                lightness = lightness[::-1]
            amplitude = lightness.copy()
            low = lightness < 0.5
            amplitude[low] = h * lightness[low] / 2.0
            amplitude[~low] = h * (1 - lightness[~low]) / 2.0
            angle = 2 * np.pi * (s / 3 + r * x)
            return lightness + amplitude * (p0 * np.cos(angle) + p1 * np.sin(angle))

        return evaluate

    segmentdata = {
        color: channel(p0 * weights[color], p1 * weights[color])
        for color, (p0, p1) in coefficients.items()
    }
    return matplotlib.colors.LinearSegmentedColormap(name, segmentdata)
def make_colormap(seq, name="CustomMap", plot=False):
    """Generate a LinearSegmentedColormap.

    Parameters
    ----------
    seq : list of tuples
        A sequence of floats and RGB-tuples. The floats should be increasing
        and in the interval (0, 1).
    name : string (optional)
        A name for the colormap.
    plot : boolean (optional)
        Use to generate a plot of the colormap (Default is False).

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    `Source`__

    __ http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
    """
    # Pad the sequence so every anchor float has an RGB tuple on each side.
    sentinel = (None,) * 3
    padded = [sentinel, 0.0] + list(seq) + [1.0, sentinel]
    cdict = {"red": [], "green": [], "blue": []}
    for position, item in enumerate(padded):
        if not isinstance(item, float):
            continue
        below = padded[position - 1]  # color approached from the left
        above = padded[position + 1]  # color leaving to the right
        for index, channel in enumerate(("red", "green", "blue")):
            cdict[channel].append([item, below[index], above[index]])
    cmap = mplcolors.LinearSegmentedColormap(name, cdict)
    if plot:
        plot_colormap_components(cmap)
    return cmap
def nm_to_rgb(nm):
    """Convert a wavelength to corresponding RGB values [0.0-1.0].

    Parameters
    ----------
    nm : int or float
        The wavelength of light.

    Returns
    -------
    List of [R, G, B] values between 0 and 1.

    `original code`__

    __ http://www.physics.sfasu.edu/astro/color/spectra.html
    """
    w = int(nm)

    def ramp_up(lo, hi):
        # 0 -> 1 linear ramp across [lo, hi]
        return (w - lo) / (hi - lo)

    def ramp_down(lo, hi):
        # 1 -> 0 linear ramp across [lo, hi]
        return -(w - hi) / (hi - lo)

    # hue -----------------------------------------------------------------------------------------
    # NOTE(review): the violet ramp uses 350 where the cited original uses
    # 380; preserved as-is to keep output identical.
    if 380 <= w < 440:
        red, green, blue = ramp_down(350.0, 440.0), 0.0, 1.0
    elif 440 <= w < 490:
        red, green, blue = 0.0, ramp_up(440.0, 490.0), 1.0
    elif 490 <= w < 510:
        red, green, blue = 0.0, 1.0, ramp_down(490.0, 510.0)
    elif 510 <= w < 580:
        red, green, blue = ramp_up(510.0, 580.0), 1.0, 0.0
    elif 580 <= w < 645:
        red, green, blue = 1.0, ramp_down(580.0, 645.0), 0.0
    elif 645 <= w <= 780:
        red, green, blue = 1.0, 0.0, 0.0
    else:
        red = green = blue = 0.0
    # intensity correction ------------------------------------------------------------------------
    # Attenuate toward both ends of the visible range.
    if 380 <= w < 420:
        attenuation = 0.3 + 0.7 * (w - 350) / (420 - 350)
    elif 420 <= w <= 700:
        attenuation = 1.0
    elif 700 < w <= 780:
        attenuation = 0.3 + 0.7 * (780 - w) / (780 - 700)
    else:
        attenuation = 0.0
    attenuation *= 255
    return [float(int(attenuation * channel) / 256.0) for channel in (red, green, blue)]
def plot_colormap_components(cmap):
    """Plot the RGB components and grayscale rendering of a given colormap."""
    from ._helpers import set_ax_labels  # deferred to avoid circular import

    plt.figure(figsize=[8, 4])
    gs = grd.GridSpec(3, 1, height_ratios=[1, 10, 1], hspace=0.05)
    ramp = np.vstack([np.linspace(0, 1, 256)] * 2)
    # top: the colormap itself
    ax = plt.subplot(gs[0])
    ax.imshow(ramp, aspect="auto", cmap=cmap, vmin=0.0, vmax=1.0)
    ax.set_title(cmap.name, fontsize=20)
    ax.set_axis_off()
    # middle: per-channel curves plus perceived luminance
    ax = plt.subplot(gs[1])
    samples = cmap(np.arange(cmap.N))
    rgb_weight = [0.299, 0.587, 0.114]
    luminance = np.sqrt(np.dot(samples[:, :3] ** 2, rgb_weight))
    xi = np.linspace(0, 1, cmap.N)
    for curve, color in zip(samples[:, :3].T, "rgb"):
        plt.plot(xi, curve.clip(0, 1), color, linewidth=5, alpha=0.6)
    plt.plot(xi, luminance, "k", linewidth=5, alpha=0.6)
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.1, 1.1)
    set_ax_labels(ax=ax, xlabel=None, xticks=False, ylabel="intensity")
    # bottom: grayscale version of the colormap
    ax = plt.subplot(gs[2])
    ax.imshow(ramp, aspect="auto", cmap=grayify_cmap(cmap), vmin=0.0, vmax=1.0)
    ax.set_axis_off()
def grayify_cmap(cmap):
    """Return a grayscale version of the colormap.

    `Source`__

    __ https://jakevdp.github.io/blog/2014/10/16/how-bad-is-your-colormap/
    """
    cmap = plt.cm.get_cmap(cmap)
    rgba = cmap(np.arange(cmap.N))
    # convert RGBA to perceived grayscale luminance
    # cf. http://alienryderflex.com/hsp.html
    weights = [0.299, 0.587, 0.114]
    gray = np.sqrt(np.dot(rgba[:, :3] ** 2, weights))
    rgba[:, :3] = gray[:, np.newaxis]
    return mplcolors.LinearSegmentedColormap.from_list(cmap.name + "_grayscale", rgba, cmap.N)
def get_color_cycle(n, cmap="rainbow", rotations=3):
    """Get a list of RGBA colors following a colormap.

    Useful for plotting lots of elements, keeping the color of each unique.

    Parameters
    ----------
    n : integer
        The number of colors to return.
    cmap : string (optional)
        The colormap to use in the cycle. Default is rainbow.
    rotations : integer (optional)
        The number of times to repeat the colormap over the cycle. Default is 3.

    Returns
    -------
    list
        List of RGBA lists.
    """
    chosen = colormaps[cmap]
    # ceil(n / rotations) samples per pass through the colormap
    per_rotation = n // rotations
    if n % rotations:
        per_rotation += 1
    positions = list(np.linspace(0, 1, per_rotation)) * rotations
    return chosen(positions[:n])
# --- color maps ----------------------------------------------------------------------------------
# Building blocks for the public ``colormaps`` registry assembled below.
cubehelix = make_cubehelix()
greenscale = ["#000000", "#00FF00"]  # black -> green
greyscale = ["#FFFFFF", "#000000"]  # white -> black
invisible = ["#FFFFFF", "#FFFFFF"]  # white -> white (renders nothing)
# isoluminant colorbar based on the research of Kindlmann et al.
# http://dx.doi.org/10.1109/VISUAL.2002.1183788
c = mplcolors.ColorConverter().to_rgb  # shorthand: color spec -> RGB tuple
# Kindlmann-style isoluminant map: white anchor, then constant-luminance hues.
isoluminant1 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.847, 0.057, 0.057]),
        1 / 6.0,
        c(r_[0.847, 0.057, 0.057]),
        c(r_[0.527, 0.527, 0.000]),
        2 / 6.0,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 6.0,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.000, 0.559, 0.559]),
        4 / 6.0,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.316, 0.316, 0.991]),
        5 / 6.0,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.718, 0.000, 0.718]),
    ],
    # Fix: the name contained a stray backtick ("isoluminant`"); use the
    # numbered-name convention of isoluminant2/isoluminant3.
    name="isoluminant1",
)
# Reverse-ordered variant of isoluminant1 (white -> purple ... -> red).
isoluminant2 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.718, 0.000, 0.718]),
        1 / 6.0,
        c(r_[0.718, 0.000, 0.718]),
        c(r_[0.316, 0.316, 0.991]),
        2 / 6.0,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        3 / 6.0,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        4 / 6.0,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        5 / 6.0,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant2",
)
# Five-segment variant: starts at blue, omits the purple endpoint.
isoluminant3 = make_colormap(
    [
        c(r_[1.000, 1.000, 1.000]),
        c(r_[0.316, 0.316, 0.991]),
        1 / 5.0,
        c(r_[0.316, 0.316, 0.991]),
        c(r_[0.000, 0.559, 0.559]),
        2 / 5.0,
        c(r_[0.000, 0.559, 0.559]),
        c(r_[0.000, 0.592, 0.000]),
        3 / 5.0,
        c(r_[0.000, 0.592, 0.000]),
        c(r_[0.527, 0.527, 0.000]),
        4 / 5.0,
        c(r_[0.527, 0.527, 0.000]),
        c(r_[0.847, 0.057, 0.057]),
    ],
    name="isoluminant3",
)
# Legacy diverging palette (registered below as colormaps["signed_old"]).
signed_old = [
    "#0000FF",  # blue
    "#00BBFF",  # blue-aqua
    "#00FFFF",  # aqua
    "#FFFFFF",  # white
    "#FFFF00",  # yellow
    "#FFBB00",  # orange
    "#FF0000",  # red
]
# 'skyebar' palette family: rainbow bars with white/black anchors.
skyebar = [
    "#FFFFFF",  # white
    "#000000",  # black
    "#0000FF",  # blue
    "#00FFFF",  # cyan
    "#64FF00",  # light green
    "#FFFF00",  # yellow
    "#FF8000",  # orange
    "#FF0000",  # red
    "#800000",  # dark red
]
# 'dark' variant: drops the leading white anchor.
skyebar_d = [
    "#000000",  # black
    "#0000FF",  # blue
    "#00FFFF",  # cyan
    "#64FF00",  # light green
    "#FFFF00",  # yellow
    "#FF8000",  # orange
    "#FF0000",  # red
    "#800000",  # dark red
]
# 'inverted' variant: black precedes white.
skyebar_i = [
    "#000000",  # black
    "#FFFFFF",  # white
    "#0000FF",  # blue
    "#00FFFF",  # cyan
    "#64FF00",  # light green
    "#FFFF00",  # yellow
    "#FF8000",  # orange
    "#FF0000",  # red
    "#800000",  # dark red
]
wright = ["#FFFFFF", "#0000FF", "#00FFFF", "#00FF00", "#FFFF00", "#FF0000", "#881111"]
class cmapdict(dict):
    """Colormap registry that lazily falls back to matplotlib for unknown keys.

    Known keys behave like a plain dict; a missing key is resolved once via
    ``plt.get_cmap`` and memoized.
    """

    def __missing__(self, key):
        # dict.__getitem__ calls this only when *key* is absent, so the
        # original override's double lookup (``key in self`` + ``get``)
        # is avoided on the hot path.
        cmap = plt.get_cmap(key)
        self[key] = cmap
        return cmap
colormaps = cmapdict()
# Copies are taken of matplotlib's registered maps so the set_bad /
# set_under / set_over tweaks below do not mutate the global registry.
colormaps["cubehelix"] = copy.copy(plt.get_cmap("cubehelix_r"))
colormaps["default"] = cubehelix
colormaps["signed"] = copy.copy(plt.get_cmap("bwr"))
colormaps["greenscale"] = mplcolors.LinearSegmentedColormap.from_list("greenscale", greenscale)
colormaps["greyscale"] = mplcolors.LinearSegmentedColormap.from_list("greyscale", greyscale)
colormaps["invisible"] = mplcolors.LinearSegmentedColormap.from_list("invisible", invisible)
colormaps["isoluminant1"] = isoluminant1
colormaps["isoluminant2"] = isoluminant2
colormaps["isoluminant3"] = isoluminant3
colormaps["signed_old"] = mplcolors.LinearSegmentedColormap.from_list("signed", signed_old)
colormaps["skyebar1"] = mplcolors.LinearSegmentedColormap.from_list("skyebar", skyebar)
colormaps["skyebar2"] = mplcolors.LinearSegmentedColormap.from_list("skyebar dark", skyebar_d)
colormaps["skyebar3"] = mplcolors.LinearSegmentedColormap.from_list("skyebar inverted", skyebar_i)
colormaps["turbo"] = turbo
colormaps["wright"] = mplcolors.LinearSegmentedColormap.from_list("wright", wright)
# enforce grey as 'bad' (NaN / masked) value for all registered colormaps
for cmap in colormaps.values():
    cmap.set_bad([0.75] * 3, 1)
# enforce under and over for default colormap
colormaps["default"].set_under([0.50] * 3, 1)
colormaps["default"].set_over("m")
# enforce under and over for signed colormap
colormaps["signed"].set_under("c")
colormaps["signed"].set_over("m")
# a nice set of line colors for overlaid 1D traces
overline_colors = ["#CCFF00", "#FE4EDA", "#FF6600", "#00FFBF", "#00B7EB"]
| |
#coding: utf-8
'''
# WKWebView - modern webview for Pythonista
'''
from objc_util import *
import ui, console, webbrowser
import queue, weakref, ctypes, functools, time, os, json, re
from types import SimpleNamespace
# Helpers for invoking ObjC function blocks with no return value
class _block_descriptor (Structure):
_fields_ = [('reserved', c_ulong), ('size', c_ulong), ('copy_helper', c_void_p), ('dispose_helper', c_void_p), ('signature', c_char_p)]
def _block_literal_fields(*arg_types):
return [('isa', c_void_p), ('flags', c_int), ('reserved', c_int), ('invoke', ctypes.CFUNCTYPE(c_void_p, c_void_p, *arg_types)), ('descriptor', _block_descriptor)]
class WKWebView(ui.View):
# Data detector constants
NONE = 0
PHONE_NUMBER = 1
LINK = 1 << 1
ADDRESS = 1 << 2
CALENDAR_EVENT = 1 << 3
TRACKING_NUMBER = 1 << 4
FLIGHT_NUMBER = 1 << 5
LOOKUP_SUGGESTION = 1 << 6
ALL = 18446744073709551615 # NSUIntegerMax
# Global webview index for console
webviews = []
console_view = UIApplication.sharedApplication().\
keyWindow().rootViewController().\
accessoryViewController().\
consoleViewController()
#>Brun0oO
def __init__(self, swipe_navigation=False, allowsInlineMediaPlayback=True, data_detectors=NONE, log_js_evals=False, respect_safe_areas=False, **kwargs):
    """Create the wrapped native WKWebView.

    swipe_navigation: enable back/forward swipe gestures.
    allowsInlineMediaPlayback: play media inline rather than fullscreen.
    data_detectors: bitmask (or tuple) of the data-detector constants above.
    log_js_evals: echo evaluated JS and results via _message.
    respect_safe_areas: inset the view to the safe area on layout.
    """
    WKWebView.webviews.append(self)  # register for the interactive console
    self.delegate = None
    self.log_js_evals = log_js_evals
    self.respect_safe_areas = respect_safe_areas
    super().__init__(**kwargs)
    self.eval_js_queue = queue.Queue()  # used by the synchronous eval_js
    custom_message_handler = WKWebView.CustomMessageHandler.new().autorelease()
    retain_global(custom_message_handler)
    custom_message_handler._pythonistawebview = weakref.ref(self)
    user_content_controller = self.user_content_controller = WKWebView.WKUserContentController.new().autorelease()
    # Every on_<name> method on this instance becomes a JS message handler,
    # reachable as window.webkit.messageHandlers.<name>.postMessage(...).
    for key in dir(self):
        if key.startswith('on_'):
            message_name = key[3:]
            user_content_controller.addScriptMessageHandler_name_(custom_message_handler, message_name)
    self.add_script(WKWebView.js_logging_script)
    webview_config = WKWebView.WKWebViewConfiguration.new().autorelease()
    webview_config.userContentController = user_content_controller
    webview_config.allowsInlineMediaPlayback = allowsInlineMediaPlayback
    # Enables the Safari Web Inspector for this webview.
    webview_config.preferences().setValue_forKey_(True, "developerExtrasEnabled")
    # A tuple of detector flags is summed into a single bitmask.
    data_detectors = sum(data_detectors) if type(data_detectors) is tuple else data_detectors
    # Must be set to True to get real js errors, in combination with
    # setting a base directory in the case of load_html.
    webview_config.preferences().setValue_forKey_(True, 'allowFileAccessFromFileURLs')
    webview_config.setDataDetectorTypes_(data_detectors)
    nav_delegate = WKWebView.CustomNavigationDelegate.new()
    retain_global(nav_delegate)
    nav_delegate._pythonistawebview = weakref.ref(self)
    ui_delegate = WKWebView.CustomUIDelegate.new()
    retain_global(ui_delegate)
    ui_delegate._pythonistawebview = weakref.ref(self)
    self._create_webview(webview_config, nav_delegate, ui_delegate)
    self.swipe_navigation = swipe_navigation
@on_main_thread
def _create_webview(self, webview_config, nav_delegate, ui_delegate):
self.webview = WKWebView.WKWebView.alloc().initWithFrame_configuration_(
((0,0), (self.width, self.height)), webview_config).autorelease()
self.webview.autoresizingMask = 2 + 16 # WH
self.webview.setNavigationDelegate_(nav_delegate)
self.webview.setUIDelegate_(ui_delegate)
self.objc_instance.addSubview_(self.webview)
def layout(self):
if self.respect_safe_areas:
self.update_safe_area_insets()
@on_main_thread
def load_url(self, url, no_cache=False, timeout=10):
    ''' Loads the contents of the given url
    asynchronously.
    If the url starts with `file://`, loads a local file. If the remaining url
    starts with `/`, path starts from Pythonista root.
    For remote (non-file) requests, there are
    two additional options:
    * Set `no_cache` to `True` to skip the local cache, default is `False`
    * Set `timeout` to a specific timeout value, default is 10 (seconds)
    '''
    if url.startswith('file://'):
        file_path = url[7:]
        if file_path.startswith('/'):
            # Absolute path: anchor at the sandbox home directory.
            root = os.path.expanduser('~')
            file_path = root + file_path
        else:
            # Relative path: resolve against the parent of the CWD.
            current_working_directory = os.path.dirname(os.getcwd())
            file_path = current_working_directory+'/' + file_path
        dir_only = os.path.dirname(file_path)
        file_path = NSURL.fileURLWithPath_(file_path)
        dir_only = NSURL.fileURLWithPath_(dir_only)
        # Granting read access to the containing directory lets local
        # sub-resources (css/js/images) load as well.
        self.webview.loadFileURL_allowingReadAccessToURL_(file_path, dir_only)
    else:
        # cachePolicy 1 = NSURLRequestReloadIgnoringLocalCacheData
        cache_policy = 1 if no_cache else 0
        self.webview.loadRequest_(
            WKWebView.NSURLRequest.requestWithURL_cachePolicy_timeoutInterval_(
                nsurl(url),
                cache_policy,
                timeout))
@on_main_thread
def load_html(self, html):
# Need to set a base directory to get
# real js errors
current_working_directory = os.path.dirname(os.getcwd())
root_dir = NSURL.fileURLWithPath_(current_working_directory)
self.webview.loadHTMLString_baseURL_(html, root_dir)
def eval_js(self, js):
    """Evaluate *js* synchronously and return the result (as a string).

    Must be called off the main UI thread: it blocks on a queue that is
    filled by _eval_js_sync_callback running on the main thread.
    """
    self.eval_js_async(js, self._eval_js_sync_callback)
    # Blocks until the completion callback posts the result.
    value = self.eval_js_queue.get()
    return value
evaluate_javascript = eval_js  # alias matching the stock ui.WebView API
@on_main_thread
def _eval_js_sync_callback(self, value):
self.eval_js_queue.put(value)
@on_main_thread
def eval_js_async(self, js, callback=None):
if self.log_js_evals:
self.console.message({'level': 'code', 'content': js})
handler = functools.partial(WKWebView._handle_completion, callback, self)
block = ObjCBlock(handler, restype=None, argtypes=[c_void_p, c_void_p, c_void_p])
retain_global(block)
self.webview.evaluateJavaScript_completionHandler_(js, block)
def clear_cache(self):
    '''NOTE(review): unimplemented stub — calling this method is a no-op.
    The body below is only a docstring holding the intended Objective-C
    implementation for reference:

    //// All kinds of data
    //NSSet *websiteDataTypes = [WKWebsiteDataStore allWebsiteDataTypes];
    //// Date from
    NSDate *dateFrom = [NSDate dateWithTimeIntervalSince1970:0];
    //// Execute
    [[WKWebsiteDataStore defaultDataStore] removeDataOfTypes:websiteDataTypes modifiedSince:dateFrom completionHandler:^{
       // Done
    }];
    '''
# Javascript evaluation completion handler
def _handle_completion(callback, webview, _cmd, _obj, _err):
result = str(ObjCInstance(_obj)) if _obj else None
if webview.log_js_evals:
webview._message({'level': 'raw', 'content': str(result)})
if callback:
callback(result)
def add_script(self, js_script, add_to_end=True):
location = 1 if add_to_end else 0
wk_script = WKWebView.WKUserScript.alloc().initWithSource_injectionTime_forMainFrameOnly_(js_script, location, False)
self.user_content_controller.addUserScript_(wk_script)
def add_style(self, css):
    """Add a <style> tag with the given CSS to every page loaded by the view.

    The CSS is embedded in a single-quoted JS string literal, so backslashes
    and single quotes must be escaped. (Fix: the previous
    ``css.replace("'", "\\'")`` was written as ``replace("'", "\'")``, which
    is a no-op because the Python literal ``"\'"`` equals ``"'"``.)
    """
    escaped = css.replace("\\", "\\\\").replace("'", "\\'")
    js = (
        "var style = document.createElement('style');"
        f"style.innerHTML = '{escaped}';"
        "document.getElementsByTagName('head')[0].appendChild(style);"
    )
    self.add_script(js, add_to_end=True)
def add_meta(self, name, content):
    """Add a <meta> tag with the given name and content to every page loaded by the view.

    Both values are embedded in single-quoted JS string literals, so
    backslashes and single quotes must be escaped. (Fix: the previous
    ``replace("'", "\'")`` was a no-op because ``"\'"`` equals ``"'"``.)
    """
    name = name.replace("\\", "\\\\").replace("'", "\\'")
    content = content.replace("\\", "\\\\").replace("'", "\\'")
    js = (
        "var meta = document.createElement('meta'); "
        f"meta.setAttribute('name', '{name}'); "
        f"meta.setAttribute('content', '{content}'); "
        "document.getElementsByTagName('head')[0].appendChild(meta);"
    )
    self.add_script(js, add_to_end=True)
def disable_zoom(self):
name = 'viewport'
content = 'width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no'
self.add_meta(name, content)
def disable_user_selection(self):
css = '* { -webkit-user-select: none; }'
self.add_style(css)
def disable_font_resizing(self):
css = 'body { -webkit-text-size-adjust: none; }'
self.add_style(css)
def disable_scrolling(self):
"Included for consistency with the other `disable_x` methods, this is equivalent to setting `scroll_enabled` to false."
self.scroll_enabled = False
def disable_all(self):
"Convenience method that calls all the `disable_x` methods to make the loaded pages act more like an app."
self.disable_zoom()
self.disable_scrolling()
self.disable_user_selection()
self.disable_font_resizing()
@property
def user_agent(self):
"Must be called outside main thread"
return self.eval_js('navigator.userAgent')
@on_main_thread
def _get_user_agent2(self):
return str(self.webview.customUserAgent())
@user_agent.setter
def user_agent(self, value):
value = str(value)
self._set_user_agent(value)
@on_main_thread
def _set_user_agent(self, value):
self.webview.setCustomUserAgent_(value)
@on_main_thread
def go_back(self):
self.webview.goBack()
@on_main_thread
def go_forward(self):
self.webview.goForward()
@on_main_thread
def reload(self):
self.webview.reload()
@on_main_thread
def stop(self):
self.webview.stopLoading()
@property
def scales_page_to_fit(self):
raise NotImplementedError('Not supported on iOS. Use the "disable_" methods instead.')
@scales_page_to_fit.setter
def scales_page_to_fit(self, value):
raise NotImplementedError('Not supported on iOS. Use the "disable_" methods instead.')
@property
def swipe_navigation(self):
return self.webview.allowsBackForwardNavigationGestures()
@swipe_navigation.setter
def swipe_navigation(self, value):
self.webview.setAllowsBackForwardNavigationGestures_(value == True)
@property
def scroll_enabled(self):
'''Controls whether scrolling is enabled.
Disabling scrolling is applicable for pages that need to look like an app.'''
return self.webview.scrollView().scrollEnabled()
@scroll_enabled.setter
def scroll_enabled(self, value):
self.webview.scrollView().setScrollEnabled_(value == True)
def update_safe_area_insets(self):
insets = self.objc_instance.safeAreaInsets()
self.frame = self.frame.inset(insets.top, insets.left, insets.bottom, insets.right)
def _javascript_alert(self, host, message):
console.alert(host, message, 'OK', hide_cancel_button=True)
def _javascript_confirm(self, host, message):
try:
console.alert(host, message, 'OK')
return True
except KeyboardInterrupt:
return False
def _javascript_prompt(self, host, prompt, default_text):
try:
return console.input_alert(host, prompt, default_text, 'OK')
except KeyboardInterrupt:
return None
js_logging_script = 'console = new Object(); console.info = function(message) { window.webkit.messageHandlers.javascript_console_message.postMessage(JSON.stringify({ level: "info", content: message})); return false; }; console.log = function(message) { window.webkit.messageHandlers.javascript_console_message.postMessage(JSON.stringify({ level: "log", content: message})); return false; }; console.warn = function(message) { window.webkit.messageHandlers.javascript_console_message.postMessage(JSON.stringify({ level: "warn", content: message})); return false; }; console.error = function(message) { window.webkit.messageHandlers.javascript_console_message.postMessage(JSON.stringify({ level: "error", content: message})); return false; }; window.onerror = (function(error, url, line, col, errorobj) { console.error("" + error + " (" + url + ", line: " + line + ", column: " + col + ")"); });'
def on_javascript_console_message(self, message):
log_message = json.loads(message)
#self.console.message(log_message)
self._message(log_message)
def _message(self, message):
    """Print a console message dict ({'level': ..., 'content': ...}) to stdout."""
    level = message['level']
    content = message['content'] if 'content' in message else ''
    # Dict payloads are flattened to "key=repr" pairs for readability.
    if type(content) is dict:
        content = ','.join("{!s}={!r}".format(key, val) for key, val in content.items())
    if level == 'code':
        print('>>> ' + content)        # echoed JS source
    elif level == 'raw':
        print(content)                 # verbatim output
    else:
        print(level.upper() + ': ' + content)
class Theme:
@classmethod
def get_theme(cls):
theme_dict = json.loads(cls.clean_json(cls.get_theme_data()))
theme = SimpleNamespace(**theme_dict)
theme.dict = theme_dict
return theme
@classmethod
def get_theme_data(cls):
# Name of current theme
defaults = ObjCClass("NSUserDefaults").standardUserDefaults()
name = str(defaults.objectForKey_("ThemeName"))
# Theme is user-created
if name.startswith("User:"):
home = os.getenv("CFFIXED_USER_HOME")
user_themes_path = os.path.join(home,
"Library/Application Support/Themes")
theme_path = os.path.join(user_themes_path, name[5:] + ".json")
# Theme is built-in
else:
res_path = str(ObjCClass("NSBundle").mainBundle().resourcePath())
theme_path = os.path.join(res_path, "Themes2/%s.json" % name)
# Read theme file
with open(theme_path, "r") as f:
data = f.read()
# Return contents
return data
@classmethod
def clean_json(cls, string):
# From http://stackoverflow.com/questions/23705304
string = re.sub(",[ \t\r\n]+}", "}", string)
string = re.sub(",[ \t\r\n]+\]", "]", string)
return string
@classmethod
def console(self, webview_index=0):
webview = WKWebView.webviews[webview_index]
theme = WKWebView.Theme.get_theme()
print('Welcome to WKWebView console.')
print('Evaluate javascript in any active WKWebView.')
print('Special commands: list, switch #, load <url>, quit')
console.set_color(*ui.parse_color(theme.tint)[:3])
while True:
value = input('js> ').strip()
self.console_view.history().insertObject_atIndex_(ns(value+'\n'),0)
if value == 'quit':
break
if value == 'list':
for i in range(len(WKWebView.webviews)):
wv = WKWebView.webviews[i]
print(i, '-', wv.name, '-', wv.eval_js('document.title'))
elif value.startswith('switch '):
i = int(value[len('switch '):])
webview = WKWebView.webviews[i]
elif value.startswith('load '):
url = value[len('load '):]
webview.load_url(url)
else:
print(webview.eval_js(value))
console.set_color(*ui.parse_color(theme.default_text)[:3])
# MAIN OBJC SECTION
WKWebView = ObjCClass('WKWebView')
UIViewController = ObjCClass('UIViewController')
WKWebViewConfiguration = ObjCClass('WKWebViewConfiguration')
WKUserContentController = ObjCClass('WKUserContentController')
NSURLRequest = ObjCClass('NSURLRequest')
WKUserScript = ObjCClass('WKUserScript')
# Navigation delegate
class _block_decision_handler(Structure):
_fields_ = _block_literal_fields(ctypes.c_long)
def webView_decidePolicyForNavigationAction_decisionHandler_(_self, _cmd, _webview, _navigation_action, _decision_handler):
    """WKNavigationDelegate callback: allow or cancel each navigation.

    Consults the Python delegate's webview_should_start_load (if defined),
    and hands URL schemes the webview cannot handle to the system browser.
    """
    delegate_instance = ObjCInstance(_self)
    webview = delegate_instance._pythonistawebview()  # weakref -> Python wrapper
    deleg = webview.delegate
    nav_action = ObjCInstance(_navigation_action)
    ns_url = nav_action.request().URL()
    url = str(ns_url)
    nav_type = int(nav_action.navigationType())
    allow = True
    if deleg is not None:
        if hasattr(deleg, 'webview_should_start_load'):
            allow = deleg.webview_should_start_load(webview, url, nav_type)
    scheme = str(ns_url.scheme())
    if not WKWebView.WKWebView.handlesURLScheme_(scheme):
        # Unsupported scheme: cancel in the webview, open externally instead.
        allow = False
        webbrowser.open(url)
    allow_or_cancel = 1 if allow else 0  # WKNavigationActionPolicy: cancel=0, allow=1
    decision_handler = ObjCInstance(_decision_handler)
    retain_global(decision_handler)
    # The handler is an ObjC block; invoke it manually through ctypes.
    blk = WKWebView._block_decision_handler.from_address(_decision_handler)
    blk.invoke(_decision_handler, allow_or_cancel)
f = webView_decidePolicyForNavigationAction_decisionHandler_
f.argtypes = [c_void_p]*3
f.restype = None
# ObjC type encoding: void return, receiver, selector, 3 objects (last is a block)
f.encoding = b'v@:@@@?'
# https://developer.apple.com/library/archive/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html
def webView_didCommitNavigation_(_self, _cmd, _webview, _navigation):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
deleg = webview.delegate
if deleg is not None:
if hasattr(deleg, 'webview_did_start_load'):
deleg.webview_did_start_load(webview)
def webView_didFinishNavigation_(_self, _cmd, _webview, _navigation):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
deleg = webview.delegate
if deleg is not None:
if hasattr(deleg, 'webview_did_finish_load'):
deleg.webview_did_finish_load(webview)
def webView_didFailNavigation_withError_(_self, _cmd, _webview, _navigation, _error):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
deleg = webview.delegate
err = ObjCInstance(_error)
error_code = int(err.code())
error_msg = str(err.localizedDescription())
if deleg is not None:
if hasattr(deleg, 'webview_did_fail_load'):
deleg.webview_did_fail_load(webview, error_code, error_msg)
return
raise RuntimeError(f'WKWebView load failed with code {error_code}: {error_msg}')
def webView_didFailProvisionalNavigation_withError_(_self, _cmd, _webview, _navigation, _error):
WKWebView.webView_didFailNavigation_withError_(_self, _cmd, _webview, _navigation, _error)
CustomNavigationDelegate = create_objc_class('CustomNavigationDelegate', superclass=NSObject, methods=[
webView_didCommitNavigation_,
webView_didFinishNavigation_,
webView_didFailNavigation_withError_,
webView_didFailProvisionalNavigation_withError_,
webView_decidePolicyForNavigationAction_decisionHandler_
],
protocols=['WKNavigationDelegate'])
# Script message handler
def userContentController_didReceiveScriptMessage_(_self, _cmd, _userContentController, _message):
controller_instance = ObjCInstance(_self)
webview = controller_instance._pythonistawebview()
wk_message = ObjCInstance(_message)
name = str(wk_message.name())
content = str(wk_message.body())
handler = getattr(webview, 'on_'+name, None)
if handler:
handler(content)
else:
raise Exception(f'Unhandled message from script - name: {name}, content: {content}')
CustomMessageHandler = create_objc_class('CustomMessageHandler', UIViewController, methods=[
userContentController_didReceiveScriptMessage_
], protocols=['WKScriptMessageHandler'])
# UI delegate (for alerts etc.)
class _block_alert_completion(Structure):
_fields_ = _block_literal_fields()
def webView_runJavaScriptAlertPanelWithMessage_initiatedByFrame_completionHandler_(_self, _cmd, _webview, _message, _frame, _completion_handler):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
message = str(ObjCInstance(_message))
host = str(ObjCInstance(_frame).request().URL().host())
webview._javascript_alert(host, message)
#console.alert(host, message, 'OK', hide_cancel_button=True)
completion_handler = ObjCInstance(_completion_handler)
retain_global(completion_handler)
blk = WKWebView._block_alert_completion.from_address(_completion_handler)
blk.invoke(_completion_handler)
f = webView_runJavaScriptAlertPanelWithMessage_initiatedByFrame_completionHandler_
f.argtypes = [c_void_p]*4
f.restype = None
f.encoding = b'v@:@@@@?'
class _block_confirm_completion(Structure):
_fields_ = _block_literal_fields(ctypes.c_bool)
def webView_runJavaScriptConfirmPanelWithMessage_initiatedByFrame_completionHandler_(_self, _cmd, _webview, _message, _frame, _completion_handler):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
message = str(ObjCInstance(_message))
host = str(ObjCInstance(_frame).request().URL().host())
result = webview._javascript_confirm(host, message)
completion_handler = ObjCInstance(_completion_handler)
retain_global(completion_handler)
blk = WKWebView._block_confirm_completion.from_address(_completion_handler)
blk.invoke(_completion_handler, result)
f = webView_runJavaScriptConfirmPanelWithMessage_initiatedByFrame_completionHandler_
f.argtypes = [c_void_p]*4
f.restype = None
f.encoding = b'v@:@@@@?'
class _block_text_completion(Structure):
_fields_ = _block_literal_fields(c_void_p)
def webView_runJavaScriptTextInputPanelWithPrompt_defaultText_initiatedByFrame_completionHandler_(_self, _cmd, _webview, _prompt, _default_text, _frame, _completion_handler):
delegate_instance = ObjCInstance(_self)
webview = delegate_instance._pythonistawebview()
prompt = str(ObjCInstance(_prompt))
default_text = str(ObjCInstance(_default_text))
host = str(ObjCInstance(_frame).request().URL().host())
result = webview._javascript_prompt(host, prompt, default_text)
completion_handler = ObjCInstance(_completion_handler)
retain_global(completion_handler)
blk = WKWebView._block_text_completion.from_address(_completion_handler)
blk.invoke(_completion_handler, ns(result))
f = webView_runJavaScriptTextInputPanelWithPrompt_defaultText_initiatedByFrame_completionHandler_
f.argtypes = [c_void_p]*5
f.restype = None
f.encoding = b'v@:@@@@@?'
CustomUIDelegate = create_objc_class('CustomUIDelegate', superclass=NSObject, methods=[
webView_runJavaScriptAlertPanelWithMessage_initiatedByFrame_completionHandler_,
webView_runJavaScriptConfirmPanelWithMessage_initiatedByFrame_completionHandler_,
webView_runJavaScriptTextInputPanelWithPrompt_defaultText_initiatedByFrame_completionHandler_
],
protocols=['WKUIDelegate'])
if __name__ == '__main__':
    # Manual demo: open a WKWebView with swipe navigation and phone/link
    # data detectors, with a delegate that logs the page load lifecycle.

    class MyWebViewDelegate:

        def webview_should_start_load(self, webview, url, nav_type):
            "See nav_type options at https://developer.apple.com/documentation/webkit/wknavigationtype?language=objc"
            print('Will start loading', url)
            return True

        def webview_did_start_load(self, webview):
            print('Started loading')

        @ui.in_background
        def webview_did_finish_load(self, webview):
            # NOTE(review): runs in_background, presumably because eval_js
            # blocks waiting for the JS result — confirm.
            print('Finished loading ' + webview.eval_js('document.title'))

    class MyWebView(WKWebView):

        def on_greeting(self, message):
            # Presumably wired to the 'greeting' script message handler
            # referenced in the (commented-out) JS below — verify.
            console.alert(message, 'Message passed to Python', 'OK', hide_cancel_button=True)

    # Demo page exercising the title, data detectors and message passing.
    html = '''
<html>
<head>
<title>WKWebView tests</title>
<script>
function initialize() {
//result = prompt('Initialized', 'Yes, indeed');
//if (result) {
//window.webkit.messageHandlers.greeting.postMessage(result ? result : "<Dialog cancelled>");
//}
}
</script>
</head>
<body onload="initialize()" style="font-size: xx-large; text-align: center">
<p>
Hello world
</p>
<p>
<a href="http://omz-software.com/pythonista/">Pythonista home page</a>
</p>
<p>
+358 40 1234567
</p>
<p>
http://omz-software.com/pythonista/
</p>
</body>
'''
    r = ui.View(background_color='black')
    v = MyWebView(name='DemoWKWebView', delegate=MyWebViewDelegate(), swipe_navigation=True, data_detectors=(WKWebView.PHONE_NUMBER,WKWebView.LINK), frame=r.bounds, flex='WH')
    r.add_subview(v)
    r.present()  # Use 'panel' if you want to view console in another tab
    #v.disable_all()
    v.load_html(html)
    #v.load_url('http://omz-software.com/pythonista/', no_cache=True, timeout=5)
    #v.load_url('file://some/local/file.html')
| |
import enum
from itertools import chain
from typing import List, Set, Tuple, Type
class Entity:
    """A labeled token span (``[start, end)``) within a single sentence."""

    def __init__(self, sent_id: int, start: int, end: int, tag: str):
        self.sent_id = sent_id  # index of the sentence the span belongs to
        self.start = start      # inclusive token offset
        self.end = end          # exclusive token offset
        self.tag = tag          # entity label, e.g. 'PER'

    def __repr__(self):
        sent_id, tag, start, end = self.to_tuple()
        return '({}, {}, {}, {})'.format(sent_id, tag, start, end)

    def __eq__(self, other: 'Entity'):
        # Entities are equal exactly when all four fields match.
        return self.to_tuple() == other.to_tuple()

    def __hash__(self):
        # Hash must agree with __eq__, so both rest on to_tuple().
        return hash(self.to_tuple())

    def to_tuple(self):
        """Return the identifying fields as a hashable tuple."""
        return self.sent_id, self.tag, self.start, self.end
class Prefix(enum.Flag):
    """Single-letter position markers used by the supported tagging schemes."""
    I = enum.auto()  # inside a chunk
    O = enum.auto()  # outside any chunk
    B = enum.auto()  # beginning of a chunk
    E = enum.auto()  # end of a chunk (IOE1/IOE2/IOBES)
    S = enum.auto()  # single-token chunk (IOBES)
    U = enum.auto()  # unit-length chunk (BILOU)
    L = enum.auto()  # last token of a chunk (BILOU)
    ANY = I | O | B | E | S | U | L  # wildcard matching every prefix
# Name -> member lookup used to parse the prefix letter of a raw token.
# Note this also contains the 'ANY' alias, but token prefixes are single chars.
Prefixes = dict(Prefix.__members__)
class Tag(enum.Flag):
    """Required relation between the tags of two adjacent tokens in a pattern."""
    SAME = enum.auto()  # both tokens carry the same tag
    DIFF = enum.auto()  # the tags differ
    ANY = SAME | DIFF   # wildcard: the tag relation does not matter
class Token:
    """A single scheme token (e.g. ``'B-PER'``) split into prefix and tag.

    Subclasses declare which prefixes are legal for their scheme and the
    (previous prefix, current prefix, tag relation) transition patterns that
    mark the start, inside and end of a chunk.
    """
    allowed_prefix = None
    start_patterns = None
    inside_patterns = None
    end_patterns = None

    def __init__(self, token: str, suffix: bool = False, delimiter: str = '-'):
        self.token = token
        # Suffix-style tags carry the scheme letter at the end ('PER-B'),
        # otherwise at the front ('B-PER').
        affix = token[-1] if suffix else token[0]
        self.prefix = Prefixes[affix]
        body = token[:-1] if suffix else token[1:]
        # A bare prefix such as plain 'O' gets the placeholder tag '_'.
        self.tag = body.strip(delimiter) or '_'

    def __repr__(self):
        return self.token

    def is_valid(self):
        """Check whether the prefix is allowed or not."""
        if self.prefix in self.allowed_prefix:
            return True
        allowed_prefixes = str(self.allowed_prefix).replace('Prefix.', '')
        message = 'Invalid token is found: {}. Allowed prefixes are: {}.'
        raise ValueError(message.format(self.token, allowed_prefixes))

    def is_start(self, prev: 'Token'):
        """Check whether the current token is the start of chunk."""
        return self.check_patterns(prev, self.start_patterns)

    def is_inside(self, prev: 'Token'):
        """Check whether the current token is inside of chunk."""
        return self.check_patterns(prev, self.inside_patterns)

    def is_end(self, prev: 'Token'):
        """Check whether the previous token is the end of chunk."""
        return self.check_patterns(prev, self.end_patterns)

    def check_tag(self, prev: 'Token', cond: Tag):
        """Check whether the tag pattern is matched."""
        same = prev.tag == self.tag
        return (
            cond == Tag.ANY
            or (cond == Tag.SAME and same)
            or (cond == Tag.DIFF and not same)
        )

    def check_patterns(self, prev: 'Token', patterns: Set[Tuple[Prefix, Prefix, Tag]]):
        """Check whether the prefix patterns are matched."""
        return any(
            prev.prefix in before and self.prefix in current and self.check_tag(prev, cond)
            for before, current, cond in patterns
        )
class IOB1(Token):
    """IOB1 scheme: 'B-' only separates two adjacent chunks of the same type."""
    allowed_prefix = Prefix.I | Prefix.O | Prefix.B
    # (previous prefix, current prefix, tag relation) triples; a match means
    # the current token starts a new chunk.
    start_patterns = {
        (Prefix.O, Prefix.I, Tag.ANY),
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.B, Prefix.I, Tag.ANY),
        (Prefix.I, Prefix.B, Tag.SAME),
        (Prefix.B, Prefix.B, Tag.SAME)
    }
    # A match means the current token continues the current chunk.
    inside_patterns = {
        (Prefix.B, Prefix.I, Tag.SAME),
        (Prefix.I, Prefix.I, Tag.SAME)
    }
    # A match means the previous token ended its chunk.
    end_patterns = {
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.I, Prefix.O, Tag.ANY),
        (Prefix.I, Prefix.B, Tag.ANY),
        (Prefix.B, Prefix.O, Tag.ANY),
        (Prefix.B, Prefix.I, Tag.DIFF),
        (Prefix.B, Prefix.B, Tag.SAME)
    }
class IOE1(Token):
    """IOE1 scheme: 'E-' only separates two adjacent chunks of the same type."""
    # TODO: IOE1 hasn't yet been able to handle some cases. See unit testing.
    allowed_prefix = Prefix.I | Prefix.O | Prefix.E
    # (previous prefix, current prefix, tag relation): current token starts a chunk.
    start_patterns = {
        (Prefix.O, Prefix.I, Tag.ANY),
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.E, Prefix.I, Tag.ANY),
        (Prefix.E, Prefix.E, Tag.SAME)
    }
    # Current token continues the chunk.
    inside_patterns = {
        (Prefix.I, Prefix.I, Tag.SAME),
        (Prefix.I, Prefix.E, Tag.SAME)
    }
    # Previous token ended its chunk.
    end_patterns = {
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.I, Prefix.O, Tag.ANY),
        (Prefix.I, Prefix.E, Tag.DIFF),
        (Prefix.E, Prefix.I, Tag.SAME),
        (Prefix.E, Prefix.E, Tag.SAME)
    }
class IOB2(Token):
    """IOB2 scheme: every chunk starts with 'B-'."""
    allowed_prefix = Prefix.I | Prefix.O | Prefix.B
    # (previous prefix, current prefix, tag relation): a 'B-' always starts a chunk.
    start_patterns = {
        (Prefix.ANY, Prefix.B, Tag.ANY)
    }
    # Current token continues the chunk.
    inside_patterns = {
        (Prefix.B, Prefix.I, Tag.SAME),
        (Prefix.I, Prefix.I, Tag.SAME)
    }
    # Previous token ended its chunk.
    end_patterns = {
        (Prefix.I, Prefix.O, Tag.ANY),
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.I, Prefix.B, Tag.ANY),
        (Prefix.B, Prefix.O, Tag.ANY),
        (Prefix.B, Prefix.I, Tag.DIFF),
        (Prefix.B, Prefix.B, Tag.ANY)
    }
class IOE2(Token):
    """IOE2 scheme: every chunk ends with 'E-'."""
    allowed_prefix = Prefix.I | Prefix.O | Prefix.E
    # (previous prefix, current prefix, tag relation): current token starts a chunk.
    start_patterns = {
        (Prefix.O, Prefix.I, Tag.ANY),
        (Prefix.O, Prefix.E, Tag.ANY),
        (Prefix.E, Prefix.I, Tag.ANY),
        (Prefix.E, Prefix.E, Tag.ANY),
        (Prefix.I, Prefix.I, Tag.DIFF),
        (Prefix.I, Prefix.E, Tag.DIFF)
    }
    # Current token continues the chunk.
    inside_patterns = {
        (Prefix.I, Prefix.E, Tag.SAME),
        (Prefix.I, Prefix.I, Tag.SAME)
    }
    # An 'E-' always ends the chunk.
    end_patterns = {
        (Prefix.E, Prefix.ANY, Tag.ANY)
    }
class IOBES(Token):
    """IOBES scheme: B/I/E delimit multi-token chunks, S marks single-token chunks."""
    allowed_prefix = Prefix.I | Prefix.O | Prefix.B | Prefix.E | Prefix.S
    # (previous prefix, current prefix, tag relation): B or S always starts a chunk.
    start_patterns = {
        (Prefix.ANY, Prefix.B, Tag.ANY),
        (Prefix.ANY, Prefix.S, Tag.ANY)
    }
    # Current token continues the chunk.
    inside_patterns = {
        (Prefix.B, Prefix.I, Tag.SAME),
        (Prefix.B, Prefix.E, Tag.SAME),
        (Prefix.I, Prefix.I, Tag.SAME),
        (Prefix.I, Prefix.E, Tag.SAME)
    }
    # S or E always closes its chunk.
    end_patterns = {
        (Prefix.S, Prefix.ANY, Tag.ANY),
        (Prefix.E, Prefix.ANY, Tag.ANY)
    }
class BILOU(Token):
    """BILOU scheme: IOBES with L ('last') for E and U ('unit') for S."""
    allowed_prefix = Prefix.B | Prefix.I | Prefix.L | Prefix.O | Prefix.U
    # (previous prefix, current prefix, tag relation): B or U always starts a chunk.
    start_patterns = {
        (Prefix.ANY, Prefix.B, Tag.ANY),
        (Prefix.ANY, Prefix.U, Tag.ANY)
    }
    # Current token continues the chunk.
    inside_patterns = {
        (Prefix.B, Prefix.I, Tag.SAME),
        (Prefix.B, Prefix.L, Tag.SAME),
        (Prefix.I, Prefix.I, Tag.SAME),
        (Prefix.I, Prefix.L, Tag.SAME)
    }
    # U or L always closes its chunk.
    end_patterns = {
        (Prefix.U, Prefix.ANY, Tag.ANY),
        (Prefix.L, Prefix.ANY, Tag.ANY)
    }
class Tokens:
    """A sentence's worth of parsed scheme tokens plus chunk extraction."""

    def __init__(self, tokens: List[str], scheme: Type[Token],
                 suffix: bool = False, delimiter: str = '-', sent_id: int = None):
        # A virtual trailing 'O' token lets the scanner close a chunk that
        # runs to the end of the sentence.
        self.outside_token = scheme('O', suffix=suffix, delimiter=delimiter)
        self.tokens = [scheme(token, suffix=suffix, delimiter=delimiter) for token in tokens]
        self.extended_tokens = self.tokens + [self.outside_token]
        self.sent_id = sent_id

    @property
    def entities(self):
        """Extract entities from tokens.
        Returns:
            list: list of Entity.
        Example:
            >>> tokens = Tokens(['B-PER', 'I-PER', 'O', 'B-LOC'], IOB2)
            >>> tokens.entities
            [('PER', 0, 2), ('LOC', 3, 4)]
        """
        i = 0
        entities = []
        prev = self.outside_token
        while i < len(self.extended_tokens):
            token = self.extended_tokens[i]
            token.is_valid()  # raises ValueError on a prefix the scheme forbids
            if token.is_start(prev):
                # Scan past the chunk interior; `end` is the first index no
                # longer inside the chunk.
                end = self._forward(start=i + 1, prev=token)
                if self._is_end(end):
                    entity = Entity(sent_id=self.sent_id, start=i, end=end, tag=token.tag)
                    entities.append(entity)
                i = end
            else:
                i += 1
            prev = self.extended_tokens[i - 1]
        return entities

    def _forward(self, start: int, prev: Token):
        # Advance while each token continues the chunk begun by `prev`;
        # return the index of the first token outside the chunk.
        for i, token in enumerate(self.extended_tokens[start:], start):
            if token.is_inside(prev):
                prev = token
            else:
                return i
        # Fell off the end without leaving the chunk.
        return len(self.tokens) - 1

    def _is_end(self, i: int):
        # Whether the transition into position `i` closes the previous chunk.
        token = self.extended_tokens[i]
        prev = self.extended_tokens[i - 1]
        return token.is_end(prev)
class Entities:
    """Per-sentence entity lists extracted from a batch of tagged sequences."""

    def __init__(self, sequences: List[List[str]], scheme: Type[Token], suffix: bool = False, delimiter: str = '-'):
        # One list of Entity objects per input sentence.
        self.entities = []
        for sent_id, seq in enumerate(sequences):
            sentence = Tokens(seq, scheme=scheme, suffix=suffix, delimiter=delimiter, sent_id=sent_id)
            self.entities.append(sentence.entities)

    def filter(self, tag_name: str):
        """Return the set of entities whose tag equals *tag_name*."""
        matched = set()
        for entity in chain(*self.entities):
            if entity.tag == tag_name:
                matched.add(entity)
        return matched

    @property
    def unique_tags(self):
        """All distinct entity tags present across every sentence."""
        return {entity.tag for entity in chain(*self.entities)}
def auto_detect(sequences: List[List[str]], suffix: bool = False, delimiter: str = '-'):
    """Detects scheme automatically.
    auto_detect supports the following schemes:
    - IOB2
    - IOE2
    - IOBES
    """
    error_message = 'This scheme is not supported: {}'

    # Collect the set of prefixes actually used in the data.
    prefixes = set()
    for tokens in sequences:
        for raw in tokens:
            try:
                prefixes.add(Token(raw, suffix=suffix, delimiter=delimiter).prefix)
            except KeyError:
                raise ValueError(error_message.format(raw))

    # Candidate schemes, each with the exact prefix sets that identify it.
    # Order matters: earlier candidates win when sets are shared.
    candidates = [
        (IOB2, [
            {Prefix.I, Prefix.O, Prefix.B},
            {Prefix.I, Prefix.B},
            {Prefix.B, Prefix.O},
            {Prefix.B}
        ]),
        (IOE2, [
            {Prefix.I, Prefix.O, Prefix.E},
            {Prefix.I, Prefix.E},
            {Prefix.E, Prefix.O},
            {Prefix.E}
        ]),
        (IOBES, [
            {Prefix.I, Prefix.O, Prefix.B, Prefix.E, Prefix.S},
            {Prefix.I, Prefix.B, Prefix.E, Prefix.S},
            {Prefix.I, Prefix.O, Prefix.B, Prefix.E},
            {Prefix.O, Prefix.B, Prefix.E, Prefix.S},
            {Prefix.I, Prefix.B, Prefix.E},
            {Prefix.B, Prefix.E, Prefix.S},
            {Prefix.O, Prefix.B, Prefix.E},
            {Prefix.B, Prefix.E},
            {Prefix.S}
        ]),
        (BILOU, [
            {Prefix.I, Prefix.O, Prefix.B, Prefix.L, Prefix.U},
            {Prefix.I, Prefix.B, Prefix.L, Prefix.U},
            {Prefix.I, Prefix.O, Prefix.B, Prefix.L},
            {Prefix.O, Prefix.B, Prefix.L, Prefix.U},
            {Prefix.I, Prefix.B, Prefix.L},
            {Prefix.B, Prefix.L, Prefix.U},
            {Prefix.O, Prefix.B, Prefix.L},
            {Prefix.B, Prefix.L},
            {Prefix.U}
        ])
    ]
    for scheme, allowed_sets in candidates:
        if prefixes in allowed_sets:
            return scheme
    raise ValueError(error_message.format(prefixes))
| |
import pytest
import datetime
from api.base.settings.defaults import API_BASE
from api.providers.workflows import Workflows
from osf.utils.workflows import RequestTypes, RegistrationModerationTriggers, RegistrationModerationStates
from osf_tests.factories import (
AuthUserFactory,
RegistrationFactory,
RegistrationProviderFactory,
NodeRequestFactory,
EmbargoFactory,
RetractionFactory,
)
from tests.base import get_default_metaschema
from osf.models import NodeRequest
from osf.migrations import update_provider_auth_groups
@pytest.mark.django_db
class TestRegistriesModerationSubmissions:
@pytest.fixture()
def moderator(self):
    """A user who will be added to `provider`'s moderator group."""
    return AuthUserFactory()

@pytest.fixture()
def moderator_wrong_provider(self):
    """A moderator on an unrelated provider; must be denied everywhere here."""
    user = AuthUserFactory()
    provider = RegistrationProviderFactory()
    update_provider_auth_groups()
    provider.schemas.add(get_default_metaschema())
    provider.get_group('moderator').user_set.add(user)
    provider.save()
    return user

@pytest.fixture()
def provider(self, moderator):
    """A pre-moderation registration provider with `moderator` as a moderator."""
    provider = RegistrationProviderFactory()
    update_provider_auth_groups()
    provider.schemas.add(get_default_metaschema())
    provider.get_group('moderator').user_set.add(moderator)
    provider.reviews_workflow = Workflows.PRE_MODERATION.value
    provider.save()
    return provider

@pytest.fixture()
def admin(self, provider):
    """A user in the provider's 'admin' auth group."""
    user = AuthUserFactory()
    provider.get_group('admin').user_set.add(user)
    provider.save()
    return user
@pytest.fixture()
def registration_with_withdraw_request(self, provider):
    """A registration on `provider` with a pending withdrawal NodeRequest."""
    registration = RegistrationFactory(provider=provider)
    NodeRequest.objects.create(
        request_type=RequestTypes.WITHDRAWAL.value,
        target=registration,
        creator=registration.creator
    )
    return registration

@pytest.fixture()
def access_request(self, provider):
    """An access request whose target belongs to `provider`."""
    request = NodeRequestFactory(request_type=RequestTypes.ACCESS.value)
    request.target.provider = provider
    request.target.save()
    return request

@pytest.fixture()
def reg_creator(self):
    """The admin contributor who creates `registration`."""
    return AuthUserFactory()

@pytest.fixture()
def registration(self, provider, reg_creator):
    """A plain registration on `provider` created by `reg_creator`."""
    return RegistrationFactory(provider=provider, creator=reg_creator)
@pytest.fixture()
def embargo_registration(self, provider, reg_creator):
    """A registration under a one-month embargo, moderated by `provider`."""
    one_month_from_now = datetime.datetime.now() + datetime.timedelta(days=30)
    embargo = EmbargoFactory(end_date=one_month_from_now, user=reg_creator)
    registration = embargo.target_registration
    registration.provider = provider
    # Recompute moderation_state now that the provider/workflow changed.
    registration.update_moderation_state()
    registration.save()
    return registration

@pytest.fixture()
def retract_registration(self, provider, reg_creator):
    """A registration with a pending retraction, moderated by `provider`."""
    retract = RetractionFactory(user=reg_creator)
    registration = retract.target_registration
    registration.provider = provider
    registration.update_moderation_state()
    registration.save()
    return registration
# --- URL fixtures for the endpoints under test ---

@pytest.fixture()
def provider_requests_url(self, provider):
    return f'/{API_BASE}providers/registrations/{provider._id}/requests/'

@pytest.fixture()
def registration_requests_url(self, registration_with_withdraw_request):
    return f'/{API_BASE}registrations/{registration_with_withdraw_request._id}/requests/'

@pytest.fixture()
def registrations_url(self, provider):
    return f'/{API_BASE}providers/registrations/{provider._id}/registrations/'

@pytest.fixture()
def registration_detail_url(self, registration):
    return f'/{API_BASE}registrations/{registration._id}/'

@pytest.fixture()
def registration_log_url(self, registration):
    return f'/{API_BASE}registrations/{registration._id}/logs/'

@pytest.fixture()
def provider_actions_url(self, provider):
    return f'/{API_BASE}providers/registrations/{provider._id}/actions/'

@pytest.fixture()
def registration_actions_url(self, registration):
    return f'/{API_BASE}registrations/{registration._id}/actions/'

@pytest.fixture()
def embargo_registration_actions_url(self, embargo_registration):
    return f'/{API_BASE}registrations/{embargo_registration._id}/actions/'

@pytest.fixture()
def retract_registration_actions_url(self, retract_registration):
    return f'/{API_BASE}registrations/{retract_registration._id}/actions/'
@pytest.fixture()
def actions_payload_base(self):
    """Skeleton JSON:API payload for POSTing a registration action.

    Tests fill in data.attributes.trigger / .comment and the target id.
    """
    payload = {
        'data': {
            'attributes': {
            },
            'relationships': {
                'target': {
                    'data': {
                        'type': 'registrations'
                    }
                }
            },
            'type': 'registration-actions'
        }
    }
    return payload
def test_get_provider_requests(self, app, provider_requests_url, registration_with_withdraw_request, access_request, moderator, moderator_wrong_provider):
    """Provider request list requires auth and is scoped to this provider's moderators."""
    # Anonymous -> 401
    resp = app.get(provider_requests_url, expect_errors=True)
    assert resp.status_code == 401
    # Moderator of a different provider -> 403
    resp = app.get(provider_requests_url, auth=moderator_wrong_provider.auth, expect_errors=True)
    assert resp.status_code == 403
    resp = app.get(provider_requests_url, auth=moderator.auth)
    assert resp.status_code == 200
    # Both the withdrawal request and the access request are listed.
    assert len(resp.json['data']) == 2
    # The request_type filter narrows the list to the withdrawal request.
    resp = app.get(f'{provider_requests_url}?filter[request_type]=withdrawal', auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['relationships']['target']['data']['id'] == registration_with_withdraw_request._id

def test_get_registration_requests(self, app, registration_requests_url, registration_with_withdraw_request, access_request, moderator, moderator_wrong_provider):
    """Per-registration request list only exposes that registration's requests."""
    resp = app.get(registration_requests_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(registration_requests_url, auth=moderator_wrong_provider.auth, expect_errors=True)
    assert resp.status_code == 403
    resp = app.get(registration_requests_url, auth=moderator.auth)
    assert resp.status_code == 200
    # Only the withdrawal request; the unrelated access request is excluded.
    assert len(resp.json['data']) == 1
    resp = app.get(f'{registration_requests_url}?filter[request_type]=withdrawal', auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['relationships']['target']['data']['id'] == registration_with_withdraw_request._id
def test_get_registrations(self, app, registrations_url, registration, moderator, moderator_wrong_provider):
    """Moderators can list the provider's registrations with moderation metadata."""
    resp = app.get(registrations_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(registrations_url, auth=moderator_wrong_provider.auth, expect_errors=True)
    assert resp.status_code == 403
    resp = app.get(registrations_url, auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['id'] == registration._id
    assert resp.json['data'][0]['attributes']['reviews_state'] == RegistrationModerationStates.INITIAL.db_name
    # Moderation-related relationships are present on the serialized registration.
    assert resp.json['data'][0]['relationships']['requests']
    assert resp.json['data'][0]['relationships']['review_actions']

def test_get_registrations_reviews_state_filter(self, app, registrations_url, registration, moderator):
    """reviews_state filter and reviews_state_counts meta track moderation state."""
    resp = app.get(f'{registrations_url}?filter[reviews_state]=initial', auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['id'] == registration._id
    # Nothing is accepted yet.
    resp = app.get(f'{registrations_url}?filter[reviews_state]=accepted', auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 0
    # RegistrationFactory auto-approves the initial RegistrationApproval
    registration.update_moderation_state()
    resp = app.get(f'{registrations_url}?filter[reviews_state]=accepted&meta[reviews_state_counts]=true', auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['id'] == registration._id
    assert resp.json['data'][0]['attributes']['reviews_state'] == RegistrationModerationStates.ACCEPTED.db_name
    assert resp.json['meta']['reviews_state_counts']['accepted'] == 1
@pytest.mark.enable_quickfiles_creation
def test_get_registration_actions(self, app, registration_actions_url, registration, moderator):
    """An approved retraction surfaces as a REQUEST_WITHDRAWAL action."""
    resp = app.get(registration_actions_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(registration_actions_url, auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 0
    registration.is_public = True
    retraction = registration.retract_registration(
        user=registration.creator, justification='because')
    # Admin approval of the retraction produces the moderation action.
    retraction.approve(
        user=registration.creator,
        token=retraction.token_for_user(registration.creator, 'approval')
    )
    registration.save()
    resp = app.get(registration_actions_url, auth=moderator.auth)
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['attributes']['trigger'] == RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name
    assert resp.json['data'][0]['relationships']['creator']['data']['id'] == registration.creator._id

@pytest.mark.enable_quickfiles_creation
def test_get_provider_actions(self, app, provider_actions_url, registration, moderator):
    """Approving the initial RegistrationApproval produces a SUBMIT action."""
    resp = app.get(provider_actions_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(provider_actions_url, auth=moderator.auth)
    assert resp.status_code == 200
    assert len(resp.json['data']) == 0
    registration.require_approval(user=registration.creator)
    approval = registration.registration_approval
    approval.approve(
        user=registration.creator,
        token=approval.token_for_user(registration.creator, 'approval')
    )
    resp = app.get(provider_actions_url, auth=moderator.auth)
    assert len(resp.json['data']) == 1
    assert resp.json['data'][0]['attributes']['trigger'] == RegistrationModerationTriggers.SUBMIT.db_name
    assert resp.json['data'][0]['relationships']['creator']['data']['id'] == registration.creator._id
def test_registries_moderation_permission(self, app, registration_detail_url, registration, moderator, moderator_wrong_provider):
    # Moderators should be able to view registration details once the registration is pending
    registration.moderation_state = RegistrationModerationStates.PENDING.db_name
    registration.save()
    # Anonymous -> 401; wrong provider's moderator -> 403; own moderator -> 200.
    resp = app.get(registration_detail_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(registration_detail_url, auth=moderator_wrong_provider.auth, expect_errors=True)
    assert resp.status_code == 403
    resp = app.get(registration_detail_url, auth=moderator.auth)
    assert resp.status_code == 200

def test_registries_moderation_permission_log(self, app, registration_log_url, registration, moderator, moderator_wrong_provider):
    # Moderators should be able to view registration logs once the registration is pending
    registration.moderation_state = RegistrationModerationStates.PENDING.db_name
    registration.save()
    resp = app.get(registration_log_url, expect_errors=True)
    assert resp.status_code == 401
    resp = app.get(registration_log_url, auth=moderator_wrong_provider.auth, expect_errors=True)
    assert resp.status_code == 403
    resp = app.get(registration_log_url, auth=moderator.auth)
    assert resp.status_code == 200
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_accept(self, app, registration, moderator, registration_actions_url, actions_payload_base, reg_creator):
    """A moderator's accept_submission action moves PENDING -> ACCEPTED."""
    registration.require_approval(user=registration.creator)
    # Contributor approval submits the registration into moderation.
    registration.registration_approval.accept()
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name

@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_reject_moderator(self, app, registration, reg_creator, moderator, registration_actions_url, actions_payload_base):
    """A moderator's reject_submission action moves PENDING -> REJECTED."""
    registration.require_approval(user=registration.creator)
    registration.registration_approval.accept()
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Worst registration Ive ever seen'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.REJECTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo(self, app, embargo_registration, moderator, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
    """Accepting a pending embargoed registration moves it to EMBARGO."""
    assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
    # Contributor approval of the embargo sanction submits it for moderation.
    embargo_registration.sanction.accept()
    embargo_registration.refresh_from_db()
    assert embargo_registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
    actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
    resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    embargo_registration.refresh_from_db()
    assert embargo_registration.moderation_state == RegistrationModerationStates.EMBARGO.db_name

@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo_reject(self, app, embargo_registration, moderator, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
    """Rejecting a pending embargoed registration moves it to REJECTED."""
    assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
    embargo_registration.sanction.accept()
    embargo_registration.refresh_from_db()
    assert embargo_registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
    actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
    resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_SUBMISSION.db_name
    embargo_registration.refresh_from_db()
    assert embargo_registration.moderation_state == RegistrationModerationStates.REJECTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_accept(self, app, retract_registration, moderator, retract_registration_actions_url, actions_payload_base, provider):
    """Accepting a withdrawal request moves PENDING_WITHDRAW -> WITHDRAWN."""
    retract_registration.sanction.accept()
    retract_registration.refresh_from_db()
    assert retract_registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
    actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
    resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
    retract_registration.refresh_from_db()
    assert retract_registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name

@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_reject(self, app, retract_registration, moderator, retract_registration_actions_url, actions_payload_base, provider):
    """Rejecting a withdrawal request returns the registration to ACCEPTED."""
    retract_registration.sanction.accept()
    retract_registration.refresh_from_db()
    assert retract_registration.moderation_state == RegistrationModerationStates.PENDING_WITHDRAW.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.REJECT_WITHDRAWAL.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
    actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
    resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.REJECT_WITHDRAWAL.db_name
    retract_registration.refresh_from_db()
    assert retract_registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_force_withdraw(self, app, registration, moderator, registration_actions_url, actions_payload_base, provider, reg_creator):
    """A moderator can force-withdraw an ACCEPTED registration."""
    registration.require_approval(user=registration.creator)
    registration.registration_approval.accept()
    registration.registration_approval.accept(user=moderator)  # moderator acceptance reaches ACCEPTED
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.WITHDRAWN.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_accept_errors(self, app, registration, moderator, registration_actions_url, actions_payload_base, reg_creator):
    """Neither moderators nor admin contributors can act out of turn."""
    registration.require_approval(user=registration.creator)
    # Moderator can't submit: accept_submission before the contributor
    # approval is forbidden and leaves the state untouched.
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Submitting registration'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth, expect_errors=True)
    assert resp.status_code == 403
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
    registration.registration_approval.accept()
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    # Admin contributor can't approve their own submission.
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
    assert resp.status_code == 403
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_withdraw_admin_cant_accept(self, app, retract_registration, reg_creator, retract_registration_actions_url, actions_payload_base, provider):
    """An admin contributor cannot accept their own withdrawal request."""
    retract_registration.sanction.accept()
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
    actions_payload_base['data']['relationships']['target']['data']['id'] = retract_registration._id
    resp = app.post_json_api(retract_registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
    assert resp.status_code == 403

@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_embargo_admin_cant_accept(self, app, embargo_registration, provider, embargo_registration_actions_url, actions_payload_base, reg_creator):
    """An admin contributor cannot accept their own embargoed submission."""
    embargo_registration.require_approval(user=embargo_registration.creator)
    embargo_registration.registration_approval.accept()
    embargo_registration.refresh_from_db()
    assert embargo_registration.moderation_state == RegistrationModerationStates.INITIAL.db_name
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Looks good! (Embargo)'
    actions_payload_base['data']['relationships']['target']['data']['id'] = embargo_registration._id
    resp = app.post_json_api(embargo_registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
    assert resp.status_code == 403
@pytest.mark.enable_quickfiles_creation
def test_registries_moderation_post_admin_cant_force_withdraw(self, app, registration, moderator, registration_actions_url, actions_payload_base, provider, reg_creator):
    """force_withdraw is a moderator-only trigger; the admin contributor gets 403."""
    registration.require_approval(user=registration.creator)
    registration.registration_approval.accept()
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.PENDING.db_name
    # Moderator accepts the submission first, reaching ACCEPTED.
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Best registration Ive ever seen'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    assert resp.status_code == 201
    assert resp.json['data']['attributes']['trigger'] == RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name
    registration.refresh_from_db()
    assert registration.moderation_state == RegistrationModerationStates.ACCEPTED.db_name
    # The admin contributor's force_withdraw attempt is rejected.
    actions_payload_base['data']['attributes']['trigger'] = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
    actions_payload_base['data']['attributes']['comment'] = 'Bye bye'
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=reg_creator.auth, expect_errors=True)
    assert resp.status_code == 403
@pytest.mark.parametrize(
    'moderator_trigger',
    [
        RegistrationModerationTriggers.ACCEPT_SUBMISSION,
        RegistrationModerationTriggers.REJECT_SUBMISSION
    ]
)
@pytest.mark.enable_quickfiles_creation
def test_post_submission_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base, moderator_trigger):
    """Moderator comments on submission decisions are persisted verbatim.

    Parametrized over both submission triggers (accept and reject); the
    comment must appear unmodified in the API response and on the stored
    action.
    """
    assert registration.actions.count() == 0
    registration.require_approval(user=registration.creator)
    registration.registration_approval.accept()
    moderator_comment = 'inane comment'
    actions_payload_base['data']['attributes']['trigger'] = moderator_trigger.db_name
    actions_payload_base['data']['attributes']['comment'] = moderator_comment
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    # The comment round-trips through the API response...
    assert resp.json['data']['attributes']['comment'] == moderator_comment
    # ...and is stored on the persisted action record.
    persisted_action = registration.actions.get(trigger=moderator_trigger.db_name)
    assert persisted_action.comment == moderator_comment
@pytest.mark.parametrize(
    'moderator_trigger',
    [
        RegistrationModerationTriggers.ACCEPT_WITHDRAWAL,
        RegistrationModerationTriggers.REJECT_WITHDRAWAL,
    ]
)
@pytest.mark.enable_quickfiles_creation
def test_post_withdrawal_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base, moderator_trigger):
    """Moderator comments on withdrawal decisions are persisted verbatim.

    Parametrized over both withdrawal triggers (accept and reject); the
    comment must appear unmodified in the API response and on the stored
    action.
    """
    assert registration.actions.count() == 0
    # Put the registration into a pending-withdrawal state first.
    registration.is_public = True
    registration.retract_registration(user=registration.creator)
    registration.retraction.accept()
    moderator_comment = 'inane comment'
    actions_payload_base['data']['attributes']['trigger'] = moderator_trigger.db_name
    actions_payload_base['data']['attributes']['comment'] = moderator_comment
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    # The comment round-trips through the API response...
    assert resp.json['data']['attributes']['comment'] == moderator_comment
    # ...and is stored on the persisted action record.
    persisted_action = registration.actions.get(trigger=moderator_trigger.db_name)
    assert persisted_action.comment == moderator_comment
@pytest.mark.enable_quickfiles_creation
def test_post_force_withdraw_action_persists_comment(self, app, registration, moderator, registration_actions_url, actions_payload_base):
    """FORCE_WITHDRAW stores the moderator comment with a fixed prefix.

    Unlike the other triggers, the persisted comment is prefixed with
    'Force withdrawn by moderator: ' both in the API response and on the
    stored action.
    """
    assert registration.actions.count() == 0
    registration.is_public = True
    registration.update_moderation_state()  # implicit ACCEPTED state from RegistrationFactory
    moderator_comment = 'inane comment'
    force_withdraw_trigger = RegistrationModerationTriggers.FORCE_WITHDRAW.db_name
    actions_payload_base['data']['attributes']['trigger'] = force_withdraw_trigger
    actions_payload_base['data']['attributes']['comment'] = moderator_comment
    actions_payload_base['data']['relationships']['target']['data']['id'] = registration._id
    resp = app.post_json_api(registration_actions_url, actions_payload_base, auth=moderator.auth)
    # The API prepends a fixed prefix to force-withdraw comments.
    expected_comment = 'Force withdrawn by moderator: ' + moderator_comment
    assert resp.json['data']['attributes']['comment'] == expected_comment
    persisted_action = registration.actions.get(trigger=force_withdraw_trigger)
    assert persisted_action.comment == expected_comment
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator as ea
from tensorflow.python.summary.writer.writer import SummaryToEventTransformer
from tensorflow.python.training import saver
class _EventGenerator(object):
  """Class that can add_events and then yield them back.

  Satisfies the EventGenerator API required for the EventAccumulator.
  Satisfies the EventWriter API required to create a SummaryWriter.

  Has additional convenience methods for adding test events.
  """

  def __init__(self, zero_out_timestamps=False):
    # Pending events; consumed FIFO by Load().
    self.items = []
    # When True, AddEvent() forces every event's wall_time to 0 so tests
    # can compare events without depending on real clock values.
    self.zero_out_timestamps = zero_out_timestamps

  def Load(self):
    """Yield (and consume) all queued events in insertion order."""
    while self.items:
      yield self.items.pop(0)

  def AddScalar(self, tag, wall_time=0, step=0, value=0):
    """Queue an event carrying a single scalar summary value."""
    event = tf.Event(
        wall_time=wall_time,
        step=step,
        summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                   simple_value=value)]))
    self.AddEvent(event)

  def AddHistogram(self,
                   tag,
                   wall_time=0,
                   step=0,
                   hmin=1,
                   hmax=2,
                   hnum=3,
                   hsum=4,
                   hsum_squares=5,
                   hbucket_limit=None,
                   hbucket=None):
    """Queue an event carrying a histogram summary built from the args."""
    histo = tf.HistogramProto(min=hmin,
                              max=hmax,
                              num=hnum,
                              sum=hsum,
                              sum_squares=hsum_squares,
                              bucket_limit=hbucket_limit,
                              bucket=hbucket)
    event = tf.Event(wall_time=wall_time,
                     step=step,
                     summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                histo=histo)]))
    self.AddEvent(event)

  def AddImage(self,
               tag,
               wall_time=0,
               step=0,
               encoded_image_string=b'imgstr',
               width=150,
               height=100):
    """Queue an event carrying an image summary."""
    image = tf.Summary.Image(encoded_image_string=encoded_image_string,
                             width=width,
                             height=height)
    event = tf.Event(wall_time=wall_time,
                     step=step,
                     summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                image=image)]))
    self.AddEvent(event)

  def AddAudio(self,
               tag,
               wall_time=0,
               step=0,
               encoded_audio_string=b'sndstr',
               content_type='audio/wav',
               sample_rate=44100,
               length_frames=22050):
    """Queue an event carrying an audio summary."""
    audio = tf.Summary.Audio(encoded_audio_string=encoded_audio_string,
                             content_type=content_type,
                             sample_rate=sample_rate,
                             length_frames=length_frames)
    event = tf.Event(wall_time=wall_time,
                     step=step,
                     summary=tf.Summary(value=[tf.Summary.Value(tag=tag,
                                                                audio=audio)]))
    self.AddEvent(event)

  def AddEvent(self, event):
    """Queue an arbitrary event, zeroing its timestamp if configured."""
    if self.zero_out_timestamps:
      event.wall_time = 0
    self.items.append(event)

  def add_event(self, event):  # pylint: disable=invalid-name
    """Match the EventWriter API."""
    self.AddEvent(event)
class EventAccumulatorTest(tf.test.TestCase):
  """Shared assertion helpers for EventAccumulator test cases."""

  def assertTagsEqual(self, tags1, tags2):
    """Assert two Tags() dicts are equivalent, ignoring list ordering."""
    # The two dictionaries must have identical key sets.
    self.assertItemsEqual(tags1, tags2)
    for key in tags1:
      expected = tags1[key]
      actual = tags2[key]
      if isinstance(expected, list):
        # List-valued entries (e.g. tag name lists) compare as multisets:
        # the order of tags is irrelevant, only the contents matter.
        self.assertItemsEqual(expected, actual)
      else:
        # Non-list entries (booleans, etc.) must match exactly.
        self.assertEqual(expected, actual)
class MockingEventAccumulatorTest(EventAccumulatorTest):
  """EventAccumulator tests driven by an in-memory _EventGenerator.

  setUp() patches ea.EventAccumulator so that constructing an accumulator
  with a generator bypasses the filesystem entirely; tearDown() restores
  the real constructor.
  """

  def setUp(self):
    super(MockingEventAccumulatorTest, self).setUp()
    self.stubs = googletest.StubOutForTesting()
    # Expected Tags() result for an accumulator with no events loaded.
    self.empty = {ea.IMAGES: [],
                  ea.AUDIO: [],
                  ea.SCALARS: [],
                  ea.HISTOGRAMS: [],
                  ea.COMPRESSED_HISTOGRAMS: [],
                  ea.GRAPH: False,
                  ea.META_GRAPH: False,
                  ea.RUN_METADATA: []}
    self._real_constructor = ea.EventAccumulator
    self._real_generator = ea._GeneratorFromPath

    def _FakeAccumulatorConstructor(generator, *args, **kwargs):
      # Route path lookups straight to the supplied in-memory generator so
      # the accumulator under test never touches the filesystem.
      ea._GeneratorFromPath = lambda x: generator
      return self._real_constructor(generator, *args, **kwargs)

    ea.EventAccumulator = _FakeAccumulatorConstructor

  def tearDown(self):
    self.stubs.CleanUp()
    # Restore the real constructor/generator patched out in setUp().
    ea.EventAccumulator = self._real_constructor
    ea._GeneratorFromPath = self._real_generator

  def testEmptyAccumulator(self):
    """An accumulator with no events reports the empty Tags() dict."""
    gen = _EventGenerator()
    x = ea.EventAccumulator(gen)
    x.Reload()
    self.assertEqual(x.Tags(), self.empty)

  def testTags(self):
    """Tags() reflects every value type added to the generator."""
    gen = _EventGenerator()
    gen.AddScalar('s1')
    gen.AddScalar('s2')
    gen.AddHistogram('hst1')
    gen.AddHistogram('hst2')
    gen.AddImage('im1')
    gen.AddImage('im2')
    gen.AddAudio('snd1')
    gen.AddAudio('snd2')
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.IMAGES: ['im1', 'im2'],
        ea.AUDIO: ['snd1', 'snd2'],
        ea.SCALARS: ['s1', 's2'],
        ea.HISTOGRAMS: ['hst1', 'hst2'],
        ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
        ea.GRAPH: False,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })

  def testReload(self):
    """Events added after a Reload() are picked up by the next Reload()."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    self.assertEqual(acc.Tags(), self.empty)
    gen.AddScalar('s1')
    gen.AddScalar('s2')
    gen.AddHistogram('hst1')
    gen.AddHistogram('hst2')
    gen.AddImage('im1')
    gen.AddImage('im2')
    gen.AddAudio('snd1')
    gen.AddAudio('snd2')
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.IMAGES: ['im1', 'im2'],
        ea.AUDIO: ['snd1', 'snd2'],
        ea.SCALARS: ['s1', 's2'],
        ea.HISTOGRAMS: ['hst1', 'hst2'],
        ea.COMPRESSED_HISTOGRAMS: ['hst1', 'hst2'],
        ea.GRAPH: False,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })

  def testScalars(self):
    """Scalar events are returned per tag as ScalarEvent namedtuples."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    s1 = ea.ScalarEvent(wall_time=1, step=10, value=32)
    s2 = ea.ScalarEvent(wall_time=2, step=12, value=64)
    gen.AddScalar('s1', wall_time=1, step=10, value=32)
    gen.AddScalar('s2', wall_time=2, step=12, value=64)
    acc.Reload()
    self.assertEqual(acc.Scalars('s1'), [s1])
    self.assertEqual(acc.Scalars('s2'), [s2])

  def testHistograms(self):
    """Histogram events round-trip through the accumulator."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    val1 = ea.HistogramValue(min=1,
                             max=2,
                             num=3,
                             sum=4,
                             sum_squares=5,
                             bucket_limit=[1, 2, 3],
                             bucket=[0, 3, 0])
    val2 = ea.HistogramValue(min=-2,
                             max=3,
                             num=4,
                             sum=5,
                             sum_squares=6,
                             bucket_limit=[2, 3, 4],
                             bucket=[1, 3, 0])
    hst1 = ea.HistogramEvent(wall_time=1, step=10, histogram_value=val1)
    hst2 = ea.HistogramEvent(wall_time=2, step=12, histogram_value=val2)
    gen.AddHistogram('hst1',
                     wall_time=1,
                     step=10,
                     hmin=1,
                     hmax=2,
                     hnum=3,
                     hsum=4,
                     hsum_squares=5,
                     hbucket_limit=[1, 2, 3],
                     hbucket=[0, 3, 0])
    gen.AddHistogram('hst2',
                     wall_time=2,
                     step=12,
                     hmin=-2,
                     hmax=3,
                     hnum=4,
                     hsum=5,
                     hsum_squares=6,
                     hbucket_limit=[2, 3, 4],
                     hbucket=[1, 3, 0])
    acc.Reload()
    self.assertEqual(acc.Histograms('hst1'), [hst1])
    self.assertEqual(acc.Histograms('hst2'), [hst2])

  def testCompressedHistograms(self):
    """Histograms are compressed to the configured basis points."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen, compression_bps=(0, 2500, 5000, 7500, 10000))
    gen.AddHistogram('hst1',
                     wall_time=1,
                     step=10,
                     hmin=1,
                     hmax=2,
                     hnum=3,
                     hsum=4,
                     hsum_squares=5,
                     hbucket_limit=[1, 2, 3],
                     hbucket=[0, 3, 0])
    gen.AddHistogram('hst2',
                     wall_time=2,
                     step=12,
                     hmin=-2,
                     hmax=3,
                     hnum=4,
                     hsum=5,
                     hsum_squares=6,
                     hbucket_limit=[2, 3, 4],
                     hbucket=[1, 3, 0])
    acc.Reload()
    # Create the expected values after compressing hst1
    expected_vals1 = [ea.CompressedHistogramValue(bp, val)
                      for bp, val in [(0, 1.0), (2500, 1.25), (5000, 1.5),
                                      (7500, 1.75), (10000, 2.0)]]
    expected_cmphst1 = ea.CompressedHistogramEvent(
        wall_time=1,
        step=10,
        compressed_histogram_values=expected_vals1)
    self.assertEqual(acc.CompressedHistograms('hst1'), [expected_cmphst1])
    # Create the expected values after compressing hst2
    expected_vals2 = [
        ea.CompressedHistogramValue(bp, val)
        for bp, val in [(0, -2), (2500, 2), (5000, 2 + 1 / 3),
                        (7500, 2 + 2 / 3), (10000, 3)]
    ]
    expected_cmphst2 = ea.CompressedHistogramEvent(
        wall_time=2,
        step=12,
        compressed_histogram_values=expected_vals2)
    self.assertEqual(acc.CompressedHistograms('hst2'), [expected_cmphst2])

  def testCompressHistogram_uglyHistogram(self):
    """Compression handles degenerate bucket limits (0-width, huge max)."""
    bps = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
    histogram_values = ea.HistogramValue(
        min=0.0,
        max=1.0,
        num=960.0,
        sum=64.0,
        sum_squares=64.0,
        bucket_limit=[
            0.0, 1e-12, 0.917246389039776, 1.0089710279437536,
            1.7976931348623157e+308
        ],
        bucket=[0.0, 896.0, 0.0, 64.0, 0.0])
    histogram_event = ea.HistogramEvent(0, 0, histogram_values)
    compressed_event = ea._CompressHistogram(histogram_event, bps)
    vals = compressed_event.compressed_histogram_values
    # NOTE(review): assertEquals is a deprecated alias of assertEqual.
    self.assertEquals(tuple(v.basis_point for v in vals), bps)
    self.assertAlmostEqual(vals[0].value, 0.0)
    self.assertAlmostEqual(vals[1].value, 7.157142857142856e-14)
    self.assertAlmostEqual(vals[2].value, 1.7003571428571426e-13)
    self.assertAlmostEqual(vals[3].value, 3.305357142857143e-13)
    self.assertAlmostEqual(vals[4].value, 5.357142857142857e-13)
    self.assertAlmostEqual(vals[5].value, 7.408928571428571e-13)
    self.assertAlmostEqual(vals[6].value, 9.013928571428571e-13)
    self.assertAlmostEqual(vals[7].value, 9.998571428571429e-13)
    self.assertAlmostEqual(vals[8].value, 1.0)

  def testImages(self):
    """Image events round-trip through the accumulator."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    im1 = ea.ImageEvent(wall_time=1,
                        step=10,
                        encoded_image_string=b'big',
                        width=400,
                        height=300)
    im2 = ea.ImageEvent(wall_time=2,
                        step=12,
                        encoded_image_string=b'small',
                        width=40,
                        height=30)
    gen.AddImage('im1',
                 wall_time=1,
                 step=10,
                 encoded_image_string=b'big',
                 width=400,
                 height=300)
    gen.AddImage('im2',
                 wall_time=2,
                 step=12,
                 encoded_image_string=b'small',
                 width=40,
                 height=30)
    acc.Reload()
    self.assertEqual(acc.Images('im1'), [im1])
    self.assertEqual(acc.Images('im2'), [im2])

  def testAudio(self):
    """Audio events round-trip through the accumulator."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    snd1 = ea.AudioEvent(wall_time=1,
                         step=10,
                         encoded_audio_string=b'big',
                         content_type='audio/wav',
                         sample_rate=44100,
                         length_frames=441000)
    snd2 = ea.AudioEvent(wall_time=2,
                         step=12,
                         encoded_audio_string=b'small',
                         content_type='audio/wav',
                         sample_rate=44100,
                         length_frames=44100)
    gen.AddAudio('snd1',
                 wall_time=1,
                 step=10,
                 encoded_audio_string=b'big',
                 content_type='audio/wav',
                 sample_rate=44100,
                 length_frames=441000)
    gen.AddAudio('snd2',
                 wall_time=2,
                 step=12,
                 encoded_audio_string=b'small',
                 content_type='audio/wav',
                 sample_rate=44100,
                 length_frames=44100)
    acc.Reload()
    self.assertEqual(acc.Audio('snd1'), [snd1])
    self.assertEqual(acc.Audio('snd2'), [snd2])

  def testKeyError(self):
    """Querying a tag of the wrong type (or unknown tag) raises KeyError."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    acc.Reload()
    with self.assertRaises(KeyError):
      acc.Scalars('s1')
    with self.assertRaises(KeyError):
      acc.Scalars('hst1')
    with self.assertRaises(KeyError):
      acc.Scalars('im1')
    with self.assertRaises(KeyError):
      acc.Histograms('s1')
    with self.assertRaises(KeyError):
      acc.Histograms('im1')
    with self.assertRaises(KeyError):
      acc.Images('s1')
    with self.assertRaises(KeyError):
      acc.Images('hst1')
    with self.assertRaises(KeyError):
      acc.Audio('s1')
    with self.assertRaises(KeyError):
      acc.Audio('hst1')

  def testNonValueEvents(self):
    """Tests that non-value events in the generator don't cause early exits."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddScalar('s1', wall_time=1, step=10, value=20)
    # A file_version event has no summary values but must not stop loading.
    gen.AddEvent(tf.Event(wall_time=2, step=20, file_version='nots2'))
    gen.AddScalar('s3', wall_time=3, step=100, value=1)
    gen.AddHistogram('hst1')
    gen.AddImage('im1')
    gen.AddAudio('snd1')
    acc.Reload()
    self.assertTagsEqual(acc.Tags(), {
        ea.IMAGES: ['im1'],
        ea.AUDIO: ['snd1'],
        ea.SCALARS: ['s1', 's3'],
        ea.HISTOGRAMS: ['hst1'],
        ea.COMPRESSED_HISTOGRAMS: ['hst1'],
        ea.GRAPH: False,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })

  def testExpiredDataDiscardedAfterRestartForFileVersionLessThan2(self):
    """Tests that events are discarded after a restart is detected.

    If a step value is observed to be lower than what was previously seen,
    this should force a discard of all previous items with the same tag
    that are outdated.

    Only file versions < 2 use this out-of-order discard logic. Later versions
    discard events based on the step value of SessionLog.START.
    """
    warnings = []
    self.stubs.Set(logging, 'warn', warnings.append)
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    gen.AddScalar('s1', wall_time=1, step=200, value=20)
    gen.AddScalar('s1', wall_time=1, step=300, value=20)
    acc.Reload()
    ## Check that number of items are what they should be
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
    gen.AddScalar('s1', wall_time=1, step=101, value=20)
    gen.AddScalar('s1', wall_time=1, step=201, value=20)
    gen.AddScalar('s1', wall_time=1, step=301, value=20)
    acc.Reload()
    ## Check that we have discarded 200 and 300 from s1
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])

  def testOrphanedDataNotDiscardedIfFlagUnset(self):
    """Tests that events are not discarded if purge_orphaned_data is false.
    """
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen, purge_orphaned_data=False)
    gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    gen.AddScalar('s1', wall_time=1, step=200, value=20)
    gen.AddScalar('s1', wall_time=1, step=300, value=20)
    acc.Reload()
    ## Check that number of items are what they should be
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300])
    gen.AddScalar('s1', wall_time=1, step=101, value=20)
    gen.AddScalar('s1', wall_time=1, step=201, value=20)
    gen.AddScalar('s1', wall_time=1, step=301, value=20)
    acc.Reload()
    ## Check that nothing was discarded: with purge_orphaned_data=False the
    ## out-of-order steps 101/201/301 are appended rather than purging
    ## 200 and 300.
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200, 300, 101,
                                                           201, 301])

  def testEventsDiscardedPerTagAfterRestartForFileVersionLessThan2(self):
    """Tests that event discards after restart, only affect the misordered tag.

    If a step value is observed to be lower than what was previously seen,
    this should force a discard of all previous items that are outdated, but
    only for the out of order tag. Other tags should remain unaffected.

    Only file versions < 2 use this out-of-order discard logic. Later versions
    discard events based on the step value of SessionLog.START.
    """
    warnings = []
    self.stubs.Set(logging, 'warn', warnings.append)
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=0, step=0, file_version='brain.Event:1'))
    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    gen.AddScalar('s1', wall_time=1, step=200, value=20)
    gen.AddScalar('s1', wall_time=1, step=300, value=20)
    gen.AddScalar('s1', wall_time=1, step=101, value=20)
    gen.AddScalar('s1', wall_time=1, step=201, value=20)
    gen.AddScalar('s1', wall_time=1, step=301, value=20)
    gen.AddScalar('s2', wall_time=1, step=101, value=20)
    gen.AddScalar('s2', wall_time=1, step=201, value=20)
    gen.AddScalar('s2', wall_time=1, step=301, value=20)
    acc.Reload()
    ## Check that we have discarded 200 and 300
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 101, 201, 301])
    ## Check that s1 discards do not affect s2
    ## i.e. check that only events from the out of order tag are discarded
    self.assertEqual([x.step for x in acc.Scalars('s2')], [101, 201, 301])

  def testOnlySummaryEventsTriggerDiscards(self):
    """Test that file version event does not trigger data purge."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    # file_version and graph events carry step=0 but must not purge s1.
    ev1 = tf.Event(wall_time=2, step=0, file_version='brain.Event:1')
    graph_bytes = graph_pb2.GraphDef().SerializeToString()
    ev2 = tf.Event(wall_time=3, step=0, graph_def=graph_bytes)
    gen.AddEvent(ev1)
    gen.AddEvent(ev2)
    acc.Reload()
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100])

  def testSessionLogStartMessageDiscardsExpiredEvents(self):
    """Test that SessionLog.START message discards expired events.

    This discard logic is preferred over the out-of-order step discard logic,
    but this logic can only be used for event protos which have the SessionLog
    enum, which was introduced to event.proto for file_version >= brain.Event:2.
    """
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=0, step=1, file_version='brain.Event:2'))
    gen.AddScalar('s1', wall_time=1, step=100, value=20)
    gen.AddScalar('s1', wall_time=1, step=200, value=20)
    gen.AddScalar('s1', wall_time=1, step=300, value=20)
    gen.AddScalar('s1', wall_time=1, step=400, value=20)
    gen.AddScalar('s2', wall_time=1, step=202, value=20)
    gen.AddScalar('s2', wall_time=1, step=203, value=20)
    # START at step=201 discards everything with step >= 201, on all tags.
    slog = SessionLog(status=SessionLog.START)
    gen.AddEvent(tf.Event(wall_time=2, step=201, session_log=slog))
    acc.Reload()
    self.assertEqual([x.step for x in acc.Scalars('s1')], [100, 200])
    self.assertEqual([x.step for x in acc.Scalars('s2')], [])

  def testFirstEventTimestamp(self):
    """Test that FirstEventTimestamp() returns wall_time of the first event."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=10, step=20, file_version='brain.Event:2'))
    gen.AddScalar('s1', wall_time=30, step=40, value=20)
    self.assertEqual(acc.FirstEventTimestamp(), 10)

  def testReloadPopulatesFirstEventTimestamp(self):
    """Test that Reload() means FirstEventTimestamp() won't load events."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=1, step=2, file_version='brain.Event:2'))
    acc.Reload()

    def _Die(*args, **kwargs):  # pylint: disable=unused-argument
      raise RuntimeError('Load() should not be called')

    self.stubs.Set(gen, 'Load', _Die)
    self.assertEqual(acc.FirstEventTimestamp(), 1)

  def testFirstEventTimestampLoadsEvent(self):
    """Test that FirstEventTimestamp() doesn't discard the loaded event."""
    gen = _EventGenerator()
    acc = ea.EventAccumulator(gen)
    gen.AddEvent(tf.Event(wall_time=1, step=2, file_version='brain.Event:2'))
    self.assertEqual(acc.FirstEventTimestamp(), 1)
    acc.Reload()
    # The file_version event consumed by FirstEventTimestamp() must still
    # have been processed by the accumulator.
    self.assertEqual(acc.file_version, 2.0)

  def testTFSummaryScalar(self):
    """Verify processing of tf.summary.scalar, which uses TensorSummary op."""
    event_sink = _EventGenerator(zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = tf.placeholder(tf.float32)
      tf.summary.scalar('scalar1', ipt)
      tf.summary.scalar('scalar2', ipt * ipt)
      merged = tf.merge_all_summaries()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged, feed_dict={ipt: i})
        writer.add_summary(summ, global_step=i)
    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()
    seq1 = [ea.ScalarEvent(wall_time=0, step=i, value=i) for i in xrange(10)]
    seq2 = [
        ea.ScalarEvent(
            wall_time=0, step=i, value=i * i) for i in xrange(10)
    ]
    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: [],
        ea.AUDIO: [],
        ea.SCALARS: ['scalar1', 'scalar2'],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })
    self.assertEqual(accumulator.Scalars('scalar1'), seq1)
    self.assertEqual(accumulator.Scalars('scalar2'), seq2)
    first_value = accumulator.Scalars('scalar1')[0].value
    self.assertTrue(isinstance(first_value, float))

  def testTFSummaryImage(self):
    """Verify processing of tf.summary.image."""
    event_sink = _EventGenerator(zero_out_timestamps=True)
    writer = SummaryToEventTransformer(event_sink)
    with self.test_session() as sess:
      ipt = tf.ones([10, 4, 4, 3], tf.uint8)
      # This is an interesting example, because the old tf.image_summary op
      # would throw an error here, because it would be tag reuse.
      # Using the tf node name instead allows argument re-use to the image
      # summary.
      with tf.name_scope('1'):
        tf.summary.image('images', ipt, max_outputs=1)
      with tf.name_scope('2'):
        tf.summary.image('images', ipt, max_outputs=2)
      with tf.name_scope('3'):
        tf.summary.image('images', ipt, max_outputs=3)
      merged = tf.merge_all_summaries()
      writer.add_graph(sess.graph)
      for i in xrange(10):
        summ = sess.run(merged)
        writer.add_summary(summ, global_step=i)
    accumulator = ea.EventAccumulator(event_sink)
    accumulator.Reload()
    tags = [
        u'1/images/image', u'2/images/image/0', u'2/images/image/1',
        u'3/images/image/0', u'3/images/image/1', u'3/images/image/2'
    ]
    self.assertTagsEqual(accumulator.Tags(), {
        ea.IMAGES: tags,
        ea.AUDIO: [],
        ea.SCALARS: [],
        ea.HISTOGRAMS: [],
        ea.COMPRESSED_HISTOGRAMS: [],
        ea.GRAPH: True,
        ea.META_GRAPH: False,
        ea.RUN_METADATA: []
    })
class RealisticEventAccumulatorTest(EventAccumulatorTest):
  """End-to-end tests: write real event files to disk, then read them back."""

  def setUp(self):
    super(RealisticEventAccumulatorTest, self).setUp()

  def testScalarsRealistically(self):
    """Test accumulator by writing values and then reading them."""

    def FakeScalarSummary(tag, value):
      # Build a minimal Summary proto holding one scalar value.
      value = tf.Summary.Value(tag=tag, simple_value=value)
      summary = tf.Summary(value=[value])
      return summary

    directory = os.path.join(self.get_temp_dir(), 'values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)
    writer = tf.train.SummaryWriter(directory, max_queue=100)
    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer.
    writer.add_graph(graph)
    meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)
    run_metadata = tf.RunMetadata()
    device_stats = run_metadata.step_stats.dev_stats.add()
    device_stats.device = 'test device'
    writer.add_run_metadata(run_metadata, 'test run')
    # Write a bunch of events using the writer.
    for i in xrange(30):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()
    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: [],
            ea.AUDIO: [],
            ea.SCALARS: ['id', 'sq'],
            ea.HISTOGRAMS: [],
            ea.COMPRESSED_HISTOGRAMS: [],
            ea.GRAPH: True,
            ea.META_GRAPH: True,
            ea.RUN_METADATA: ['test run']
        })
    id_events = acc.Scalars('id')
    sq_events = acc.Scalars('sq')
    self.assertEqual(30, len(id_events))
    self.assertEqual(30, len(sq_events))
    for i in xrange(30):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)
    # Write a few more events to test incremental reloading
    for i in xrange(30, 40):
      summ_id = FakeScalarSummary('id', i)
      summ_sq = FakeScalarSummary('sq', i * i)
      writer.add_summary(summ_id, i * 5)
      writer.add_summary(summ_sq, i * 5)
    writer.flush()
    # Verify we can now see all of the data
    acc.Reload()
    id_events = acc.Scalars('id')
    sq_events = acc.Scalars('sq')
    self.assertEqual(40, len(id_events))
    self.assertEqual(40, len(sq_events))
    for i in xrange(40):
      self.assertEqual(i * 5, id_events[i].step)
      self.assertEqual(i * 5, sq_events[i].step)
      self.assertEqual(i, id_events[i].value)
      self.assertEqual(i * i, sq_events[i].value)
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph())

  def testGraphFromMetaGraphBecomesAvailable(self):
    """Test accumulator by writing values and then reading them."""
    directory = os.path.join(self.get_temp_dir(), 'metagraph_test_values_dir')
    if gfile.IsDirectory(directory):
      gfile.DeleteRecursively(directory)
    gfile.MkDir(directory)
    writer = tf.train.SummaryWriter(directory, max_queue=100)
    with tf.Graph().as_default() as graph:
      _ = tf.constant([2.0, 1.0])
    # Add a graph to the summary writer (via the meta graph only; the
    # accumulator should surface ea.GRAPH from the embedded graph_def).
    meta_graph_def = saver.export_meta_graph(
        graph_def=graph.as_graph_def(add_shapes=True))
    writer.add_meta_graph(meta_graph_def)
    writer.flush()
    # Verify that we can load those events properly
    acc = ea.EventAccumulator(directory)
    acc.Reload()
    self.assertTagsEqual(
        acc.Tags(),
        {
            ea.IMAGES: [],
            ea.AUDIO: [],
            ea.SCALARS: [],
            ea.HISTOGRAMS: [],
            ea.COMPRESSED_HISTOGRAMS: [],
            ea.GRAPH: True,
            ea.META_GRAPH: True,
            ea.RUN_METADATA: []
        })
    self.assertProtoEquals(graph.as_graph_def(add_shapes=True), acc.Graph())
    self.assertProtoEquals(meta_graph_def, acc.MetaGraph())
# Standard test-module entry point: run all test cases in this file.
if __name__ == '__main__':
  tf.test.main()
| |
# -*- coding: utf-8 -*-
"""
simplegcm.gcm.
This module implements the Google Cloud Messaging (GCM) service API.
:copyright: (c) 2015 by Martin Alderete.
:license: BSD License, see LICENSE for more details.
"""
import json
import requests
# Public API of this module.
__all__ = ('GCMException', 'Message', 'Notification',
           'Result', 'Options', 'Sender')
class GCMException(Exception):
    """Raised when an operation against the GCM service fails."""
class InnerDictSerializeMixin(object):
    """Mixin that exposes the instance's set attributes as a dict.

    Attributes whose value is ``None`` are treated as "not provided" and
    omitted, so they never appear in the JSON payload sent to GCM.
    """

    @property
    def data(self):
        """Return the object data.

        Only attributes explicitly given a value are included; ``None``
        means "unset".  Falsy-but-meaningful values such as ``0`` or
        ``False`` (e.g. ``time_to_live=0``, ``badge=0``) are kept — the
        previous truthiness filter (``if v``) silently dropped them.

        :rtype: dict
        """
        return {k: v for (k, v) in self.__dict__.items() if v is not None}
class Notification(InnerDictSerializeMixin, object):
    """Notification.

    :param title: Notification title.
    :type title: str
    :param body: Notification body.
    :type body: str
    :param icon: Notification icon.
    :type icon: str
    :param sound: Sound to be played.
    :type sound: str
    :param badge: Badge to be used.
    :type badge: str
    :param tag: Indicates whether each notification results in a new entry or not.
    :type tag: bool
    :param color: Color of the icon.
    :type color: str
    :param click_action: Action bound to notification click.
    :type click_action: str
    :param body_loc_key: Corresponds to "loc-key" in APNS payload.
    :type body_loc_key: str
    :param body_loc_args: Arguments. Corresponds to "loc-args" in APNS payload.
    :type body_loc_args: list
    :param title_loc_key: Corresponds to "title-loc-key" in APNS payload.
    :type title_loc_key: str
    :param title_loc_args: Arguments. Corresponds to "title-loc-args" in APNS payload.
    :type title_loc_args: list

    .. note:: All of the above parameters are platform dependent.
        See https://developers.google.com/cloud-messaging/server-ref#table1b
    """
    # Supported target platforms.
    ANDROID = 'android'
    IOS = 'ios'

    # NOTE(review): attribute name misspells "FIELDS"; kept as-is since
    # external code may already reference it by this name.
    _MANDATORY_FIELS_BY_PLATFORM = {
        'android': ('title', 'icon'),
        'ios': ()
    }

    def __init__(self, title=None, body=None, icon=None, sound=None,
                 badge=None, tag=None, color=None, click_action=None,
                 body_loc_key=None, body_loc_args=None,
                 title_loc_key=None, title_loc_args=None):
        # All fields default to None so the InnerDictSerializeMixin `data`
        # property only serializes the fields a caller actually set.
        self.title = title
        self.body = body
        self.icon = icon
        self.sound = sound
        self.badge = badge
        self.tag = tag
        self.color = color
        self.click_action = click_action
        self.body_loc_key = body_loc_key
        self.body_loc_args = body_loc_args
        self.title_loc_key = title_loc_key
        self.title_loc_args = title_loc_args
class Options(InnerDictSerializeMixin, object):
    """Options.

    :param collapse_key: Identifies a group of messages.
    :type collapse_key: str
    :param priority: Messages priority.
    :type priority: int
    :param content_available: Flag to wake up an inactive device (iOS).
    :type content_available: bool
    :param delay_while_idle: Flag to send when the device becomes available.
    :type delay_while_idle: bool
    :param time_to_live: How long the message should be stored in GCM.
    :type time_to_live: int
    :param delivery_receipt_requested: Flag to confirm a delivered message.
    :type delivery_receipt_requested: bool
    :param restricted_package_name: Specifies the package name of the application.
    :type restricted_package_name: str
    :param dry_run: Flag to set testing mode.
    :type dry_run: bool

    .. note:: All of the above parameters are optional.
        See https://developers.google.com/cloud-messaging/server-ref#table1
    """

    def __init__(self, collapse_key=None,
                 priority=None, content_available=None,
                 delay_while_idle=None, time_to_live=None,
                 delivery_receipt_requested=None, dry_run=None,
                 restricted_package_name=None):
        # All fields default to None so the InnerDictSerializeMixin `data`
        # property only serializes the options a caller actually set.
        self.collapse_key = collapse_key
        self.priority = priority
        self.content_available = content_available
        self.delay_while_idle = delay_while_idle
        self.time_to_live = time_to_live
        self.delivery_receipt_requested = delivery_receipt_requested
        self.dry_run = dry_run
        self.restricted_package_name = restricted_package_name
class Result(object):
    """Parsed response from the GCM server.

    :param canonicals: Map with old token and new token.
    :type canonicals: dict
    :param multicast_id: Unique ID (number) identifying the multicast message.
    :type multicast_id: int
    :param success: Map of registration_id to message_id successfully sent.
    :type success: dict
    :param failure: Map of registration_id to error message.
    :type failure: dict
    :param unregistered: List of registration_ids no longer registered.
    :type unregistered: list
    :param unavailables: List of registration_ids that should be re-sent.
    :type unavailables: list
    :param backoff: Estimated time to wait before retry.
    :type backoff: int
    :param message: Related message.
    :type message: :class:`~simplegcm.gcm.Message`
    :param raw_result: JSON returned by GCM server.
    :type raw_result: dict
    """

    def __init__(self, canonicals=None, multicast_id=None,
                 success=None, failure=None, unregistered=None,
                 unavailables=None, backoff=None, message=None,
                 raw_result=None):
        self.canonicals = canonicals
        self.multicast_id = multicast_id
        self.success = success
        self.failure = failure
        self.unregistered = unregistered
        self.unavailables = unavailables
        self.backoff = backoff
        self.message = message
        self._raw_result = raw_result

    def get_retry_message(self):
        """Build a follow-up message targeting the unavailable tokens.

        :return: A new message to retry, or None when there is nothing
            to retry.
        :rtype: :class:`~simplegcm.gcm.Message` or None
        """
        if not self.unavailables:
            return None
        message_cls = type(self.message)
        return message_cls.build_retry_message(self.message, self.unavailables)
class Message(object):
    """GCM message to send.

    :param to: a registration token or a topic name (single target)
    :type to: str
    :param registration_ids: registration device tokens (multicast)
    :type registration_ids: list
    :param data: custom key/value payload to send
    :type data: dict
    :param notification: notification fields, used to build a
        ``notification_class`` instance
    :type notification: dict
    :param options: delivery options, used to build an ``options_class``
        instance
    :type options: dict
    :raises ValueError: if neither, or both, of ``to`` and
        ``registration_ids`` are provided.

    .. note:: Messages MUST contain 'to' or 'registration_ids' at least
    """
    notification_class = Notification
    options_class = Options

    def __init__(self, to=None, registration_ids=None,
                 data=None, notification=None, options=None):
        # Exactly one of 'to' / 'registration_ids' must be provided (truthy).
        if not any((to, registration_ids)):
            raise ValueError('You must provide "registration_ids" or "to"')
        if all((to, registration_ids)):
            raise ValueError('You must provide "registration_ids" or "to", not both')
        self._to = to
        self._registration_ids = registration_ids
        self._data = None
        self._notif = None
        self._opt = None
        if data is not None:
            self._data = data
        # Wrap the raw dicts in helper objects so ``body`` can rely on
        # their ``.data`` representation.
        if notification is not None:
            self._notif = self.notification_class(**notification)
        if options is not None:
            self._opt = self.options_class(**options)

    @property
    def body(self):
        """Return the JSON-serializable payload which represents the message.

        :rtype: dict
        """
        payload = {}
        # Set the receptor: single token/topic or multicast list.
        if self._to:
            payload['to'] = self._to
        if self._registration_ids:
            payload['registration_ids'] = self._registration_ids
        # Notification section.
        if self._notif:
            payload['notification'] = self._notif.data
        # Delivery options are merged at the top level of the payload.
        if self._opt:
            payload.update(self._opt.data)
        # Custom application data.
        if self._data:
            payload['data'] = self._data
        return payload

    @classmethod
    def build_retry_message(cls, message, registration_ids):
        """Return a new message, cloned from *message* but addressed
        only to *registration_ids*.

        :param message: the message to clone
        :type message: :class:`~simplegcm.gcm.Message`
        :param registration_ids: tokens the new message is addressed to
        :type registration_ids: list
        :return: A new message.
        :rtype: :class:`~simplegcm.gcm.Message`
        """
        data = {
            'registration_ids': registration_ids,
            'data': message._data,
            'notification': message._notif.data if message._notif else None,
            'options': message._opt.data if message._opt else None
        }
        retry_msg = cls(**data)
        return retry_msg
class Sender(object):
    """GCM sender.

    Example:

    >>> import simplegcm
    >>> sender = simplegcm.Sender(api_key='your_api_key')
    >>> r_ids = ['ABC', 'HJK']
    >>> data = {'score': 5.1}
    >>> opt = {'dry_run': True}
    >>> message = simplegcm.Message(registration_ids=r_ids,
    ...                             data=data, options=opt)
    >>> ret = sender.send(message)
    >>> retry_msg = ret.get_retry_message()
    >>> if retry_msg:
    ...     print('Retry')
    ...     ret = sender.send(retry_msg)
    ... else:
    ...     print('All sent!')

    :param api_key: Service's API key
    :type api_key: str
    :param url: Service's URL; defaults to :data:`GCM_URL`
    :type url: str
    """
    GCM_URL = 'https://gcm-http.googleapis.com/gcm/send'
    result_class = Result

    def __init__(self, api_key=None, url=None):
        self.api_key = api_key
        self.url = self.GCM_URL
        if url:
            self.url = url

    def _build_headers(self):
        """Return the HTTP headers required by the GCM HTTP endpoint."""
        # GCM expects the API key in an 'Authorization: key=...' header.
        headers = {
            'Content-type': 'application/json',
            'Authorization': 'key=%s' % self.api_key,
        }
        return headers

    def _parse_response(self, message, response):
        """Translate the HTTP *response* into kwargs for ``result_class``.

        :param message: the message that was sent
        :type message: :class:`~simplegcm.gcm.Message`
        :param response: HTTP response returned by the GCM server
        :rtype: dict
        :raises GCMException: on a bad request, an invalid API key, or an
            unexpected HTTP status code.
        """
        r_status = response.status_code
        if r_status == requests.codes.BAD:
            # Bad request; more info in the response content.
            raise GCMException(response.content)
        if r_status == requests.codes.UNAUTHORIZED:
            # Invalid API key.
            raise GCMException('Unauthorized API_KEY')
        retry_after = response.headers.get('Retry-After')
        if 500 <= r_status <= 599:
            # 5xx family: server-side trouble. Mark every registration id
            # as 'unavailable' so the whole batch is retried later.
            data = {
                'raw_result': None,
                'message': message,
                'canonicals': None,
                'multicast_id': None,
                'success': {},
                'failure': {},
                'unregistered': [],
                'unavailables': message._registration_ids,
                'backoff': retry_after
            }
        elif r_status == requests.codes.OK:
            r_ids = message._registration_ids
            resp_data = response.json()
            success = {}
            failure = {}
            canonicals = {}
            unregistered = []
            unavailables = []
            # GCM returns one result entry per registration id, in order.
            for reg_id, resp in zip(r_ids, resp_data['results']):
                if 'message_id' in resp:
                    success[reg_id] = resp['message_id']
                    if 'registration_id' in resp:
                        # The server issued a new (canonical) token for reg_id.
                        canonicals[reg_id] = resp['registration_id']
                else:
                    error = resp['error']
                    if error in ('Unavailable', 'InternalServerError'):
                        unavailables.append(reg_id)
                    elif error == 'NotRegistered':
                        unregistered.append(reg_id)
                    else:
                        failure[reg_id] = error
            data = {
                # HTTP response
                'raw_result': resp_data,
                'message': message,
                # GCM fields
                'canonicals': canonicals,
                'multicast_id': resp_data['multicast_id'],
                'success': success,
                'failure': failure,
                'unregistered': unregistered,
                'unavailables': unavailables,
                'backoff': retry_after
            }
        else:
            # Previously any other status fell through and crashed with an
            # UnboundLocalError on 'data'; fail with a clear error instead.
            raise GCMException('Unexpected status code: %s' % r_status)
        return data

    def _make_request(self, message):
        """POST *message* to the service and return a result object."""
        payload = self._build_payload(message)
        headers = self._build_headers()
        data = json.dumps(payload)
        response = requests.post(self.url, data, headers=headers)
        result_data = self._parse_response(message, response)
        gcm_result = self.result_class(**result_data)
        return gcm_result

    def _build_payload(self, message):
        # Kept as a separate hook so subclasses can customize the wire payload.
        payload = message.body
        return payload

    def send(self, message):
        """Send a message.

        :param message: A :class:`~simplegcm.gcm.Message`
        :return: Result object
        :rtype: :class:`~simplegcm.gcm.Result`
        :raises ValueError: if the API key has not been set.
        :raises GCMException: If there was an error.
        """
        if self.api_key is None:
            raise ValueError('The API KEY has not been set yet!')
        return self._make_request(message)
| |
# -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing to simulate
# operation of an SR Beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculations from the default values, see the definition
# of all options in the list at the end of the script.
# v 0.07
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
#import time
#*********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
    """Describe the optical layout of the Coherent Hard X-ray (CHX) beamline of NSLS-II.

    Such a function has to be written for every beamline to be simulated;
    it is specific to a particular beamline.

    :param _v: structure containing all parameters allowed to be varied for that particular beamline
    :return: SRWLOptC container holding the selected optical elements (``el``)
        and their corresponding propagation parameters (``pp``)
    """
    #---Nominal Positions of Optical Elements [m] (with respect to straight section center)
    zS0 = 20.5 #S0 (primary slit)
    zHDM = 27.4 #Horizontally-Deflecting Mirror (HDM)
    zS1 = 29.9 #S1 slit
    zDCM = 31.6 #DCM (vertically-deflecting)
    zS2 = 34.3 #S2 slit
    zBPM = 34.6 #BPM for beam visualization
    zCRL = 35.4 #+tzCRL*1e-3 #CRL transfocator (corrected by translation)
    zKL = 45.0 #44.5 #+tzKL*1e-3 #Kinoform Lens for horizontal focusing (corrected by translation)
    zS3 = 48.0 #S3 slit ('pinhole', waist position)
    zSample = 48.7 #Sample position, COR of diffractometer
    zD = 58.7 #Detector position
    #---Instantiation of the Optical Elements
    #Each sub-list is one beamline version/option, selected by _v.op_BL (1-based).
    arElNamesAllOpt = [
        ['S0', 'S0_S1', 'S1', 'S1_S2', 'S2', 'S2_BPM', 'BPM_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'S3_SMP', 'SMP', 'SMP_D'], #1
        ['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #2
        ['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_KL', 'KLA', 'KL', 'KL_S3', 'S3', 'SMP', 'SMP_D'], #3
        ['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'CRL1', 'CRL2', 'CRL_SMP'], #4
        ['S0', 'S0_HDM', 'HDM', 'HDM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_S2', 'S2', 'S2_CRL', 'FIB', 'CRL_SMP'], #5
    ]
    arElNamesAll = arElNamesAllOpt[int(round(_v.op_BL - 1))]
    if(len(_v.op_fin) > 0):
        if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
    #Could be made more general
    #Truncate the element name list right after the requested final element.
    arElNames = [];
    for i in range(len(arElNamesAll)):
        arElNames.append(arElNamesAll[i])
        if(len(_v.op_fin) > 0):
            if(arElNamesAll[i] == _v.op_fin): break
    el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters
    #S0 (primary slit)
    if('S0' in arElNames):
        el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy, _v.op_S0_x, _v.op_S0_y)); pp.append(_v.op_S0_pp)
    #Drift S0 -> HDM
    if('S0_HDM' in arElNames):
        el.append(SRWLOptD(zHDM - zS0)); pp.append(_v.op_S0_HDM_pp)
    #Drift S0 -> S1
    if('S0_S1' in arElNames):
        el.append(SRWLOptD(zS1 - zS0)); pp.append(_v.op_S0_S1_pp)
    #HDM (Height Profile Error)
    if('HDM' in arElNames):
        horApHDM = 0.94e-03 #Projected dimensions
        verApHDM = 1.e-03
        angHDM = 3.1415926e-03 #? grazing angle
        #Only add the mirror when a height-profile data file is configured.
        ifnHDM = os.path.join(_v.fdir, _v.op_HDM_ifn) if len(_v.op_HDM_ifn) > 0 else ''
        if(len(ifnHDM) > 0):
            hProfDataHDM = srwl_uti_read_data_cols(ifnHDM, '\t', 0, 1)
            opHDM = srwl_opt_setup_surf_height_1d(hProfDataHDM, 'x', _ang=angHDM, _amp_coef=_v.op_HDM_amp, _nx=1000, _ny=200, _size_x=horApHDM, _size_y=verApHDM, _xc=_v.op_HDM_x, _yc=_v.op_HDM_y)
            ofnHDM = os.path.join(_v.fdir, _v.op_HDM_ofn) if len(_v.op_HDM_ofn) > 0 else ''
            if(len(ofnHDM) > 0):
                #Optionally dump the resulting optical path difference map.
                pathDifHDM = opHDM.get_data(3, 3)
                srwl_uti_save_intens_ascii(pathDifHDM, opHDM.mesh, ofnHDM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
            el.append(opHDM); pp.append(_v.op_HDM_pp)
    #Drift HDM -> S1
    if('HDM_S1' in arElNames):
        el.append(SRWLOptD(zS1 - zHDM + _v.op_S1_dz)); pp.append(_v.op_HDM_S1_pp)
    #S1 slit
    if('S1' in arElNames):
        el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy, _v.op_S1_x, _v.op_S1_y)); pp.append(_v.op_S1_pp)
    #Drift S1 -> DCM
    if('S1_DCM' in arElNames):
        el.append(SRWLOptD(zDCM - zS1)); pp.append(_v.op_S1_DCM_pp)
    #Double-Crystal Monochromator
    #NOTE(review): tCr1 is pre-set so angDCM below has a value even if the
    #crystal orientation is not computed — confirm intended scope of the
    #surface-error section relative to the 'DCM' branch.
    tCr1 = [0, 0, -1] #required for surface error
    if('DCM' in arElNames):
        tc = 1e-02 # [m] crystal thickness
        angAs = 0.*3.1415926/180. # [rad] asymmetry angle
        hc = [1,1,1] #Miller indices of the Si reflection
        dc = srwl_uti_cryst_pl_sp(hc, 'Si')
        #print('DCM Interplannar dist.:', dc)
        psi = srwl_uti_cryst_pol_f(_v.op_DCM_e, hc, 'Si')
        #print('DCM Fourier Components:', psi)
        #---------------------- DCM Crystal #1
        opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs)
        #Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst):
        #orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=1.5707963) # Horizontally-deflecting (from HXN)
        orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e) # Vertically-deflecting
        #Crystal #1 Orientation found:
        orientCr1 = orientDataCr1[0]
        tCr1 = orientCr1[0] #Tangential Vector to Crystal surface
        sCr1 = orientCr1[1] #Sagital Vector to Crystal surface
        nCr1 = orientCr1[2] #Normal Vector to Crystal surface
        print('DCM Crystal #1 Orientation (original):')
        print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1)
        if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1:
            rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0])
            tCr1 = uti_math.matr_prod(rot[0], tCr1)
            sCr1 = uti_math.matr_prod(rot[0], sCr1)
            nCr1 = uti_math.matr_prod(rot[0], nCr1)
        #Set the Crystal #1 orientation:
        opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1])
        #Orientation of the Outgoing Beam Frame being found:
        orientCr1OutFr = orientDataCr1[1]
        rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame
        ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame
        rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
        print('DCM Crystal #1 Outgoing Beam Frame:')
        print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1)
        #Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1
        TCr1 = [rxCr1, ryCr1, rzCr1]
        print('Total transformation matrix after DCM Crystal #1:')
        uti_math.matr_print(TCr1)
        #print(' ')
        el.append(opCr1); pp.append(_v.op_DCMC1_pp)
        #---------------------- DCM Crystal #2
        #Rolled by pi so it deflects the beam back parallel to the incoming axis.
        opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=3.1415926)
        #Find appropriate orientation of the Crystal #2 and the Output Beam Frame
        #orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=-1.5707963) #from HXN
        orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e, _ang_dif_pl=3.1415926) #Vertically-deflecting
        #Crystal #2 Orientation found:
        orientCr2 = orientDataCr2[0]
        tCr2 = orientCr2[0] #Tangential Vector to Crystal surface
        sCr2 = orientCr2[1] #Sagital Vector to Crystal surface
        nCr2 = orientCr2[2] #Normal Vector to Crystal surface
        print('Crystal #2 Orientation (original):')
        print(' t =', tCr2, 's =', sCr2, 'n =', nCr2)
        if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2:
            rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0])
            tCr2 = uti_math.matr_prod(rot[0], tCr2)
            sCr2 = uti_math.matr_prod(rot[0], sCr2)
            nCr2 = uti_math.matr_prod(rot[0], nCr2)
        #Set the Crystal #2 orientation
        opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1])
        #Orientation of the Outgoing Beam Frame being found:
        orientCr2OutFr = orientDataCr2[1]
        rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame
        ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame
        rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
        print('DCM Crystal #2 Outgoing Beam Frame:')
        print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2)
        #Incoming/Outgoing beam transformation matrix for the DCM Crystal #2
        TCr2 = [rxCr2, ryCr2, rzCr2]
        Ttot = uti_math.matr_prod(TCr2, TCr1)
        print('Total transformation matrix after DCM Crystal #2:')
        uti_math.matr_print(Ttot)
        #print(' ')
        el.append(opCr2); pp.append(_v.op_DCMC2_pp)
        #DCM Surface Error
        horApDCM = 2.e-03 #Projected dimensions
        verApDCM = 2.e-03
        angDCM = asin(abs(tCr1[2])) #Grazing angle to crystal surface
        ifnDCME = os.path.join(_v.fdir, _v.op_DCME_ifn) if len(_v.op_DCME_ifn) > 0 else ''
        if(len(ifnDCME) > 0):
            hProfDataDCME = srwl_uti_read_data_cols(ifnDCME, '\t', 0, 1)
            opDCME = srwl_opt_setup_surf_height_1d(hProfDataDCME, 'y', _ang=angDCM, _amp_coef=_v.op_DCME_amp, _nx=1000, _ny=200, _size_x=horApDCM, _size_y=verApDCM, _xc=_v.op_DCME_x, _yc=_v.op_DCME_y)
            ofnDCME = os.path.join(_v.fdir, _v.op_DCME_ofn) if len(_v.op_DCME_ofn) > 0 else ''
            if(len(ofnDCME) > 0):
                #Optionally dump the resulting optical path difference map.
                pathDifDCME = opDCME.get_data(3, 3)
                srwl_uti_save_intens_ascii(pathDifDCME, opDCME.mesh, ofnDCME, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
            el.append(opDCME); pp.append(_v.op_DCME_pp)
    #Drift DCM -> S2
    if('DCM_S2' in arElNames):
        el.append(SRWLOptD(zS2 - zDCM + _v.op_S2_dz)); pp.append(_v.op_DCM_S2_pp)
    #Boron Fiber (with Tungsten core)
    if('FIB' in arElNames):
        fpln = 3 #focusing in both planes
        if((_v.op_FIB_fpl == 'h') or (_v.op_FIB_fpl == 'H') or (_v.op_FIB_fpl == 'x') or (_v.op_FIB_fpl == 'X')): fpln = 1
        elif((_v.op_FIB_fpl == 'v') or (_v.op_FIB_fpl == 'V') or (_v.op_FIB_fpl == 'y') or (_v.op_FIB_fpl == 'Y')): fpln = 2
        el.append(srwl_opt_setup_cyl_fiber(fpln, _v.op_FIB_delta_e, _v.op_FIB_delta_c, _v.op_FIB_atnl_e, _v.op_FIB_atnl_c, _v.op_FIB_d_e, _v.op_FIB_d_c, _v.op_FIB_x, _v.op_FIB_y))
        pp.append(_v.op_FIB_pp)
    #Drift S1 -> S2
    if('S1_S2' in arElNames):
        el.append(SRWLOptD(zS2 - zS1 + _v.op_S2_dz)); pp.append(_v.op_S1_S2_pp)
    #S2 slit
    if('S2' in arElNames):
        el.append(SRWLOptA('r', 'a', _v.op_S2_dx, _v.op_S2_dy, _v.op_S2_x, _v.op_S2_y)); pp.append(_v.op_S2_pp)
    #Drift S2 -> BPM
    if('S2_BPM' in arElNames):
        el.append(SRWLOptD(zBPM - zS2 + _v.op_BPM_dz)); pp.append(_v.op_S2_BPM_pp)
    #Drift BPM -> CRL
    if('BPM_CRL' in arElNames):
        el.append(SRWLOptD(zCRL - zBPM + _v.op_CRL_dz)); pp.append(_v.op_BPM_CRL_pp)
    #Drift S2 -> CRL
    if('S2_CRL' in arElNames):
        el.append(SRWLOptD(zCRL - zS2 - _v.op_S2_dz + _v.op_CRL_dz)); pp.append(_v.op_S2_CRL_pp)
    #CRL1 (1D, vertically-focusing)
    if('CRL1' in arElNames):
        #Only add the lens stack when it has lenses and a focusing plane set.
        if((_v.op_CRL1_n > 0) and (_v.op_CRL1_fpl != '')):
            fpln = 3 #focusing in both planes
            if((_v.op_CRL1_fpl == 'h') or (_v.op_CRL1_fpl == 'H') or (_v.op_CRL1_fpl == 'x') or (_v.op_CRL1_fpl == 'X')): fpln = 1
            elif((_v.op_CRL1_fpl == 'v') or (_v.op_CRL1_fpl == 'V') or (_v.op_CRL1_fpl == 'y') or (_v.op_CRL1_fpl == 'Y')): fpln = 2
            el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL1_delta, _v.op_CRL1_atnl, 1, _v.op_CRL1_apnf, _v.op_CRL1_apf, _v.op_CRL1_rmin, _v.op_CRL1_n, _v.op_CRL1_thck, _v.op_CRL1_x, _v.op_CRL1_y))
            pp.append(_v.op_CRL1_pp)
    #CRL2 (1D, vertically-focusing)
    if('CRL2' in arElNames):
        #Only add the lens stack when it has lenses and a focusing plane set.
        if((_v.op_CRL2_n > 0) and (_v.op_CRL2_fpl != '')):
            fpln = 3 #focusing in both planes
            if((_v.op_CRL2_fpl == 'h') or (_v.op_CRL2_fpl == 'H') or (_v.op_CRL2_fpl == 'x') or (_v.op_CRL2_fpl == 'X')): fpln = 1
            elif((_v.op_CRL2_fpl == 'v') or (_v.op_CRL2_fpl == 'V') or (_v.op_CRL2_fpl == 'y') or (_v.op_CRL2_fpl == 'Y')): fpln = 2
            el.append(srwl_opt_setup_CRL(fpln, _v.op_CRL2_delta, _v.op_CRL2_atnl, 1, _v.op_CRL2_apnf, _v.op_CRL2_apf, _v.op_CRL2_rmin, _v.op_CRL2_n, _v.op_CRL2_thck, _v.op_CRL2_x, _v.op_CRL2_y))
            pp.append(_v.op_CRL2_pp)
    #Drift CRL -> KL
    if('CRL_KL' in arElNames):
        el.append(SRWLOptD(zKL - zCRL - _v.op_CRL_dz + _v.op_KL_dz)); pp.append(_v.op_CRL_KL_pp)
    #Drift CRL -> Sample
    if('CRL_SMP' in arElNames):
        el.append(SRWLOptD(zSample - zCRL - _v.op_CRL_dz + _v.op_SMP_dz)); pp.append(_v.op_CRL_SMP_pp)
    #KL Aperture
    if('KLA' in arElNames):
        el.append(SRWLOptA('r', 'a', _v.op_KLA_dx, _v.op_KLA_dy, _v.op_KL_x, _v.op_KL_y)); pp.append(_v.op_KLA_pp)
    #KL (1D, horizontally-focusing)
    if('KL' in arElNames):
        el.append(SRWLOptL(_v.op_KL_fx, _v.op_KL_fy, _v.op_KL_x, _v.op_KL_y)) #KL as Ideal Lens; to make it a transmission element with a profile read from a file
        pp.append(_v.op_KL_pp)
    #Drift KL -> S3
    if('KL_S3' in arElNames):
        el.append(SRWLOptD(zS3 - zKL + _v.op_S3_dz)); pp.append(_v.op_KL_S3_pp)
    #S3 slit
    if('S3' in arElNames):
        el.append(SRWLOptA('r', 'a', _v.op_S3_dx, _v.op_S3_dy, _v.op_S3_x, _v.op_S3_y)); pp.append(_v.op_S3_pp)
    #Drift S3 -> Sample
    if('S3_SMP' in arElNames):
        el.append(SRWLOptD(zSample - zS3 + _v.op_SMP_dz)); pp.append(_v.op_S3_SMP_pp)
    #Sample
    if('SMP' in arElNames):
        #The sample model is a pickled SRW transmission object read from file.
        ifnSMP = os.path.join(_v.fdir, _v.op_SMP_ifn) if len(_v.op_SMP_ifn) > 0 else ''
        if(len(ifnSMP) > 0):
            ifSMP = open(ifnSMP, 'rb')
            opSMP = pickle.load(ifSMP)
            #Implementing transverse shift of sample ??
            #Re-center the sample mesh around the requested (x, y) position.
            xSt = opSMP.mesh.xStart
            xFi = opSMP.mesh.xFin
            halfRangeX = 0.5*(xFi - xSt)
            opSMP.mesh.xStart = -halfRangeX + _v.op_SMP_x
            opSMP.mesh.xFin = halfRangeX + _v.op_SMP_x
            ySt = opSMP.mesh.yStart
            yFi = opSMP.mesh.yFin
            halfRangeY = 0.5*(yFi - ySt)
            opSMP.mesh.yStart = -halfRangeY + _v.op_SMP_y
            opSMP.mesh.yFin = halfRangeY + _v.op_SMP_y
            ofnSMP = os.path.join(_v.fdir, _v.op_SMP_ofn) if len(_v.op_SMP_ofn) > 0 else ''
            if(len(ofnSMP) > 0):
                #Optionally dump the sample's optical path difference map.
                pathDifSMP = opSMP.get_data(3, 3)
                srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
            el.append(opSMP); pp.append(_v.op_SMP_pp)
            ifSMP.close()
    #Drift Sample -> Detector
    if('SMP_D' in arElNames):
        el.append(SRWLOptD(zD - zSample + _v.op_D_dz)); pp.append(_v.op_SMP_D_pp)
    pp.append(_v.op_fin_pp)
    return SRWLOptC(el, pp)
#*********************************List of Parameters allowed to be varied
#---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary):
varParam = [
#---Data Folder
['fdir', 's', os.path.join(os.getcwd(), 'data_CHX'), 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'],
['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', -1.7, 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, 'electron beam relative energy spread'],
['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'],
#---Undulator
['und_per', 'f', 0.02, 'undulator period [m]'],
['und_len', 'f', 3., 'undulator length [m]'],
['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
#['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'],
#['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'],
#['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'],
#['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'],
['und_sx', 'i', 1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', 'ivu20_chx_sum.txt', 'name of magnetic measurements for different gaps summary file'],
#['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
#NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts
#on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can be differ from beamline to beamline).
#However, the default values of all the options/variables (above and below) can differ from beamline to beamline.
#---Beamline Optics
['op_r', 'f', 20.5, 'longitudinal position of the first optical element [m]'],
['op_fin', 's', 'FIB', 'name of the final optical element wavefront has to be propagated through'],
['op_BL', 'f', 5, 'beamline version/option number'],
['op_S0_dx', 'f', 0.2e-03, 'slit S0: horizontal size [m]'],
['op_S0_dy', 'f', 1.0e-03, 'slit S0: vertical size [m]'],
['op_S0_x', 'f', 0., 'slit S0: horizontal center position [m]'],
['op_S0_y', 'f', 0., 'slit S0: vertical center position [m]'],
['op_HDM_ifn', 's', 'CHX_HDM_height_prof_1d.dat', 'mirror HDM: input file name of height profile data'],
['op_HDM_amp', 'f', 1., 'mirror HDM: amplification coefficient for height profile data'],
['op_HDM_ofn', 's', 'res_CHX_HDM_opt_path_dif.dat', 'mirror HDM: output file name of optical path difference data'],
['op_HDM_x', 'f', 0., 'mirror HDM surface error: horizontal center position [m]'],
['op_HDM_y', 'f', 0., 'mirror HDM surface error: vertical center position [m]'],
['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'],
['op_S1_dx', 'f', 0.2e-03, 'slit S1: horizontal size [m]'],
['op_S1_dy', 'f', 1.0e-03, 'slit S1: vertical size [m]'],
['op_S1_x', 'f', 0., 'slit S1: horizontal center position [m]'],
['op_S1_y', 'f', 0., 'slit S1: vertical center position [m]'],
['op_DCM_e', 'f', 9000., 'DCM: central photon energy DCM is tuned to [eV]'],
['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'],
['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'],
['op_DCME_ifn', 's', 'CHX_DCM_height_prof_1d.dat', 'DCM surface error: input file name of height profile data'],
['op_DCME_amp', 'f', 1., 'DCM surface error: amplification coefficient'],
['op_DCME_ofn', 's', 'res_CHX_DCM_opt_path_dif.dat', 'DCM surface error: output file name of optical path difference data'],
['op_DCME_x', 'f', 0., 'DCM surface error: horizontal center position [m]'],
['op_DCME_y', 'f', 0., 'DCM surface error: vertical center position [m]'],
['op_FIB_fpl', 's', '', 'FIB: focusing plane ("h" or "v" or "hv" or "")'],
['op_FIB_delta_e', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of main (exterior) material'],
['op_FIB_delta_c', 'f', 4.20756805e-06, 'Fiber: refractive index decrement of core material'],
['op_FIB_atnl_e', 'f', 7312.94e-06, 'Fiber: attenuation length of main (exterior) material [m]'],
['op_FIB_atnl_c', 'f', 7312.94e-06, 'Fiber: attenuation length of core material [m]'],
['op_FIB_d_e', 'f', 100.e-06, 'Fiber: ext. diameter [m]'],
['op_FIB_d_c', 'f', 10.e-06, 'Fiber: core diameter [m]'],
['op_FIB_x', 'f', 0., 'Fiber: horizontal center position [m]'],
['op_FIB_y', 'f', 0., 'Fiber: vertical center position [m]'],
['op_S2_dz', 'f', 0., 'S2: offset of longitudinal position [m]'],
['op_S2_dx', 'f', 0.05e-03, 'slit S2: horizontal size [m]'],
['op_S2_dy', 'f', 0.2e-03, 'slit S2: vertical size [m]'], #1.0e-03, 'slit S2: vertical size [m]'],
['op_S2_x', 'f', 0., 'slit S2: horizontal center position [m]'],
['op_S2_y', 'f', 0., 'slit S2: vertical center position [m]'],
['op_BPM_dz', 'f', 0., 'BPM: offset of longitudinal position [m]'],
['op_CRL_dz', 'f', 0., 'CRL: offset of longitudinal position [m]'],
['op_CRL1_fpl', 's', 'v', 'CRL1: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL1_delta', 'f', 4.20756805e-06, 'CRL1: refractive index decrements of material'],
['op_CRL1_atnl', 'f', 7312.94e-06, 'CRL1: attenuation length of material [m]'],
['op_CRL1_apnf', 'f', 1.e-03, 'CRL1: geometrical aparture of 1D CRL in the plane where there is no focusing'],
['op_CRL1_apf', 'f', 2.4e-03, 'CRL1: geometrical aparture of 1D CRL in the focusing plane'],
['op_CRL1_rmin', 'f', 1.5e-03, 'CRL1: radius of curface curvature at the tip of parabola [m]'],
['op_CRL1_n', 'i', 1, 'CRL1: number of individual lenses'],
['op_CRL1_thck', 'f', 80.e-06, 'CRL1: wall thickness (at the tip of parabola) [m]'],
['op_CRL1_x', 'f', 0., 'CRL1: horizontal center position [m]'],
['op_CRL1_y', 'f', 0., 'CRL1: vertical center position [m]'],
['op_CRL2_fpl', 's', 'v', 'CRL2: focusing plane ("h" or "v" or "hv" or "")'],
['op_CRL2_delta', 'f', 4.20756805e-06, 'CRL2: refractive index decrements of material'],
['op_CRL2_atnl', 'f', 7312.94e-06, 'CRL2: attenuation length of material [m]'],
['op_CRL2_apnf', 'f', 1.e-03, 'CRL2: geometrical aparture of 1D CRL in the plane where there is no focusing'],
['op_CRL2_apf', 'f', 1.4e-03, 'CRL2: geometrical aparture of 1D CRL in the focusing plane'],
['op_CRL2_rmin', 'f', 0.5e-03, 'CRL2: radius of curface curvature at the tip of parabola [m]'],
['op_CRL2_n', 'i', 6, 'CRL2: number of individual lenses'],
['op_CRL2_thck', 'f', 80.e-06, 'CRL2: wall thickness (at the tip of parabola) [m]'],
['op_CRL2_x', 'f', 0., 'CRL2: horizontal center position [m]'],
['op_CRL2_y', 'f', 0., 'CRL2: vertical center position [m]'],
['op_KLA_dx', 'f', 1.0e-03, 'KL aperture: horizontal size [m]'], #1.4e-03, 'KL Aperture: horizontal size [m]'],
['op_KLA_dy', 'f', 0.1e-03, 'KL aperture: vertical size [m]'], #0.2e-03, 'KL Aperture: vertical size [m]'],
['op_KL_dz', 'f', 0., 'KL: offset of longitudinal position [m]'],
['op_KL_fx', 'f', 3.24479, 'KL: horizontal focal length [m]'],
['op_KL_fy', 'f', 1.e+23, 'KL: vertical focal length [m]'],
['op_KL_x', 'f', 0., 'KL: horizontal center position [m]'],
['op_KL_y', 'f', 0., 'KL: vertical center position [m]'],
['op_S3_dz', 'f', 0., 'S3: offset of longitudinal position [m]'],
['op_S3_dx', 'f', 10.e-06, 'slit S3: horizontal size [m]'],
['op_S3_dy', 'f', 10.e-06, 'slit S3: vertical size [m]'],
['op_S3_x', 'f', 0., 'slit S3: horizontal center position [m]'],
['op_S3_y', 'f', 0., 'slit S3: vertical center position [m]'],
['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'],
['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'],
['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'],
['op_SMP_x', 'f', 0., 'sample: horizontal center position [m]'],
['op_SMP_y', 'f', 0., 'sample: vertical center position [m]'],
['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'],
#to add options for different beamline cases, etc.
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11]
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HDM: propagation parameters'],
['op_S0_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> S1: propagation parameters'],
['op_HDM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HDM: propagation parameters'],
['op_HDM_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HDM -> S1: propagation parameters'],
['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'],
['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'],
['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1: propagation parameters'],
['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #2: propagation parameters'],
['op_DCME_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM Crystal #1&2: surface height error'],
['op_FIB_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'fiber: propagation parameters'],
['op_DCM_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> S2: propagation parameters'],
['op_S1_S2_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> S2: propagation parameters'],
['op_S2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S2: propagation parameters'],
['op_S2_BPM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> BPM: propagation parameters'],
['op_S2_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S2 -> BPM: propagation parameters'],
['op_BPM_CRL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift BPM -> CRL: propagation parameters'],
['op_CRL1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL1: propagation parameters'],
['op_CRL2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'CRL2: propagation parameters'],
['op_CRL_KL_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> KL: propagation parameters'],
['op_CRL_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift CRL -> sample: propagation parameters'],
['op_KLA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL aperture: propagation parameters'],
#['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 5.0, 1.0, 7.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'KL: propagation parameters'],
['op_KL_S3_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift KL -> S3: propagation parameters'],
#['op_S3_pp', 'f', [0, 0, 1, 0, 0, 0.3, 3.0, 0.3, 3.0, 0, 0, 0], 'slit S3: propagation parameters'],
['op_S3_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S3: propagation parameters'],
#['op_S3_SMP_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> Sample: propagation parameters'],
['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'],
['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'],
['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'],
#['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'],
['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
# NOTE(review): presumably merges SRW's standard/common option set into the
# parameter list -- confirm against srwl_uti_ext_options' definition.
varParam = srwl_uti_ext_options(varParam)
#*********************************Entry
if __name__ == "__main__":
    #---Parse options, defining Beamline elements and running calculations
    v = srwl_uti_parse_options(varParam)
    #---Add some constant "parameters" (not allowed to be varied) for the beamline
    #v.und_per = 0.02 #['und_per', 'f', 0.02, 'undulator period [m]'],
    #v.und_len = 3. #['und_len', 'f', 3., 'undulator length [m]'],
    #v.und_zc = 0. #['und_zc', 'f', 0., 'undulator center longitudinal position [m]'],
    #v.und_sy = -1 #['und_sy', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
    #---Setup optics only if Wavefront Propagation is required:
    # NOTE(review): v.ws / v.wm look like the single-/multi-electron wavefront
    # propagation flags -- confirm against the srwl_bl option definitions.
    op = set_optics(v) if(v.ws or v.wm) else None
    #---Run all requested calculations
    SRWLBeamline('Coherent Hard X-ray beamline').calc_all(v, op)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from django.utils.datastructures import SortedDict
from openstack_dashboard.api import neutron
neutronclient = neutron.neutronclient
class Vip(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron load balancer vip.

    Exposes the raw 'vip' dict returned by neutronclient through
    attribute access (behavior inherited from NeutronAPIDictWrapper).
    """
    def __init__(self, apiresource):
        # apiresource: the raw 'vip' dict from a neutron API response.
        super(Vip, self).__init__(apiresource)
class Pool(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron load balancer pool.

    Normalizes the resource so the 'provider' key is always present,
    since older neutron releases omit it.
    """
    def __init__(self, apiresource):
        # dict.setdefault only inserts the key when it is missing,
        # matching the previous explicit membership check.
        apiresource.setdefault('provider', None)
        super(Pool, self).__init__(apiresource)
class Member(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron load balancer member.

    Thin dict-to-attribute wrapper around the raw 'member' resource.
    """
    def __init__(self, apiresource):
        # apiresource: the raw 'member' dict from a neutron API response.
        super(Member, self).__init__(apiresource)
class PoolStats(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron load balancer pool stats.

    Thin dict-to-attribute wrapper around the pool statistics response.
    """
    def __init__(self, apiresource):
        # apiresource: the raw stats dict returned by retrieve_pool_stats.
        super(PoolStats, self).__init__(apiresource)
class PoolMonitor(neutron.NeutronAPIDictWrapper):
    """Wrapper for neutron load balancer pool health monitor.

    Thin dict-to-attribute wrapper around the raw 'health_monitor' resource.
    """
    def __init__(self, apiresource):
        # apiresource: the raw 'health_monitor' dict from a neutron response.
        super(PoolMonitor, self).__init__(apiresource)
def vip_create(request, **kwargs):
    """Create a vip for a specified pool.

    :param request: request context
    :param address: virtual IP address (optional)
    :param name: name for vip
    :param description: description for vip
    :param subnet_id: subnet_id for subnet of vip
    :param protocol_port: transport layer port number for vip
    :param protocol: protocol balanced by the vip
    :param pool_id: pool the vip fronts
    :param session_persistence: session persistence settings
    :param admin_state_up: admin state
    :param connection_limit: max connections (optional)
    :returns: Vip object
    """
    required = ('name', 'description', 'subnet_id', 'protocol_port',
                'protocol', 'pool_id', 'session_persistence',
                'admin_state_up')
    vip_params = dict((attr, kwargs[attr]) for attr in required)
    # Optional attributes are only sent when the caller supplied a
    # truthy value, matching the neutron API's expectations.
    for optional in ('connection_limit', 'address'):
        if kwargs.get(optional):
            vip_params[optional] = kwargs[optional]
    vip = neutronclient(request).create_vip({'vip': vip_params}).get('vip')
    return Vip(vip)
def vip_list(request, **kwargs):
    """List load balancer VIPs, each wrapped as a Vip object."""
    response = neutronclient(request).list_vips(**kwargs)
    return [Vip(item) for item in response.get('vips')]
def vip_get(request, vip_id):
    """Fetch a single VIP by id, wrapped as a Vip object."""
    return Vip(neutronclient(request).show_vip(vip_id).get('vip'))
def vip_update(request, vip_id, **kwargs):
    """Update an existing VIP; kwargs holds the attributes to change."""
    updated = neutronclient(request).update_vip(vip_id, kwargs)
    return Vip(updated.get('vip'))
def vip_delete(request, vip_id):
    """Delete the VIP with the given id."""
    neutronclient(request).delete_vip(vip_id)
def pool_create(request, **kwargs):
    """Create a pool for specified protocol.

    :param request: request context
    :param name: name for pool
    :param description: description for pool
    :param subnet_id: subnet_id for subnet of pool
    :param protocol: load balanced protocol
    :param lb_method: load balancer method
    :param admin_state_up: admin state (default on)
    :param provider: LBaaS provider driver
    :returns: Pool object
    """
    attrs = ('name', 'description', 'subnet_id', 'protocol',
             'lb_method', 'admin_state_up', 'provider')
    pool_params = dict((attr, kwargs[attr]) for attr in attrs)
    pool = neutronclient(request).create_pool(
        {'pool': pool_params}).get('pool')
    return Pool(pool)
def _get_vip_name(request, pool, vip_dict):
if pool['vip_id'] is not None:
try:
if vip_dict:
return vip_dict.get(pool['vip_id']).name
else:
return vip_get(request, pool['vip_id']).name
except Exception:
return pool['vip_id']
else:
return None
def pool_list(request, **kwargs):
    """List pools, with subnet CIDR and VIP name resolved for each."""
    return _pool_list(request, expand_subnet=True, expand_vip=True, **kwargs)
def _pool_list(request, expand_subnet=False, expand_vip=False, **kwargs):
    """List pools, optionally annotating each with subnet CIDR and VIP name.

    :param expand_subnet: when true, fill in pool['subnet_name'] (CIDR).
    :param expand_vip: when true, fill in pool['vip_name'].
    """
    pools = neutronclient(request).list_pools(**kwargs).get('pools')
    if expand_subnet:
        # Index subnets by id once so each pool lookup is O(1).
        subnet_dict = SortedDict(
            (subnet.id, subnet) for subnet in neutron.subnet_list(request))
        for pool in pools:
            pool['subnet_name'] = subnet_dict.get(pool['subnet_id']).cidr
    if expand_vip:
        vip_dict = SortedDict((vip.id, vip) for vip in vip_list(request))
        for pool in pools:
            pool['vip_name'] = _get_vip_name(request, pool, vip_dict)
    return [Pool(pool) for pool in pools]
def pool_get(request, pool_id):
    """Fetch one pool, with subnet CIDR and VIP name resolved."""
    return _pool_get(request, pool_id, expand_subnet=True, expand_vip=True)
def _pool_get(request, pool_id, expand_subnet=False, expand_vip=False):
    """Fetch a single pool, optionally resolving subnet CIDR and VIP name."""
    pool = neutronclient(request).show_pool(pool_id).get('pool')
    if expand_subnet:
        subnet = neutron.subnet_get(request, pool['subnet_id'])
        pool['subnet_name'] = subnet.cidr
    if expand_vip:
        # No prefetched VIP table here; force a direct vip_get lookup.
        pool['vip_name'] = _get_vip_name(request, pool, vip_dict=False)
    return Pool(pool)
def pool_update(request, pool_id, **kwargs):
    """Update an existing pool; kwargs holds the attributes to change."""
    updated = neutronclient(request).update_pool(pool_id, kwargs)
    return Pool(updated.get('pool'))
def pool_delete(request, pool):
    """Delete a pool; the *pool* argument holds the pool id."""
    neutronclient(request).delete_pool(pool)
# not linked to UI yet
def pool_stats(request, pool_id, **kwargs):
    """Retrieve traffic statistics for a pool."""
    raw_stats = neutronclient(request).retrieve_pool_stats(pool_id, **kwargs)
    return PoolStats(raw_stats)
def pool_health_monitor_create(request, **kwargs):
    """Create a health monitor.

    :param request: request context
    :param type: type of monitor (case-insensitive; normalized to upper)
    :param delay: delay of monitor
    :param timeout: timeout of monitor
    :param max_retries: max retries [1..10]
    :param http_method: http method (HTTP/HTTPS monitors only)
    :param url_path: url path (HTTP/HTTPS monitors only)
    :param expected_codes: http return code (HTTP/HTTPS monitors only)
    :param admin_state_up: admin state
    :returns: PoolMonitor object
    """
    monitor_type = kwargs['type'].upper()
    monitor = {'type': monitor_type,
               'delay': kwargs['delay'],
               'timeout': kwargs['timeout'],
               'max_retries': kwargs['max_retries'],
               'admin_state_up': kwargs['admin_state_up']}
    # HTTP(S) monitors carry extra probe attributes.
    if monitor_type in ('HTTP', 'HTTPS'):
        for attr in ('http_method', 'url_path', 'expected_codes'):
            monitor[attr] = kwargs[attr]
    mon = neutronclient(request).create_health_monitor(
        {'health_monitor': monitor}).get('health_monitor')
    return PoolMonitor(mon)
def pool_health_monitor_list(request, **kwargs):
    """List health monitors, each wrapped as a PoolMonitor object."""
    response = neutronclient(request).list_health_monitors(**kwargs)
    return [PoolMonitor(monitor)
            for monitor in response.get('health_monitors')]
def pool_health_monitor_get(request, monitor_id):
    """Fetch a single health monitor by id."""
    monitor = neutronclient(request).show_health_monitor(
        monitor_id).get('health_monitor')
    return PoolMonitor(monitor)
def pool_health_monitor_update(request, monitor_id, **kwargs):
    """Update a health monitor; kwargs holds the attributes to change.

    :returns: PoolMonitor wrapping the updated resource
    """
    # BUG FIX: the API response is an envelope {'health_monitor': {...}};
    # unwrap it the same way vip_update/pool_update (and show/create for
    # monitors) do, instead of wrapping the whole envelope in PoolMonitor.
    monitor = neutronclient(request).update_health_monitor(
        monitor_id, kwargs).get('health_monitor')
    return PoolMonitor(monitor)
def pool_health_monitor_delete(request, mon_id):
    """Delete the health monitor with the given id."""
    neutronclient(request).delete_health_monitor(mon_id)
def member_create(request, **kwargs):
    """Create a load balance member.

    :param request: request context
    :param pool_id: pool_id of pool for member
    :param address: IP address
    :param protocol_port: transport layer port number
    :param weight: weight for member (optional)
    :param admin_state_up: admin_state
    :returns: Member object
    """
    member_params = dict(
        (attr, kwargs[attr])
        for attr in ('pool_id', 'address', 'protocol_port', 'admin_state_up'))
    # Weight is optional; only send it when the caller supplied one.
    if kwargs.get('weight'):
        member_params['weight'] = kwargs['weight']
    member = neutronclient(request).create_member(
        {'member': member_params}).get('member')
    return Member(member)
def member_list(request, **kwargs):
    """List members, with each member's pool name resolved."""
    return _member_list(request, expand_pool=True, **kwargs)
def _member_list(request, expand_pool, **kwargs):
    """List members, optionally annotating each with its pool's name."""
    members = neutronclient(request).list_members(**kwargs).get('members')
    if expand_pool:
        # Index pools by id once so each member lookup is O(1).
        pool_dict = SortedDict((pool.id, pool) for pool in _pool_list(request))
        for member in members:
            member['pool_name'] = pool_dict.get(member['pool_id']).name
    return [Member(member) for member in members]
def member_get(request, member_id):
    """Fetch one member, with its pool name resolved."""
    return _member_get(request, member_id, expand_pool=True)
def _member_get(request, member_id, expand_pool):
    """Fetch a single member; optionally resolve its pool's name.

    :param expand_pool: when true, an extra pool lookup fills in
        member['pool_name'].
    """
    member = neutronclient(request).show_member(member_id).get('member')
    if expand_pool:
        member['pool_name'] = _pool_get(request, member['pool_id']).name
    return Member(member)
def member_update(request, member_id, **kwargs):
    """Update a member; kwargs holds the attributes to change.

    :returns: Member wrapping the updated resource
    """
    # BUG FIX: unwrap the {'member': {...}} response envelope, matching
    # vip_update/pool_update; previously the whole envelope was wrapped.
    member = neutronclient(request).update_member(member_id, kwargs)
    return Member(member.get('member'))
def member_delete(request, mem_id):
    """Delete the member with the given id."""
    neutronclient(request).delete_member(mem_id)
def pool_monitor_association_create(request, **kwargs):
    """Associate a health monitor with pool.

    :param request: request context
    :param monitor_id: id of monitor
    :param pool_id: id of pool
    """
    monitor_ref = {'health_monitor': {'id': kwargs['monitor_id']}}
    neutronclient(request).associate_health_monitor(
        kwargs['pool_id'], monitor_ref)
def pool_monitor_association_delete(request, **kwargs):
    """Disassociate a health monitor from pool.

    :param request: request context
    :param monitor_id: id of monitor
    :param pool_id: id of pool
    """
    client = neutronclient(request)
    client.disassociate_health_monitor(kwargs['pool_id'],
                                       kwargs['monitor_id'])
| |
#!/usr/bin/python
"""
See for a good intro into debuggers:
http://eli.thegreenplace.net/2011/01/23/how-debuggers-work-part-1
http://eli.thegreenplace.net/2011/01/27/how-debuggers-work-part-2-breakpoints
Or take a look at:
http://python-ptrace.readthedocs.org/en/latest/
"""
import os
import ctypes
from ppci.binutils.dbg.debug_driver import DebugDriver, DebugState
from ppci.arch.x86_64 import registers as x86_registers
libc = ctypes.CDLL("libc.so.6")
PTRACE_TRACEME = 0
PTRACE_PEEKTEXT = 1
PTRACE_PEEKDATA = 2
PTRACE_POKETEXT = 4
PTRACE_POKEDATA = 5
PTRACE_CONT = 7
PTRACE_SINGLESTEP = 9
PTRACE_GETREGS = 12
PTRACE_SETREGS = 13
# TODO: what is this calling convention??
libc.ptrace.restype = ctypes.c_ulong
libc.ptrace.argtypes = (
ctypes.c_ulong,
ctypes.c_ulong,
ctypes.c_void_p,
ctypes.c_void_p,
)
class UserRegsStruct(ctypes.Structure):
    """ctypes mirror of the Linux x86-64 ``struct user_regs_struct``.

    Field order and widths must match the kernel's layout exactly:
    PTRACE_GETREGS / PTRACE_SETREGS copy raw bytes into and out of this
    structure, so do not reorder, rename or resize fields.
    """
    _fields_ = [
        ("r15", ctypes.c_ulonglong),
        ("r14", ctypes.c_ulonglong),
        ("r13", ctypes.c_ulonglong),
        ("r12", ctypes.c_ulonglong),
        ("rbp", ctypes.c_ulonglong),
        ("rbx", ctypes.c_ulonglong),
        ("r11", ctypes.c_ulonglong),
        ("r10", ctypes.c_ulonglong),
        ("r9", ctypes.c_ulonglong),
        ("r8", ctypes.c_ulonglong),
        ("rax", ctypes.c_ulonglong),
        ("rcx", ctypes.c_ulonglong),
        ("rdx", ctypes.c_ulonglong),
        ("rsi", ctypes.c_ulonglong),
        ("rdi", ctypes.c_ulonglong),
        ("orig_rax", ctypes.c_ulonglong),
        ("rip", ctypes.c_ulonglong),
        ("cs", ctypes.c_ulonglong),
        ("eflags", ctypes.c_ulonglong),
        ("rsp", ctypes.c_ulonglong),
        ("ss", ctypes.c_ulonglong),
        ("fs_base", ctypes.c_ulonglong),
        ("gs_base", ctypes.c_ulonglong),
        ("ds", ctypes.c_ulonglong),
        ("es", ctypes.c_ulonglong),
        ("fs", ctypes.c_ulonglong),
        ("gs", ctypes.c_ulonglong),
    ]
# Idea: use decorators to assert state?
def stopped(f):
    """Decorator marking *f* as callable only in the STOPPED debug state.

    Currently a no-op marker that returns *f* unchanged. (The original
    defined an inner function ``f2`` that was never used; that dead code
    is removed.)
    """
    return f
def running(func):
    """Marker decorator: *func* is only valid while the target is RUNNING.

    No-op for now; returns the decorated function unchanged.
    """
    return func
class Linux64DebugDriver(DebugDriver):
    """ Implements a debugger backend """

    def __init__(self):
        super().__init__()
        # pid of the traced child process; None when no target is attached.
        self.pid = None
        # Current DebugState (STOPPED / RUNNING).
        self.status = DebugState.STOPPED
        # Maps breakpoint address -> original byte overwritten by 0xcc.
        self.breakpoint_backup = {}

    def go_for_it(self, argz):
        """Launch *argz* as a ptraced child, stopped at its first instruction."""
        self.pid = fork_spawn_stop(argz)
        self.status = DebugState.STOPPED

    # Api:
    def get_status(self):
        """Return the current DebugState."""
        return self.status

    @stopped
    def run(self):
        """Resume the target and block until it stops again (breakpoint)."""
        rip = self.get_pc()
        if rip in self.breakpoint_backup:
            # We are at a breakpoint, step over it first!
            self.step_over_bp()
        libc.ptrace(PTRACE_CONT, self.pid, 0, 0)
        self.events.on_start()
        self.status = DebugState.RUNNING
        # TODO: for now, block here??
        print("running")
        _, status = os.wait()
        self.status = DebugState.STOPPED
        print("stopped at breakpoint!")
        rip = self.get_pc()
        # The int3 trap leaves rip one past the 0xcc byte; rewind so rip
        # points back at the breakpointed instruction.
        self.dec_pc()
        print(self.read_mem(rip - 1, 3))
        self.events.on_stop()

    def dec_pc(self):
        """ Decrease pc by 1 """
        regs = self.get_registers([x86_registers.rip])
        regs[x86_registers.rip] -= 1
        self.set_registers(regs)

    def step_over_bp(self):
        """ Step over a 0xcc breakpoint """
        rip = self.get_pc()
        cc = self.read_mem(rip, 1)
        old_code = self.breakpoint_backup[rip]
        # Temporarily restore the original byte, single-step it, then
        # re-arm the breakpoint.
        self.write_mem(rip, old_code)
        self.step()
        self.write_mem(rip, cc)

    @stopped
    def step(self):
        """Execute a single instruction in the target (PTRACE_SINGLESTEP)."""
        self.events.on_start()
        self.status = DebugState.RUNNING
        libc.ptrace(PTRACE_SINGLESTEP, self.pid, 0, 0)
        _, status = os.wait()
        self.status = DebugState.STOPPED
        if not wifstopped(status):
            # Child exited (or was killed) instead of stopping.
            self.pid = None
        self.events.on_stop()

    @running
    def stop(self):
        """Interrupt a running target -- not implemented yet."""
        print("TODO!")
        # raise NotImplementedError()

    def set_breakpoint(self, address):
        """Arm a software breakpoint by planting an int3 (0xcc) byte."""
        new_code = bytes([0xCC])
        old_code = self.read_mem(address, 1)
        self.write_mem(address, new_code)
        # Keep only the first backup so re-setting the same breakpoint
        # never records the 0xcc byte as "original" code.
        if address not in self.breakpoint_backup:
            self.breakpoint_backup[address] = old_code

    def clear_breakpoint(self, address):
        """Restore the original byte at a previously set breakpoint."""
        old_code = self.breakpoint_backup[address]
        self.write_mem(address, old_code)

    # Registers
    @stopped
    def get_registers(self, registers):
        """Read the given register descriptors from the target.

        Returns a dict mapping register descriptor -> integer value;
        registers whose names are not in UserRegsStruct are skipped.
        """
        assert self.status == DebugState.STOPPED
        regs = UserRegsStruct()
        libc.ptrace(PTRACE_GETREGS, self.pid, 0, ctypes.byref(regs))
        res = {}
        for register in registers:
            if hasattr(regs, register.name):
                res[register] = getattr(regs, register.name)
        return res

    def set_registers(self, new_regs):
        """Write register values: read the full set, patch, write back.

        NOTE(review): dec_pc passes a dict keyed by register *objects*
        (as produced by get_registers), but hasattr/setattr here expect
        string names -- this looks like it needs ``reg_name.name``;
        confirm against ppci's register descriptor type.
        """
        regs = UserRegsStruct()
        libc.ptrace(PTRACE_GETREGS, self.pid, 0, ctypes.byref(regs))
        for reg_name, reg_value in new_regs.items():
            if hasattr(regs, reg_name):
                setattr(regs, reg_name, reg_value)
        libc.ptrace(PTRACE_SETREGS, self.pid, 0, ctypes.byref(regs))

    # memory:
    def read_mem(self, address, size):
        """Read *size* bytes from the target at *address*, byte at a time."""
        res = bytearray()
        for offset in range(size):
            res.append(self.read_byte(address + offset))
        return bytes(res)

    def write_mem(self, address, data):
        """Write the bytes *data* into the target starting at *address*."""
        for offset, b in enumerate(data):
            self.write_byte(address + offset, b)

    def read_byte(self, address):
        """ Convenience wrapper """
        w = self.read_word(address)
        byte = w & 0xFF
        return byte

    def write_byte(self, address, byte):
        """ Convenience function to write a single byte """
        # Read-modify-write: patch only the low byte of the word.
        w = self.read_word(address)
        w = (w & 0xFFFFFFFFFFFFFF00) | byte
        self.write_word(address, w)

    def read_word(self, address):
        """Read one machine word via PTRACE_PEEKDATA.

        NOTE(review): PEEKDATA signals errors by returning -1 with errno
        set, which is indistinguishable from a legitimate all-ones word
        here; no error check is performed.
        """
        res = libc.ptrace(PTRACE_PEEKDATA, self.pid, address, 0)
        return res

    def write_word(self, address, w):
        """Write one machine word via PTRACE_POKEDATA.

        NOTE(review): libc.ptrace.restype is c_ulong, so a failing call
        returns 0xffff...ffff, never -1 -- this assert may be ineffective;
        confirm.
        """
        res = libc.ptrace(PTRACE_POKEDATA, self.pid, address, w)
        assert res != -1

    # Disasm:
    def get_pc(self):
        """Return the current instruction pointer (rip)."""
        v = self.get_registers([x86_registers.rip])
        return v[x86_registers.rip]

    def get_fp(self):
        """Return the current frame pointer (rbp)."""
        v = self.get_registers([x86_registers.rbp])
        return v[x86_registers.rbp]
def wifstopped(status):
    """Pure-Python WIFSTOPPED: true when the wait() status low byte is 0x7f."""
    low_byte = status & 0xFF
    return low_byte == 0x7F
def fork_spawn_stop(argz):
    """Spawn ``argz`` as a traced child and return its pid, stopped at entry.

    The child requests tracing via PTRACE_TRACEME before exec'ing, so the
    kernel stops it (SIGTRAP) at the exec boundary; the parent's wait()
    returns once the child has stopped there.

    :param argz: argv list; argz[0] is the executable path.
    :returns: pid of the stopped child (parent side only).
    """
    pid = os.fork()
    if pid == 0:  # Child process
        # Allow the child process to be ptraced
        libc.ptrace(PTRACE_TRACEME, 0, 0, 0)
        # Launch the intended program:
        os.execv(argz[0], argz)
        # This point will never be reached!
        assert False
    else:
        # Parent: block until the child stops at the exec trap.
        _, status = os.wait()
        return pid
| |
#!/usr/bin/env python
#
# FSF Client for sending information and generating a report
#
# Jason Batchelor
# Emerson Corporation
# 02/09/2016
"""
Copyright 2016 Emerson Electric Co.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import socket
import argparse
import struct
import time
import hashlib
import random
from conf import config
from datetime import datetime as dt
class FSFClient:
"""FSFClient is the class that you call when you want to send a file to FSF"""
def __init__(self, fullpath, samplename, delete, source, archive, suppress_report, full, sampleobject):
"""
:param fullpath: type(str) path to the file object being submitted. Used for submission metadata.
This gets added to the scan report
:param samplename: type(str) the filename of the file object being submitted. Used for submission metadata.
This gets added to the scan report
:param delete: type(bool) Specifies whether the client will delete the submission file object after it's sent.
:param source: type(dict) any additional metadata about the file object being submitted.
This gets added to the scan report
:param archive: type(string). If an archive condition is met, the file object submitted will be saved
on the FSF Server. Options:
none: Don't Archive, Ever.
file-on-alert: Only archive if an alert condition specified in the dispositioner is met.
all-on-alert: Archive the submission object and all its sub-objects if an alert condition specified in the
dispositioner is met.
all-the-files: Archive every file submitted.
all-the-things: Archive every file submitted and all its sub-objects.
:param suppress_report: Submit the file and close the socket, don't wait for any response content like the
scan report.
:param full: Return the scan report AND all sub-objects of the submitted file object.
:param sampleobject: a buffer containing the file that you're submitting.
"""
self.fullpath = fullpath
self.samplename = samplename
self.delete = delete
self.source = source
self.archive = archive
self.suppress_report = suppress_report
self.full = full
self.sampleobject = sampleobject
# will hold host after verifying connection to server
self.host = '' # todo set this to a default value
self.port = config.SERVER_CONFIG['PORT']
self.logfile = config.CLIENT_CONFIG['LOG_FILE']
self.server_list = config.SERVER_CONFIG['IP_ADDRESS']
archive_options = ['none', 'file-on-alert', 'all-on-alert', 'all-the-files', 'all-the-things']
if args.archive not in archive_options:
error = '''%s Please specify a valid archive option: \'none\', \'file-on-alert\', \'all-on-alert\',
\'all-the-files\' or \'all-the-things\'.\n''' % dt.now()
self.issue_error(error)
sys.exit(1)
def initiate_submission(self):
"""
Test connect to a FSF server in your FSF server pool (if configured)
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
random.shuffle(self.server_list)
attempts = 0
for server in self.server_list:
success = 1
try:
sock.connect((server, self.port))
except:
warning = '%s There was a problem connecting to %s on port %s. Trying another server. <WARN>\n' % \
(dt.now(), server, self.port)
self.issue_error(warning)
success = 0
attempts += 1
if success:
self.host = server
self.process_files()
break
elif attempts == len(self.server_list):
e = sys.exc_info()[0]
error = '%s There are not servers available to send files too. Error: %s\n' % (dt.now(), e)
self.issue_error(error)
def process_files(self):
"""
Send files to the FSF Server for processing; removes or retains the submitted file based on self.delete,
and either sends the connection socket to the FSF Server to FSFClient.process_results or closes the socket based
on self.suppress_report
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
msg = '%sFSF_RPC%sFSF_RPC%sFSF_RPC%sFSF_RPC%sFSF_RPC%s' % \
(self.samplename, self.source, self.archive, self.suppress_report, self.full, self.sampleobject)
buffer = struct.pack('>I', len(msg)) + 'FSF_RPC' + msg
try:
sock.connect((self.host, self.port))
sock.sendall(buffer)
except:
e = sys.exc_info()[0]
error = '%s There was a problem sending file %s to %s on port %s. Error: %s\n' % \
(dt.now(), self.samplename, self.host, self.port, e)
self.issue_error(error)
finally:
if self.delete:
os.remove(self.fullpath)
if not self.suppress_report:
self.process_results(sock)
sock.close()
def process_results(self, sock):
"""
Processes results sent back to the client from the FSF Server,
Required method if you want the scan report or sub-objects
:param sock: the socket
"""
try:
raw_msg_len = sock.recv(4)
msg_len = struct.unpack('>I', raw_msg_len)[0]
data = ''
while len(data) < msg_len:
recv_buff = sock.recv(msg_len - len(data))
data += recv_buff
print data
# Does the user want all sub objects?
if self.full:
# Generate dirname by calculating epoch time and hash of results
dirname = 'fsf_dump_%s_%s' % (int(time.time()), hashlib.md5(data).hexdigest())
self.dump_subobjects(sock, dirname)
except:
e = sys.exc_info()[0]
error = '%s There was a problem getting data for %s from %s on port %s. Error: %s' % \
(dt.now(), self.samplename, self.host, self.port, e)
self.issue_error(error)
def dump_subobjects(self, sock, dirname):
"""
Dumps all subobjects returned by the scanner server into a new directory
:param sock: The Socket
:param dirname: the new directory whe
"""
sub_status = sock.recv(4)
if sub_status == 'Null':
print 'No subobjects were returned from scanner for %s.' % self.samplename
return
os.mkdir(dirname)
while self.full:
raw_sub_count = sock.recv(4)
sub_count = struct.unpack('>I', raw_sub_count)[0]
raw_msg_len = sock.recv(4)
msg_len = struct.unpack('>I', raw_msg_len)[0]
data = ''
while len(data) < msg_len:
recv_buff = sock.recv(msg_len - len(data))
data += recv_buff
fname = hashlib.md5(data).hexdigest()
with open('%s/%s' % (dirname, fname), 'w') as f:
f.write(data)
f.close #todo, this is redundant
if sub_count == 0:
self.full = False
print 'Sub objects of %s successfully written to: %s' % (self.samplename, dirname)
def issue_error(self, error):
"""
Handles local FSFClient errors by logging to a file specified in the configs or printing to stdout
:param error: The error message
"""
if self.suppress_report:
with open(self.logfile, 'a') as f:
f.write(error)
f.close()
else:
print error
if __name__ == '__main__':
    # Build the CLI; detailed behavior of each flag is described in its
    # help text below.
    parser = argparse.ArgumentParser(prog='fsf_client',
        description="""Uploads files to scanner server and returns the results to the user
    if desired. Results will always be written to a server side log file. Default options for each flag are designed to
    accommodate easy analyst interaction. Adjustments can be made to accommodate larger operations.
    Read the documentation for more details!""")
    parser.add_argument('file', nargs='*', type=argparse.FileType('r'), help='Full path to file(s) to be processed.')
    parser.add_argument('--delete', default=False, action='store_true', help="""Remove file from client after sending
    to the FSF server. Data can be archived later on server depending on selected options.""")
    parser.add_argument('--source', nargs='?', type=str, default='Analyst', help="""Specify the source of the input.
    Useful when scaling up to larger operations or supporting multiple input sources, such as; integrating with a
    sensor grid or other network defense solutions. Defaults to \'Analyst\' as submission source.""")
    parser.add_argument('--archive', nargs='?', type=str, default='none', help="""Specify the archive option to use.
    The most common option is \'none\' which will tell the server not to archive for this submission (default).
    \'file-on-alert\' will archive the file only if the alert flag is set. \'all-on-alert\' will archive the file and
    all sub objects if the alert flag is set. \'all-the-files\' will archive all the files sent to the
    scanner regardless of the alert flag. \'all-the-things\' will archive the file and all
    sub objects regardless of the alert flag.""")
    parser.add_argument('--suppress-report', default=False, action='store_true', help="""Don\'t return a JSON report
    back to the client and log client-side errors to the locally configured log directory. Choosing this will log
    scan results server-side only. Needed for automated scanning use cases when sending large amount
    of files for bulk collection. Set to false by default.""")
    parser.add_argument('--full', default=False, action='store_true', help="""Dump all sub objects of submitted file to
    current directory of the client. Format or directory name is \'fsf_dump_[epoch time]_[md5 hash of scan results]\'.
    Only supported when suppress-report option is false (default).""")
    # No arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    try:
        args = parser.parse_args()
    except IOError:
        # argparse.FileType raises IOError when a positional file is missing.
        e = sys.exc_info()[1]
        print 'The file provided could not be found. Error: %s' % e
        sys.exit(1)
    if len(args.file) == 0:
        print 'A file to scan needs to be provided!'
    # Submit each file independently; one failure does not stop the rest.
    for f in args.file:
        filename = os.path.basename(f.name)
        sampleobject = f.read()
        fsf = FSFClient(f.name, filename, args.delete, args.source, args.archive, args.suppress_report,
                        args.full, sampleobject)
        fsf.initiate_submission()
| |
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import tensorflow as tf
from tensorpack.models import Conv2D, FixedUnPooling, MaxPooling, layer_register
from tensorpack.tfutils.argscope import argscope
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.tower import get_current_tower_context
from tensorpack.utils.argtools import memoized
from config import config as cfg
from ..utils.box_ops import area as tf_area
from .backbone import GroupNorm
from .model_box import roi_align
from .model_rpn import generate_rpn_proposals, rpn_losses, get_all_anchors
@layer_register(log_shape=True)
def fpn_model(features):
    """
    Build the Feature Pyramid Network head on top of backbone features.

    Args:
        features ([tf.Tensor]): ResNet features c2-c5
    Returns:
        [tf.Tensor]: FPN features p2-p6
    """
    assert len(features) == 4, features
    num_channel = cfg.FPN.NUM_CHANNEL
    use_gn = cfg.FPN.NORM == 'GN'

    def upsample2x(name, x):
        # Prefer the TF2-style resize op when available; fall back to
        # FixedUnPooling on older TF builds (the AttributeError path).
        try:
            resize = tf.compat.v2.image.resize_images
            with tf.name_scope(name):
                # Input is NCHW: move channels last for resize, then back.
                shp2d = tf.shape(x)[2:]
                x = tf.transpose(x, [0, 2, 3, 1])
                x = resize(x, shp2d * 2, 'nearest')
                x = tf.transpose(x, [0, 3, 1, 2])
                return x
        except AttributeError:
            return FixedUnPooling(
                name,
                x,
                2,
                unpool_mat=np.ones((2, 2), dtype='float32'),
                data_format='channels_first')

    with argscope(
            Conv2D,
            data_format='channels_first',
            activation=tf.identity,
            use_bias=True,
            kernel_initializer=tf.variance_scaling_initializer(scale=1.)):
        # 1x1 lateral convs project each backbone stage to a common
        # channel count.
        lat_2345 = [
            Conv2D('lateral_1x1_c{}'.format(i + 2), c, num_channel, 1)
            for i, c in enumerate(features)
        ]
        if use_gn:
            lat_2345 = [
                GroupNorm('gn_c{}'.format(i + 2), c) for i, c in enumerate(lat_2345)
            ]
        # Top-down pathway: start from the coarsest map and add the 2x
        # upsampled coarser map into each finer lateral map.
        lat_sum_5432 = []
        for idx, lat in enumerate(lat_2345[::-1]):
            if idx == 0:
                lat_sum_5432.append(lat)
            else:
                lat = lat + upsample2x('upsample_lat{}'.format(6 - idx),
                                       lat_sum_5432[-1])
                lat_sum_5432.append(lat)
        # 3x3 post-hoc convs smooth the merged maps.
        p2345 = [
            Conv2D('posthoc_3x3_p{}'.format(i + 2), c, num_channel, 3)
            for i, c in enumerate(lat_sum_5432[::-1])
        ]
        if use_gn:
            p2345 = [
                GroupNorm('gn_p{}'.format(i + 2), c) for i, c in enumerate(p2345)
            ]
        # p6: stride-2 subsampling of p5 (1x1 pool, stride 2).
        p6 = MaxPooling(
            'maxpool_p6',
            p2345[-1],
            pool_size=1,
            strides=2,
            data_format='channels_first',
            padding='VALID')
        return p2345 + [p6]
@under_name_scope()
def fpn_map_rois_to_levels(boxes):
    """
    Assign each box to an FPN level (2~5) based on its scale.

    Args:
        boxes (nx4):
    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices
            of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.
        Be careful that the returned tensor could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.cast(
        tf.floor(4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))),
        tf.int32)
    # RoI levels are clamped to 2~5 (level 6 is never used for RoIs).
    level_masks = [
        level <= 2,
        tf.equal(level, 3),  # == is not supported
        tf.equal(level, 4),
        level >= 5,
    ]
    level_ids = []
    num_in_levels = []
    for offset, mask in enumerate(level_masks):
        ids = tf.reshape(tf.where(mask), [-1],
                         name='roi_level{}_id'.format(offset + 2))
        level_ids.append(ids)
        num_in_levels.append(
            tf.size(ids, name='num_roi_level{}'.format(offset + 2)))
    add_moving_summary(*num_in_levels)
    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
@under_name_scope()
def multilevel_roi_align(features, rcnn_boxes, resolution):
    """
    RoIAlign each box on the FPN level it was assigned to.

    Args:
        features ([tf.Tensor]): 4 FPN feature level 2-5
        rcnn_boxes (tf.Tensor): nx4 boxes
        resolution (int): output spatial resolution
    Returns:
        NxC x res x res
    """
    assert len(features) == 4, features
    # Distribute the boxes over the four levels.
    level_ids, level_boxes = fpn_map_rois_to_levels(rcnn_boxes)
    all_rois = []
    # Crop patches from the feature map that corresponds to each level.
    for lvl, (boxes, featuremap) in enumerate(zip(level_boxes, features)):
        with tf.name_scope('roi_level{}'.format(lvl + 2)):
            boxes_on_featuremap = boxes * (1.0 / cfg.FPN.ANCHOR_STRIDES[lvl])
            all_rois.append(roi_align(featuremap, boxes_on_featuremap, resolution))
    # this can fail if using TF<=1.8 with MKL build
    all_rois = tf.concat(all_rois, axis=0)  # NCHW
    # The concat groups boxes by level; invert that permutation so the
    # outputs line up with the original sample order.
    level_id_perm = tf.concat(level_ids, axis=0)  # A permutation of 1~N
    level_id_invert_perm = tf.invert_permutation(level_id_perm)
    return tf.gather(all_rois, level_id_invert_perm, name='output')
def multilevel_rpn_losses(multilevel_anchors, multilevel_label_logits,
                          multilevel_box_logits):
    """
    Sum the per-level RPN losses over all FPN levels.

    Args:
        multilevel_anchors: #lvl RPNAnchors
        multilevel_label_logits: #lvl tensors of shape HxWxA
        multilevel_box_logits: #lvl tensors of shape HxWxAx4
    Returns:
        label_loss, box_loss
    """
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert num_lvl == len(multilevel_anchors)
    assert num_lvl == len(multilevel_label_logits)
    assert num_lvl == len(multilevel_box_logits)

    label_losses = []
    box_losses = []
    with tf.name_scope('rpn_losses'):
        for lvl, anchors in enumerate(multilevel_anchors):
            label_loss, box_loss = rpn_losses(
                anchors.gt_labels,
                anchors.encoded_gt_boxes(),
                multilevel_label_logits[lvl],
                multilevel_box_logits[lvl],
                name_scope='level{}'.format(lvl + 2))
            label_losses.append(label_loss)
            box_losses.append(box_loss)
        total_label_loss = tf.add_n(label_losses, name='label_loss')
        total_box_loss = tf.add_n(box_losses, name='box_loss')
        add_moving_summary(total_label_loss, total_box_loss)
    return [total_label_loss, total_box_loss]
@under_name_scope()
def generate_fpn_proposals(multilevel_pred_boxes, multilevel_label_logits,
                           image_shape2d):
    """
    Turn per-level RPN predictions into one set of proposal boxes.

    Args:
        multilevel_pred_boxes: #lvl HxWxAx4 boxes
        multilevel_label_logits: #lvl tensors of shape HxWxA
        image_shape2d: 2D image shape used to clip the proposals
    Returns:
        boxes: kx4 float
        scores: k logits
    """
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(multilevel_pred_boxes) == num_lvl
    assert len(multilevel_label_logits) == num_lvl
    training = get_current_tower_context().is_training
    all_boxes = []
    all_scores = []
    if cfg.FPN.PROPOSAL_MODE == 'Level':
        # Per-level mode: run proposal generation + NMS independently on each
        # level, then keep the global top-k over the union.
        fpn_nms_topk = cfg.RPN.TRAIN_PER_LEVEL_NMS_TOPK if training else cfg.RPN.TEST_PER_LEVEL_NMS_TOPK
        for lvl in range(num_lvl):
            with tf.name_scope('Lvl{}'.format(lvl + 2)):
                pred_boxes_decoded = multilevel_pred_boxes[lvl]
                proposal_boxes, proposal_scores = generate_rpn_proposals(
                    tf.reshape(pred_boxes_decoded, [-1, 4]),
                    tf.reshape(multilevel_label_logits[lvl], [-1]), image_shape2d,
                    fpn_nms_topk)
                all_boxes.append(proposal_boxes)
                all_scores.append(proposal_scores)
        proposal_boxes = tf.concat(all_boxes, axis=0)  # nx4
        proposal_scores = tf.concat(all_scores, axis=0)  # n
        # Here we are different from Detectron.
        # Detectron picks top-k within the batch, rather than within an image.
        # However we do not have a batch.
        proposal_topk = tf.minimum(tf.size(proposal_scores), fpn_nms_topk)
        proposal_scores, topk_indices = tf.nn.top_k(
            proposal_scores, k=proposal_topk, sorted=False)
        proposal_boxes = tf.gather(
            proposal_boxes, topk_indices, name='all_proposals')
    else:
        # Otherwise: pool the raw predictions from all levels and run a
        # single proposal-generation / NMS pass over the union.
        for lvl in range(num_lvl):
            with tf.name_scope('Lvl{}'.format(lvl + 2)):
                pred_boxes_decoded = multilevel_pred_boxes[lvl]
                all_boxes.append(tf.reshape(pred_boxes_decoded, [-1, 4]))
                all_scores.append(tf.reshape(multilevel_label_logits[lvl], [-1]))
        all_boxes = tf.concat(all_boxes, axis=0)
        all_scores = tf.concat(all_scores, axis=0)
        proposal_boxes, proposal_scores = generate_rpn_proposals(
            all_boxes, all_scores, image_shape2d,
            cfg.RPN.TRAIN_PRE_NMS_TOPK if training else cfg.RPN.TEST_PRE_NMS_TOPK,
            cfg.RPN.TRAIN_POST_NMS_TOPK if training else cfg.RPN.TEST_POST_NMS_TOPK)
    tf.sigmoid(proposal_scores, name='probs')  # for visualization
    # Proposals are treated as constants by downstream heads: no gradient
    # flows back into the RPN through them.
    return tf.stop_gradient(proposal_boxes, name='boxes'), \
        tf.stop_gradient(proposal_scores, name='scores')
@memoized
def get_all_anchors_fpn(*, strides, sizes, ratios, max_size):
    """
    Compute the anchor grid for every FPN level.

    Returns:
        [anchors]: each anchors is a SxSx NUM_ANCHOR_RATIOS x4 array.
    """
    assert len(strides) == len(sizes)
    # One single-size anchor grid per (stride, size) pair, i.e. per level.
    return [
        get_all_anchors(stride=stride, sizes=(size,), ratios=ratios,
                        max_size=max_size)
        for stride, size in zip(strides, sizes)
    ]
| |
# TODO: Test robust skewness
# TODO: Test robust kurtosis
import numpy as np
import pandas as pd
from numpy.testing import (assert_almost_equal, assert_raises, TestCase)
from statsmodels.stats.stattools import (omni_normtest, jarque_bera,
durbin_watson, _medcouple_1d, medcouple,
robust_kurtosis, robust_skewness)
from statsmodels.stats.adnorm import normal_ad
# A fixed random sample (rounded to 4 decimals) used as the common input
# series for the R-benchmark comparisons in the tests below.
x = np.array([-0.1184, -1.3403, 0.0063, -0.612, -0.3869, -0.2313, -2.8485,
              -0.2167, 0.4153, 1.8492, -0.3706, 0.9726, -0.1501, -0.0337,
              -1.4423, 1.2489, 0.9182, -0.2331, -0.6182, 0.183])
def test_durbin_watson():
    """Check durbin_watson against R car::durbinWatsonTest benchmarks."""
    # benchmark values from R car::durbinWatsonTest(x)
    # library("car")
    # > durbinWatsonTest(x)                   [1] 1.95298958377419
    # > durbinWatsonTest(x**2)                [1] 1.848802400319998
    # > durbinWatsonTest(x[2:20]+0.5*x[1:19]) [1] 1.09897993228779
    # > durbinWatsonTest(x[2:20]+0.8*x[1:19]) [1] 0.937241876707273
    # > durbinWatsonTest(x[2:20]+0.9*x[1:19]) [1] 0.921488912587806
    benchmarks = [
        (x, 1.95298958377419),
        (x**2, 1.848802400319998),
        (x[1:] + 0.5 * x[:-1], 1.09897993228779),
        (x[1:] + 0.8 * x[:-1], 0.937241876707273),
        (x[1:] + 0.9 * x[:-1], 0.921488912587806),
    ]
    for series, st_R in benchmarks:
        assert_almost_equal(durbin_watson(series), st_R, 14)
def test_omni_normtest():
    """Compare omni_normtest and scipy skew/kurtosis tests to R fBasics."""
    from scipy import stats

    # rows: statistic, p-value; columns: omnibus, skewness, kurtosis
    st_pv_R = np.array(
        [[3.994138321207883, -1.129304302161460, 1.648881473704978],
         [0.1357325110375005, 0.2587694866795507, 0.0991719192710234]])
    assert_almost_equal(omni_normtest(x), st_pv_R[:, 0], 14)
    assert_almost_equal(stats.skewtest(x), st_pv_R[:, 1], 14)
    assert_almost_equal(stats.kurtosistest(x), st_pv_R[:, 2], 11)

    st_pv_R = np.array(
        [[34.523210399523926, 4.429509162503833, 3.860396220444025],
         [3.186985686465249e-08, 9.444780064482572e-06, 1.132033129378485e-04]])
    x2 = x**2
    #TODO: fix precision in these test with relative tolerance
    assert_almost_equal(omni_normtest(x2), st_pv_R[:, 0], 12)
    assert_almost_equal(stats.skewtest(x2), st_pv_R[:, 1], 12)
    assert_almost_equal(stats.kurtosistest(x2), st_pv_R[:, 2], 12)
def test_omni_normtest_axis():
    """omni_normtest must honour the axis argument."""
    sample = np.random.randn(25, 3)
    expected = omni_normtest(sample)
    # axis=0 on the sample and axis=1 on its transpose must both agree
    # with the default-axis result.
    assert_almost_equal(omni_normtest(sample, axis=0), expected, decimal=13)
    assert_almost_equal(omni_normtest(sample.T, axis=1), expected, decimal=13)
def test_jarque_bera():
    """Compare jarque_bera statistic and p-value to R fBasics benchmarks."""
    benchmarks = [
        (x, np.array([1.9662677226861689, 0.3741367669648314]), 14),
        (x**2, np.array([78.329987305556, 0.000000000000]), 13),
        (np.log(x**2), np.array([5.7135750796706670, 0.0574530296971343]), 14),
        (np.exp(-x**2), np.array([2.6489315748495761, 0.2659449923067881]), 14),
    ]
    for series, st_pv_R, decimal in benchmarks:
        # jarque_bera returns extra values beyond (statistic, p-value)
        jb = jarque_bera(series)[:2]
        assert_almost_equal(jb, st_pv_R, decimal)
def test_shapiro():
    """Compare scipy.stats.shapiro results against R fBasics benchmarks."""
    #testing scipy.stats
    from scipy.stats import shapiro

    st_pv_R = np.array([0.939984787255526, 0.239621898000460])
    assert_almost_equal(shapiro(x), st_pv_R, 4)
    #st is ok -7.15e-06, pval agrees at -3.05e-10
    st_pv_R = np.array([5.799574255943298e-01, 1.838456834681376e-06 * 1e4])
    assert_almost_equal(shapiro(x**2) * np.array([1, 1e4]), st_pv_R, 5)
    st_pv_R = np.array([0.91730442643165588, 0.08793704167882448])
    assert_almost_equal(shapiro(np.log(x**2)), st_pv_R, 5)
    #diff is [ 9.38773155e-07, 5.48221246e-08]
    st_pv_R = np.array([0.818361863493919373, 0.001644620895206969])
    assert_almost_equal(shapiro(np.exp(-x**2)), st_pv_R, 5)
def test_adnorm():
    """Compare normal_ad (Anderson-Darling) against R fBasics benchmarks."""
    benchmarks = [
        (x, np.array([0.5867235358882148, 0.1115380760041617]), 12),
        (x**2, np.array([2.976266267594575e+00, 8.753003709960645e-08]), 11),
        (np.log(x**2), np.array([0.4892557856308528, 0.1968040759316307]), 12),
        (np.exp(-x**2),
         np.array([1.4599014654282669312, 0.0006380009232897535]), 12),
    ]
    st_pv = []
    for series, st_pv_R, decimal in benchmarks:
        assert_almost_equal(normal_ad(series), st_pv_R, decimal)
        st_pv.append(st_pv_R)
    # vectorized call over all four series at once must reproduce the
    # individual results
    ad = normal_ad(np.column_stack((x, x**2, np.log(x**2), np.exp(-x**2))).T,
                   axis=1)
    assert_almost_equal(ad, np.column_stack(st_pv), 11)
def test_durbin_watson_pandas():
    """durbin_watson must give identical answers for ndarray and Series."""
    values = np.random.randn(50)
    assert_almost_equal(durbin_watson(values),
                        durbin_watson(pd.Series(values)), decimal=13)
class TestStattools(TestCase):
    """Tests for medcouple, durbin_watson and the robust skewness/kurtosis
    estimators, using a shared random normal sample as fixture."""

    @classmethod
    def setup_class(cls):
        # Build the reference sample and hand-computed expected values for
        # the four robust kurtosis estimators returned by robust_kurtosis().
        x = np.random.standard_normal(1000)
        e1, e2, e3, e4, e5, e6, e7 = np.percentile(x, (12.5, 25.0, 37.5, 50.0, 62.5, 75.0, 87.5))
        c05, c50, c95 = np.percentile(x, (5.0, 50.0, 95.0))
        f025, f25, f75, f975 = np.percentile(x, (2.5, 25.0, 75.0, 97.5))
        mean = np.mean
        # kr1: classical (moment-based) excess kurtosis
        kr1 = mean(((x - mean(x)) / np.std(x))**4.0) - 3.0
        # kr2: octile-based estimator, centred on its value under normality
        kr2 = ((e7 - e5) + (e3 - e1)) / (e6 - e2) - 1.2330951154852172
        # kr3: tail-mean-based estimator, centred likewise
        kr3 = (mean(x[x > c95]) - mean(x[x < c05])) / (mean(x[x > c50]) - mean(x[x < c50])) - 2.5852271228708048
        # kr4: quantile-spread estimator, centred likewise
        kr4 = (f975 - f025) / (f75 - f25) - 2.9058469516701639
        cls.kurtosis_x = x
        cls.expected_kurtosis = np.array([kr1, kr2, kr3, kr4])
        # Centring constants used by robust_kurtosis(excess=False)
        cls.kurtosis_constants = np.array([3.0,1.2330951154852172,2.5852271228708048,2.9058469516701639])

    def test_medcouple_no_axis(self):
        # axis=None must behave like medcouple on the flattened array
        x = np.reshape(np.arange(100.0), (50, 2))
        mc = medcouple(x, axis=None)
        assert_almost_equal(mc, medcouple(x.ravel()))

    def test_medcouple_1d(self):
        # the 1-d helper rejects 2-d input
        x = np.reshape(np.arange(100.0),(50,2))
        assert_raises(ValueError, _medcouple_1d, x)

    def test_medcouple_symmetric(self):
        # a symmetric sample has zero medcouple
        mc = medcouple(np.arange(5.0))
        assert_almost_equal(mc, 0)

    def test_medcouple_nonzero(self):
        mc = medcouple(np.array([1, 2, 7, 9, 10.0]))
        assert_almost_equal(mc, -0.3333333)

    def test_medcouple_symmetry(self):
        # medcouple(-x) == -medcouple(x)
        x = np.random.standard_normal(100)
        mcp = medcouple(x)
        mcn = medcouple(-x)
        assert_almost_equal(mcp + mcn, 0)

    def test_durbin_watson(self):
        # compare against the textbook definition of the DW statistic
        x = np.random.standard_normal(100)
        dw = sum(np.diff(x)**2.0) / np.dot(x, x)
        assert_almost_equal(dw, durbin_watson(x))

    def test_durbin_watson_2d(self):
        # the statistic is computed column-wise on 2-d input
        shape = (1, 10)
        x = np.random.standard_normal(100)
        dw = sum(np.diff(x)**2.0) / np.dot(x, x)
        x = np.tile(x[:, None], shape)
        assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x))

    def test_durbin_watson_3d(self):
        # axis selection on 3-d input
        shape = (10, 1, 10)
        x = np.random.standard_normal(100)
        dw = sum(np.diff(x)**2.0) / np.dot(x, x)
        x = np.tile(x[None, :, None], shape)
        assert_almost_equal(np.squeeze(dw * np.ones(shape)), durbin_watson(x, axis=1))

    def test_robust_skewness_1d(self):
        # a symmetric (uniformly spaced) sample has zero skewness by
        # all four estimators
        x = np.arange(21.0)
        sk = robust_skewness(x)
        assert_almost_equal(np.array(sk), np.zeros(4))

    def test_robust_skewness_1d_2d(self):
        # 1-d input and its column-vector form must agree with axis=None
        x = np.random.randn(21)
        y = x[:, None]
        sk_x = robust_skewness(x)
        sk_y = robust_skewness(y, axis=None)
        assert_almost_equal(np.array(sk_x), np.array(sk_y))

    def test_robust_skewness_symmetric(self):
        # symmetrized sample: estimators must all return zero
        x = np.random.standard_normal(100)
        x = np.hstack([x, np.zeros(1), -x])
        sk = robust_skewness(x)
        assert_almost_equal(np.array(sk), np.zeros(4))

    def test_robust_skewness_3d(self):
        # tile a symmetric sample into a 3-d array; every slice is zero
        x = np.random.standard_normal(100)
        x = np.hstack([x, np.zeros(1), -x])
        x = np.tile(x, (10, 10, 1))
        sk_3d = robust_skewness(x, axis=2)
        result = np.zeros((10, 10))
        for sk in sk_3d:
            assert_almost_equal(sk, result)

    def test_robust_kurtosis_1d_2d(self):
        # 1-d input and its column-vector form must agree with axis=None
        x = np.random.randn(100)
        y = x[:, None]
        kr_x = np.array(robust_kurtosis(x))
        kr_y = np.array(robust_kurtosis(y, axis=None))
        assert_almost_equal(kr_x, kr_y)

    def test_robust_kurtosis(self):
        x = self.kurtosis_x
        assert_almost_equal(np.array(robust_kurtosis(x)), self.expected_kurtosis)

    def test_robust_kurtosis_3d(self):
        x = np.tile(self.kurtosis_x, (10, 10, 1))
        kurtosis = np.array(robust_kurtosis(x, axis=2))
        for i, r in enumerate(self.expected_kurtosis):
            assert_almost_equal(r * np.ones((10, 10)), kurtosis[i])

    def test_robust_kurtosis_excess_false(self):
        # excess=False shifts each estimator by its normality constant
        x = self.kurtosis_x
        expected = self.expected_kurtosis + self.kurtosis_constants
        kurtosis = np.array(robust_kurtosis(x, excess=False))
        assert_almost_equal(expected, kurtosis)

    def test_robust_kurtosis_ab(self):
        """Test custom alpha, beta in kr3"""
        x = self.kurtosis_x
        alpha, beta = (10.0, 45.0)
        kurtosis = robust_kurtosis(self.kurtosis_x, ab=(alpha,beta), excess=False)
        num = np.mean(x[x>np.percentile(x,100.0 - alpha)]) - np.mean(x[x<np.percentile(x,alpha)])
        denom = np.mean(x[x>np.percentile(x,100.0 - beta)]) - np.mean(x[x<np.percentile(x,beta)])
        assert_almost_equal(kurtosis[2], num/denom)

    def test_robust_kurtosis_dg(self):
        """Test custom delta, gamma in kr4"""
        x = self.kurtosis_x
        delta, gamma = (10.0, 45.0)
        kurtosis = robust_kurtosis(self.kurtosis_x, dg=(delta,gamma), excess=False)
        q = np.percentile(x,[delta, 100.0-delta, gamma, 100.0-gamma])
        assert_almost_equal(kurtosis[3], (q[1] - q[0]) / (q[3] - q[2]))
if __name__ == "__main__":
    import nose
    # Run this module's tests verbosely, stopping at the first failure.
    nose.runmodule(argv=[__file__, '-vvs', '-x'], exit=False)  #, '--pdb'
    # run_module_suite()
    #nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
    #               exit=False)
| |
'''
A few functions to plot stellar mass functions from SAM runs.
:author: Sami-Matias Niemi
:version: 0.1
:contact: niemi@stsci.edu
'''
import matplotlib
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['font.size'] = 15
matplotlib.rc('xtick', labelsize=14)
matplotlib.rc('axes', linewidth=1.2)
matplotlib.rcParams['legend.fontsize'] = 7
matplotlib.rcParams['legend.handlelength'] = 2
matplotlib.rcParams['xtick.major.size'] = 5
matplotlib.rcParams['ytick.major.size'] = 5
matplotlib.use('AGG')
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, NullFormatter, LogLocator
import numpy as N
import pylab as P
import os, logging
#From Sami's Repo
import db.sqlite
import astronomy.stellarMFs as SMF
import astronomy.differentialfunctions as df
import plot.tools as pt
def stellarmassfunc_plot(path, database, redshifts,
                         output_folder, outfile,
                         mmax=12.5, mmin=8.0,
                         nbins=30, nvolumes=15,
                         lowlim=-4.9):
    '''
    Plots stellar mass functions as a function of redshift.
    Compares to observations.

    :param path: directory that contains the SQLite database
    :param database: name of the SQLite database file
    :param redshifts: list of SQL where-clause fragments, one per z bin
    :param output_folder: directory where the plot is written
    :param outfile: output file name, without extension
    :param mmax: maximum stellar mass
                 (NOTE(review): appears only in a commented-out argument
                 below, so it is currently unused)
    :param mmin: minimum log10 stellar mass plotted
    :param nbins: baseline number of mass bins (reduced at higher z)
    :param nvolumes: number of simulation sub-volumes
    :param lowlim: lower limit of the y axis
    '''
    #get a colour scheme
    cols = pt.give_colours()
    #scale the SAM's output masses
    multiply = 1e9
    #make the figure and axes instance
    fig = P.figure()
    ax = fig.add_subplot(111)
    #get obs constrains
    obs, ids = SMF.stellarMfs()
    # observational samples: (legend label, row mask) pairs
    o = []
    o.append(['$z \sim 1$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 0.99) & (obs.z_up < 1.4)])
    #o.append(['$z \sim 1$: Drory et al. 2004', (obs.id == 5) & (obs.z_low > 1.) & (obs.z_up < 1.2)])
    o.append(['$z \sim 2$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 1.99) & (obs.z_up < 2.6)])
    #o.append(['$z \sim 2$: Fontana et al. 2006', (obs.id == 6) & (obs.z_low > 1.99) & (obs.z_up < 3.01)])
    #o.append(['$z \sim 2$: Marchesini et al. 2008', (obs.id == 7) & (obs.z_low > 1.99) & (obs.z_up < 3.01)])
    o.append(['$z \sim 3$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 2.99) & (obs.z_up < 3.6)])
    o.append(['$z \sim 4$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 3.49) & (obs.z_up < 4.1)])
    highred = SMF.highRedshiftMFs()
    #make the observational plots
    for i, line in enumerate(o):
        label, mask = line[0], line[1]
        ms = obs.mstar[mask]
        mf = obs.mf[mask]
        errl = obs.err_low[mask]
        errh = obs.err_up[mask]
        # drop placeholder rows without a measurement
        msk = mf > - 15.0
        ax.errorbar(ms[msk],
                    mf[msk],
                    yerr=[errh[msk], errl[msk]],
                    color=cols[i],
                    ls=':',
                    label=label)
    # high-redshift observational mass functions, one curve per key
    for i, key in enumerate(sorted(highred.iterkeys())):
        if key != 'stellar_mass':
            ax.plot(highred['stellar_mass'],
                    highred[key],
                    color=cols[i + 2],
                    ls=':',
                    marker='s',
                    label='$%s:$ Gonzalez et al. 2011' % key.replace('=', '\sim'))
    #plot the different redshifts
    for ii, redshift in enumerate(redshifts):
        #get redshift, add 0.1 so that int/floor returns the closest int
        tmp = redshift.split()
        rd = int(float(tmp[2]) + 0.1)
        #generate the SQL query
        query = '''select mstar_disk, mbulge, gal_id from galpropz where ''' + redshift
        query += ' and (mstar_disk >= 0.0 or mbulge >= 0.0)'
        query += ' and mstar_disk + mbulge > 0.0'
        #get data from the SQLite3 db
        dat = db.sqlite.get_data_sqlite(path, database, query)
        #rename data for convenience
        disk = dat[:, 0]
        bulge = dat[:, 1]
        # total stellar mass (disk + bulge) in solar masses, log10
        mstar = N.log10((disk * multiply) + (bulge * multiply))
        #make a dictionary of data
        data = {}
        data['stellar_mass'] = mstar
        data['bulge_mass'] = bulge
        data['galaxy_id'] = dat[:, 2]
        #debug output
        ngal = len(mstar)
        logging.debug('%i galaxies found at z = %i' % (ngal, rd))
        #calculate the stellar mass functions
        mfs = df.stellarMassFunction(data,
                                     mmin=mmin - 0.2,
                                     #mmax = mmax,
                                     nvols=nvolumes,
                                     nbins=nbins - 2 * rd,
                                     verbose=True)
        #plot the simulation data
        ax.plot(mfs['mass_bins'],
                mfs['mf_stellar_mass'],
                color=cols[ii],
                label='$z \sim %i$: Bolshoi + SAM' % rd)
        # ax.plot(mfs['mass_bins'],
        # mfs['mf_central_galaxies'],
        # color = cols[ii],
        # ls = '--',
        # label = '$z \sim %i$: Bolshoi + SAM (CG)' % rd)
    #set axes scales and labels
    ax.set_xlim(mmin, 12.0)
    ax.set_ylim(lowlim, -1.0)
    ax.set_xlabel(r'$\log_{10} \left ( M_{\star} \ [\mathrm{M}_{\odot}] \right )$')
    ax.set_ylabel(
        r'$\log_{10} \left ( \frac{\mathrm{d}N}{\mathrm{d}\log_{10} M_{\star}} \right ) \quad [\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1}] $')
    #set small ticks
    m = ax.get_yticks()[1] - ax.get_yticks()[0]
    yminorLocator = MultipleLocator(m / 5)
    yminorFormattor = NullFormatter()
    ax.yaxis.set_minor_locator(yminorLocator)
    ax.yaxis.set_minor_formatter(yminorFormattor)
    m = ax.get_xticks()[1] - ax.get_xticks()[0]
    xminorLocator = MultipleLocator(m / 5)
    xminorFormattor = NullFormatter()
    ax.xaxis.set_minor_locator(xminorLocator)
    ax.xaxis.set_minor_formatter(xminorFormattor)
    P.legend(shadow=True, fancybox=True, numpoints=1)
    P.savefig(output_folder + outfile + '.pdf')
    P.close()
def stellarmassfuncSinglePlot(path, database, redshifts,
                              output_folder, outfile,
                              mmin=8.0, nbins=30,
                              nvolumes=15, lowlim=-4.9):
    '''
    Plots a stellar mass function and compares to observational data.
    One output image is written per redshift bin.

    :param path: directory that contains the SQLite database
    :param database: name of the SQLite database file
    :param redshifts: list of SQL where-clause fragments, one per z bin
    :param output_folder: directory where the plots are written
    :param outfile: output file name prefix (bin index appended)
    :param mmin: minimum log10 stellar mass plotted
    :param nbins: baseline number of mass bins (reduced at higher z)
    :param nvolumes: number of simulation sub-volumes
    :param lowlim: lower limit of the y axis

    :note: this function is horribly written, there several loops that
           are not really needed.
    '''
    #get a colour scheme
    cols = pt.give_colours()
    #scale the SAM's output masses
    multiply = 1e9
    #get obs constrains
    obs, ids = SMF.stellarMfs()
    # observational samples: (legend label, row mask) pairs
    o = []
    o.append(['$z \sim 1$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 0.99) & (obs.z_up < 1.4)])
    #o.append(['$z \sim 1$: Drory et al. 2004', (obs.id == 5) & (obs.z_low > 1.) & (obs.z_up < 1.2)])
    o.append(['$z \sim 2$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 1.99) & (obs.z_up < 2.6)])
    #o.append(['$z \sim 2$: Fontana et al. 2006', (obs.id == 6) & (obs.z_low > 1.99) & (obs.z_up < 3.01)])
    #o.append(['$z \sim 2$: Marchesini et al. 2008', (obs.id == 7) & (obs.z_low > 1.99) & (obs.z_up < 3.01)])
    o.append(['$z \sim 3$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 2.99) & (obs.z_up < 3.6)])
    o.append(['$z \sim 4$: Perez-Gonzalez et al. 2007', (obs.id == 1) & (obs.z_low > 3.49) & (obs.z_up < 4.1)])
    highred = SMF.highRedshiftMFs()
    #plot the different redshifts
    for ii, redshift in enumerate(redshifts):
        #make the figure and axes instance
        fig = P.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
        #make the observational plots
        for i, line in enumerate(o):
            # only the sample matching this redshift bin is drawn
            if i == ii:
                label, mask = line[0], line[1]
                ms = obs.mstar[mask]
                mf = obs.mf[mask]
                errl = obs.err_low[mask]
                errh = obs.err_up[mask]
                # drop placeholder rows without a measurement
                msk = mf > - 15.0
                ax.errorbar(ms[msk],
                            mf[msk],
                            yerr=[errh[msk], errl[msk]],
                            color=cols[i],
                            ls=':',
                            label=label)
            if i > 1:
                # NOTE(review): this nested loop rebinds the outer loop
                # variable `i`; harmless because `for` reassigns it each
                # iteration, but renaming would make this clearer.
                for i, key in enumerate(sorted(highred.iterkeys())):
                    if key != 'stellar_mass' and i == ii - 2:
                        ax.errorbar(highred['stellar_mass'],
                                    highred[key][0],
                                    yerr=[highred[key][1], highred[key][1]],
                                    color=cols[i + 2],
                                    ls=':',
                                    marker='s',
                                    label='$%s:$ Gonzalez et al. 2011' % key.replace('=', '\sim'))
        #get redshift, add 0.1 so that int/floor returns the closest int
        tmp = redshift.split()
        rd = int(float(tmp[2]) + 0.1)
        #generate the SQL query
        query = '''select mstar_disk, mbulge, gal_id from galpropz where ''' + redshift
        query += ' and (mstar_disk >= 0.0 or mbulge >= 0.0)'
        query += ' and mstar_disk + mbulge > 0.0'
        #get data from the SQLite3 db
        dat = db.sqlite.get_data_sqlite(path, database, query)
        #rename data for convenience
        disk = dat[:, 0]
        bulge = dat[:, 1]
        # total stellar mass (disk + bulge) in solar masses, log10
        mstar = N.log10((disk * multiply) + (bulge * multiply))
        #make a dictionary of data
        data = {}
        data['stellar_mass'] = mstar
        data['bulge_mass'] = bulge
        data['galaxy_id'] = dat[:, 2]
        #debug output
        ngal = len(mstar)
        logging.debug('%i galaxies found at z = %i' % (ngal, rd))
        #calculate the stellar mass functions
        mfs = df.stellarMassFunction(data,
                                     mmin=mmin - 0.2,
                                     nvols=nvolumes,
                                     nbins=nbins - 2 * rd,
                                     verbose=True)
        #plot the simulation data
        ax.plot(mfs['mass_bins'],
                mfs['mf_stellar_mass'],
                color=cols[ii],
                label='$z \sim %i$: Bolshoi + SAM' % rd)
        #set axes scales and labels
        ax.set_xlim(mmin, 12.0)
        ax.set_ylim(lowlim, -1.0)
        ax.set_xlabel(r'$\log_{10} \left ( M_{\star} \ [\mathrm{M}_{\odot}] \right )$')
        ax.set_ylabel(
            r'$\log_{10} \left ( \frac{\mathrm{d}N}{\mathrm{d}\log_{10} M_{\star}} \right ) \quad [\mathrm{Mpc}^{-3}\ \mathrm{dex}^{-1}] $')
        #set small ticks
        m = ax.get_yticks()[1] - ax.get_yticks()[0]
        yminorLocator = MultipleLocator(m / 5)
        yminorFormattor = NullFormatter()
        ax.yaxis.set_minor_locator(yminorLocator)
        ax.yaxis.set_minor_formatter(yminorFormattor)
        m = ax.get_xticks()[1] - ax.get_xticks()[0]
        xminorLocator = MultipleLocator(m / 5)
        xminorFormattor = NullFormatter()
        ax.xaxis.set_minor_locator(xminorLocator)
        ax.xaxis.set_minor_formatter(xminorFormattor)
        P.legend(shadow=True, fancybox=True, numpoints=1)
        # one image per redshift bin, indexed by the bin number
        P.savefig(output_folder + outfile + str(ii) + '.png')
        P.close()
def main(redshifts, path, database, output_folder, outfile):
    '''
    Driver: produce the stellar-mass-function plots for *redshifts*.
    Call this with a path to the data and the label you wish to use
    for the output files.
    '''
    # The combined multi-redshift plot is currently disabled:
    #stellarmassfunc_plot(path, database, redshifts,
    #                     output_folder, outfile)
    stellarmassfuncSinglePlot(path, database, redshifts,
                              output_folder, outfile)
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    #find the home directory, because the output is to dropbox
    #and my user name is not always the same, this hack is required.
    hm = os.getenv('HOME')
    path1 = hm + '/Desktop/Research/run/newtree1/'
    database = 'sams.db'
    outpath = hm + '/Dropbox/Research/Bolshoi/stellarMFs/'
    #redshift bins
    # Each entry is a galpropz where-clause selecting a narrow slice in z.
    redshifts1 = ['galpropz.zgal > 0.95 and galpropz.zgal < 1.05',
                  'galpropz.zgal > 1.95 and galpropz.zgal < 2.05',
                  'galpropz.zgal > 2.95 and galpropz.zgal < 3.05',
                  'galpropz.zgal > 3.95 and galpropz.zgal < 4.05',
                  'galpropz.zgal > 4.95 and galpropz.zgal < 5.05',
                  'galpropz.zgal > 5.95 and galpropz.zgal < 6.05',
                  'galpropz.zgal > 6.95 and galpropz.zgal < 7.05']
    # alternate binning, kept for reference:
    # redshifts2 = ['galpropz.zgal > 1.0 and galpropz.zgal <= 1.1',
    #               'galpropz.zgal > 2.0 and galpropz.zgal <= 2.1',
    #               'galpropz.zgal > 3.0 and galpropz.zgal <= 3.1',
    #               'galpropz.zgal > 4.0 and galpropz.zgal <= 4.1',
    #               'galpropz.zgal > 5.1 and galpropz.zgal <= 5.2',
    #               'galpropz.zgal > 6.5 and galpropz.zgal <= 6.6',
    #               'galpropz.zgal > 8.1 and galpropz.zgal <= 8.3']
    logging.debug('Making the first plot')
    main(redshifts1, path1, database, outpath, 'stellarmfNew1')
| |
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
from six import moves
import testtools
from testtools import matchers
from neutron.common import exceptions as exc
from neutron.db import api as db
from neutron.plugins.ml2 import driver_api as api
# Endpoint fixtures shared by the tunnel-sync tests below.
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
HOST_ONE = 'fake_host_one'
HOST_TWO = 'fake_host_two'
# Primary tunnel-id range, plus a shifted variant used to exercise
# re-synchronisation of the allocation table.
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
class TunnelTypeTestMixin(object):
    """Shared test cases for ML2 tunnel type drivers.

    Concrete test classes must set DRIVER_CLASS (the type driver under
    test) and TYPE (the network type string it implements).
    """
    DRIVER_CLASS = None
    TYPE = None

    def setUp(self):
        super(TunnelTypeTestMixin, self).setUp()
        # Fresh driver with the default range synced into the allocation
        # table before each test.
        self.driver = self.DRIVER_CLASS()
        self.driver.tunnel_ranges = TUNNEL_RANGES
        self.driver.sync_allocations()
        self.session = db.get_session()

    def test_tunnel_type(self):
        self.assertEqual(self.TYPE, self.driver.get_type())

    def test_validate_provider_segment(self):
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'phys_net',
                   api.SEGMENTATION_ID: None}
        # A physical network is invalid for a tunnel segment.
        with testtools.ExpectedException(exc.InvalidInput):
            self.driver.validate_provider_segment(segment)
        # Without a physical network, both a partial segment (no
        # segmentation id) and a fully specified one validate.
        segment[api.PHYSICAL_NETWORK] = None
        self.driver.validate_provider_segment(segment)
        segment[api.SEGMENTATION_ID] = 1
        self.driver.validate_provider_segment(segment)

    def test_sync_tunnel_allocations(self):
        # After the initial sync, exactly TUN_MIN..TUN_MAX exist,
        # all unallocated; ids just outside the range do not exist.
        self.assertIsNone(
            self.driver.get_allocation(self.session, (TUN_MIN - 1)))
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MIN)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MIN + 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MAX - 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MAX)).allocated)
        self.assertIsNone(
            self.driver.get_allocation(self.session, (TUN_MAX + 1)))
        # Re-sync with a shifted range: the table must follow the new
        # boundaries exactly.
        self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
        self.driver.sync_allocations()
        self.assertIsNone(
            self.driver.get_allocation(self.session, (TUN_MIN + 5 - 1)))
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MIN + 5)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session,
                                       (TUN_MIN + 5 + 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session,
                                       (TUN_MAX + 5 - 1)).allocated)
        self.assertFalse(
            self.driver.get_allocation(self.session, (TUN_MAX + 5)).allocated)
        self.assertIsNone(
            self.driver.get_allocation(self.session, (TUN_MAX + 5 + 1)))

    def test_partial_segment_is_partial_segment(self):
        # No segmentation id -> partial segment.
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: None}
        self.assertTrue(self.driver.is_partial_segment(segment))

    def test_specific_segment_is_not_partial_segment(self):
        # Explicit segmentation id -> fully specified segment.
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: 101}
        self.assertFalse(self.driver.is_partial_segment(segment))

    def test_reserve_provider_segment_full_specs(self):
        # Reserving an id inside the configured range marks it allocated;
        # reserving it again raises TunnelIdInUse; releasing clears it.
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: None,
                   api.SEGMENTATION_ID: 101}
        observed = self.driver.reserve_provider_segment(self.session, segment)
        alloc = self.driver.get_allocation(self.session,
                                           observed[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)
        with testtools.ExpectedException(exc.TunnelIdInUse):
            self.driver.reserve_provider_segment(self.session, segment)
        self.driver.release_segment(self.session, segment)
        alloc = self.driver.get_allocation(self.session,
                                           observed[api.SEGMENTATION_ID])
        self.assertFalse(alloc.allocated)
        # An id outside the configured range can still be reserved, but
        # its row is removed entirely on release rather than kept.
        segment[api.SEGMENTATION_ID] = 1000
        observed = self.driver.reserve_provider_segment(self.session, segment)
        alloc = self.driver.get_allocation(self.session,
                                           observed[api.SEGMENTATION_ID])
        self.assertTrue(alloc.allocated)
        self.driver.release_segment(self.session, segment)
        alloc = self.driver.get_allocation(self.session,
                                           observed[api.SEGMENTATION_ID])
        self.assertIsNone(alloc)

    def test_reserve_provider_segment(self):
        # With no segmentation id given, the driver hands out ids from the
        # configured range until it is exhausted.
        tunnel_ids = set()
        specs = {api.NETWORK_TYPE: self.TYPE,
                 api.PHYSICAL_NETWORK: 'None',
                 api.SEGMENTATION_ID: None}
        for x in moves.xrange(TUN_MIN, TUN_MAX + 1):
            segment = self.driver.reserve_provider_segment(self.session,
                                                           specs)
            self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.GreaterThan(TUN_MIN - 1))
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.LessThan(TUN_MAX + 1))
            tunnel_ids.add(segment[api.SEGMENTATION_ID])
        # Range exhausted: the next reservation must fail.
        with testtools.ExpectedException(exc.NoNetworkAvailable):
            segment = self.driver.reserve_provider_segment(self.session,
                                                           specs)
        # Releasing one id makes exactly one reservation possible again.
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: tunnel_ids.pop()}
        self.driver.release_segment(self.session, segment)
        segment = self.driver.reserve_provider_segment(self.session, specs)
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.GreaterThan(TUN_MIN - 1))
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.LessThan(TUN_MAX + 1))
        tunnel_ids.add(segment[api.SEGMENTATION_ID])
        # Clean up all reservations.
        for tunnel_id in tunnel_ids:
            segment[api.SEGMENTATION_ID] = tunnel_id
            self.driver.release_segment(self.session, segment)

    def test_allocate_tenant_segment(self):
        # Tenant allocation behaves like reservation without specs: ids
        # come from the range until exhaustion, then None is returned.
        tunnel_ids = set()
        for x in moves.xrange(TUN_MIN, TUN_MAX + 1):
            segment = self.driver.allocate_tenant_segment(self.session)
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.GreaterThan(TUN_MIN - 1))
            self.assertThat(segment[api.SEGMENTATION_ID],
                            matchers.LessThan(TUN_MAX + 1))
            tunnel_ids.add(segment[api.SEGMENTATION_ID])
        segment = self.driver.allocate_tenant_segment(self.session)
        self.assertIsNone(segment)
        # Releasing one id makes one more allocation possible.
        segment = {api.NETWORK_TYPE: self.TYPE,
                   api.PHYSICAL_NETWORK: 'None',
                   api.SEGMENTATION_ID: tunnel_ids.pop()}
        self.driver.release_segment(self.session, segment)
        segment = self.driver.allocate_tenant_segment(self.session)
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.GreaterThan(TUN_MIN - 1))
        self.assertThat(segment[api.SEGMENTATION_ID],
                        matchers.LessThan(TUN_MAX + 1))
        tunnel_ids.add(segment[api.SEGMENTATION_ID])
        # Clean up all allocations.
        for tunnel_id in tunnel_ids:
            segment[api.SEGMENTATION_ID] = tunnel_id
            self.driver.release_segment(self.session, segment)
class TunnelTypeMultiRangeTestMixin(object):
    """Exercises a tunnel type driver configured with two id ranges."""
    DRIVER_CLASS = None

    TUN_MIN0 = 100
    TUN_MAX0 = 101
    TUN_MIN1 = 200
    TUN_MAX1 = 201
    TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]

    def setUp(self):
        super(TunnelTypeMultiRangeTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()
        self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
        self.driver.sync_allocations()
        self.session = db.get_session()

    def test_release_segment(self):
        # Allocate the whole space: both ranges hold two ids each.
        segments = [self.driver.allocate_tenant_segment(self.session)
                    for _ in range(4)]
        # Release them in random order. No special meaning.
        for idx in (0, 2, 1, 3):
            self.driver.release_segment(self.session, segments[idx])
        # Every id in both ranges must be unallocated again.
        for tunnel_id in (self.TUN_MIN0, self.TUN_MAX0,
                          self.TUN_MIN1, self.TUN_MAX1):
            alloc = self.driver.get_allocation(self.session, tunnel_id)
            self.assertFalse(alloc.allocated)
class TunnelRpcCallbackTestMixin(object):
    """Mixin verifying the tunnel_sync RPC callback for a tunnel type driver.

    Concrete subclasses supply DRIVER_CLASS/TYPE and are expected to set
    ``self.notifier`` and ``self.callbacks`` in their own setUp
    (neither is created here — TODO confirm against the subclasses).
    """

    DRIVER_CLASS = None
    TYPE = None

    def setUp(self):
        super(TunnelRpcCallbackTestMixin, self).setUp()
        self.driver = self.DRIVER_CLASS()

    def _test_tunnel_sync(self, kwargs, delete_tunnel=False):
        """Run tunnel_sync with *kwargs* and check notifier interactions.

        :param kwargs: keyword arguments forwarded to the callback.
        :param delete_tunnel: when True, assert a tunnel_delete notification
            was emitted; otherwise assert it was not.
        """
        # NOTE: contextlib.nested is deprecated since Python 2.7 (and removed
        # in Python 3); a single with statement takes multiple managers and
        # avoids nested()'s leak when an inner __init__ raises.
        with mock.patch.object(self.notifier,
                               'tunnel_update') as tunnel_update, \
                mock.patch.object(self.notifier,
                                  'tunnel_delete') as tunnel_delete:
            details = self.callbacks.tunnel_sync('fake_context', **kwargs)
            # Every returned endpoint must echo the ip/host that was synced.
            tunnels = details['tunnels']
            for tunnel in tunnels:
                self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address'])
                self.assertEqual(kwargs['host'], tunnel['host'])
            self.assertTrue(tunnel_update.called)
            if delete_tunnel:
                self.assertTrue(tunnel_delete.called)
            else:
                self.assertFalse(tunnel_delete.called)

    def _test_tunnel_sync_raises(self, kwargs):
        """Run tunnel_sync with invalid *kwargs* and expect InvalidInput.

        Neither notification must be emitted when the input is rejected.
        """
        with mock.patch.object(self.notifier,
                               'tunnel_update') as tunnel_update, \
                mock.patch.object(self.notifier,
                                  'tunnel_delete') as tunnel_delete:
            self.assertRaises(exc.InvalidInput,
                              self.callbacks.tunnel_sync,
                              'fake_context', **kwargs)
            self.assertFalse(tunnel_update.called)
            self.assertFalse(tunnel_delete.called)

    def test_tunnel_sync_called_without_host_passed(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': None}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self):
        # Endpoint pre-registered without a host; syncing with a host is OK.
        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_with_host_passed(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_for_existing_endpoint(self):
        # Re-syncing an already-registered ip/host pair is idempotent.
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs)

    def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(self):
        # Same host reappearing with a new ip: the stale endpoint must be
        # deleted, hence delete_tunnel=True.
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE,
                  'host': HOST_ONE}
        self._test_tunnel_sync(kwargs, True)

    def test_tunnel_sync_called_with_used_tunnel_ip_case_one(self):
        # Ip already bound to HOST_ONE cannot be claimed by HOST_TWO.
        self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_TWO}
        self._test_tunnel_sync_raises(kwargs)

    def test_tunnel_sync_called_with_used_tunnel_ip_case_two(self):
        # HOST_TWO already owns TUNNEL_IP_TWO, so it cannot also claim
        # TUNNEL_IP_ONE (registered host-less).
        self.driver.add_endpoint(TUNNEL_IP_ONE, None)
        self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO)
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
                  'host': HOST_TWO}
        self._test_tunnel_sync_raises(kwargs)

    def test_tunnel_sync_called_without_tunnel_ip(self):
        kwargs = {'tunnel_type': self.TYPE, 'host': None}
        self._test_tunnel_sync_raises(kwargs)

    def test_tunnel_sync_called_without_tunnel_type(self):
        kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None}
        self._test_tunnel_sync_raises(kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.