gt stringclasses 1
value | context stringlengths 2.49k 119k |
|---|---|
"""Tests for the Awair sensor platform."""
from homeassistant.components.awair.const import (
API_CO2,
API_HUMID,
API_LUX,
API_PM10,
API_PM25,
API_SCORE,
API_SPL_A,
API_TEMP,
API_VOC,
ATTR_UNIQUE_ID,
DOMAIN,
SENSOR_TYPES,
)
from homeassistant.const import (
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
UNIT_PERCENTAGE,
)
from .const import (
AWAIR_UUID,
CONFIG,
DEVICES_FIXTURE,
GEN1_DATA_FIXTURE,
GEN2_DATA_FIXTURE,
GLOW_DATA_FIXTURE,
MINT_DATA_FIXTURE,
OFFLINE_FIXTURE,
OMNI_DATA_FIXTURE,
UNIQUE_ID,
USER_FIXTURE,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def setup_awair(hass, fixtures):
    """Register a mock Awair config entry and load it using canned API data.

    The patched ``AwairClient.query`` serves one fixture per API call, in
    the order given by ``fixtures``.
    """
    config_entry = MockConfigEntry(domain=DOMAIN, unique_id=UNIQUE_ID, data=CONFIG)
    with patch("python_awair.AwairClient.query", side_effect=fixtures):
        config_entry.add_to_hass(hass)
        await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()
def assert_expected_properties(
    hass, registry, name, unique_id, state_value, attributes
):
    """Assert an entity's registry unique_id, state value, and attributes.

    ``attributes`` is a dict of attribute name -> expected value; only the
    listed attributes are checked.
    """
    registry_entry = registry.async_get(name)
    assert registry_entry.unique_id == unique_id

    state = hass.states.get(name)
    assert state
    assert state.state == state_value
    for attribute, expected in attributes.items():
        assert state.attributes.get(attribute) == expected
async def test_awair_gen1_sensors(hass):
    """Test expected sensors on a 1st gen Awair."""
    # Fixtures are consumed one per mocked API call, in order — presumably
    # user, device list, then sensor data (TODO confirm against setup_awair).
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN1_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()

    # Score entity: state only, plus icon attribute.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "88",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_temperature",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_TEMP][ATTR_UNIQUE_ID]}",
        "21.8",
        {ATTR_UNIT_OF_MEASUREMENT: TEMP_CELSIUS, "awair_index": 1.0},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_humidity",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_HUMID][ATTR_UNIQUE_ID]}",
        "41.59",
        {ATTR_UNIT_OF_MEASUREMENT: UNIT_PERCENTAGE, "awair_index": 0.0},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_carbon_dioxide",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_CO2][ATTR_UNIQUE_ID]}",
        "654.0",
        {
            ATTR_ICON: "mdi:cloud",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_PARTS_PER_MILLION,
            "awair_index": 0.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_volatile_organic_compounds",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_VOC][ATTR_UNIQUE_ID]}",
        "366",
        {
            ATTR_ICON: "mdi:cloud",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_PARTS_PER_BILLION,
            "awair_index": 1.0,
        },
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        # gen1 unique_id should be awair_12345-DUST, which matches old integration behavior
        f"{AWAIR_UUID}_DUST",
        "14.3",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 1.0,
        },
    )
    # pm10 mirrors the same DUST reading, hence the identical "14.3" state.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm10",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM10][ATTR_UNIQUE_ID]}",
        "14.3",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 1.0,
        },
    )

    # We should not have a dust sensor; it's aliased as pm2.5
    # and pm10 sensors.
    assert hass.states.get("sensor.living_room_dust") is None

    # We should not have sound or lux sensors.
    assert hass.states.get("sensor.living_room_sound_level") is None
    assert hass.states.get("sensor.living_room_illuminance") is None
async def test_awair_gen2_sensors(hass):
    """Test expected sensors on a 2nd gen Awair."""
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN2_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()

    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "97",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM25][ATTR_UNIQUE_ID]}",
        "2.0",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 0.0,
        },
    )

    # The Awair 2nd gen reports specifically a pm2.5 sensor,
    # and so we don't alias anything. Make sure we didn't do that.
    assert hass.states.get("sensor.living_room_pm10") is None
async def test_awair_mint_sensors(hass):
    """Test expected sensors on an Awair mint."""
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, MINT_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()

    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "98",
        {ATTR_ICON: "mdi:blur"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_pm2_5",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_PM25][ATTR_UNIQUE_ID]}",
        "1.0",
        {
            ATTR_ICON: "mdi:blur",
            ATTR_UNIT_OF_MEASUREMENT: CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
            "awair_index": 0.0,
        },
    )
    # The Mint exposes a light sensor, unlike the gen1/gen2 units.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_illuminance",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_LUX][ATTR_UNIQUE_ID]}",
        "441.7",
        {ATTR_UNIT_OF_MEASUREMENT: "lx"},
    )

    # The Mint does not have a CO2 sensor.
    assert hass.states.get("sensor.living_room_carbon_dioxide") is None
async def test_awair_glow_sensors(hass):
    """Test expected sensors on an Awair glow."""
    await setup_awair(hass, [USER_FIXTURE, DEVICES_FIXTURE, GLOW_DATA_FIXTURE])
    registry = await hass.helpers.entity_registry.async_get_registry()

    score_unique_id = f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}"
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        score_unique_id,
        "93",
        {ATTR_ICON: "mdi:blur"},
    )

    # The glow does not have a particle sensor
    assert hass.states.get("sensor.living_room_pm2_5") is None
async def test_awair_omni_sensors(hass):
    """Test expected sensors on an Awair omni."""
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, OMNI_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()

    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "99",
        {ATTR_ICON: "mdi:blur"},
    )
    # The Omni additionally exposes sound-level and light sensors.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_sound_level",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SPL_A][ATTR_UNIQUE_ID]}",
        "47.0",
        {ATTR_ICON: "mdi:ear-hearing", ATTR_UNIT_OF_MEASUREMENT: "dBa"},
    )
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_illuminance",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_LUX][ATTR_UNIQUE_ID]}",
        "804.9",
        {ATTR_UNIT_OF_MEASUREMENT: "lx"},
    )
async def test_awair_offline(hass):
    """Test expected behavior when an Awair is offline."""
    await setup_awair(hass, [USER_FIXTURE, DEVICES_FIXTURE, OFFLINE_FIXTURE])

    # When the device is offline at setup time we expect no sensors at all:
    # python_awair creates sensors dynamically from the data it sees, and an
    # offline device reports none. Every online device *should* expose an
    # "awair score", so its absence is a good proxy for "nothing was set up" —
    # which is the correct outcome here.
    assert hass.states.get("sensor.living_room_awair_score") is None
async def test_awair_unavailable(hass):
    """Test expected behavior when an Awair becomes offline later."""
    fixtures = [USER_FIXTURE, DEVICES_FIXTURE, GEN1_DATA_FIXTURE]
    await setup_awair(hass, fixtures)
    registry = await hass.helpers.entity_registry.async_get_registry()

    # Initially online: the score sensor exists with real data.
    assert_expected_properties(
        hass,
        registry,
        "sensor.living_room_awair_score",
        f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
        "88",
        {ATTR_ICON: "mdi:blur"},
    )

    # Simulate the device going offline and force an update; the entity
    # should flip to unavailable rather than disappear.
    with patch("python_awair.AwairClient.query", side_effect=OFFLINE_FIXTURE):
        await hass.helpers.entity_component.async_update_entity(
            "sensor.living_room_awair_score"
        )
        assert_expected_properties(
            hass,
            registry,
            "sensor.living_room_awair_score",
            f"{AWAIR_UUID}_{SENSOR_TYPES[API_SCORE][ATTR_UNIQUE_ID]}",
            STATE_UNAVAILABLE,
            {ATTR_ICON: "mdi:blur"},
        )
| |
from __future__ import unicode_literals
import datetime
import warnings
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import (
display_for_field, display_for_value, label_for_field, lookup_field,
)
from django.contrib.admin.views.main import (
ALL_VAR, ORDER_VAR, PAGE_VAR, SEARCH_VAR,
)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.template import Library
from django.template.loader import get_template
from django.templatetags.static import static
from django.urls import NoReverseMatch
from django.utils import formats
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
# Tag/filter registry for this template-tag module.
register = Library()

# Marker inserted into page ranges where elided page numbers are collapsed.
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
    """
    Generates an individual page index link in a paginated list.
    """
    # The DOT placeholder renders as an ellipsis rather than a link.
    if i == DOT:
        return '... '
    if i == cl.page_num:
        # Current page: plain highlighted span, no link.
        return format_html('<span class="this-page">{}</span> ', i + 1)
    # The last page link gets an extra "end" class.
    end_class = ' class="end"' if i == cl.paginator.num_pages - 1 else ''
    return format_html(
        '<a href="{}"{}>{}</a> ',
        cl.get_query_string({PAGE_VAR: i}),
        mark_safe(end_class),
        i + 1,
    )
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.
    """
    paginator, page_num = cl.paginator, cl.page_num

    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    elif paginator.num_pages <= 10:
        # If there are 10 or fewer pages, display links to every page.
        page_range = range(paginator.num_pages)
    else:
        # Insert "smart" pagination links, so that there are always ON_ENDS
        # links at either end of the list of pages, and there are always
        # ON_EACH_SIDE links at either end of the "current page" link.
        ON_EACH_SIDE = 3
        ON_ENDS = 2
        if page_num > ON_EACH_SIDE + ON_ENDS:
            page_range = (
                list(range(0, ON_ENDS)) + [DOT] +
                list(range(page_num - ON_EACH_SIDE, page_num + 1))
            )
        else:
            page_range = list(range(0, page_num + 1))
        if page_num < paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1:
            page_range += (
                list(range(page_num + 1, page_num + ON_EACH_SIDE + 1)) + [DOT] +
                list(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            )
        else:
            page_range += list(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        '1': 1,
    }
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per column in ``cl.list_display`` describing its header:
    text, CSS class, whether it is sortable, current sort state, and the
    query-string URLs for changing the ordering.
    """
    ordering_field_columns = cl.get_ordering_field_columns()
    for i, field_name in enumerate(cl.list_display):
        text, attr = label_for_field(
            field_name, cl.model,
            model_admin=cl.model_admin,
            return_attr=True
        )
        if attr:
            field_name = _coerce_field_name(field_name, i)
            # Potentially not sortable

            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": text,
                    "class_attrib": mark_safe(' class="action-checkbox-column"'),
                    "sortable": False,
                }
                continue

            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                # Not sortable
                yield {
                    "text": text,
                    "class_attrib": format_html(' class="column-{}"', field_name),
                    "sortable": False,
                }
                continue

        # OK, it is sortable if we got this far
        th_classes = ['sortable', 'column-{}'.format(field_name)]
        order_type = ''
        new_order_type = 'asc'
        sort_priority = 0
        sorted = False
        # Is it currently being sorted on?
        if i in ordering_field_columns:
            sorted = True
            order_type = ordering_field_columns.get(i).lower()
            # 1-based priority among all currently sorted columns.
            sort_priority = list(ordering_field_columns).index(i) + 1
            th_classes.append('sorted %sending' % order_type)
            # Clicking an already-sorted column toggles its direction.
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]

        # build new ordering param
        o_list_primary = []  # URL for making this field the primary sort
        o_list_remove = []  # URL for removing this field from sort
        o_list_toggle = []  # URL for toggling order type for this field

        def make_qs_param(t, n):
            # Encode one ordering entry: optional '-' (descending) + column index.
            return ('-' if t == 'desc' else '') + str(n)

        for j, ot in ordering_field_columns.items():
            if j == i:  # Same column
                param = make_qs_param(new_order_type, j)
                # We want clicking on this header to bring the ordering to the
                # front
                o_list_primary.insert(0, param)
                o_list_toggle.append(param)
                # o_list_remove - omit
            else:
                param = make_qs_param(ot, j)
                o_list_primary.append(param)
                o_list_toggle.append(param)
                o_list_remove.append(param)

        if i not in ordering_field_columns:
            # Column not currently sorted: make it the new primary sort.
            o_list_primary.insert(0, make_qs_param(new_order_type, i))

        yield {
            "text": text,
            "sortable": True,
            "sorted": sorted,
            "ascending": order_type == "asc",
            "sort_priority": sort_priority,
            "url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
            "url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
            "url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
            "class_attrib": format_html(' class="{}"', ' '.join(th_classes)) if th_classes else '',
        }
def _boolean_icon(field_val):
    """Return an <img> tag showing the yes/no/unknown icon for a boolean value."""
    icon_name = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    icon_url = static('admin/img/icon-%s.svg' % icon_name)
    return format_html('<img src="{}" alt="{}" />', icon_url, field_val)
def _coerce_field_name(field_name, field_index):
    """
    Coerce a field_name (which may be a callable) to a string.
    """
    if not callable(field_name):
        return field_name
    name = field_name.__name__
    if name == '<lambda>':
        # Anonymous callables are named by position: lambda0, lambda1, ...
        return 'lambda' + str(field_index)
    return name
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one <th>/<td> cell of HTML per column in ``cl.list_display`` for
    the given ``result`` row, plus (when editing via ``form``) a trailing
    cell holding the hidden primary-key field.
    """
    def link_in_col(is_first, field_name, cl):
        # Should this column's cell wrap its content in a change-view link?
        if cl.list_display_links is None:
            return False
        if is_first and not cl.list_display_links:
            return True
        return field_name in cl.list_display_links

    first = True
    pk = cl.lookup_opts.pk.attname
    for field_index, field_name in enumerate(cl.list_display):
        empty_value_display = cl.model_admin.get_empty_value_display()
        row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except ObjectDoesNotExist:
            result_repr = empty_value_display
        else:
            # A callable/attribute may override the admin-wide empty display.
            empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
            if f is None or f.auto_created:
                if field_name == 'action_checkbox':
                    row_classes = ['action-checkbox']
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                result_repr = display_for_value(value, empty_value_display, boolean)
                if allow_tags:
                    warnings.warn(
                        "Deprecated allow_tags attribute used on field {}. "
                        "Use django.utils.safestring.format_html(), "
                        "format_html_join(), or mark_safe() instead.".format(field_name),
                        RemovedInDjango20Warning
                    )
                    result_repr = mark_safe(result_repr)
                if isinstance(value, (datetime.date, datetime.time)):
                    row_classes.append('nowrap')
            else:
                if isinstance(f.remote_field, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = empty_value_display
                    else:
                        result_repr = field_val
                else:
                    result_repr = display_for_field(value, f, empty_value_display)
                if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
                    row_classes.append('nowrap')
        if force_text(result_repr) == '':
            # Use a non-breaking space so empty cells keep their height; a
            # plain ' ' collapses when the browser renders the table.
            result_repr = mark_safe('&nbsp;')
        row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
        # If list_display_links not defined, add the link tag to the first field
        if link_in_col(first, field_name, cl):
            table_tag = 'th' if first else 'td'
            first = False

            # Display link to the result's change_view if the url exists, else
            # display just the result's representation.
            try:
                url = cl.url_for_result(result)
            except NoReverseMatch:
                link_or_text = result_repr
            else:
                url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
                # Convert the pk to something that can be used in Javascript.
                # Problem cases are long ints (23L) and non-ASCII strings.
                if cl.to_field:
                    attr = str(cl.to_field)
                else:
                    attr = pk
                value = result.serializable_value(attr)
                link_or_text = format_html(
                    '<a href="{}"{}>{}</a>',
                    url,
                    format_html(
                        ' data-popup-opener="{}"', value
                    ) if cl.is_popup else '',
                    result_repr)

            yield format_html('<{}{}>{}</{}>',
                              table_tag,
                              row_class,
                              link_or_text,
                              table_tag)
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                    field_name == cl.model._meta.pk.name and
                    form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            yield format_html('<td{}>{}</td>', row_class, result_repr)
    if form and not form[cl.model._meta.pk.name].is_hidden:
        # Trailing cell carrying the (visible) primary-key form field.
        yield format_html('<td>{}</td>', force_text(form[cl.model._meta.pk.name]))
class ResultList(list):
    """
    A list of rendered cells for one row of a list_editable changelist,
    annotated with the form object for error-reporting purposes. Kept as a
    list subclass to maintain backwards compatibility with existing admin
    templates.
    """

    def __init__(self, form, *items):
        self.form = form
        super(ResultList, self).__init__(*items)
def results(cl):
    """Yield a ResultList of rendered cells for each row in the changelist."""
    if not cl.formset:
        for res in cl.result_list:
            yield ResultList(None, items_for_result(cl, res, None))
        return
    # Editable changelist: pair each row with its formset form.
    for res, form in zip(cl.result_list, cl.formset.forms):
        yield ResultList(form, items_for_result(cl, res, form))
def result_hidden_fields(cl):
    """Yield the rendered primary-key field for each form that hides it."""
    if not cl.formset:
        return
    pk_name = cl.model._meta.pk.name
    for res, form in zip(cl.result_list, cl.formset.forms):
        if form[pk_name].is_hidden:
            yield mark_safe(force_text(form[pk_name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
"""
Displays the headers and data list together
"""
headers = list(result_headers(cl))
num_sorted_fields = 0
for h in headers:
if h['sortable'] and h['sorted']:
num_sorted_fields += 1
return {'cl': cl,
'result_hidden_fields': list(result_hidden_fields(cl)),
'result_headers': headers,
'num_sorted_fields': num_sorted_fields,
'results': list(results(cl))}
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Returns template context describing the current drill-down level
    (year -> month -> day), a "back" link one level up, and the list of
    choices at the next level. Returns None (no context) when the
    ModelAdmin defines no date_hierarchy.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        field = cl.opts.get_field(field_name)
        # DateTimeField querysets use .datetimes(); DateField uses .dates().
        dates_or_datetimes = 'datetimes' if isinstance(field, models.DateTimeField) else 'dates'
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)

        def link(filters):
            # Build a query string, dropping any existing lookups on this field.
            return cl.get_query_string(filters, [field_generic])

        if not (year_lookup or month_lookup or day_lookup):
            # select appropriate start level
            date_range = cl.queryset.aggregate(first=models.Min(field_name),
                                               last=models.Max(field_name))
            if date_range['first'] and date_range['last']:
                # If all data falls in one year (or one month), skip ahead to
                # that level instead of starting at the year list.
                if date_range['first'].year == date_range['last'].year:
                    year_lookup = date_range['first'].year
                    if date_range['first'].month == date_range['last'].month:
                        month_lookup = date_range['first'].month

        if year_lookup and month_lookup and day_lookup:
            # Deepest level: a single day is selected; no further choices.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # Month selected: offer the days present in the data.
            days = cl.queryset.filter(**{year_field: year_lookup, month_field: month_lookup})
            days = getattr(days, dates_or_datetimes)(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # Year selected: offer the months present in the data.
            months = cl.queryset.filter(**{year_field: year_lookup})
            months = getattr(months, dates_or_datetimes)(field_name, 'month')
            return {
                'show': True,
                'back': {
                    'link': link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Top level: offer every year present in the data.
            years = getattr(cl.queryset, dates_or_datetimes)(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
    """
    Displays a search form for searching the list.
    """
    # Only display the result count when it differs from the unfiltered total.
    narrowed = cl.result_count != cl.full_result_count
    return {
        'cl': cl,
        'show_result_count': narrowed,
        'search_var': SEARCH_VAR,
    }
@register.simple_tag
def admin_list_filter(cl, spec):
    """Render one sidebar filter section using the spec's own template."""
    template = get_template(spec.template)
    context = {
        'title': spec.title,
        'choices': list(spec.choices(cl)),
        'spec': spec,
    }
    return template.render(context)
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    previous_index = context.get('action_index', -1)
    context['action_index'] = previous_index + 1
    return context
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.net.proto import ProtocolBuffer
import array
import dummy_thread as thread
# Pychecker suppressions for this machine-generated module.
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
                   unusednames=printElemNumber,debug_strs no-special"""

# Use the extendable message base class when the installed ProtocolBuffer
# runtime supports extensions; otherwise fall back to the plain base class.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.datastore.datastore_v3_pb import *
import google.appengine.datastore.datastore_v3_pb
from google.appengine.datastore.entity_pb import *
import google.appengine.datastore.entity_pb
class Request(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting.ext.remote_api.Request.

  Required string fields: service_name (tag 2), method (tag 3) and
  request (tag 4, the serialized inner request payload). Do not edit by
  hand; this mirrors the standard ProtocolBuffer-generated layout.
  """
  # Presence flags and default values for each field.
  has_service_name_ = 0
  service_name_ = ""
  has_method_ = 0
  method_ = ""
  has_request_ = 0
  request_ = ""

  def __init__(self, contents=None):
    # Optionally initialize from an already-encoded message.
    if contents is not None: self.MergeFromString(contents)

  def service_name(self): return self.service_name_

  def set_service_name(self, x):
    self.has_service_name_ = 1
    self.service_name_ = x

  def clear_service_name(self):
    # Only reset when set, so the class-level default is left untouched.
    if self.has_service_name_:
      self.has_service_name_ = 0
      self.service_name_ = ""

  def has_service_name(self): return self.has_service_name_

  def method(self): return self.method_

  def set_method(self, x):
    self.has_method_ = 1
    self.method_ = x

  def clear_method(self):
    if self.has_method_:
      self.has_method_ = 0
      self.method_ = ""

  def has_method(self): return self.has_method_

  def request(self): return self.request_

  def set_request(self, x):
    self.has_request_ = 1
    self.request_ = x

  def clear_request(self):
    if self.has_request_:
      self.has_request_ = 0
      self.request_ = ""

  def has_request(self): return self.has_request_

  def MergeFrom(self, x):
    """Copy every field that is set on x into self."""
    assert x is not self
    if (x.has_service_name()): self.set_service_name(x.service_name())
    if (x.has_method()): self.set_method(x.method())
    if (x.has_request()): self.set_request(x.request())

  def Equals(self, x):
    # Field-by-field comparison, including presence flags.
    if x is self: return 1
    if self.has_service_name_ != x.has_service_name_: return 0
    if self.has_service_name_ and self.service_name_ != x.service_name_: return 0
    if self.has_method_ != x.has_method_: return 0
    if self.has_method_ and self.method_ != x.method_: return 0
    if self.has_request_ != x.has_request_: return 0
    if self.has_request_ and self.request_ != x.request_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All three fields are required; collect human-readable reasons if asked.
    initialized = 1
    if (not self.has_service_name_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: service_name not set.')
    if (not self.has_method_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: method not set.')
    if (not self.has_request_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: request not set.')
    return initialized

  def ByteSize(self):
    # Encoded size assuming all required fields are set;
    # + 3 accounts for the three one-byte field tags.
    n = 0
    n += self.lengthString(len(self.service_name_))
    n += self.lengthString(len(self.method_))
    n += self.lengthString(len(self.request_))
    return n + 3

  def ByteSizePartial(self):
    # Like ByteSize, but only counts fields that are actually set.
    n = 0
    if (self.has_service_name_):
      n += 1
      n += self.lengthString(len(self.service_name_))
    if (self.has_method_):
      n += 1
      n += self.lengthString(len(self.method_))
    if (self.has_request_):
      n += 1
      n += self.lengthString(len(self.request_))
    return n

  def Clear(self):
    self.clear_service_name()
    self.clear_method()
    self.clear_request()

  def OutputUnchecked(self, out):
    # Wire tags: 18/26/34 = field numbers 2/3/4 with length-delimited type.
    out.putVarInt32(18)
    out.putPrefixedString(self.service_name_)
    out.putVarInt32(26)
    out.putPrefixedString(self.method_)
    out.putVarInt32(34)
    out.putPrefixedString(self.request_)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but skips unset fields.
    if (self.has_service_name_):
      out.putVarInt32(18)
      out.putPrefixedString(self.service_name_)
    if (self.has_method_):
      out.putVarInt32(26)
      out.putPrefixedString(self.method_)
    if (self.has_request_):
      out.putVarInt32(34)
      out.putPrefixedString(self.request_)

  def TryMerge(self, d):
    # Decode fields from d until the buffer is exhausted, skipping unknowns.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 18:
        self.set_service_name(d.getPrefixedString())
        continue
      if tt == 26:
        self.set_method(d.getPrefixedString())
        continue
      if tt == 34:
        self.set_request(d.getPrefixedString())
        continue
      # Tag 0 is invalid on the wire.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_service_name_: res+=prefix+("service_name: %s\n" % self.DebugFormatString(self.service_name_))
    if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
    if self.has_request_: res+=prefix+("request: %s\n" % self.DebugFormatString(self.request_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} mapping into a tuple indexed by tag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers.
  kservice_name = 2
  kmethod = 3
  krequest = 4

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    2: "service_name",
    3: "method",
    4: "request",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Request'
class ApplicationError(ProtocolBuffer.ProtocolMessage):
  """Generated message class for apphosting.ext.remote_api.ApplicationError.

  Required fields: code (int32, tag 1) and detail (string, tag 2).
  Do not edit by hand; this mirrors the standard ProtocolBuffer-generated
  layout.
  """
  # Presence flags and default values for each field.
  has_code_ = 0
  code_ = 0
  has_detail_ = 0
  detail_ = ""

  def __init__(self, contents=None):
    # Optionally initialize from an already-encoded message.
    if contents is not None: self.MergeFromString(contents)

  def code(self): return self.code_

  def set_code(self, x):
    self.has_code_ = 1
    self.code_ = x

  def clear_code(self):
    # Only reset when set, so the class-level default is left untouched.
    if self.has_code_:
      self.has_code_ = 0
      self.code_ = 0

  def has_code(self): return self.has_code_

  def detail(self): return self.detail_

  def set_detail(self, x):
    self.has_detail_ = 1
    self.detail_ = x

  def clear_detail(self):
    if self.has_detail_:
      self.has_detail_ = 0
      self.detail_ = ""

  def has_detail(self): return self.has_detail_

  def MergeFrom(self, x):
    """Copy every field that is set on x into self."""
    assert x is not self
    if (x.has_code()): self.set_code(x.code())
    if (x.has_detail()): self.set_detail(x.detail())

  def Equals(self, x):
    # Field-by-field comparison, including presence flags.
    if x is self: return 1
    if self.has_code_ != x.has_code_: return 0
    if self.has_code_ and self.code_ != x.code_: return 0
    if self.has_detail_ != x.has_detail_: return 0
    if self.has_detail_ and self.detail_ != x.detail_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Both fields are required; collect human-readable reasons if asked.
    initialized = 1
    if (not self.has_code_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: code not set.')
    if (not self.has_detail_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: detail not set.')
    return initialized

  def ByteSize(self):
    # Encoded size assuming both required fields are set;
    # + 2 accounts for the two one-byte field tags.
    n = 0
    n += self.lengthVarInt64(self.code_)
    n += self.lengthString(len(self.detail_))
    return n + 2

  def ByteSizePartial(self):
    # Like ByteSize, but only counts fields that are actually set.
    n = 0
    if (self.has_code_):
      n += 1
      n += self.lengthVarInt64(self.code_)
    if (self.has_detail_):
      n += 1
      n += self.lengthString(len(self.detail_))
    return n

  def Clear(self):
    self.clear_code()
    self.clear_detail()

  def OutputUnchecked(self, out):
    # Wire tags: 8 = field 1 (varint), 18 = field 2 (length-delimited).
    out.putVarInt32(8)
    out.putVarInt32(self.code_)
    out.putVarInt32(18)
    out.putPrefixedString(self.detail_)

  def OutputPartial(self, out):
    # Like OutputUnchecked, but skips unset fields.
    if (self.has_code_):
      out.putVarInt32(8)
      out.putVarInt32(self.code_)
    if (self.has_detail_):
      out.putVarInt32(18)
      out.putPrefixedString(self.detail_)

  def TryMerge(self, d):
    # Decode fields from d until the buffer is exhausted, skipping unknowns.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_code(d.getVarInt32())
        continue
      if tt == 18:
        self.set_detail(d.getPrefixedString())
        continue
      # Tag 0 is invalid on the wire.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_code_: res+=prefix+("code: %s\n" % self.DebugFormatInt32(self.code_))
    if self.has_detail_: res+=prefix+("detail: %s\n" % self.DebugFormatString(self.detail_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify a sparse {tag: value} mapping into a tuple indexed by tag.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field numbers.
  kcode = 1
  kdetail = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "code",
    2: "detail",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.ApplicationError'
class Response(ProtocolBuffer.ProtocolMessage):
  """Generated message stub for apphosting.ext.remote_api.Response.

  Auto-generated ProtocolBuffer code -- keep the encode/decode logic in
  sync with the .proto definition rather than hand-editing.
  Fields: response (1, bytes), exception (2, bytes),
  application_error (3, message), java_exception (4, bytes).
  """

  # Presence flags and scalar defaults (class-level; shadowed per instance).
  has_response_ = 0
  response_ = ""
  has_exception_ = 0
  exception_ = ""
  has_application_error_ = 0
  application_error_ = None
  has_java_exception_ = 0
  java_exception_ = ""

  def __init__(self, contents=None):
    # Lock guards lazy construction of the application_error_ submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def response(self): return self.response_

  def set_response(self, x):
    self.has_response_ = 1
    self.response_ = x

  def clear_response(self):
    if self.has_response_:
      self.has_response_ = 0
      self.response_ = ""

  def has_response(self): return self.has_response_

  def exception(self): return self.exception_

  def set_exception(self, x):
    self.has_exception_ = 1
    self.exception_ = x

  def clear_exception(self):
    if self.has_exception_:
      self.has_exception_ = 0
      self.exception_ = ""

  def has_exception(self): return self.has_exception_

  def application_error(self):
    # Lazily allocate the submessage (double-checked under the lock).
    if self.application_error_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.application_error_ is None: self.application_error_ = ApplicationError()
      finally:
        self.lazy_init_lock_.release()
    return self.application_error_

  def mutable_application_error(self): self.has_application_error_ = 1; return self.application_error()

  def clear_application_error(self):
    # Clear in place rather than dropping the instance, so outstanding
    # references remain valid.
    if self.has_application_error_:
      self.has_application_error_ = 0;
      if self.application_error_ is not None: self.application_error_.Clear()

  def has_application_error(self): return self.has_application_error_

  def java_exception(self): return self.java_exception_

  def set_java_exception(self, x):
    self.has_java_exception_ = 1
    self.java_exception_ = x

  def clear_java_exception(self):
    if self.has_java_exception_:
      self.has_java_exception_ = 0
      self.java_exception_ = ""

  def has_java_exception(self): return self.has_java_exception_

  def MergeFrom(self, x):
    """Merge every field that is set on x into self."""
    assert x is not self
    if (x.has_response()): self.set_response(x.response())
    if (x.has_exception()): self.set_exception(x.exception())
    if (x.has_application_error()): self.mutable_application_error().MergeFrom(x.application_error())
    if (x.has_java_exception()): self.set_java_exception(x.java_exception())

  def Equals(self, x):
    """Field-by-field equality; presence flags must match as well."""
    if x is self: return 1
    if self.has_response_ != x.has_response_: return 0
    if self.has_response_ and self.response_ != x.response_: return 0
    if self.has_exception_ != x.has_exception_: return 0
    if self.has_exception_ and self.exception_ != x.exception_: return 0
    if self.has_application_error_ != x.has_application_error_: return 0
    if self.has_application_error_ and self.application_error_ != x.application_error_: return 0
    if self.has_java_exception_ != x.has_java_exception_: return 0
    if self.has_java_exception_ and self.java_exception_ != x.java_exception_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All fields are optional; only the submessage can be uninitialized.
    initialized = 1
    if (self.has_application_error_ and not self.application_error_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Size in bytes of the wire encoding (one tag byte per set field)."""
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSize())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def ByteSizePartial(self):
    """Like ByteSize, but tolerates uninitialized submessages."""
    n = 0
    if (self.has_response_): n += 1 + self.lengthString(len(self.response_))
    if (self.has_exception_): n += 1 + self.lengthString(len(self.exception_))
    if (self.has_application_error_): n += 1 + self.lengthString(self.application_error_.ByteSizePartial())
    if (self.has_java_exception_): n += 1 + self.lengthString(len(self.java_exception_))
    return n

  def Clear(self):
    self.clear_response()
    self.clear_exception()
    self.clear_application_error()
    self.clear_java_exception()

  def OutputUnchecked(self, out):
    """Serialize to out; tags 10/18/26/34 = fields 1-4, wire type 2."""
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSize())
      self.application_error_.OutputUnchecked(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def OutputPartial(self, out):
    """Serialize without requiring submessages to be fully initialized."""
    if (self.has_response_):
      out.putVarInt32(10)
      out.putPrefixedString(self.response_)
    if (self.has_exception_):
      out.putVarInt32(18)
      out.putPrefixedString(self.exception_)
    if (self.has_application_error_):
      out.putVarInt32(26)
      out.putVarInt32(self.application_error_.ByteSizePartial())
      self.application_error_.OutputPartial(out)
    if (self.has_java_exception_):
      out.putVarInt32(34)
      out.putPrefixedString(self.java_exception_)

  def TryMerge(self, d):
    """Decode fields from Decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_response(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_exception(d.getPrefixedString())
        continue
      if tt == 26:
        # Length-delimited submessage: decode from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_application_error().TryMerge(tmp)
        continue
      if tt == 34:
        self.set_java_exception(d.getPrefixedString())
        continue

      # Tag 0 indicates corrupt data, not end-of-stream.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Debug text-format rendering."""
    res=""
    if self.has_response_: res+=prefix+("response: %s\n" % self.DebugFormatString(self.response_))
    if self.has_exception_: res+=prefix+("exception: %s\n" % self.DebugFormatString(self.exception_))
    if self.has_application_error_:
      res+=prefix+"application_error <\n"
      res+=self.application_error_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    if self.has_java_exception_: res+=prefix+("java_exception: %s\n" % self.DebugFormatString(self.java_exception_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Plain function at class-creation time; builds dense tag tables.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants and generated tag lookup tables.
  kresponse = 1
  kexception = 2
  kapplication_error = 3
  kjava_exception = 4

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "response",
    2: "exception",
    3: "application_error",
    4: "java_exception",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.Response'
class TransactionRequest_Precondition(ProtocolBuffer.ProtocolMessage):
  """Generated stub for the TransactionRequest.Precondition group.

  Fields: key (2, required message Reference), hash (3, optional bytes).
  Encoded as a protobuf *group*: STARTGROUP tag 11 / ENDGROUP tag 12
  written by the enclosing TransactionRequest.
  """
  has_key_ = 0
  has_hash_ = 0
  hash_ = ""

  def __init__(self, contents=None):
    # key is required, so the submessage is allocated eagerly.
    self.key_ = Reference()
    if contents is not None: self.MergeFromString(contents)

  def key(self): return self.key_

  def mutable_key(self): self.has_key_ = 1; return self.key_

  def clear_key(self):self.has_key_ = 0; self.key_.Clear()

  def has_key(self): return self.has_key_

  def hash(self): return self.hash_

  def set_hash(self, x):
    self.has_hash_ = 1
    self.hash_ = x

  def clear_hash(self):
    if self.has_hash_:
      self.has_hash_ = 0
      self.hash_ = ""

  def has_hash(self): return self.has_hash_

  def MergeFrom(self, x):
    """Merge every field that is set on x into self."""
    assert x is not self
    if (x.has_key()): self.mutable_key().MergeFrom(x.key())
    if (x.has_hash()): self.set_hash(x.hash())

  def Equals(self, x):
    if x is self: return 1
    if self.has_key_ != x.has_key_: return 0
    if self.has_key_ and self.key_ != x.key_: return 0
    if self.has_hash_ != x.has_hash_: return 0
    if self.has_hash_ and self.hash_ != x.hash_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # key is the only required field.
    initialized = 1
    if (not self.has_key_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: key not set.')
    elif not self.key_.IsInitialized(debug_strs): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    n += self.lengthString(self.key_.ByteSize())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    return n + 1

  def ByteSizePartial(self):
    # Unlike ByteSize, key's tag byte is only counted when key is set.
    n = 0
    if (self.has_key_):
      n += 1
      n += self.lengthString(self.key_.ByteSizePartial())
    if (self.has_hash_): n += 1 + self.lengthString(len(self.hash_))
    return n

  def Clear(self):
    self.clear_key()
    self.clear_hash()

  def OutputUnchecked(self, out):
    # Tags 18/26 = fields 2 (key) and 3 (hash), wire type 2.
    out.putVarInt32(18)
    out.putVarInt32(self.key_.ByteSize())
    self.key_.OutputUnchecked(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def OutputPartial(self, out):
    if (self.has_key_):
      out.putVarInt32(18)
      out.putVarInt32(self.key_.ByteSizePartial())
      self.key_.OutputPartial(out)
    if (self.has_hash_):
      out.putVarInt32(26)
      out.putPrefixedString(self.hash_)

  def TryMerge(self, d):
    # Groups are terminated by ENDGROUP tag 12 rather than end-of-buffer.
    while 1:
      tt = d.getVarInt32()
      if tt == 12: break
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_key().TryMerge(tmp)
        continue
      if tt == 26:
        self.set_hash(d.getPrefixedString())
        continue

      # Tag 0 indicates corrupt data, not a terminator.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Debug text-format rendering."""
    res=""
    if self.has_key_:
      res+=prefix+"key <\n"
      res+=self.key_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    if self.has_hash_: res+=prefix+("hash: %s\n" % self.DebugFormatString(self.hash_))
    return res
class TransactionRequest(ProtocolBuffer.ProtocolMessage):
  """Generated stub for apphosting.ext.remote_api.TransactionRequest.

  Fields: repeated group Precondition (1), puts (4, message PutRequest),
  deletes (5, message DeleteRequest).  The two message fields are lazily
  allocated under lazy_init_lock_.
  """
  has_puts_ = 0
  puts_ = None
  has_deletes_ = 0
  deletes_ = None

  def __init__(self, contents=None):
    self.precondition_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  def precondition_size(self): return len(self.precondition_)

  def precondition_list(self): return self.precondition_

  def precondition(self, i):
    return self.precondition_[i]

  def mutable_precondition(self, i):
    return self.precondition_[i]

  def add_precondition(self):
    # Append and return a fresh (mutable) Precondition element.
    x = TransactionRequest_Precondition()
    self.precondition_.append(x)
    return x

  def clear_precondition(self):
    self.precondition_ = []

  def puts(self):
    # Lazily allocate the PutRequest (double-checked under the lock).
    if self.puts_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.puts_ is None: self.puts_ = PutRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.puts_

  def mutable_puts(self): self.has_puts_ = 1; return self.puts()

  def clear_puts(self):
    # Clear in place so outstanding references remain valid.
    if self.has_puts_:
      self.has_puts_ = 0;
      if self.puts_ is not None: self.puts_.Clear()

  def has_puts(self): return self.has_puts_

  def deletes(self):
    # Lazily allocate the DeleteRequest (double-checked under the lock).
    if self.deletes_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.deletes_ is None: self.deletes_ = DeleteRequest()
      finally:
        self.lazy_init_lock_.release()
    return self.deletes_

  def mutable_deletes(self): self.has_deletes_ = 1; return self.deletes()

  def clear_deletes(self):
    if self.has_deletes_:
      self.has_deletes_ = 0;
      if self.deletes_ is not None: self.deletes_.Clear()

  def has_deletes(self): return self.has_deletes_

  def MergeFrom(self, x):
    """Merge every field that is set on x into self."""
    assert x is not self
    for i in xrange(x.precondition_size()): self.add_precondition().CopyFrom(x.precondition(i))
    if (x.has_puts()): self.mutable_puts().MergeFrom(x.puts())
    if (x.has_deletes()): self.mutable_deletes().MergeFrom(x.deletes())

  def Equals(self, x):
    if x is self: return 1
    if len(self.precondition_) != len(x.precondition_): return 0
    for e1, e2 in zip(self.precondition_, x.precondition_):
      if e1 != e2: return 0
    if self.has_puts_ != x.has_puts_: return 0
    if self.has_puts_ and self.puts_ != x.puts_: return 0
    if self.has_deletes_ != x.has_deletes_: return 0
    if self.has_deletes_ and self.deletes_ != x.deletes_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    for p in self.precondition_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_puts_ and not self.puts_.IsInitialized(debug_strs)): initialized = 0
    if (self.has_deletes_ and not self.deletes_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    n = 0
    # Each Precondition group costs 2 tag bytes (STARTGROUP + ENDGROUP).
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSize()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSize())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSize())
    return n

  def ByteSizePartial(self):
    n = 0
    n += 2 * len(self.precondition_)
    for i in xrange(len(self.precondition_)): n += self.precondition_[i].ByteSizePartial()
    if (self.has_puts_): n += 1 + self.lengthString(self.puts_.ByteSizePartial())
    if (self.has_deletes_): n += 1 + self.lengthString(self.deletes_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_precondition()
    self.clear_puts()
    self.clear_deletes()

  def OutputUnchecked(self, out):
    # Tags: 11/12 group delimiters (field 1), 34 = puts (field 4),
    # 42 = deletes (field 5).
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputUnchecked(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSize())
      self.puts_.OutputUnchecked(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSize())
      self.deletes_.OutputUnchecked(out)

  def OutputPartial(self, out):
    for i in xrange(len(self.precondition_)):
      out.putVarInt32(11)
      self.precondition_[i].OutputPartial(out)
      out.putVarInt32(12)
    if (self.has_puts_):
      out.putVarInt32(34)
      out.putVarInt32(self.puts_.ByteSizePartial())
      self.puts_.OutputPartial(out)
    if (self.has_deletes_):
      out.putVarInt32(42)
      out.putVarInt32(self.deletes_.ByteSizePartial())
      self.deletes_.OutputPartial(out)

  def TryMerge(self, d):
    """Decode fields from Decoder d, skipping unknown tags."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 11:
        # Group contents parse from the same decoder until ENDGROUP (12).
        self.add_precondition().TryMerge(d)
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_puts().TryMerge(tmp)
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_deletes().TryMerge(tmp)
        continue

      # Tag 0 indicates corrupt data, not end-of-stream.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    """Debug text-format rendering."""
    res=""
    cnt=0
    for e in self.precondition_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("Precondition%s {\n" % elm)
      res+=e.__str__(prefix + " ", printElemNumber)
      res+=prefix+"}\n"
      cnt+=1
    if self.has_puts_:
      res+=prefix+"puts <\n"
      res+=self.puts_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    if self.has_deletes_:
      res+=prefix+"deletes <\n"
      res+=self.deletes_.__str__(prefix + " ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Plain function at class-creation time; builds dense tag tables.
    return tuple([sparse.get(i, default) for i in xrange(0, 1+maxtag)])

  # Field-number constants and generated tag lookup tables.
  kPreconditionGroup = 1
  kPreconditionkey = 2
  kPreconditionhash = 3
  kputs = 4
  kdeletes = 5

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "Precondition",
    2: "key",
    3: "hash",
    4: "puts",
    5: "deletes",
  }, 5)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STARTGROUP,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
    5: ProtocolBuffer.Encoder.STRING,
  }, 5, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.ext.remote_api.TransactionRequest'
# Generated-module epilogue: hook point for the proto extension runtime
# (no-op here) and the module's public API.
if _extension_runtime:
  pass

__all__ = ['Request','ApplicationError','Response','TransactionRequest','TransactionRequest_Precondition']
| |
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import sys
import os
import inspect
import copydir
import command
from paste.util.template import paste_script_template_renderer
class Template(object):
    """Base class for a Paste Script file-generation template.

    Subclasses must define:
      _template_dir (or override template_dir())
      summary
    """

    # Variables this template uses (mostly for documentation now)
    # a list of instances of var()
    vars = []

    # Eggs that should be added as plugins:
    egg_plugins = []

    # Templates that must be applied first:
    required_templates = []

    # Use Cheetah for substituting templates:
    use_cheetah = False

    # If true, then read all the templates to find the variables:
    read_vars_from_templates = False

    # You can also give this function/method to use something other
    # than Cheetah or string.Template.  The function should be of the
    # signature template_renderer(content, vars, filename=filename).
    # Careful you don't turn this into a method by putting a function
    # here (without staticmethod)!
    template_renderer = None

    def __init__(self, name):
        self.name = name
        self._read_vars = None  # cache populated by read_vars()

    def module_dir(self):
        """Returns the module directory of this template."""
        mod = sys.modules[self.__class__.__module__]
        return os.path.dirname(mod.__file__)

    def template_dir(self):
        """Return the template directory: tuple form as-is, else an
        absolute path relative to this template's module."""
        assert self._template_dir is not None, (
            "Template %r didn't set _template_dir" % self)
        if isinstance( self._template_dir, tuple):
            return self._template_dir
        else:
            return os.path.join(self.module_dir(), self._template_dir)

    def run(self, command, output_dir, vars):
        """Apply the template: pre hook, file writing, post hook."""
        self.pre(command, output_dir, vars)
        self.write_files(command, output_dir, vars)
        self.post(command, output_dir, vars)

    def check_vars(self, vars, cmd):
        """Resolve the variables this template expects.

        Prompts via cmd.challenge for missing values when cmd.interactive;
        otherwise uses defaults, collecting errors for required variables
        that have none.  Mutates *vars* with the resolved values and also
        returns the resolved dict.
        """
        expect_vars = self.read_vars(cmd)
        if not expect_vars:
            # Assume that variables aren't defined
            return vars
        converted_vars = {}
        unused_vars = vars.copy()
        errors = []
        for var in expect_vars:
            if var.name not in unused_vars:
                if cmd.interactive:
                    prompt = 'Enter %s' % var.full_description()
                    response = cmd.challenge(prompt, var.default, var.should_echo)
                    converted_vars[var.name] = response
                elif var.default is command.NoDefault:
                    errors.append('Required variable missing: %s'
                                  % var.full_description())
                else:
                    converted_vars[var.name] = var.default
            else:
                converted_vars[var.name] = unused_vars.pop(var.name)
        if errors:
            raise command.BadCommand(
                'Errors in variables:\n%s' % '\n'.join(errors))
        # Pass through any vars the template did not declare.
        converted_vars.update(unused_vars)
        vars.update(converted_vars)
        return converted_vars

    def read_vars(self, command=None):
        """Return the list of var() objects this template uses (cached).

        When read_vars_from_templates is set (Cheetah only), additionally
        scans the template files for substitution variables.
        """
        if self._read_vars is not None:
            return self._read_vars
        assert (not self.read_vars_from_templates
                or self.use_cheetah), (
            "You can only read variables from templates if using Cheetah")
        if not self.read_vars_from_templates:
            self._read_vars = self.vars
            return self.vars

        vars = self.vars[:]
        var_names = [var.name for var in self.vars]
        read_vars = find_args_in_dir(
            self.template_dir(),
            verbose=command and command.verbose > 1).items()
        read_vars.sort()
        for var_name, var in read_vars:
            if var_name not in var_names:
                vars.append(var)
        self._read_vars = vars
        return vars

    def write_files(self, command, output_dir, vars):
        """Render all template files into output_dir via copydir."""
        template_dir = self.template_dir()
        if not os.path.exists(output_dir):
            print "Creating directory %s" % output_dir
            if not command.simulate:
                # Don't let copydir create this top-level directory,
                # since copydir will svn add it sometimes:
                os.makedirs(output_dir)
        copydir.copy_dir(template_dir, output_dir,
                         vars,
                         verbosity=command.verbose,
                         simulate=command.options.simulate,
                         interactive=command.interactive,
                         overwrite=command.options.overwrite,
                         indent=1,
                         use_cheetah=self.use_cheetah,
                         template_renderer=self.template_renderer)

    def print_vars(self, indent=0):
        # NOTE(review): 'indent' is accepted but never forwarded to
        # var.print_vars -- confirm whether that is intentional.
        vars = self.read_vars()
        var.print_vars(vars)

    def pre(self, command, output_dir, vars):
        """
        Called before template is applied.
        """
        pass

    def post(self, command, output_dir, vars):
        """
        Called after template is applied.
        """
        pass
# Re-exported sentinel meaning "no default value was supplied".
NoDefault = command.NoDefault
class var(object):
    """Declaration of a single template variable (name, description,
    default value and whether prompts should echo the typed value)."""

    def __init__(self, name, description,
                 default='', should_echo=True):
        self.name = name
        self.description = description
        self.default = default
        # should_echo=False hides the typed value when prompting
        # (e.g. passwords).
        self.should_echo = should_echo

    def __repr__(self):
        return '<%s %s default=%r should_echo=%s>' % (
            self.__class__.__name__,
            self.name, self.default, self.should_echo)

    def full_description(self):
        """Name plus description (when present), for prompts/errors."""
        if self.description:
            return '%s (%s)' % (self.name, self.description)
        else:
            return self.name

    def print_vars(cls, vars, indent=0):
        # Pretty-print a list of vars, aligning descriptions by the
        # longest name.
        max_name = max([len(v.name) for v in vars])
        for var in vars:
            if var.description:
                print '%s%s%s %s' % (
                    ' '*indent,
                    var.name,
                    ' '*(max_name-len(var.name)),
                    var.description)
            else:
                print ' %s' % var.name
            if var.default is not command.NoDefault:
                print ' default: %r' % var.default
            if var.should_echo is True:
                print ' should_echo: %s' % var.should_echo
        print

    # Old-style classmethod declaration (pre-decorator syntax).
    print_vars = classmethod(print_vars)
class BasicPackage(Template):
    """Built-in template: a minimal setuptools-enabled package skeleton."""

    _template_dir = 'paster-templates/basic_package'
    summary = "A basic setuptools-enabled package"
    # Variables substituted into the generated setup.py/metadata.
    vars = [
        var('version', 'Version (like 0.1)'),
        var('description', 'One-line description of the package'),
        var('long_description', 'Multi-line description (in reST)'),
        var('keywords', 'Space-separated keywords/tags'),
        var('author', 'Author name'),
        var('author_email', 'Author email'),
        var('url', 'URL of homepage'),
        var('license_name', 'License name'),
        var('zip_safe', 'True/False: if the package can be distributed as a .zip file', default=False),
    ]

    # staticmethod keeps the function from binding as a method (see
    # Template.template_renderer's comment).
    template_renderer = staticmethod(paste_script_template_renderer)
# Local names Cheetah injects into a compiled template's body() signature;
# these are implementation details, not user-facing template variables.
_skip_variables = ['VFN', 'currentTime', 'self', 'VFFSL', 'dummyTrans',
                   'getmtime', 'trans']
def find_args_in_template(template):
    """Inspect a compiled Cheetah template for its substitution variables.

    *template* may be a Cheetah template object or a filename.  Returns a
    list of var() instances, or None when the object has no parseable
    body() method.
    """
    if isinstance(template, (str, unicode)):
        # Treat as filename:
        import Cheetah.Template
        template = Cheetah.Template.Template(file=template)
    if not hasattr(template, 'body'):
        # Don't know...
        return None
    method = template.body
    args, varargs, varkw, defaults = inspect.getargspec(method)
    defaults=list(defaults or [])
    vars = []
    while args:
        # Defaults align with the *tail* of args: once the remaining args
        # and defaults have equal length, every remaining arg has one.
        if len(args) == len(defaults):
            default = defaults.pop(0)
        else:
            default = command.NoDefault
        arg = args.pop(0)
        if arg in _skip_variables:
            continue
        # @@: No way to get description yet
        vars.append(
            var(arg, description=None,
                default=default))
    return vars
def find_args_in_dir(dir, verbose=False):
    """Recursively collect template variables from *_tmpl files under *dir*.

    Returns {var_name: var}.  When the same variable appears in several
    templates, descriptions are merged and mismatching descriptions or
    defaults are reported on stderr.
    """
    all_vars = {}
    for fn in os.listdir(dir):
        # Skip hidden files and VCS bookkeeping directories.
        if fn.startswith('.') or fn == 'CVS' or fn == '_darcs':
            continue
        full = os.path.join(dir, fn)
        if os.path.isdir(full):
            inner_vars = find_args_in_dir(full)
        elif full.endswith('_tmpl'):
            inner_vars = {}
            found = find_args_in_template(full)
            if found is None:
                # Couldn't read variables
                if verbose:
                    print 'Template %s has no parseable variables' % full
                continue
            for var in found:
                inner_vars[var.name] = var
        else:
            # Not a template, don't read it
            continue
        if verbose:
            print 'Found variable(s) %s in Template %s' % (
                ', '.join(inner_vars.keys()), full)
        for var_name, var in inner_vars.items():
            # Easy case:
            if var_name not in all_vars:
                all_vars[var_name] = var
                continue
            # Emit warnings if the variables don't match well:
            cur_var = all_vars[var_name]
            if not cur_var.description:
                cur_var.description = var.description
            elif (cur_var.description and var.description
                  and var.description != cur_var.description):
                print >> sys.stderr, (
                    "Variable descriptions do not match: %s: %s and %s"
                    % (var_name, cur_var.description, var.description))
            if (cur_var.default is not command.NoDefault
                and var.default is not command.NoDefault
                and cur_var.default != var.default):
                print >> sys.stderr, (
                    "Variable defaults do not match: %s: %r and %r"
                    % (var_name, cur_var.default, var.default))
    return all_vars
| |
#!/usr/bin/env python
"""html2text: Turn HTML into equivalent Markdown-structured text."""
__version__ = "2.292"
__author__ = "Aaron Swartz (me@aaronsw.com)"
__copyright__ = "(C) 2004-2008 Aaron Swartz. GNU GPL 3."
__contributors__ = ["Martin 'Joey' Schulze", "Ricardo Reyes"]
# TODO:
# Support decoded entities with unifiable.
# Relative URL resolution
# Python <2.3 compatibility: provide True/False when the builtins lack them.
# (This whole module is Python 2 code; assigning to True/False is a syntax
# error on Python 3.)
if not hasattr(__builtins__, 'True'): True, False = 1, 0

import re, sys, urllib, htmlentitydefs, codecs, StringIO, types
import sgmllib
# Patch sgmllib's charref matcher so hex character references are accepted.
sgmllib.charref = re.compile('&#([xX]?[0-9a-fA-F]+)[^0-9a-fA-F]')

try: from textwrap import wrap
except: pass

# Use Unicode characters instead of their ascii psuedo-replacements
UNICODE_SNOB = 0

# Put the links after each paragraph instead of at the end.
LINKS_EACH_PARAGRAPH = 0

# Wrap long lines at position. 0 for no wrapping. (Requires Python 2.3.)
BODY_WIDTH = 78

# Don't show internal links (href="#local-anchor") -- corresponding link targets
# won't be visible in the plain text file anyway.
SKIP_INTERNAL_LINKS = False
### Entity Nonsense ###
def name2cp(k):
    """Return the Unicode codepoint for HTML entity name *k*.

    Raises KeyError for unknown entity names.
    """
    if k == 'apos': return ord("'")  # XML-only entity, absent from htmlentitydefs
    if hasattr(htmlentitydefs, "name2codepoint"): # requires Python 2.3
        return htmlentitydefs.name2codepoint[k]
    else:
        # Older Pythons only provide latin-1 encoded expansions.
        k = htmlentitydefs.entitydefs[k]
        if k.startswith("&#") and k.endswith(";"): return int(k[2:-1]) # not in latin-1
        return ord(codecs.latin_1_decode(k)[0])
# Named entities that may be "unified" to plain-ascii replacements when
# UNICODE_SNOB is off.
unifiable = {'rsquo':"'", 'lsquo':"'", 'rdquo':'"', 'ldquo':'"',
'copy':'(C)', 'mdash':'--', 'nbsp':' ', 'rarr':'->', 'larr':'<-', 'middot':'*',
'ndash':'-', 'oelig':'oe', 'aelig':'ae',
'agrave':'a', 'aacute':'a', 'acirc':'a', 'atilde':'a', 'auml':'a', 'aring':'a',
'egrave':'e', 'eacute':'e', 'ecirc':'e', 'euml':'e',
'igrave':'i', 'iacute':'i', 'icirc':'i', 'iuml':'i',
'ograve':'o', 'oacute':'o', 'ocirc':'o', 'otilde':'o', 'ouml':'o',
'ugrave':'u', 'uacute':'u', 'ucirc':'u', 'uuml':'u'}

# The same table keyed by codepoint, for numeric character references.
unifiable_n = {}

for k in unifiable.keys():
    unifiable_n[name2cp(k)] = unifiable[k]
def charref(name):
    """Expand a numeric character reference body (decimal or hex) to text."""
    if name[0] in ['x','X']:
        c = int(name[1:], 16)
    else:
        c = int(name)
    if not UNICODE_SNOB and c in unifiable_n.keys():
        return unifiable_n[c]
    else:
        return unichr(c)  # Python 2 builtin
def entityref(c):
    """Expand a named HTML entity reference *c* to text.

    Known "unifiable" entities map to ascii replacements unless
    UNICODE_SNOB is set; other names resolve through name2cp(); unknown
    names are passed through as a literal "&name".
    """
    if not UNICODE_SNOB and c in unifiable.keys():
        return unifiable[c]
    try:
        # Resolve the codepoint once -- the original called name2cp()
        # a second time in the else branch after the lookup succeeded.
        cp = name2cp(c)
    except KeyError:
        return "&" + c
    return unichr(cp)
def replaceEntities(s):
    """re.sub callback: expand one matched entity/character reference."""
    s = s.group(1)
    if s[0] == "#":
        return charref(s[1:])
    else: return entityref(s)
# Matches both &name; and &#nnn; / &#xhh; references.
r_unescape = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")

def unescape(s):
    """Expand all entity and character references in *s*."""
    return r_unescape.sub(replaceEntities, s)
def fixattrs(attrs):
    # Fix bug in sgmllib.py: attribute values arrive with entity
    # references still encoded, so expand them here.
    if not attrs:
        return attrs
    return [(name, unescape(value)) for (name, value) in attrs]
### End Entity Nonsense ###
def onlywhite(line):
    """Return a true value if the line consists only of whitespace characters.

    Truthiness contract matches the original: whitespace-only lines return
    the line itself (truthy when non-empty); anything else returns False.
    """
    for c in line:
        # The original compared characters with 'is not', which only works
        # by accident of CPython small-string interning; test membership
        # instead.  (Its 'return c is ' '' was always False here anyway.)
        if c not in ' \t':
            return False
    return line
def optwrap(text):
    """Wrap all paragraphs in the provided text to BODY_WIDTH columns.

    Paragraphs starting with a space, '-' or '*' (indented or list-like
    lines) are passed through unwrapped; runs of blank lines are collapsed
    to at most one blank line.
    """
    if not BODY_WIDTH:
        return text

    assert wrap  # textwrap.wrap required (Python 2.3+), see module import

    result = ''
    newlines = 0
    for para in text.split("\n"):
        if len(para) > 0:
            # The original compared para[0] with 'is not', which only works
            # by accident of CPython string interning; use membership.
            if para[0] not in ' -*':
                for line in wrap(para, BODY_WIDTH):
                    result += line + "\n"
                result += "\n"
                newlines = 2
            else:
                if not onlywhite(para):
                    result += para + "\n"
                    newlines = 1
        else:
            if newlines < 2:
                result += "\n"
                newlines += 1
    return result
def hn(tag):
    """Return the heading level for an 'h1'..'h9' tag name.

    Yields the integer level for h1-h9, 0 when the second character is
    not a digit, and None (implicitly) for anything else.
    """
    if tag[0] != 'h' or len(tag) != 2:
        return None
    try:
        level = int(tag[1])
    except ValueError:
        return 0
    if 1 <= level <= 9:
        return level
class _html2text(sgmllib.SGMLParser):
    """SGML parser that renders HTML into Markdown-structured text.

    Output is pushed through *out* (a callable taking one string); pass
    out=None to accumulate into self.outtext instead (used by
    html2text_file).  Links are emitted reference-style as numbered
    footnotes collected in self.a.
    """

    def __init__(self, out=sys.stdout.write):
        sgmllib.SGMLParser.__init__(self)

        if out is None: self.out = self.outtextf
        else: self.out = out
        self.outtext = u''
        self.quiet = 0        # >0 while inside head/style/script
        self.p_p = 0          # newlines to emit before the next output
        self.outcount = 0
        self.start = 1
        self.space = 0        # a single pending space
        self.a = []           # pending link footnotes
        self.astack = []      # stack of open <a> tags' attributes
        self.acount = 0
        self.list = []        # stack of open list contexts
        self.blockquote = 0   # blockquote nesting depth
        self.pre = 0
        self.startpre = 0
        self.lastWasNL = 0

    def outtextf(self, s):
        # Accumulating sink, used when constructed with out=None.
        if type(s) is type(''): s = codecs.utf_8_decode(s)[0]
        self.outtext += s

    def close(self):
        """Finish parsing, flush pending output/footnotes, return text."""
        sgmllib.SGMLParser.close(self)

        self.pbr()
        self.o('', 0, 'end')  # force='end' flushes remaining link footnotes

        return self.outtext

    def handle_charref(self, c):
        self.o(charref(c))

    def handle_entityref(self, c):
        self.o(entityref(c))

    def unknown_starttag(self, tag, attrs):
        self.handle_tag(tag, attrs, 1)

    def unknown_endtag(self, tag):
        self.handle_tag(tag, None, 0)

    def previousIndex(self, attrs):
        """ returns the index of certain set of attributes (of a link) in the
            self.a list

            If the set of attributes is not found, returns None
        """
        if not attrs.has_key('href'): return None

        i = -1
        for a in self.a:
            i += 1
            match = 0

            if a.has_key('href') and a['href'] == attrs['href']:
                if a.has_key('title') or attrs.has_key('title'):
                    if (a.has_key('title') and attrs.has_key('title') and
                        a['title'] == attrs['title']):
                        match = True
                else:
                    match = True

            if match: return i

    def handle_tag(self, tag, attrs, start):
        """Translate one start/end tag into Markdown output state changes."""
        attrs = fixattrs(attrs)

        if hn(tag):
            self.p()
            if start: self.o(hn(tag)*"#" + ' ')

        if tag in ['p', 'div']: self.p()

        if tag == "br" and start: self.o(" \n")

        if tag == "hr" and start:
            self.p()
            self.o("* * *")
            self.p()

        if tag in ["head", "style", 'script']:
            # Suppress all output while inside these elements.
            if start: self.quiet += 1
            else: self.quiet -= 1

        if tag in ["body"]:
            self.quiet = 0 # sites like 9rules.com never close <head>

        if tag == "blockquote":
            if start:
                self.p(); self.o('> ', 0, 1); self.start = 1
                self.blockquote += 1
            else:
                self.blockquote -= 1
                self.p()

        if tag in ['em', 'i', 'u']: self.o("_")
        if tag in ['strong', 'b']: self.o("**")
        if tag == "code" and not self.pre: self.o('`') #TODO: `` `this` ``

        if tag == "a":
            if start:
                attrsD = {}
                for (x, y) in attrs: attrsD[x] = y
                attrs = attrsD
                if attrs.has_key('href') and not (SKIP_INTERNAL_LINKS and attrs['href'].startswith('#')):
                    self.astack.append(attrs)
                    self.o("[")
                else:
                    # Push None so the close tag emits nothing.
                    self.astack.append(None)
            else:
                if self.astack:
                    a = self.astack.pop()
                    if a:
                        # Reuse the footnote number for a previously-seen
                        # identical link, otherwise allocate a new one.
                        i = self.previousIndex(a)
                        if i is not None:
                            a = self.a[i]
                        else:
                            self.acount += 1
                            a['count'] = self.acount
                            a['outcount'] = self.outcount
                            self.a.append(a)
                        self.o("][" + `a['count']` + "]")

        if tag == "img" and start:
            attrsD = {}
            for (x, y) in attrs: attrsD[x] = y
            attrs = attrsD
            if attrs.has_key('src'):
                attrs['href'] = attrs['src']
                alt = attrs.get('alt', '')
                i = self.previousIndex(attrs)
                if i is not None:
                    attrs = self.a[i]
                else:
                    self.acount += 1
                    attrs['count'] = self.acount
                    attrs['outcount'] = self.outcount
                    self.a.append(attrs)
                self.o("![")
                self.o(alt)
                self.o("]["+`attrs['count']`+"]")

        if tag == 'dl' and start: self.p()
        if tag == 'dt' and not start: self.pbr()
        if tag == 'dd' and start: self.o(' ')
        if tag == 'dd' and not start: self.pbr()

        if tag in ["ol", "ul"]:
            if start:
                self.list.append({'name':tag, 'num':0})
            else:
                if self.list: self.list.pop()
            self.p()

        if tag == 'li':
            if start:
                self.pbr()
                if self.list: li = self.list[-1]
                else: li = {'name':'ul', 'num':0}
                self.o(" "*len(self.list)) #TODO: line up <ol><li>s > 9 correctly.
                if li['name'] == "ul": self.o("* ")
                elif li['name'] == "ol":
                    li['num'] += 1
                    self.o(`li['num']`+". ")
                self.start = 1
            else:
                self.pbr()

        if tag in ["table", "tr"] and start: self.p()
        if tag == 'td': self.pbr()

        if tag == "pre":
            if start:
                self.startpre = 1
                self.pre = 1
            else:
                self.pre = 0
            self.p()

    def pbr(self):
        # Queue a single line break; a queued paragraph break (2) wins.
        if self.p_p == 0: self.p_p = 1

    def p(self): self.p_p = 2

    def o(self, data, puredata=0, force=0):
        """Emit *data*, first applying queued breaks, pending spaces,
        blockquote prefixes and (at paragraph/document end) link footnotes.
        """
        if not self.quiet:
            if puredata and not self.pre:
                # Collapse runs of whitespace in flowed text.
                data = re.sub('\s+', ' ', data)
                if data and data[0] == ' ':
                    self.space = 1
                    data = data[1:]
            if not data and not force: return

            if self.startpre:
                #self.out(" :") #TODO: not output when already one there
                self.startpre = 0

            # Prefix for the current blockquote nesting level.
            bq = (">" * self.blockquote)
            if not (force and data and data[0] == ">") and self.blockquote: bq += " "

            if self.pre:
                bq += " "
                data = data.replace("\n", "\n"+bq)

            if self.start:
                # Suppress leading breaks/spaces at the very start of output.
                self.space = 0
                self.p_p = 0
                self.start = 0

            if force == 'end':
                # It's the end.
                self.p_p = 0
                self.out("\n")
                self.space = 0

            if self.p_p:
                self.out(('\n'+bq)*self.p_p)
                self.space = 0

            if self.space:
                if not self.lastWasNL: self.out(' ')
                self.space = 0

            # Flush link footnotes at paragraph breaks (if configured) or
            # at the end of the document.
            if self.a and ((self.p_p == 2 and LINKS_EACH_PARAGRAPH) or force == "end"):
                if force == "end": self.out("\n")

                newa = []
                for link in self.a:
                    if self.outcount > link['outcount']:
                        self.out(" ["+`link['count']`+"]: " + link['href']) #TODO: base href
                        if link.has_key('title'): self.out(" ("+link['title']+")")
                        self.out("\n")
                    else:
                        # Link text not yet emitted; keep for later.
                        newa.append(link)

                if self.a != newa: self.out("\n") # Don't need an extra line when nothing was done.

                self.a = newa

            self.p_p = 0
            self.out(data)
            self.lastWasNL = data and data[-1] == '\n'
            self.outcount += 1

    def handle_data(self, data):
        self.o(data, 1)

    def unknown_decl(self, data): pass
def wrapwrite(text): sys.stdout.write(text.encode('utf8'))  # default sink: utf-8 to stdout
def html2text_file(html, out=wrapwrite):
    """Convert *html* to Markdown text, streaming output through *out*.

    With the default sink the text goes to stdout; the accumulated text
    is also returned (empty unless out is None).
    """
    parser = _html2text(out)
    parser.feed(html)
    parser.feed("")
    return parser.close()
def html2text(html):
    """Convert *html* to Markdown text, wrapped to BODY_WIDTH columns."""
    text = html2text_file(html, None)
    return optwrap(text)
if __name__ == "__main__":
    # CLI: html2text.py [url-or-file]; reads stdin when no argument given.
    if sys.argv[1:]:
        arg = sys.argv[1]
        if arg.startswith('http://'):
            j = urllib.urlopen(arg)
            try:
                # Borrow feedparser's charset sniffing when available.
                from feedparser import _getCharacterEncoding as enc
            except ImportError:
                enc = lambda x, y: ('utf-8', 1)
            text = j.read()
            encoding = enc(j.headers, text)[0]
            # Servers often mislabel utf-8 as us-ascii.
            if encoding == 'us-ascii': encoding = 'utf-8'
            data = text.decode(encoding)
        else:
            data = open(arg, 'r').read()
    else:
        data = sys.stdin.read()
    wrapwrite(html2text(data))
| |
"""
Start playing wtih some of the parts of
https://github.com/hughperkins/neonCl-underconstruction/blob/master/winograd_kernels_cl.py
"""
from __future__ import print_function, division
import time
import string
import random
import jinja2
import argparse
import numpy as np
import pyopencl as cl
import subprocess
import os
from os.path import join
from gpuexperiments.callkernel import call_cl_kernel
#import gpuexperiments.cpu_check
from gpuexperiments.timecheck import inittime, timecheck
import lib_clgpuexp
from lib_clgpuexp import clearComputeCache, getPtx, timeKernel3d, buildKernel, initClGpu
from lib_clgpuexp import dumpSass
parser = argparse.ArgumentParser()
# 'type=bool' is a classic argparse pitfall: bool("False") == True, so any
# value passed on the command line -- including '--printptx False' --
# enabled the option.  Parse the string explicitly instead, keeping the
# existing '--printptx True/False' CLI shape and the False default.
parser.add_argument('--printptx', type=lambda s: s.lower() in ('1', 'true', 'yes'),
                    default=False)
args = parser.parse_args()
initClGpu()

times = []

# Query device geometry.  The *_NV attributes are NVIDIA-specific, so this
# script only runs on NVIDIA OpenCL devices.
compute_units = lib_clgpuexp.device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
maxShared = lib_clgpuexp.device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024  # KiB
compute_capability = (
    lib_clgpuexp.device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
    lib_clgpuexp.device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
)
deviceName = lib_clgpuexp.device.get_info(cl.device_info.NAME)
# e.g. 'GeForce GTX 950M' -> '950m' (used for output file naming)
deviceSimpleName = deviceName.replace('GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
print('deviceName', deviceName, 'compute capability', compute_capability)
print('compute units', compute_units, 'max shared memory', maxShared)
shared_memory_per_sm = None
# data comes from http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
# Only Maxwell (sm_50 / sm_52) is tabulated; any other architecture fails fast.
if compute_capability[0] == 5:
    if compute_capability[1] == 0:
        shared_memory_per_sm = 65536
    elif compute_capability[1] == 2:
        shared_memory_per_sm = 98304
    else:
        raise Exception('compute capability %s not recognized' % compute_capability)
else:
    raise Exception('compute capability %s not recognized' % compute_capability)
assert shared_memory_per_sm is not None
# OpenCL kernel source (jinja2-templated via {{kernelname}}).  One 32x32
# work-group per tile index 'b'; each work-item writes zero-initialized
# sums into M with an [n][k][tile][6x6] layout computed from explicit
# strides.  NOTE(review): 'data' is accepted but never read -- presumably
# this variant only measures the store pattern; confirm.
code_template = r"""
kernel void {{kernelname}}(
global float *data, global float * M,
int tiles, int GN, int GK
) {
int tid = get_local_id(0); // ci % 32
int tid1 = get_local_id(1); // n % 32
int linearid = (tid1 << 5) + tid;
int b = get_group_id(0);
int tiles266 = tiles * tiles * 6 * 6;
int b36 = 36 * b;
for(int gn = 0; gn < GN; gn++) {
int gn32 = gn << 5;
int gn32offset = (gn << 5) * GK * 32 * tiles266;
for(int gk = 0; gk < GK; gk++) {
int gk32 = gk << 5;
int offset = gn32offset +
(gk32) * tiles266 +
b36
;
int n_stride = GK * 32 * tiles266;
int co_stride = tiles266;
offset += tid1 * n_stride + tid * co_stride;
float sum0 = 0.0f;
float sum1 = 0.0f;
float sum2 = 0.0f;
float sum3 = 0.0f;
for(int xinu = 0; xinu < 36; xinu+=4) {
M[offset + xinu] = sum0;
M[offset + xinu + 1] = sum1;
M[offset + xinu + 2] = sum2;
M[offset + xinu + 3] = sum3;
}
}
}
}
"""
code_template2 = r"""
kernel void {{kernelname}}(
global float *data, global float * M,
int tiles, int GN, int GK
) {
int tid = get_local_id(0); // ci % 32
int tid1 = get_local_id(1); // n % 32
int b = get_group_id(0);
//int tiles266 = tiles * tiles * 6 * 6;
//int b36 = 36 * b;
//int block_offset = (get_group_id(0) * 36) << 10;
int tilessq = tiles * tiles;
for(int gn = 0; gn < GN; gn++) {
for(int gk = 0; gk < GK; gk++) {
int offset = ((((gn * GK + gk) * tilessq + b) * 36) << 10)
+ (tid << 5) + tid1;
float sum0 = 0.0f;
float sum1 = 0.0f;
float sum2 = 0.0f;
float sum3 = 0.0f;
for(int xinush10 = 0; xinush10 < (36 << 10); xinush10+=(4<<10)) {
M[offset + xinush10] = sum0;
M[offset + xinush10 + (1<<10)] = sum1;
M[offset + xinush10 + (2<<10)] = sum2;
M[offset + xinush10 + (3<<10)] = sum3;
}
}
}
}
"""
# Problem-size constants for the benchmark.
blocksize = 32   # work-group is blocksize x blocksize threads
K = 32
GK = 1           # number of 32-wide output-channel groups passed to the kernel
N = 32
batchsize = 32
GN = 1           # number of 32-wide batch groups passed to the kernel
H = 56
W = 56
tiles = H // 4   # presumably 4x4-output Winograd tiles (hence the 36 = 6*6 factors) -- TODO confirm
S = 32

# Each experiment names a kernel template and its launch block.
# NOTE(review): 'template1' is listed twice -- presumably the first run doubles
# as a warm-up so only the second measurement is representative; confirm
# before deduplicating.
experiments = [
    {'name': 'template1', 'code': code_template, 'block': (blocksize, blocksize, 1), 'outs': 1},
    {'name': 'template1', 'code': code_template, 'block': (blocksize, blocksize, 1), 'outs': 1},
    {'name': 'template2', 'code': code_template2, 'block': (blocksize, blocksize, 1), 'outs': 1}
]
# Run every experiment, collect timings, then write a TSV summary.
times = []
full_occupancy_bsm = 32  # this should probably not be hard coded...

if args.printptx:
    clearComputeCache()

for experiment in experiments:
    name = experiment['name'].format(batchsize=batchsize)
    template = jinja2.Template(experiment['code'], undefined=jinja2.StrictUndefined)
    source = template.render(kernelname=name, BLOCK_SIZE=blocksize, **experiment)
    kernel = buildKernel(name, source)
    print('tiles', tiles)
    print('tiles * tiles', tiles * tiles)
    grid = (tiles * tiles, 1, 1)
    block = experiment['block']

    # Warm-up launches; results are deliberately discarded.
    for _ in range(2):
        timeKernel3d(name, kernel, grid=grid, block=block, add_args=[
            tiles, GN, GK
        ])

    # Timed runs, averaged over `its` iterations.
    its = 1
    t_sum = 0
    for _ in range(its):
        t_sum += timeKernel3d(name, kernel, grid=grid, block=block, add_args=[
            tiles, GN, GK
        ])
    t = t_sum / its

    # GiB written: each of grid*block threads stores GN*GK*36 floats (4 bytes).
    gib = grid[0] * grid[1] * block[0] * block[1] * GN * GK * 36 * 4 / 1024 / 1024 / 1024
    bw_gib = gib / (t / 1000)  # t is presumably in milliseconds -- TODO confirm timeKernel3d units
    times.append({'name': name, 'time': t, 'bw gib': bw_gib})

# FIX: use a context manager so the TSV file is closed even if a write fails.
with open('/tmp/winograd_calcm_output_%s.tsv' % deviceSimpleName, 'w') as f:
    print('')
    line = 'name\ttime\tbw gib'
    print(line)
    f.write(line + '\n')
    for timeinfo in times:
        line = '%s\t%.1f\t%.1f' % (timeinfo['name'], timeinfo['time'], timeinfo['bw gib'])
        print(line)
        f.write(line + '\n')
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Testing topi conv2d operator for VTA"""
import json
import os
import numpy as np
from collections import namedtuple
import tvm
from tvm import autotvm
from tvm.contrib import util
from tvm.contrib.pickle_memoize import memoize
import topi
import topi.testing
import vta
from vta import program_fpga, reconfig_runtime
import vta.testing
from vta.testing import simulator
# Schema describing one conv2d layer: batch, spatial size, channel counts,
# kernel size, padding, and stride.
Workload = namedtuple("Conv2DWorkload",
                      ['batch', 'height', 'width', 'in_filter', 'out_filter',
                       'hkernel', 'wkernel', 'hpad', 'wpad', 'hstride', 'wstride'])

# Get batch info from env
env = vta.get_env()

# ResNet18 workloads
resnet_wkls = [
    # Workloads of resnet18 on imagenet
    # C1 is disabled -- presumably because its 3 input channels do not fill
    # env.BLOCK_IN for packing; confirm before re-enabling.
    # ('resnet-18.C1', Workload(env.BATCH, 224, 224, 3, 64, 7, 7, 3, 3, 2, 2)),
    ('resnet-18.C2', Workload(env.BATCH, 56, 56, 64, 64, 3, 3, 1, 1, 1, 1)),
    ('resnet-18.C3', Workload(env.BATCH, 56, 56, 64, 128, 3, 3, 1, 1, 2, 2)),
    ('resnet-18.C4', Workload(env.BATCH, 56, 56, 64, 128, 1, 1, 0, 0, 2, 2)),
    ('resnet-18.C5', Workload(env.BATCH, 28, 28, 128, 128, 3, 3, 1, 1, 1, 1)),
    ('resnet-18.C6', Workload(env.BATCH, 28, 28, 128, 256, 3, 3, 1, 1, 2, 2)),
    ('resnet-18.C7', Workload(env.BATCH, 28, 28, 128, 256, 1, 1, 0, 0, 2, 2)),
    ('resnet-18.C8', Workload(env.BATCH, 14, 14, 256, 256, 3, 3, 1, 1, 1, 1)),
    ('resnet-18.C9', Workload(env.BATCH, 14, 14, 256, 512, 3, 3, 1, 1, 2, 2)),
    ('resnet-18.C10', Workload(env.BATCH, 14, 14, 256, 512, 1, 1, 0, 0, 2, 2)),
    ('resnet-18.C11', Workload(env.BATCH, 7, 7, 512, 512, 3, 3, 1, 1, 1, 1)),
]
# FIXME: we need a custom clip operator to circumvent a pattern detection limitation
@tvm.tag_scope(tag=topi.tag.ELEMWISE)
def my_clip(x, a_min, a_max):
    """Unlike topi's current clip, put min and max into two stages."""
    lower_bound = tvm.const(a_min, x.dtype)
    upper_bound = tvm.const(a_max, x.dtype)
    # Stage one caps from above, stage two caps from below.
    capped = tvm.compute(x.shape, lambda *i: tvm.min(x(*i), upper_bound), name="clipA")
    clipped = tvm.compute(capped.shape, lambda *i: tvm.max(capped(*i), lower_bound), name="clipB")
    return clipped
def run_conv2d(env, remote, wl, target,
               check_correctness=True, print_ir=False,
               samples=4):
    """Build, upload, time and (optionally) verify one conv2d workload.

    Args:
        env: the VTA environment (vta.get_env()).
        remote: RPC session used to upload and run the compiled module.
        wl: a Conv2DWorkload namedtuple describing the layer.
        target: tvm target; "arm_cpu" runs plain NCHW, "vta" runs packed layout.
        check_correctness: compare against a numpy reference when True.
        print_ir: dump the lowered schedule for debugging.
        samples: number of timed runs for the time evaluator.

    Returns:
        (correct, cost, stats) -- correctness flag, time-evaluator result, and
        simulator stats (empty dict outside sim/tsim targets).
    """
    # Workload assertions
    assert wl.hpad == wl.wpad
    # Perform packing only if we are targeting the accelerator
    # NOTE(review): if target is neither "arm_cpu" nor "vta", data_pack/layout
    # are never bound and the code below raises NameError -- confirm callers
    # only ever pass those two targets.
    if "arm_cpu" in target.keys:
        data_pack = False
        layout = "NCHW"
    elif "vta" in target.keys:
        data_pack = True
        layout = "NCHW%dn%dc" % (env.BATCH, env.BLOCK_IN)
    # Derive shapes depending upon packing
    a_shape = (wl.batch, wl.in_filter, wl.height, wl.width)
    w_shape = (wl.out_filter, wl.in_filter, wl.hkernel, wl.wkernel)
    b_shape = (wl.batch, wl.out_filter, 1, 1)
    if data_pack:
        data_shape = (wl.batch//env.BATCH, wl.in_filter//env.BLOCK_IN,
                      wl.height, wl.width, env.BATCH, env.BLOCK_IN)
        kernel_shape = (wl.out_filter//env.BLOCK_OUT, wl.in_filter//env.BLOCK_IN,
                        wl.hkernel, wl.wkernel, env.BLOCK_OUT, env.BLOCK_IN)
        bias_shape = (wl.batch//env.BATCH, wl.out_filter//env.BLOCK_OUT,
                      1, 1, env.BATCH, env.BLOCK_OUT)
    else:
        data_shape = a_shape
        kernel_shape = w_shape
        bias_shape = b_shape
    data = tvm.placeholder(data_shape, name="data", dtype=env.inp_dtype)
    kernel = tvm.placeholder(kernel_shape, name="kernel", dtype=env.wgt_dtype)
    bias = tvm.placeholder(bias_shape, name="bias", dtype=env.acc_dtype)
    # Define base computation schedule
    with target:
        res = topi.nn.conv2d(
            data, kernel, (wl.hstride, wl.wstride), (wl.hpad, wl.wpad), (1, 1),
            layout, env.acc_dtype)
        # Fixed-point requantization: shift down by 8, add the bias, clip to
        # the positive signed OUT_WIDTH-bit range, then narrow the dtype.
        res = topi.right_shift(res, 8)
        res = topi.add(res, bias)
        # (1 << env.OUT_WIDTH - 1) - 1 parses as (1 << (OUT_WIDTH - 1)) - 1,
        # i.e. the maximum positive signed OUT_WIDTH-bit value.
        res = my_clip(res, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res = topi.cast(res, env.out_dtype)
        # Derive base schedule
        s = topi.generic.schedule_conv2d_nchw([res])
        if print_ir:
            print(vta.lower(s, [data, kernel, bias, res], simple_mode=True))
    # Derive number of ops
    fout_height = (wl.height + 2 * wl.hpad - wl.hkernel) // wl.hstride + 1
    fout_width = (wl.width + 2 * wl.wpad - wl.wkernel) // wl.wstride + 1
    num_ops = 2 * wl.batch * fout_height * fout_width * wl.hkernel * wl.wkernel * wl.out_filter * wl.in_filter

    # @memoize("vta.tests.test_benchmark_topi.conv2d.verify_nhwc")
    def get_ref_data():
        # derive min max for act, wgt, and bias types (max non inclusive)
        a_min, a_max = 0 - (1 << (env.INP_WIDTH - 1)), (1 << (env.INP_WIDTH - 1))
        w_min, w_max = 0 - (1 << (env.WGT_WIDTH - 1)), (1 << (env.WGT_WIDTH - 1))
        # NOTE(review): `0 - 1 << k` parses as (0 - 1) << k == -(1 << k), so
        # b_min is the intended negative bound despite the missing parentheses.
        b_min, b_max = 0 - 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2), 1 << (env.INP_WIDTH + env.WGT_WIDTH - 2)
        a_np = np.random.randint(a_min, a_max, size=a_shape).astype(data.dtype)
        w_np = np.random.randint(w_min, w_max, size=w_shape).astype(kernel.dtype)
        b_np = np.random.randint(b_min, b_max, size=b_shape).astype(env.acc_dtype)
        r_np = topi.testing.conv2d_nchw_python(
            a_np.astype(env.acc_dtype), w_np.astype(env.acc_dtype), (wl.hstride, wl.wstride), wl.hpad).astype(env.acc_dtype)
        return a_np, w_np, b_np, r_np

    # Data in original format
    data_np, kernel_np, bias_np, res_ref = get_ref_data()
    if data_pack:
        # Reshape/transpose into VTA's blocked layouts to match the shapes
        # declared above.
        data_np = data_np.reshape(
            wl.batch//env.BATCH, env.BATCH,
            wl.in_filter//env.BLOCK_IN, env.BLOCK_IN,
            wl.height, wl.width).transpose((0, 2, 4, 5, 1, 3))
        kernel_np = kernel_np.reshape(
            wl.out_filter//env.BLOCK_OUT, env.BLOCK_OUT,
            wl.in_filter//env.BLOCK_IN, env.BLOCK_IN,
            wl.hkernel, wl.wkernel).transpose((0, 2, 4, 5, 1, 3))
        bias_np = bias_np.reshape(
            wl.batch//env.BATCH, wl.out_filter//env.BLOCK_OUT,
            1, 1, env.BATCH, env.BLOCK_OUT)
    # Build
    if "vta" in target.keys:
        mod = vta.build(s, [data, kernel, bias, res],
                        target=target,
                        target_host=env.target_host,
                        name="conv2d")
    else:
        mod = tvm.build(s, [data, kernel, bias, res],
                        target=target,
                        target_host=env.target_host,
                        name="conv2d")
    temp = util.tempdir()
    mod.save(temp.relpath("conv2d.o"))
    remote.upload(temp.relpath("conv2d.o"))
    f = remote.load_module("conv2d.o")
    ctx = remote.context(str(target))
    res_np = np.zeros(topi.util.get_const_tuple(res.shape)).astype(res.dtype)
    data_arr = tvm.nd.array(data_np, ctx)
    kernel_arr = tvm.nd.array(kernel_np, ctx)
    bias_arr = tvm.nd.array(bias_np, ctx)
    res_arr = tvm.nd.array(res_np, ctx)
    time_f = f.time_evaluator("conv2d", ctx, number=samples)
    # In vta sim mode, collect simulator runtime statistics
    stats = {}
    cost = None
    if env.TARGET in ["sim", "tsim"]:
        # Check if we're in local RPC mode (allows us to rebuild the
        # runtime on the fly when varying the VTA designs)
        local_rpc = int(os.environ.get("VTA_LOCAL_SIM_RPC", "0"))
        if local_rpc:
            if env.TARGET == "sim":
                remote.get_function("vta.simulator.profiler_clear")()
            else:
                remote.get_function("vta.tsim.profiler_clear")()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            if env.TARGET == "sim":
                stats = json.loads(remote.get_function("vta.simulator.profiler_status")())
            else:
                stats = json.loads(remote.get_function("vta.tsim.profiler_status")())
        else:
            simulator.clear_stats()
            cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
            stats = simulator.stats()
    else:
        cost = time_f(data_arr, kernel_arr, bias_arr, res_arr)
    # Check correctness
    correct = False
    if check_correctness:
        res_orig = res_arr.asnumpy()
        if data_pack:
            # Undo the blocked layout so the device result is comparable to
            # the NCHW reference.
            res_orig = res_orig.transpose(
                (0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, fout_height, fout_width)
            bias_np = bias_np.transpose(
                (0, 4, 1, 5, 2, 3)).reshape(wl.batch, wl.out_filter, 1, 1)
        # NOTE(review): the device path shifts by the constant 8 (right_shift
        # above) while the reference shifts by env.WGT_WIDTH; these agree only
        # when WGT_WIDTH == 8 -- confirm for other configurations.
        res_ref = res_ref >> env.WGT_WIDTH
        res_ref += bias_np
        res_ref = np.clip(res_ref, 0, (1 << env.OUT_WIDTH - 1) - 1)
        res_ref = res_ref.astype(env.out_dtype)
        correct = np.allclose(res_orig, res_ref)
    gops = (num_ops / cost.mean) / float(10 ** 9)
    status = "PASSED" if correct else "FAILED"
    if "arm_cpu" in target.keys:
        device = "CPU"
    elif "vta" in target.keys:
        device = "VTA"
    print("%s CONV2D TEST %s: Time cost = %g sec/op, %g GOPS" % (device, status, cost.mean, gops))
    return correct, cost, stats
def test_conv2d(device="vta"):
    """Drive run_conv2d over every ResNet-18 workload on the chosen device."""

    def _run(env, remote):
        if device == "vta":
            target = env.target
            if env.TARGET not in ["sim", "tsim"]:
                # Real hardware: program the bitstream and reset the runtime
                # over RPC before running anything.
                assert tvm.module.enabled("rpc")
                program_fpga(remote, bitstream=None)
                reconfig_runtime(remote)
        elif device == "arm_cpu":
            target = env.target_vta_cpu
        # load pre-tuned schedule parameters
        with autotvm.tophub.context(target):
            for _, workload in resnet_wkls:
                print(workload)
                run_conv2d(env, remote, workload, target)

    vta.testing.run(_run)


if __name__ == "__main__":
    test_conv2d(device="arm_cpu")
    test_conv2d(device="vta")
| |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import os
import zipfile
from abc import abstractmethod
from contextlib import contextmanager
from pex.common import atomic_directory, is_python_script, open_zip, safe_copy, safe_mkdir
from pex.enum import Enum
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING
from pex.variables import unzip_dir
if TYPE_CHECKING:
from typing import Iterator, Optional
BOOTSTRAP_DIR = ".bootstrap"
DEPS_DIR = ".deps"
PEX_INFO_PATH = "PEX-INFO"
class Layout(Enum["Layout.Value"]):
    """The on-disk layouts a PEX can take."""

    class Value(Enum.Value):
        pass

    # A single zip archive (the classic executable zipapp).
    ZIPAPP = Value("zipapp")
    # A directory whose bootstrap and dists are each stored as individual zips.
    PACKED = Value("packed")
    # A fully extracted directory tree; needs no installation step.
    LOOSE = Value("loose")
class _Layout(object):
    """Knows how to pull the pieces of a PEX out of a particular on-disk form.

    Concrete subclasses extract the bootstrap code, each distribution, the
    user code, the PEX-INFO metadata and the ``__main__`` module.
    """

    def __init__(self, path):
        # type: (str) -> None
        self._path = os.path.normpath(path)

    @property
    def path(self):
        # type: () -> str
        """The normalized path of the PEX being laid out."""
        return self._path

    def bootstrap_strip_prefix(self):
        # type: () -> Optional[str]
        """An archive prefix to strip when caching the bootstrap, if any."""
        return None

    def dist_strip_prefix(self, dist_name):
        # type: (str) -> Optional[str]
        """An archive prefix to strip when caching the named dist, if any."""
        return None

    @abstractmethod
    def extract_bootstrap(self, dest_dir):
        # type: (str) -> None
        raise NotImplementedError()

    @abstractmethod
    def extract_dist(
        self,
        dest_dir,  # type: str
        dist_relpath,  # type: str
    ):
        # type: (...) -> None
        raise NotImplementedError()

    @abstractmethod
    def extract_code(self, dest_dir):
        # type: (str) -> None
        raise NotImplementedError()

    @abstractmethod
    def extract_pex_info(self, dest_dir):
        # type: (str) -> None
        raise NotImplementedError()

    @abstractmethod
    def extract_main(self, dest_dir):
        # type: (str) -> None
        raise NotImplementedError()
def _install(
    layout,  # type: _Layout
    pex_root,  # type: str
    pex_hash,  # type: str
):
    # type: (...) -> str
    """Install the PEX described by `layout` as a loose PEX under `pex_root`.

    The bootstrap, each distribution and the user code are extracted once into
    shared caches and then symlinked into the per-PEX install directory, so
    repeated installs and distinct PEXes can share them.  Returns the install
    directory.  Safe against races: every directory is populated through
    `atomic_directory`.
    """
    with TRACER.timed("Laying out {}".format(layout)):
        pex = layout.path
        install_to = unzip_dir(pex_root=pex_root, pex_hash=pex_hash)
        with atomic_directory(install_to, exclusive=True) as chroot:
            # Another process may have completed the install already.
            if not chroot.is_finalized():
                with TRACER.timed("Installing {} to {}".format(pex, install_to)):
                    # Imported lazily -- presumably to keep module import light;
                    # confirm before hoisting to the top of the file.
                    from pex.pex_info import PexInfo

                    pex_info = PexInfo.from_pex(pex)
                    pex_info.update(PexInfo.from_env())
                    bootstrap_cache = pex_info.bootstrap_cache
                    if bootstrap_cache is None:
                        raise AssertionError(
                            "Expected bootstrap_cache to be populated for {}.".format(layout)
                        )
                    code_hash = pex_info.code_hash
                    if code_hash is None:
                        raise AssertionError(
                            "Expected code_hash to be populated for {}.".format(layout)
                        )
                    # Populate the shared bootstrap cache, then link it in.
                    with atomic_directory(
                        bootstrap_cache, source=layout.bootstrap_strip_prefix(), exclusive=True
                    ) as bootstrap_zip_chroot:
                        if not bootstrap_zip_chroot.is_finalized():
                            layout.extract_bootstrap(bootstrap_zip_chroot.work_dir)
                    os.symlink(
                        os.path.join(os.path.relpath(bootstrap_cache, install_to)),
                        os.path.join(chroot.work_dir, BOOTSTRAP_DIR),
                    )
                    # Populate the shared per-dist caches, then link each dist
                    # into this PEX's .deps/ via a relative symlink.
                    for location, sha in pex_info.distributions.items():
                        spread_dest = os.path.join(pex_info.install_cache, sha, location)
                        dist_relpath = os.path.join(DEPS_DIR, location)
                        with atomic_directory(
                            spread_dest,
                            source=layout.dist_strip_prefix(location),
                            exclusive=True,
                        ) as spread_chroot:
                            if not spread_chroot.is_finalized():
                                layout.extract_dist(spread_chroot.work_dir, dist_relpath)
                        symlink_dest = os.path.join(chroot.work_dir, dist_relpath)
                        safe_mkdir(os.path.dirname(symlink_dest))
                        os.symlink(
                            os.path.relpath(
                                spread_dest,
                                os.path.join(install_to, os.path.dirname(dist_relpath)),
                            ),
                            symlink_dest,
                        )
                    # Populate the shared user-code cache and link each of its
                    # top-level entries into the install directory.
                    code_dest = os.path.join(pex_info.zip_unsafe_cache, code_hash)
                    with atomic_directory(code_dest, exclusive=True) as code_chroot:
                        if not code_chroot.is_finalized():
                            layout.extract_code(code_chroot.work_dir)
                    for path in os.listdir(code_dest):
                        os.symlink(
                            os.path.join(os.path.relpath(code_dest, install_to), path),
                            os.path.join(chroot.work_dir, path),
                        )
                    # PEX-INFO and __main__.py are tiny; copy them directly.
                    layout.extract_pex_info(chroot.work_dir)
                    layout.extract_main(chroot.work_dir)
    return install_to
class _ZipAppPEX(_Layout):
    """A PEX stored as a single zipapp archive."""

    def __init__(
        self,
        path,  # type: str
        zfp,  # type: zipfile.ZipFile
    ):
        # type: (...) -> None
        super(_ZipAppPEX, self).__init__(path)
        self._zfp = zfp
        self._names = tuple(zfp.namelist())

    def bootstrap_strip_prefix(self):
        # type: () -> Optional[str]
        return BOOTSTRAP_DIR

    def _extract_under(self, prefix, dest_dir):
        # type: (str, str) -> None
        # Zip directory entries end in "/"; extract only the file entries.
        for entry in self._names:
            if entry.startswith(prefix) and not entry.endswith("/"):
                self._zfp.extract(entry, dest_dir)

    def extract_bootstrap(self, dest_dir):
        # type: (str) -> None
        self._extract_under(BOOTSTRAP_DIR, dest_dir)

    def dist_strip_prefix(self, dist_name):
        # type: (str) -> Optional[str]
        return os.path.join(DEPS_DIR, dist_name)

    def extract_dist(
        self,
        dest_dir,  # type: str
        dist_relpath,  # type: str
    ):
        self._extract_under(dist_relpath, dest_dir)

    def extract_code(self, dest_dir):
        # type: (str) -> None
        # Everything that is not metadata, bootstrap or a dep is user code.
        for entry in self._names:
            if entry in ("__main__.py", PEX_INFO_PATH):
                continue
            if entry.startswith((BOOTSTRAP_DIR, DEPS_DIR)):
                continue
            self._zfp.extract(entry, dest_dir)

    def extract_pex_info(self, dest_dir):
        # type: (str) -> None
        self._zfp.extract(PEX_INFO_PATH, dest_dir)

    def extract_main(self, dest_dir):
        # type: (str) -> None
        self._zfp.extract("__main__.py", dest_dir)

    def __str__(self):
        # type: () -> str
        return "PEX zipfile {}".format(self._path)
class _PackedPEX(_Layout):
    """A directory PEX whose bootstrap and dists are stored as individual zips."""

    def extract_bootstrap(self, dest_dir):
        # type: (str) -> None
        with open_zip(os.path.join(self._path, BOOTSTRAP_DIR)) as zfp:
            zfp.extractall(dest_dir)

    def extract_dist(
        self,
        dest_dir,  # type: str
        dist_relpath,  # type: str
    ):
        with open_zip(os.path.join(self._path, dist_relpath)) as zfp:
            zfp.extractall(dest_dir)

    def extract_code(self, dest_dir):
        # type: (str) -> None
        top = self._path
        for root, dirs, files in os.walk(top):
            rel_root = os.path.relpath(root, top)
            if root == top:
                # At the top level, prune everything that is not user code.
                dirs[:] = [d for d in dirs if d != DEPS_DIR]
                files[:] = [
                    f for f in files if f not in ("__main__.py", PEX_INFO_PATH, BOOTSTRAP_DIR)
                ]
            for directory in dirs:
                safe_mkdir(os.path.join(dest_dir, rel_root, directory))
            for file_name in files:
                safe_copy(
                    os.path.join(root, file_name),
                    os.path.join(dest_dir, rel_root, file_name),
                )

    def extract_pex_info(self, dest_dir):
        # type: (str) -> None
        safe_copy(os.path.join(self._path, PEX_INFO_PATH), os.path.join(dest_dir, PEX_INFO_PATH))

    def extract_main(self, dest_dir):
        # type: (str) -> None
        safe_copy(os.path.join(self._path, "__main__.py"), os.path.join(dest_dir, "__main__.py"))

    def __str__(self):
        # type: () -> str
        return "Spread PEX directory {}".format(self._path)
@contextmanager
def _identify_layout(pex):
    # type: (str) -> Iterator[Optional[_Layout]]
    """Yield the `_Layout` matching the PEX at `pex`, or None for a loose PEX."""
    if zipfile.is_zipfile(pex) and is_python_script(
        pex,
        # N.B.: A PEX file need not be executable since it can always be run via `python a.pex`.
        check_executable=False,
    ):
        with open_zip(pex) as zfp:
            yield _ZipAppPEX(pex, zfp)
        return
    if os.path.isdir(pex) and zipfile.is_zipfile(os.path.join(pex, BOOTSTRAP_DIR)):
        yield _PackedPEX(pex)
        return
    # A loose PEX which needs no layout.
    yield None
def maybe_install(
    pex,  # type: str
    pex_root,  # type: str
    pex_hash,  # type: str
):
    # type: (...) -> Optional[str]
    """Install a zipapp or packed PEX into the pex root as a loose PEX.

    Returns the path of the installed PEX, or `None` if the PEX needed no
    installation and can be executed directly.
    """
    with _identify_layout(pex) as layout:
        return _install(layout, pex_root, pex_hash) if layout else None
| |
# coding=utf-8
"""
This module, core_server_generator.py, contains the code for the outline of the Core Server.
"""
from code_manager.abstract_definitions.classes import ParentClass
from code_manager.abstract_definitions.classes import ChildClass
from code_manager.abstract_definitions.variables import PassedInStringField
from code_manager.abstract_definitions.variables import PassedInObjectField
from code_manager.abstract_definitions.variables import PassedInBooleanField
from code_manager.abstract_definitions.variables import IntegerField
from code_manager.abstract_definitions.variables import DictionaryField
from code_manager.abstract_definitions.variables import StringField
from code_manager.abstract_definitions.variables import StringFieldWithGetter
from code_manager.abstract_definitions.variables import BooleanField
from code_manager.abstract_definitions.variables import ObjectField
from code_manager.abstract_definitions.code_file import CodeFile
from code_manager.abstract_definitions.variables import ObjectParameter
from code_manager.abstract_definitions.variables import StringParameter
from code_manager.abstract_definitions.variables import IntegerParameter
from code_manager.abstract_definitions.variables import DictionaryParameter
from code_manager.abstract_definitions.variables import ParameterValue
from code_manager.abstract_definitions.conditionals import IfStatementBooleanSplit
from code_manager.abstract_definitions.conditionals import SwitchStatement
from code_manager.abstract_definitions.functions import ClassFunctionThatChildrenOverrideAndCallOnceAtEnd
from code_manager.abstract_definitions.functions import FunctionalityCustom
from code_manager.abstract_definitions.functions import FunctionalityBlock
from code_manager.abstract_definitions.functions import Function
from code_manager.abstract_definitions import variables
# The 'physical' file to make.
core_server = CodeFile('basis for Nexus and Peon servers.')
# Global functions.
get_pid_directory = Function('Returns the PID directory from the provided environment.')
server_name_and_environment = StringParameter('The sever name to extract the environment from.')
ss = SwitchStatement()
ss.add_condition_and_value_pair('ENVIRONMENT_DEV in server_name_and_environment', 'return DEV_PID_DIRECTORY')
ss.add_condition_and_value_pair('ENVIRONMENT_QA in server_name_and_environment', 'return QA_PID_DIRECTORY')
ss.add_condition_and_value_pair('ENVIRONMENT_PROD in server_name_and_environment', 'return PROD_PID_DIRECTORY')
get_server_pid = Function('Returns the pid if one exists for the provided server_name_and_environment.')
server_name_and_environment = StringParameter('The specific server to get a PID from.')
FunctionalityBlock("""
for file_name in os.listdir(get_pid_directory(server_name_and_environment)):
if file_name.endswith(\'.\' + server_name_and_environment):
return str(file_name).replace(\'.\' + server_name_and_environment, \'\')
return '-1' ### The PID of the alive server instance.
""")
is_pid_alive = Function('Return a boolean indicating if this pid is alive or not (done by checking if PID exists and if PID is alive.')
pid = StringParameter('The PID to check.')
FunctionalityBlock("""
p = subprocess.Popen(['top', '-p', str(pid), '-n', '1', '-b'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
top_output = out.decode('ascii').split('\\n')
output = top_output[len(top_output) - 2:len(top_output) - 1] # Grab the single line we want.
output = output[0].split('\\t')
if 'PID' in output[0]:
return False
return True ### If the pid is currently alive or not.
""")
#remove_server_pid_file = Function('Removes the specified server PID file.')
#server_name_and_environment = StringParameter('The server instance of PID to remove.')
#FunctionalityBlock("""
#pid = get_server_pid(server_name_and_environment)
#if pid != -1:
# os.remove(get_pid_directory(server_name_and_environment) + pid + '.' + server_name_and_environment)
#else:
# dbg.terminate('The server pid file does not exist for {\' + pid + \'}')
#""")
kill_pid = Function('Kill the PID specified.')
pid = StringParameter('The PID to kill.')
FunctionalityCustom('os.kill(int(pid), signal.SIGTERM)')
is_sever_up = Function('Return a boolean indicating if this server is alive or not.')
server_name_and_environment = StringParameter('The specific server to check for alive status.')
FunctionalityBlock("""
pid = get_server_pid(server_name_and_environment)
if pid is None:
return False
if not is_pid_alive(pid):
return False
return True ### If the server is currently alive or not.
""")
get_next_available_port = Function('This will return the next available pid that can be used.')
FunctionalityBlock("""
global STARTING_PORT
ports_used = []
for pid_directory in ALL_PID_DIRECTORIES:
if len(os.listdir(pid_directory)) == 0:
STARTING_PORT += 1
return STARTING_PORT - 1
for file_name in os.listdir(pid_directory):
if 'nexus' in file_name: # The Nexus is the only one to actually bind to ports so we only check the Nexus. Peons will contain what the Nexus already contains.
with open(pid_directory + file_name) as file_handler:
file_content = file_handler.readlines()
file_content = [x.strip() for x in file_content] # Remove whitespace characters like \\n from each line.
for line in file_content:
if 'port' in line:
ports_used.append(int(line[line.index(' : ') + 3:]))
else:
STARTING_PORT += 1
return STARTING_PORT - 1
current_port = STARTING_PORT
while True:
if current_port not in ports_used:
return current_port ### A port that the system is not currently using.#RT:int
current_port += 1
""")
#safely_terminate_server = GlobalFunction('Turn off a specified server in a safe manner.')
#server_name_and_environment = FunctionStringParameter('The specific server to terminate.')
#FunctionalityBlock("""
#server_pid = get_server_pid(server_name_and_environment)
#if server_pid != -1:
# if is_pid_alive(server_pid):
# timer = atr.StopClock(0.5, 5)
# print("Waiting for the server '" + server_name_and_environment + "' to terminate...", end='')
# else:
# dbg.terminate("Only the server pid file was found for '" + server_name_and_environment + "', but not a running server. So just the file will be removed.")
#else:
# dbg.terminate("No server pid was found for '" + server_name_and_environment + "' so nothing can be terminated.")
#""")
append_text_to_file = Function('Utility function to add text to a file.')
server_name = StringParameter('Find the file to work with through the server name.')
text_to_add = StringParameter('The text to add to this file.')
FunctionalityBlock("""
with open(get_pid_directory(server_name) + get_server_pid(server_name) + '.' + server_name, 'a') as file_handler:
file_handler.write(text_to_add)
""")
get_text_from_file = Function('Utility function to get a property from a file.')
server_name = StringParameter('Find the file to work with through the server name.')
property_to_get = StringParameter('The name of the property to get a value from.')
FunctionalityBlock("""
with open(get_pid_directory(server_name) + get_server_pid(server_name) + '.' + server_name, 'r') as file_handler:
file_content = file_handler.readlines()
file_content = [x.strip() for x in file_content] # Remove whitespace characters like \\n from each line.
for line in file_content:
if property_to_get in line:
return line[line.index(' : ') + 3:] ### The found property's value.#RT:str
""")
# ServerMessage class.
ServerMessage = ParentClass('Defines properties of a server to server message.')
json_message = DictionaryParameter('The original server message in JSON form.')
_server_from = StringFieldWithGetter('DC:json_message[SERVER_FROM]', 'The server that this message is from.')
_server_to = StringFieldWithGetter('DC:json_message[SERVER_TO]', 'The server that this message is sent to.')
_content = StringFieldWithGetter('DC:json_message[CONTENT]', 'The server message.')
get_as_dictionary = Function('Returns this ServerMessage as a dictionary.')
FunctionalityBlock("""
return {CONTENT: self._content, SERVER_FROM: self._server_from, SERVER_TO: self._server_to} ### Get this server_message as a dictionary object.
""")
from_manual = Function('Was this message originally sent from a manual user.')
FunctionalityCustom('return \'manual\' in self._server_from ### If this message was sent from a manual user or not.#RT:bool')
from_nexus = Function('Was this message originally sent from a nexus.')
FunctionalityCustom('return \'nexus\' in self._server_from ### If this message was sent from a nexus or not.#RT:bool')
from_peon = Function('Was this message originally sent from a peon.')
FunctionalityCustom('return \'peon\' in self._server_from ### If this message was sent from a peon or not.#RT:bool')
is_terminate_message = Function('Is this message telling the server to shutdown.')
FunctionalityCustom('return terminate_server == self._content ### If this message is telling the receiving server to shutdown.#RT:bool')
''' __ __ __ ___ ___ ___
/ |\/| / \ /__` | | | |__ |__
/_ | | \__X .__/ | \__/ | |
'''
# ZMQCore class.
ZMQCore = ParentClass('Used for sending and receiving messages.')
_context = PassedInStringField('Used for creating sockets.')
_owner = PassedInBooleanField('Does this server own this port? (Should it bind (if owner) or connect (if not owner)).')
_host_and_port = PassedInStringField('The host + port to send/get messages on.')
socket_type = IntegerParameter('The socket type to make, either PUSH or PULL.')
_socket = ObjectField('self._context.socket(socket_type)', 'The socket that will send or get messages.')
get_port = Function('Return the port that this socket is working with.')
FunctionalityCustom("return self._host_and_port[self._host_and_port.rindex(':') + 1:] ### The port that this socket is working with.#RT:str")
clean_up = Function('Clean up sockets that were used.')
FunctionalityCustom('self._socket.close()')
# PushCore class.
PushCore = ChildClass('Used for sending messages.')
socket_type = ParameterValue('zmq.PUSH')
IfStatementBooleanSplit('self._owner', 'self._socket.bind(self._host_and_port)', 'self._socket.connect(self._host_and_port)')
send_server_message = Function('Sends a server message to a server.')
server_message = ObjectParameter('The server message to send.')
FunctionalityCustom('self._socket.send_json(server_message.get_as_dictionary())')
# ReceiveCore class.
ReceiveCore = ChildClass('Used for receiving messages.')
socket_type = ParameterValue('zmq.PULL')
IfStatementBooleanSplit('self._owner', 'self._socket.bind(self._host_and_port)', 'self._socket.connect(self._host_and_port)')
get_server_message = Function('Return the server message that this server gets.')
FunctionalityCustom("return ServerMessage(self._socket.recv_json()) ### Get incoming messages as ServerMessage objects.")
''' ___ __ / __ ___ __ __ ___ ___ ___
|\ | |__ \_/ | | /__` / |__) |__ / \ |\ | /__` | | | |__ |__
| \| |___ / \ \__/ .__/ / | |___ \__/ | \| .__/ | \__/ | |
'''
# ServerCore class.
ServerCore = ParentClass('The foundation that both Nexus and Peons build off of.')
_name = PassedInStringField('The type of server Nexus/Peon.')
_environment = PassedInStringField('The environment that this server is a member of.')
_server_name = StringField('DC:str(self._pid) + \'.\' + self._name + \'_\' + self._environment', 'The full server name.')
_server_from = StringField('DC:self._name + \'_\' + self._environment', 'The server name to put in the SEVER_FROM field for a ServerMessage.')
_context = ObjectField('zmq.Context()', 'A ZMQ specific class that makes sockets.')
_pid = IntegerField('os.getpid()', 'The PID of this server.')
_keep_receiving = BooleanField('True', 'Indicates if this server is still receiving messages. If it is not then it is shutting down.')
# NexusCore class.
NexusCore = ChildClass('The blue-print for a running Nexus server.')
name = ParameterValue('\'nexus\'')
_receive_core = ObjectField('ReceiveCore(self._context, DEFAULT_HOST + str(get_next_available_port()), True)', 'Used for receiving server messages from peons.')
_push_cores = ObjectField('[]', 'A list of all the push cores that send messages to all the peons.')
_peons = DictionaryField('{}', 'KEY - Peon Server Name | VALUE - [push_core, is_verified]')
_peon_current_id = IntegerField('0', 'A system to assign unique identifiers to peons.')
_receiver_thread = ObjectField('threading.Thread(target=self.run_server_until_told_to_shutdown)', 'The threat that will receive messages for the entire Nexus.l')
_push_core_for_manual_replies = ObjectField('PushCore(self._context, DEFAULT_HOST + str(get_next_available_port()), True)', 'Used to reply to manual messages.')
FunctionalityBlock("""
self._create_server_pid_file()
self._receiver_thread.start()
""")
_create_server_pid_file = Function('Creates the server_pid file for this server.')
FunctionalityBlock("""
# Make sure that each environment only has 1 instance of Nexus running.
for file_name in os.listdir(get_pid_directory(self._server_name)):
if 'nexus' in file_name:
dbg.terminate('The environment of ' + self._environment + ' already has a Nexus!')
server_pid = get_server_pid(self._server_name)
if server_pid == '-1':
with open(get_pid_directory(self._server_name) + self._server_name, 'w') as file_handler:
file_handler.write('receive port : ' + self._receive_core.get_port() + '\\n')
file_handler.write('push port for manual reply : ' + self._push_core_for_manual_replies.get_port() + '\\n')
print('Just created Nexus PID file at location : ' + get_pid_directory(self._server_name) + self._server_name)
else:
dbg.terminate('The server PID file already exists for : ' + self._server_name + '!')
""")
reply_to_manual = Function('This function will send a server message response to the manual user.')
message_content = StringParameter('The message sent to the Nexus to reply to.')
FunctionalityBlock("""
if server_message.is_terminate_message():
self._push_core_for_manual_replies.send_server_message(ServerMessage({CONTENT: REPLY_SUCCESS, SERVER_FROM: self._server_name, SERVER_TO: 'manual'}))
self.clean_up()
elif server_message.get_content() == MTN_run_django:
self.launch_peon()
""")
reply_to_peon = Function('This function will send a server message response to the peon.')
peon_server_name = StringParameter('The server name of the peon.')
message_content = StringParameter('The message sent to the Nexus to reply to.')
FunctionalityBlock("""
if server_message.is_terminate_message():
self._peons[peon_server_name][0].send_server_message(ServerMessage({CONTENT: REPLY_SUCCESS, SERVER_FROM: self._server_name, SERVER_TO: peon_server_name}))
self.clean_up()
elif server_message.get_content() == PTN_verify_peon:
self._peons[peon_server_name][0].send_server_message(ServerMessage({CONTENT: REPLY_SUCCESS, SERVER_FROM: self._server_name, SERVER_TO: peon_server_name}))
self._peons[peon_server_name][1] = True
""")
run_server_until_told_to_shutdown = Function('This runs the server and all server logic until the server is told to shut down.')
FunctionalityBlock("""
while self._keep_receiving:
server_message = self._receive_core.get_server_message()
print('I got the following server message : ' + str(server_message))
if server_message.from_manual():
self.reply_to_manual(server_message.get_content())
elif server_message.from_peon():
self.reply_to_peon(server_message.get_server_from(), server_message.get_content()
""")
launch_peon = Function('This launches a peon.')
FunctionalityBlock("""
peon_dictionary_name = 'peon_' + str(self._peon_current_id) + '_' + self._environment
peon_port_to_use = get_next_available_port()
self._peons[peon_dictionary_name] = [PushCore(self._context, DEFAULT_HOST + str(peon_port_to_use)), False]
append_text_to_file(self._server_name, 'Push port for ' + peon_dictionary_name + ' : ' + str(peon_port_to_use))
with open(get_pid_directory(self._server_name) + self._server_name, 'a') as file_handler:
# file_handler.write('push port for ' + peon_dictionary_name + ' : ' + str(peon_port_to_use))
y = 2 # TODO : LAUNCH THE PEON HERE!!!
#subprocess.Popen([])# launch peon # TODO !!
self._peon_current_id += 1
""")
clean_up = Function('Clean up sockets that were used.')
FunctionalityBlock("""
for push_core in self._push_cores:
push_core.clean_up()
self._receiver_thread.join()
self._receive_core.clean_up()
self._context.destroy()
for file_name in os.listdir(get_pid_directory(self._server_name)):
os.remove(get_pid_directory(self._server_name) + file_name)
print('Just removed Nexus PID file at location : ' + get_pid_directory(self._server_name) + file_name)
""")
# PeonCore class.
PeonCore = ChildClass('The blue-print for a running Peon server.')
_push_host_and_port = PassedInStringField('The host address + port to work with.')
_receive_host_and_port = PassedInStringField('The host address + port to work with.')
_push_core = ObjectField('PushCore(self._context, push_host_and_port, False)', 'Used for sending server messages to the Nexus.')
_receive_core = ObjectField('ReceiveCore(self._context, receive_host_and_port, False)', 'Used for receiving server messages from the Nexus.')
_project_is_currently_running = BooleanField('False', 'Indicates if the project process is currently running.')
_project_process = ObjectField('None', 'The process running the project.')
_create_server_pid_file = Function('Creates the server_pid file for this server.')
FunctionalityBlock("""
server_pid = get_server_pid(self._server_name)
if server_pid == '-1':
open(get_pid_directory(self._server_name) + self._server_name, 'w').close()
else:
dbg.terminate('The server PID file already exists for : ' + self._server_name + '!')
""")
#run_project = Function('Runs the specified project.')
#project_to_run = StringParameter('The specified project to run.')
clean_up = Function('Clean up sockets that were used.')
FunctionalityBlock("""
self._push_core.clean_up()
self._receive_core.clean_up()
self._context.destroy()
""")
###########################################################################################################################
# ManualCommunication class.
ManualCommunication = ChildClass('This is used for having manual communication with the Nexus.')
name = ParameterValue('\'manual\'')
FunctionalityBlock("""
nexus_pid = None
nexus_file_name_and_path = get_pid_directory(self._environment)
server_name = ''
for file_name in os.listdir(get_pid_directory(self._environment)):
if 'nexus' in file_name:
nexus_pid = file_name[0:file_name.index('.')]
nexus_file_name_and_path += file_name
server_name = file_name[file_name.index('.') + 1:]
if nexus_pid is None:
dbg.terminate('There is no Nexus to communicate with!')
else:
self._push_core = PushCore(self._context, DEFAULT_HOST + get_text_from_file(server_name, 'receive'), False)
self._receive_core = ReceiveCore(self._context, DEFAULT_HOST + get_text_from_file(server_name, 'manual'), False)
""")
#_push_core = ClassObjectField('PushCore(self._context)', 'Used for sending server messages.')
#_receive_core = ClassObjectField('ReceiveCore()', 'Used for receiving server messages.')
clean_up = Function('Clean up sockets that were used.')
FunctionalityBlock("""
self._push_core.clean_up()
self._receive_core.clean_up()
self._context.destroy()
""")
#nexus = NexusCore(ENVIRONMENT_DEV)
#nexus.clean_up()
#manual = ManualCommunication(ENVIRONMENT_DEV)
#manual.clean_up()
#self._push_core.send_server_message(ServerMessage({CONTENT: 'Hello world from manual!', SERVER_FROM: 'manual', SERVER_TO: 'nexus'}))
#message_back = self._receive_core.get_server_message()
#print('I got the following message back : ')
#print(message_back)
# Testing
| |
import argparse, json, os, time
from parcellearning import pairgat
from parcellearning.utilities import gnnio
from parcellearning.utilities.early_stop import EarlyStopping
from parcellearning.utilities.batch import partition_graphs
from parcellearning.utilities.load import load_schema
from shutil import copyfile
from pathlib import Path
import numpy as np
import dgl
from dgl.data import register_data_args
import dgl.function as fn
import torch
import torch.nn.functional as F
def main(args):
    """Train a PAIRGAT model as configured by a JSON schema file.

    Loads model / optimizer / training / stopping parameters from
    ``args.schema_file``, trains with mini-batch gradient accumulation,
    early-stops on validation loss, and writes the final model plus a JSON
    record of per-epoch performance into the schema's output directory.
    """
    schema = load_schema(args.schema_file)
    # NOTE(review): in_dir is read but never used below — confirm intent.
    in_dir = schema['data']['in']
    out_dir = schema['data']['out']
    Path(out_dir).mkdir(parents=True, exist_ok=True)
    # copy schema file to output directory so the run is reproducible
    copy_schema = ''.join([out_dir, args.schema_file.split('/')[-1]])
    if not os.path.exists(copy_schema):
        copyfile(args.schema_file, copy_schema)
    # get features (sorted so feature ordering is deterministic across runs)
    features = schema['features']
    features.sort()
    ##### GET PARAMETERS FROM SCHEMA FILE #####
    MODEL_PARAMS = schema['model_parameters']
    OPT_PARAMS = schema['optimizer_parameters']
    TRAIN_PARAMS = schema['training_parameters']
    STOP_PARAMS = schema['stopping_parameters']
    # load training and validation data
    training = gnnio.dataset(dType='training',
                             features=features,
                             dSet=schema['data']['training'],
                             norm=True,
                             aggregate=True,
                             clean=True)
    validation = gnnio.dataset(dType='validation',
                               features=features,
                               dSet=schema['data']['validation'],
                               norm=True,
                               aggregate=True,
                               clean=True)
    # merge all validation graphs into a single batched graph
    validation = dgl.batch(validation)
    if args.no_background:
        # drop nodes whose label is 0 — assumes label 0 marks background
        # voxels; TODO confirm against the dataset's label encoding.
        print('Excluding background voxels in model training.')
        for g in training:
            nodes = np.where(g.ndata['label'] == 0)[0]
            g.remove_nodes(nodes)
        nodes = np.where(validation.ndata['label'] == 0)[0]
        validation.remove_nodes(nodes)
    val_X = validation.ndata['features']
    val_Y = validation.ndata['label']
    ##### MODEL TRAINING #####
    # instantiate model using schema parameters
    model = pairgat.PAIRGAT(**MODEL_PARAMS)
    # instantiate Adam optimizer using scheme parameters
    optimizer = torch.optim.Adam(model.parameters(), **OPT_PARAMS)
    # initialize early stopper
    stopped_model_output='%s%s.earlystop.Loss.pt' % (out_dir, schema['model'])
    stopper = EarlyStopping(filename=stopped_model_output, **STOP_PARAMS)
    # per-epoch performance history, dumped to JSON at the end
    progress = {k: [] for k in ['Epoch',
                                'Duration',
                                'Train Loss',
                                'Train Acc',
                                'Val Loss',
                                'Val Acc']}
    cross_entropy = torch.nn.CrossEntropyLoss()
    dur = []
    print('Training model')
    for epoch in range(TRAIN_PARAMS['epochs']):
        # learn model on training data
        batches = partition_graphs(training, TRAIN_PARAMS['n_batch'])
        model.train()
        t0 = time.time()
        # zero the gradients for this epoch
        optimizer.zero_grad()
        # aggregate training batch losses
        # NOTE(review): train_le / train_lg / train_lb are never updated or
        # read — likely leftovers from a multi-loss variant of this script.
        train_loss = 0
        train_le = 0
        train_lg = 0
        train_lb = 0
        # aggregate training batch accuracies
        train_acc = 0
        for iteration, batch in enumerate(batches):
            # get training features for this batch
            batch_X = batch.ndata['features']
            batch_Y = batch.ndata['label']
            # push batch through network
            batch_logits = model(batch, batch_X)
            # compute batch performance
            # loss
            batch_loss = cross_entropy(batch_logits, batch_Y)
            # accuracy
            _, batch_indices = torch.max(F.softmax(batch_logits, dim=1), dim=1)
            # NOTE(review): integer tensor division — on torch < 1.5 this
            # truncates to 0/1; verify torch version or cast to float.
            batch_acc = (batch_indices == batch_Y).sum() / batch_Y.shape[0]
            # apply backward parameter update pass
            batch_loss.backward()
            # update training performance (accumulates tensors, not floats;
            # .item() is only taken after the epoch)
            train_loss += batch_loss
            train_acc += batch_acc
            # accumulate the gradients from each batch; step once every
            # n_batch iterations
            if (iteration+1) % TRAIN_PARAMS['n_batch'] == 0:
                optimizer.step()
                optimizer.zero_grad()
        dur.append(time.time() - t0)
        # switch model into evaluation mode
        # so we don't update the gradients using the validation data
        model.eval()
        with torch.no_grad():
            # push validation through network
            val_logits = model(validation, val_X)
            # compute validation performance
            # loss
            val_loss = cross_entropy(val_logits, val_Y)
            # accuracy
            _, val_indices = torch.max(F.softmax(val_logits, dim=1), dim=1)
            val_acc = (val_indices == val_Y).sum() / val_Y.shape[0]
        # average the accumulated batch metrics
        train_loss /= TRAIN_PARAMS['n_batch']
        train_acc /= TRAIN_PARAMS['n_batch']
        # Show current performance
        print("Epoch {:05d} | Time(s) {:.4f} | Train Loss {:.4f} | Train Acc {:.4f} | Val Loss {:.4f} | Val Acc {:.4f}".format(
            epoch, np.mean(dur),
            train_loss.item(), train_acc.item(),
            val_loss.item(), val_acc.item()))
        progress['Epoch'].append(epoch)
        # NOTE(review): duration is recorded as 0 for the first four epochs
        # (warm-up?); confirm this is intentional.
        if epoch > 3:
            progress['Duration'].append(time.time() - t0)
        else:
            progress['Duration'].append(0)
        # update training performance
        progress['Train Loss'].append(train_loss.item())
        progress['Train Acc'].append(train_acc.item())
        # update validation performance
        progress['Val Loss'].append(val_loss.item())
        progress['Val Acc'].append(val_acc.item())
        # set up early stopping criteria on validation loss:
        # if validation loss does not decrease for patience epochs,
        # save the best recent model and break out of training
        early_stop = stopper.step(val_loss.detach().data, model)
        if early_stop:
            break
    model_output = '%s%s.pt' % (out_dir, schema['model'])
    model.save(filename=model_output)
    # save performance to json
    performance_output = '%sperformance.%s.json' % (out_dir, schema['model'])
    with open(performance_output, "w") as outparams:
        json.dump(progress, outparams, ensure_ascii=True, indent=4, sort_keys=True)
if __name__ == '__main__':
    # CLI entry point: parse the schema path and background flag, then train.
    parser = argparse.ArgumentParser(description='GAT')
    parser.add_argument('--schema-file',
                        type=str,
                        help='JSON file with parameters for model, training, and output.')
    # NOTE(review): single-dash long option is unconventional; argparse still
    # exposes it as args.no_background.
    parser.add_argument('-no_background',
                        help='Exclude background voxels in model training.',
                        action='store_true',
                        required=False)
    args = parser.parse_args()
    main(args)
| |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for dm_robotics.agentflow.action_spaces."""
from absl.testing import absltest
from absl.testing import parameterized
from dm_env import specs
from dm_robotics.agentflow import action_spaces
from dm_robotics.agentflow import core
from dm_robotics.agentflow import testing_functions
import numpy as np
class ActionSpacesTest(parameterized.TestCase):
    """Unit tests for the helpers in `dm_robotics.agentflow.action_spaces`."""

    def test_constrained_action_spec(self):
        """constrained_action_spec tightens a spec's min/max bounds."""
        spec = specs.BoundedArray(
            shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
        space = core.IdentityActionSpace(spec)
        constrained_spec = action_spaces.constrained_action_spec(
            minimum=[-10.0, 20.0], maximum=[40.0, 90.0], base=spec)
        constrained_space = core.IdentityActionSpace(constrained_spec)
        good_base_input = np.asarray([0.0, 10.0])
        np.testing.assert_almost_equal(
            space.project(good_base_input), good_base_input)
        # This action is within the new min/max bounds, should pass.
        good_smaller_input = np.asarray([0.0, 25.0])
        np.testing.assert_almost_equal(
            constrained_space.project(good_smaller_input), good_smaller_input)
        # The original action that passed the base space should fail in the smaller
        # action space.
        with self.assertRaises(ValueError):
            constrained_space.project(good_base_input)
        # Check handling of scalar min/max
        spec = specs.BoundedArray(
            shape=(3,), dtype=float, minimum=-50.0, maximum=50.0)
        constrained_spec = action_spaces.constrained_action_spec(
            minimum=-10.0, maximum=40.0, base=spec)
        constrained_space = core.IdentityActionSpace(constrained_spec)
        good_constrained_input = np.asarray([0.0] * 3)
        np.testing.assert_almost_equal(
            constrained_space.project(good_constrained_input),
            good_constrained_input)
        bad_constrained_input = np.asarray([90.0] * 3)
        with self.assertRaises(ValueError):
            constrained_space.project(bad_constrained_input)

    def test_constrained_action_space(self):
        """constrained_action_space wraps a base *space* (not spec)."""
        spec = specs.BoundedArray(
            shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
        space = core.IdentityActionSpace(spec)
        constrained_space = action_spaces.constrained_action_space(
            minimum=[-10.0, 20.0], maximum=[40.0, 90.0], base=space)
        good_base_input = np.asarray([0.0, 10.0])
        np.testing.assert_almost_equal(
            space.project(good_base_input), good_base_input)
        # This action is within the new min/max bounds, should pass.
        good_smaller_input = np.asarray([0.0, 25.0])
        np.testing.assert_almost_equal(
            constrained_space.project(good_smaller_input), good_smaller_input)
        # The original action that passed the base space should fail in the smaller
        # action space.
        with self.assertRaises(ValueError):
            constrained_space.project(good_base_input)

    def test_simple_fixed_action_space(self):
        """FixedActionSpace projects an empty action to a fixed value."""
        base = specs.Array(shape=(2,), dtype=np.float32, name='a1\ta2')
        base_space = action_spaces.prefix_slicer(base, 'a')
        fixed_spec = action_spaces.FixedActionSpace(
            base_space, np.asarray([1, 2], dtype=np.float32))
        self.assertEqual(base_space.spec().shape, (2,))
        self.assertEqual(fixed_spec.spec().shape, (0,))
        np.testing.assert_almost_equal(
            fixed_spec.project(np.asarray([], np.float32)),
            np.asarray([1, 2], dtype=np.float32))
        self.assertIsNotNone(fixed_spec.spec().name)

    def test_exclusion_slicer(self):
        """prefix_slicer with a negative-lookahead regex excludes actions."""
        base = specs.Array(shape=(4,), dtype=np.float32,
                           name='a1\ta2\texclude_action1\texclude_action2')
        # NOTE(review): the doubled '[[' in this regex looks unintentional but
        # is preserved here — '[' inside a character class is literal.
        base_space = action_spaces.prefix_slicer(base,
                                                 '^(?!exclude)[[a-zA-Z0-9-_.]+$')
        fixed_spec = action_spaces.FixedActionSpace(
            base_space, np.asarray([1, 2], dtype=np.float32))
        self.assertEqual(base_space.spec().shape, (2,))
        self.assertEqual(fixed_spec.spec().shape, (0,))
        # Excluded slots come back as NaN in the full-width projection.
        np.testing.assert_almost_equal(
            fixed_spec.project(np.asarray([], np.float32)),
            np.asarray([1, 2, np.nan, np.nan], dtype=np.float32))
        self.assertIsNotNone(fixed_spec.spec().name)

    def test_shrink_to_fit_action_space(self):
        """ShrinkToFitActionSpace rescales out-of-bounds actions uniformly."""
        # Corresponds to `spec_utils_test.test_primitive`.
        spec = specs.BoundedArray(
            shape=(3,),
            dtype=float,
            minimum=[0.0, 0.0, 0.0],
            maximum=[20.0, 100.0, 20.0])
        action_space = action_spaces.ShrinkToFitActionSpace(spec)
        val1 = np.asarray([21.0, 5.0, 21.0])  # over-max, under-min, over-max
        factor1 = 20.0 / 21.0
        expected1 = np.asarray([20.0, 5.0 * factor1, 20.0])
        testing_functions.assert_value(action_space.project(val1), expected1)
        val2 = np.asarray([1.0, 200.0, 21.0])  # ok, over-max, over-max
        expected2 = np.asarray([0.5, 100.0, 10.5])
        testing_functions.assert_value(action_space.project(val2), expected2)

    def test_identity_action_space_output(self):
        """IdentityActionSpace passes valid actions through and rejects others."""
        spec = specs.BoundedArray(
            shape=(2,), dtype=float, minimum=[-50.0, 0.0], maximum=[50.0, 100.0])
        space = core.IdentityActionSpace(spec)
        good_input = np.asarray([0.0, 10.0])
        bad_input = np.asarray([0.0, 110.0])
        np.testing.assert_almost_equal(space.project(good_input), good_input)
        try:
            space.project(bad_input)
            self.fail('Should fail validation')
        except ValueError as expected:
            del expected

    def test_cast_action_space_output(self):
        """CastActionSpace casts dtype, with configurable NaN handling."""
        spec = specs.BoundedArray(
            shape=(2,), dtype=np.float32, minimum=[-1.0, -2.0], maximum=[1.0, 2.0])
        # Should pass validation if action has NaN and ignore_nan is True.
        space = action_spaces.CastActionSpace(spec, ignore_nan=True)
        _ = space.project(np.asarray([0.0, np.nan]))
        # Should raise an exception if action has NaN and ignore_nan is False.
        space = action_spaces.CastActionSpace(spec, ignore_nan=False)
        with self.assertRaises(ValueError):
            space.project(np.asarray([0.0, np.nan]))
        # Should raise an exception if action has wrong shape.
        with self.assertRaises(ValueError):
            space.project(np.asarray([0.0, 0.0, 0.0]))
        # Should raise an exception if action is out of bounds.
        with self.assertRaises(ValueError):
            space.project(np.asarray([0.0, 3.0]))
        # Should cast a float64 to float32 and pass validation.
        good_input = np.asarray([0.0, 1.0], dtype=np.float64)
        expected_result = np.asarray([0.0, 1.0], dtype=np.float32)
        actual_result = space.project(good_input)
        np.testing.assert_array_almost_equal(expected_result, actual_result)
        self.assertEqual(expected_result.dtype, actual_result.dtype)

    @parameterized.parameters(
        specs.Array(shape=(3,), dtype=np.float32, name='a11\ta12\ta2'),
        specs.BoundedArray(shape=(3,), dtype=np.float32, name='a11\ta12\ta2',
                           minimum=[-1., -2., -3.], maximum=[1., 2., 3.]),
    )
    def test_sequential_action_space(self, base_spec):
        """SequentialActionSpace chains projections from narrowest to base."""
        base_space = action_spaces.prefix_slicer(base_spec, 'a')
        subspace1 = action_spaces.prefix_slicer(base_space.spec(), 'a1')
        subspace2 = action_spaces.prefix_slicer(subspace1.spec(), 'a12')
        sequential_spec = action_spaces.SequentialActionSpace(
            [subspace2, subspace1, base_space], 'Sequential space')
        self.assertEqual(base_space.spec().shape, (3,))
        self.assertEqual(sequential_spec.spec().shape, subspace2.spec().shape)
        # Only the a12 slot is driven; the others project to NaN.
        expected_result = np.asarray([np.nan, 3., np.nan], dtype=base_spec.dtype)
        np.testing.assert_almost_equal(
            sequential_spec.project(np.asarray([3.], np.float32)),
            expected_result)
        self.assertIsNotNone(sequential_spec.spec().name)
if __name__ == '__main__':
    # Run the absl test harness when executed directly.
    absltest.main()
| |
#!/usr/bin/env python
"""
check_graphite.py
~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import json
import optparse
import urllib
import urllib2
import sys
from numbers import Real
# Standard Nagios plugin exit codes, used for sys.exit below.
NAGIOS_STATUSES = {
    'OK': 0,
    'WARNING': 1,
    'CRITICAL': 2,
    'UNKNOWN': 3
}
class Graphite(object):
    """Thin client for Graphite's render API plus threshold-check helpers.

    Note: this module targets Python 2 (urllib/urllib2); only the
    network-facing methods depend on those modules.
    """

    def __init__(self, url, targets, _from, _until):
        """Build the JSON render URL for `targets` over [_from, _until]."""
        self.url = url.rstrip('/')
        self.targets = targets
        self._from = _from
        self._until = _until
        params = [('target', t) for t in self.targets] +\
                 [('from', self._from)] +\
                 [('until', self._until)] +\
                 [('format', 'json')]
        self.full_url = self.url + '/render?' +\
                        urllib.urlencode(params)

    def check_datapoints(self, datapoints, check_func, **kwargs):
        """Find alerting datapoints
        Args:
            datapoints (list): The list of datapoints to check
            check_func (function): The function to find out of bounds datapoints
        Kwargs:
            bounds (list): Compare against `datapoints` to find out of bounds list
            compare (list): Used for comparison if `datapoints` is out of bounds
            threshold (float): `check_func` is called for each datapoint against `threshold`
            beyond (float): Return datapoint if `beyond` value in bounds list (percentage).
        Returns:
            The list of out of bounds datapoints (None if neither `threshold`
            nor `bounds` is supplied).
        """
        if 'threshold' in kwargs:
            return [x for x in datapoints if isinstance(x, Real) and check_func(x, kwargs['threshold'])]
        elif 'bounds' in kwargs:
            # Pair each datapoint with its bound (and optional comparison
            # value) instead of indexing with xrange — same behavior, and
            # portable across Python 2/3.
            if 'compare' in kwargs:
                return [point for point, bound, cmp_val
                        in zip(datapoints, kwargs['bounds'], kwargs['compare'])
                        if all([point, bound, cmp_val])
                        and check_func(point / bound, kwargs['beyond'])
                        and check_func(point, cmp_val)]
            else:
                return [point for point, bound
                        in zip(datapoints, kwargs['bounds'])
                        if all([point, bound]) and check_func(point, bound)]

    def fetch_metrics(self):
        """Fetch and decode the render output; return None on any failure."""
        try:
            response = urllib2.urlopen(self.full_url)
            if response.code != 200:
                return None
            else:
                return json.loads(response.read())
        # BUG FIX: `except urllib2.URLError, TypeError:` bound the exception
        # to the *name* TypeError instead of also catching TypeError; a
        # parenthesized tuple is required to catch both.
        except (urllib2.URLError, TypeError):
            return None

    def generate_output(self, datapoints, *args, **kwargs):
        """Generate check output
        Args:
            datapoints (list): The list of datapoints to check
            warn_oob (list): Optional list of datapoints considered in warning state
            crit_oob (list): Mandatory list of datapoints considered in critical state
        Kwargs:
            count (int): Number of metrics that would generate an alert
            warning (float): The check's warning threshold
            critical (float): The check's critical threshold
            target (str): The target for `datapoints`
        Returns:
            A dictionary of datapoints grouped by their status ('CRITICAL', 'WARNING', 'OK')
        """
        check_output = dict(OK=[], WARNING=[], CRITICAL=[])
        count = kwargs['count']
        warning = kwargs.get('warning', 0)
        critical = kwargs.get('critical', 0)
        target = kwargs.get('target', 'timeseries')
        if len(args) > 1:
            (warn_oob, crit_oob) = args
        else:
            # Single positional list: treat its numeric entries as critical.
            crit_oob = [x for x in args[0] if isinstance(x, Real)]
            warn_oob = []
        if self.has_numbers(crit_oob) and len(crit_oob) >= count:
            check_output['CRITICAL'].append('%s [crit=%f|datapoints=%s]' %\
                (target, critical, ','.join(['%s' % str(x) for x in crit_oob])))
        elif self.has_numbers(warn_oob) and len(warn_oob) >= count:
            check_output['WARNING'].append('%s [warn=%f|datapoints=%s]' %\
                (target, warning, ','.join(['%s' % str(x) for x in warn_oob])))
        else:
            check_output['OK'].append('%s [warn=%0.3f|crit=%f|datapoints=%s]' %\
                (target, warning, critical, ','.join(['%s' % str(x) for x in datapoints])))
        return check_output

    def has_numbers(self, lst):
        """Return True if `lst` is iterable and holds at least one real number."""
        try:
            return any([isinstance(x, Real) for x in lst])
        except TypeError:
            # Non-iterable input (e.g. None) counts as "no numbers".
            return False
if __name__ == '__main__':
    # Nagios check entry point (Python 2: print statements, iteritems).
    parser = optparse.OptionParser()
    parser.add_option('-U', '--graphite-url', dest='graphite_url',
                      default='http://localhost/',
                      metavar='URL',
                      help='Graphite URL [%default]')
    parser.add_option('-t', '--target', dest='target',
                      action='append',
                      help='Target to check')
    parser.add_option('--compare', dest='compare',
                      metavar='SERIES',
                      help='Compare TARGET against SERIES')
    parser.add_option('--from', dest='_from',
                      help='From timestamp/date')
    parser.add_option('--until', dest='_until',
                      default='now',
                      help='Until timestamp/date [%default]')
    parser.add_option('-c', '--count', dest='count',
                      default=0,
                      type='int',
                      help='Alert on at least COUNT metrics [%default]')
    parser.add_option('--beyond', dest='beyond',
                      default=0.7,
                      type='float',
                      help='Alert if metric is PERCENTAGE beyond comparison value [%default]')
    parser.add_option('--percentile', dest='percentile',
                      default=0,
                      type='int',
                      metavar='PERCENT',
                      help='Use nPercentile Graphite function on the target (returns one datapoint)')
    parser.add_option('--empty-ok', dest='empty_ok',
                      default=False,
                      action='store_true',
                      help='Empty data from Graphite is OK')
    parser.add_option('--confidence', dest='confidence_bands',
                      default=False,
                      action='store_true',
                      help='Use holtWintersConfidenceBands Graphite function on the target')
    parser.add_option('--over', dest='over',
                      default=True,
                      action='store_true',
                      help='Over specified WARNING or CRITICAL threshold [%default]')
    parser.add_option('--under', dest='under',
                      default=False,
                      action='store_true',
                      help='Under specified WARNING or CRITICAL threshold [%default]')
    parser.add_option('-W', dest='warning',
                      type='float',
                      metavar='VALUE',
                      help='Warning if datapoints beyond VALUE')
    parser.add_option('-C', dest='critical',
                      type='float',
                      metavar='VALUE',
                      help='Critical if datapoints beyond VALUE')
    (options, args) = parser.parse_args()
    # --from and at least one --target are mandatory.
    if not all([getattr(options, option) for option in ('_from', 'target')]):
        parser.print_help()
        sys.exit(NAGIOS_STATUSES['UNKNOWN'])
    real_from = options._from
    # Direction of the comparison: --under flips it and disables --over.
    if options.under:
        check_func = lambda x, y: x < y
        options.over = False
    else:
        check_func = lambda x, y: x > y
    if options.confidence_bands:
        # Holt-Winters mode: fetch the raw series plus its confidence bands;
        # --from is reinterpreted as the number of newest points to examine.
        targets = [options.target[0], 'holtWintersConfidenceBands(%s)' % options.target[0]]
        check_threshold = None
        from_slice = int(options._from) * -1
        real_from = '-2w'
        if options.compare:
            targets.append(options.compare)
    else:
        # Threshold mode: -W and -C are mandatory.
        if not all([getattr(options, option) for option in ('critical', 'warning')]):
            parser.print_help()
            sys.exit(NAGIOS_STATUSES['UNKNOWN'])
        if options.percentile:
            targets = ['nPercentile(%s, %d)' % (options.target[0], options.percentile)]
        else:
            targets = options.target
        try:
            warn = float(options.warning)
            crit = float(options.critical)
        except ValueError:
            print 'ERROR: WARNING or CRITICAL threshold is not a number\n'
            parser.print_help()
            sys.exit(NAGIOS_STATUSES['UNKNOWN'])
    check_output = {}
    graphite = Graphite(options.graphite_url, targets, real_from, options._until)
    metric_data = graphite.fetch_metrics()
    if metric_data:
        if options.confidence_bands:
            # metric_data layout: [0]=series, [1]=upper band, [2]=lower band,
            # [3]=optional compare series — assumed Graphite render order;
            # TODO confirm against the Graphite version in use.
            actual = [x[0] for x in metric_data[0].get('datapoints', [])][from_slice:]
            target_name = metric_data[0]['target']
            kwargs = {}
            kwargs['beyond'] = options.beyond
            if options.over:
                kwargs['bounds'] = [x[0] for x in metric_data[1].get('datapoints', [])][from_slice:]
            elif options.under:
                kwargs['bounds'] = [x[0] for x in metric_data[2].get('datapoints', [])][from_slice:]
            if options.compare:
                kwargs['compare'] = [x[0] for x in metric_data[3].get('datapoints', [])][from_slice:]
                if not graphite.has_numbers(kwargs['compare']):
                    print 'CRITICAL: No compare target output from Graphite!'
                    sys.exit(NAGIOS_STATUSES['CRITICAL'])
            if graphite.has_numbers(actual) and graphite.has_numbers(kwargs['bounds']):
                points_oob = graphite.check_datapoints(actual, check_func, **kwargs)
                check_output[target_name] = graphite.generate_output(actual,
                                                                     points_oob,
                                                                     count=options.count,
                                                                     target=target_name)
            else:
                print 'CRITICAL: No output from Graphite for target(s): %s' % ', '.join(targets)
                sys.exit(NAGIOS_STATUSES['CRITICAL'])
        else:
            # Threshold mode: evaluate every returned series independently.
            for target in metric_data:
                datapoints = [x[0] for x in target.get('datapoints', []) if isinstance(x[0], Real)]
                if not graphite.has_numbers(datapoints) and not options.empty_ok:
                    print 'CRITICAL: No output from Graphite for target(s): %s' % ', '.join(targets)
                    sys.exit(NAGIOS_STATUSES['CRITICAL'])
                crit_oob = graphite.check_datapoints(datapoints, check_func, threshold=crit)
                warn_oob = graphite.check_datapoints(datapoints, check_func, threshold=warn)
                check_output[target['target']] = graphite.generate_output(datapoints,
                                                                          warn_oob,
                                                                          crit_oob,
                                                                          count=options.count,
                                                                          target=target['target'],
                                                                          warning=warn,
                                                                          critical=crit)
    else:
        if options.empty_ok and isinstance(metric_data, list):
            print 'OK: No output from Graphite for target(s): %s' % ', '.join(targets)
            sys.exit(NAGIOS_STATUSES['OK'])
        print 'CRITICAL: No output from Graphite for target(s): %s' % ', '.join(targets)
        sys.exit(NAGIOS_STATUSES['CRITICAL'])
    # NOTE(review): exit_code reflects only the last target iterated (and is
    # unbound if check_output is empty) — confirm this matches intent.
    for target, messages in check_output.iteritems():
        if messages['CRITICAL']:
            exit_code = NAGIOS_STATUSES['CRITICAL']
        elif messages['WARNING']:
            exit_code = NAGIOS_STATUSES['WARNING']
        else:
            exit_code = NAGIOS_STATUSES['OK']
        for status_code in ['CRITICAL', 'WARNING', 'OK']:
            if messages[status_code]:
                print '\n'.join(['%s: %s' % (status_code, status) for status in messages[status_code]])
    sys.exit(exit_code)
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class InlineTemplate(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """

    # swagger_types: attribute name -> swagger type.
    swagger_types = {
        'custom_fields': 'CustomFields',
        'documents': 'list[Document]',
        'envelope': 'Envelope',
        'recipients': 'Recipients',
        'sequence': 'str'
    }

    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'custom_fields': 'customFields',
        'documents': 'documents',
        'envelope': 'envelope',
        'recipients': 'recipients',
        'sequence': 'sequence'
    }

    def __init__(self, custom_fields=None, documents=None, envelope=None, recipients=None, sequence=None):  # noqa: E501
        """InlineTemplate - a model defined in Swagger"""  # noqa: E501
        self.discriminator = None
        # Initialize every backing field to None, then route any non-None
        # argument through its property setter.
        for attr_name, attr_value in (('custom_fields', custom_fields),
                                      ('documents', documents),
                                      ('envelope', envelope),
                                      ('recipients', recipients),
                                      ('sequence', sequence)):
            setattr(self, '_' + attr_name, None)
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    @property
    def custom_fields(self):
        """The custom_fields of this InlineTemplate.  # noqa: E501

        :rtype: CustomFields
        """
        return self._custom_fields

    @custom_fields.setter
    def custom_fields(self, custom_fields):
        """Sets custom_fields (:type: CustomFields)."""
        self._custom_fields = custom_fields

    @property
    def documents(self):
        """The documents of this InlineTemplate.  # noqa: E501

        Complex element contains the details on the documents in the envelope.

        :rtype: list[Document]
        """
        return self._documents

    @documents.setter
    def documents(self, documents):
        """Sets documents (:type: list[Document])."""
        self._documents = documents

    @property
    def envelope(self):
        """The envelope of this InlineTemplate.  # noqa: E501

        :rtype: Envelope
        """
        return self._envelope

    @envelope.setter
    def envelope(self, envelope):
        """Sets envelope (:type: Envelope)."""
        self._envelope = envelope

    @property
    def recipients(self):
        """The recipients of this InlineTemplate.  # noqa: E501

        :rtype: Recipients
        """
        return self._recipients

    @recipients.setter
    def recipients(self, recipients):
        """Sets recipients (:type: Recipients)."""
        self._recipients = recipients

    @property
    def sequence(self):
        """The sequence of this InlineTemplate.  # noqa: E501

        Specifies the order in which templates are overlaid.

        :rtype: str
        """
        return self._sequence

    @sequence.setter
    def sequence(self, sequence):
        """Sets sequence (:type: str)."""
        self._sequence = sequence

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr_name in self.swagger_types:
            value = getattr(self, attr_name)
            if isinstance(value, list):
                result[attr_name] = [item.to_dict() if hasattr(item, 'to_dict') else item
                                     for item in value]
            elif hasattr(value, 'to_dict'):
                result[attr_name] = value.to_dict()
            elif isinstance(value, dict):
                result[attr_name] = {k: (v.to_dict() if hasattr(v, 'to_dict') else v)
                                     for k, v in value.items()}
            else:
                result[attr_name] = value
        if issubclass(InlineTemplate, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, InlineTemplate) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
import petsc4py
import sys
petsc4py.init(sys.argv)
from petsc4py import PETSc
import mshr
from dolfin import *
import sympy as sy
import numpy as np
import ExactSol
import MatrixOperations as MO
import CheckPetsc4py as CP
from dolfin import __version__
import MaxwellPrecond as MP
import StokesPrecond
import time
def myCCode(A):
    """Render a sympy expression as C code, with M_PI spelled as `pi`."""
    code = sy.ccode(A)
    return code.replace('M_PI', 'pi')
def Print3D(u,v,w,p,opt):
    """Pretty-print a 3D exact solution, renaming x[0]/x[1]/x[2] to x/y/z.

    opt == "NS": print (u, v, w) as velocity and p as pressure.
    opt == "M":  print the same slots labelled as magnetic field b and
    Lagrange multiplier r.  (Python 2 print statements.)
    """
    if opt == "NS":
        print " u = (",str(u).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),",",str(v).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),",",str(w).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),")\n"
        print " p = (",str(p).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),")\n"
    if opt == "M":
        print " b = (",str(u).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),",",str(v).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),",",str(w).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),")\n"
        print " r = (",str(p).replace('x[0]','x').replace('x[1]','y').replace('x[2]','z'),")\n"
def Domain(n):
    """Build a 10 x 1 x 1 box mesh (10n x n x n cells) and mark its boundary.

    Facet markers: 2 on x=0 (inflow); 1 on top, bottom and both z-sides;
    the x=10 face is deliberately left unmarked (0) — see the commented-out
    right.mark below.  Returns (mesh, boundaries, domains).

    NOTE(review): CellFunction/FacetFunction are legacy (pre-2018) dolfin
    APIs — confirm the installed FEniCS version supports them.
    """
    mesh = BoxMesh(Point(0., 0., 0.), Point(10., 1., 1.), 10*n, n, n)

    # Subdomain predicates for each face of the box.
    class Left(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 0.0)

    class Right(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[0], 10.0)

    class Bottom(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 0.0)

    class Top(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[1], 1.0)

    class Side1(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[2], 0.0)

    class Side2(SubDomain):
        def inside(self, x, on_boundary):
            return near(x[2], 1.0)

    left = Left()
    top = Top()
    right = Right()
    bottom = Bottom()
    side1 = Side1()
    side2 = Side2()
    # Initialize mesh function for the domain
    domains = CellFunction("size_t", mesh)
    domains.set_all(0)
    # Initialize mesh function for boundary domains
    boundaries = FacetFunction("size_t", mesh)
    boundaries.set_all(0)
    left.mark(boundaries, 2)
    top.mark(boundaries, 1)
    bottom.mark(boundaries, 1)
    #right.mark(boundaries, 1)
    side1.mark(boundaries, 1)
    side2.mark(boundaries, 1)
    return mesh, boundaries, domains
# class u0(Expression):
# def __init__(self):
# def eval_cell(self, values, x, ufc_cell):
# values[0] = 1.0
# values[1] = 0
def ExactSolution(params, B0, delta, x_on, x_off):
    """Build manufactured MHD solutions and matching forcing terms.

    Arguments:
        params -- parameter list; params[0], params[1], params[2] enter the
            Hartmann number and the equation scalings (params[2] acts as the
            viscosity / inverse-Reynolds factor).  TODO confirm the exact
            meaning of each entry against the caller.
        B0     -- amplitude of the applied transverse magnetic field.
        delta  -- width of the tanh transition regions.
        x_on, x_off -- x positions where the field ramps on and off.

    Returns DOLFIN Expressions:
        (u0, p0, b0, r0, Laplacian, Advection, gradPres, NScouple,
         CurlCurl, gradLagr, Mcouple)
    """
    Re = 1./params[2]
    Ha = sqrt(params[0]/(params[1]*params[2]))
    G = 10.
    # Symbolic coordinates named to match DOLFIN's x[0], x[1], x[2].
    x = sy.Symbol('x[0]')
    y = sy.Symbol('x[1]')
    z = sy.Symbol('x[2]')
    # Base fields.  sy.diff(y, x) == 0 and sy.diff(x, x) == 1, so this is
    # u = (1, 0, 0), p = 0, b = (0, d(x), 0), r = 0: a uniform axial flow
    # through a tanh-ramped transverse magnetic field.
    b = sy.diff(y,x)
    d = (B0/2)*(sy.tanh((x-x_on)/delta)-sy.tanh((x-x_off)/delta))
    e = sy.diff(y,x)
    p = sy.diff(y,x)
    u = sy.diff(x, x)
    v = sy.diff(x, y)
    w = sy.diff(x, y)
    r = sy.diff(x, y)
    u0 = Expression((myCCode(u), myCCode(v), myCCode(w)))
    p0 = Expression(myCCode(p))
    b0 = Expression((myCCode(b), myCCode(d), myCCode(e)))
    r0 = Expression(myCCode(r))
    Print3D(u,v,w,p,"NS")
    Print3D(b,d,e,r,"M")
    # uu = y*x*sy.exp(x+y)
    # u = sy.diff(uu, y)
    #
    # v = -sy.diff(uu, x)
    # p = sy.sin(x)*sy.exp(y)
    # bb = x*y*sy.cos(x)
    # b = sy.diff(bb, y)
    # d = -sy.diff(bb, x
    # )
    # r = x*sy.sin(2*sy.pi*y)*sy.sin(2*sy.pi*x)
    # r = sy.diff(x, y)
    # b = y
    # d = sy.diff(x, y)
    # r = sy.diff(y, y)
    # 2D stress-tensor entries (not used further below).
    J11 = p - params[2]*sy.diff(u, x)
    J12 = - params[2]*sy.diff(u, y)
    J21 = - params[2]*sy.diff(v, x)
    J22 = p - params[2]*sy.diff(v, y)
    # Componentwise vector Laplacian of the velocity.
    L1 = sy.diff(u,x,x)+sy.diff(u,y,y) + sy.diff(u,z,z)
    L2 = sy.diff(v,x,x)+sy.diff(v,y,y) + sy.diff(v,z,z)
    L3 = sy.diff(w,x,x)+sy.diff(w,y,y) + sy.diff(w,z,z)
    # Convective term (u . grad)u.
    A1 = u*sy.diff(u,x)+v*sy.diff(u,y)+w*sy.diff(u,z)
    A2 = u*sy.diff(v,x)+v*sy.diff(v,y)+w*sy.diff(v,z)
    A3 = u*sy.diff(w,x)+v*sy.diff(w,y)+w*sy.diff(w,z)
    # Pressure gradient.
    P1 = sy.diff(p, x)
    P2 = sy.diff(p, y)
    P3 = sy.diff(p, z)
    # Curl-curl of the magnetic field (b, d, e).
    C1 = sy.diff(d,x,y) - sy.diff(b,y,y) - sy.diff(b,z,z) +sy.diff(e,x,z)
    C2 = sy.diff(e,y,z) - sy.diff(d,z,z) - sy.diff(d,x,x) +sy.diff(b,x,y)
    C3 = sy.diff(b,x,z) - sy.diff(e,x,x) - sy.diff(e,y,y) +sy.diff(d,y,z)
    # Gradient of the Lagrange multiplier r.
    R1 = sy.diff(r,x)
    R2 = sy.diff(r,y)
    R3 = sy.diff(r,z)
    # Velocity/field coupling products feeding the NS forcing
    # (u x B-type terms; NS1..NS3 is their curl).
    f = u*e-d*w
    g = b*w-u*e
    h = u*d-v*d
    NS1 = sy.diff(h,y)-sy.diff(g,z)
    NS2 = sy.diff(f,z)-sy.diff(h,x)
    NS3 = sy.diff(g,x)-sy.diff(f,y)
    # (m, n, p) = curl(B).
    # NOTE(review): this reassigns p to the z-component of curl(B),
    # clobbering the pressure symbol -- the Print3D call below therefore
    # prints this quantity in the pressure slot.  Confirm intended.
    m = sy.diff(e,y)-sy.diff(d,z)
    n = sy.diff(b,z)-sy.diff(e,x)
    p = sy.diff(d,x)-sy.diff(b,y)
    # curl(B) x B coupling entering the Maxwell forcing.
    M1 = n*e - d*p
    M2 = b*p - m*e
    M3 = m*d - n*b
    Print3D(-params[2]*L1+A1+P1-params[0]*NS1,-params[2]*L2+A2+P2-params[0]*NS2,-params[2]*L3+A3+P3-params[0]*NS3, p,"NS")
    Print3D(params[0]*params[1]*C1+R1-params[0]*M1,params[0]*params[1]*C2+R2-params[0]*M2,params[0]*params[1]*C3+R3-params[0]*M3,r,"M")
    Laplacian = Expression((myCCode(L1), myCCode(L2), myCCode(L3)))
    Advection = Expression((myCCode(A1), myCCode(A2), myCCode(A3)))
    gradPres = Expression((myCCode(P1), myCCode(P2), myCCode(P3)))
    NScouple = Expression((myCCode(NS1), myCCode(NS2), myCCode(NS3)))
    CurlCurl = Expression((myCCode(C1), myCCode(C2), myCCode(C3)))
    gradLagr = Expression((myCCode(R1), myCCode(R2), myCCode(R3)))
    Mcouple = Expression((myCCode(M1), myCCode(M2), myCCode(M3)))
    # pN = as_matrix(((Expression(myCCode(J11)), Expression(myCCode(J12))), (Expression(myCCode(J21)), Expression(myCCode(J22)))))
    return u0, p0, b0, r0, Laplacian, Advection, gradPres, NScouple, CurlCurl, gradLagr, Mcouple
# Sets up the initial guess for the MHD problem
def Stokes(V, Q, F, u0, params, boundaries, domains):
    """Solve a Stokes problem to produce the initial (u, p) guess.

    Arguments:
        V, Q    -- velocity and pressure function spaces.
        F       -- body-force expression for the momentum equation.
        u0      -- inflow velocity Dirichlet data (applied on marker 2).
        params  -- parameter list; params[2] scales the viscous term.
        boundaries, domains -- mesh functions from Domain().

    Returns (u_k, p_k), with the pressure shifted to zero mean.
    """
    parameters['reorder_dofs_serial'] = False
    W = V*Q
    IS = MO.IndexSet(W)
    mesh = W.mesh()
    (u, p) = TrialFunctions(W)
    (v, q) = TestFunctions(W)
    n = FacetNormal(W.mesh())
    # Viscous block, pressure gradient and divergence constraint.
    a11 = params[2]*inner(grad(v), grad(u))*dx('everywhere')
    a12 = -div(v)*p*dx('everywhere')
    a21 = -div(u)*q*dx('everywhere')
    a = a11+a12+a21
    L = inner(v, F)*dx('everywhere') #+ inner(gradu0,v)*ds(2)
    def boundary(x, on_boundary):
        return on_boundary
    # No-slip on walls (marker 1); prescribed inflow u0 (marker 2).
    bcu1 = DirichletBC(W.sub(0), Expression(("0.0","0.0", "0.0")), boundaries, 1)
    bcu2 = DirichletBC(W.sub(0), u0, boundaries, 2)
    bcu = [bcu1, bcu2]
    A, b = assemble_system(a, L, bcu)
    A, b = CP.Assemble(A, b)
    # Preconditioner: viscous block plus scaled pressure mass matrix.
    pp = params[2]*inner(grad(v), grad(u))*dx + (1./params[2])*p*q*dx
    P, Pb = assemble_system(pp, L, bcu)
    P, Pb = CP.Assemble(P, Pb)
    # print b.array
    # sss
    u = b.duplicate()
    # ksp = PETSc.KSP()
    # ksp.create(comm=PETSc.COMM_WORLD)
    # pc = ksp.getPC()
    # ksp.setType('preonly')
    # pc.setType('lu')
    # OptDB = PETSc.Options()
    # # if __version__ != '1.6.0':
    # OptDB['pc_factor_mat_solver_package']  = "umfpack"
    # OptDB['pc_factor_mat_ordering_type'] = "rcm"
    # ksp.setFromOptions()
    # MINRES with a Python-side block preconditioner.
    ksp = PETSc.KSP().create()
    ksp.setTolerances(1e-8)
    ksp.max_it = 200
    pc = ksp.getPC()
    pc.setType(PETSc.PC.Type.PYTHON)
    ksp.setType('minres')
    pc.setPythonContext(StokesPrecond.Approx(W, 1))
    ksp.setOperators(A,P)
    # print b.array
    # bbb
    # Normalise the RHS; the solution is rescaled after the solve.
    scale = b.norm()
    b = b/scale
    # ksp.setOperators(A,A)
    del A
    start_time = time.time()
    ksp.solve(b,u)
    print ("{:40}").format("Stokes solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format("   Its: "), ("{:4}").format(ksp.its),  ("{:9}").format("   time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
    # Mits +=dodim
    u = u*scale
    # print u.array
    # ss
    # Split the mixed solution back into velocity and pressure functions.
    u_k = Function(V)
    p_k = Function(Q)
    u_k.vector()[:] = u.getSubVector(IS[0]).array
    p_k.vector()[:] = u.getSubVector(IS[1]).array
    # Shift the pressure so it has zero mean over the domain.
    ones = Function(Q)
    ones.vector()[:]=(0*ones.vector().array()+1)
    p_k.vector()[:] += -assemble(p_k*dx('everywhere'))/assemble(ones*dx('everywhere'))
    return u_k, p_k
def Maxwell(V, Q, F, b0, params, HiptmairMatrices, Hiptmairtol):
    """Solve the Maxwell subproblem for the initial (b, r) guess.

    Arguments:
        V, Q   -- magnetic field and multiplier function spaces.
        F      -- right-hand-side forcing expression.
        b0     -- Dirichlet data for the magnetic field on the whole boundary.
        params -- parameter list; params[1]*params[2] scales the curl-curl term.
        HiptmairMatrices, Hiptmairtol -- data for the Hiptmair auxiliary-space
            preconditioner (see MaxwellPrecond.Hiptmair).

    Returns (b_k, r_k): magnetic field and Lagrange multiplier functions.
    """
    parameters['reorder_dofs_serial'] = False
    W = V*Q
    IS = MO.IndexSet(W)
    (b, r) = TrialFunctions(W)
    (c, s) = TestFunctions(W)
    # Curl-curl block plus the two gradient coupling blocks.
    a11 = params[1]*params[2]*inner(curl(b), curl(c))*dx('everywhere')
    a21 = inner(b,grad(s))*dx('everywhere')
    a12 = inner(c,grad(r))*dx('everywhere')
    L = inner(c, F)*dx('everywhere')
    a = a11+a12+a21
    def boundary(x, on_boundary):
        return on_boundary
    # b = b0 and r = 0 on the entire boundary.
    bcb = DirichletBC(W.sub(0), b0, boundary)
    bcr = DirichletBC(W.sub(1), Expression(("0.0")), boundary)
    bc = [bcb, bcr]
    A, b = assemble_system(a, L, bc)
    A, b = CP.Assemble(A, b)
    u = b.duplicate()
    # (Fix) removed a leftover debug statement that printed the entire
    # assembled RHS vector on every call.
    # MINRES with the Hiptmair preconditioner supplied from the caller.
    ksp = PETSc.KSP().create()
    ksp.setTolerances(1e-8)
    ksp.max_it = 200
    pc = ksp.getPC()
    pc.setType(PETSc.PC.Type.PYTHON)
    ksp.setType('minres')
    pc.setPythonContext(MP.Hiptmair(W, HiptmairMatrices[3], HiptmairMatrices[4], HiptmairMatrices[2], HiptmairMatrices[0], HiptmairMatrices[1], HiptmairMatrices[6],Hiptmairtol))
    # Normalise the RHS; the solution is rescaled after the solve.
    scale = b.norm()
    b = b/scale
    ksp.setOperators(A,A)
    del A
    start_time = time.time()
    ksp.solve(b,u)
    print ("{:40}").format("Maxwell solve, time: "), " ==> ",("{:4f}").format(time.time() - start_time),("{:9}").format("   Its: "), ("{:4}").format(ksp.its),  ("{:9}").format("   time: "), ("{:4}").format(time.strftime('%X %x %Z')[0:5])
    u = u*scale
    # Split the mixed solution into field and multiplier functions.
    b_k = Function(V)
    r_k = Function(Q)
    b_k.vector()[:] = u.getSubVector(IS[0]).array
    r_k.vector()[:] = u.getSubVector(IS[1]).array
    return b_k, r_k
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import pandas as pd
import numpy as np
import os
import shutil
import json
import uuid
import tempfile
from sklearn.linear_model import LinearRegression, LogisticRegression, LogisticRegressionCV
from sklearn.preprocessing import Binarizer
from mleap.sklearn.base import LinearRegression
from mleap.sklearn.logistic import LogisticRegression, LogisticRegressionCV
from mleap.sklearn.preprocessing.data import FeatureExtractor, Binarizer
def to_standard_normal_quartile(rand_num):
    """Retrieve the quartile of a data point sampled from the standard normal distribution

    Useful for assigning multi-class labels to random data during tests.
    Such tests should probably use sklearn.preprocessing.KBinsDiscretizer instead,
    but they can't since scikit-learn is pinned < 0.20.0.
    https://github.com/combust/mleap/pull/431
    """
    # Quartile boundaries of N(0, 1); strict '<' matches the original chain.
    for quartile, upper_bound in enumerate((-0.67448, 0.0, 0.67448)):
        if rand_num < upper_bound:
            return quartile
    return 3
class TransformerTests(unittest.TestCase):
    """Serialization round-trip tests for mleap-wrapped sklearn regressors.

    Each test fits an estimator on random data, serializes it to an mleap
    bundle under a temp dir, then either inspects the written model.json or
    deserializes the bundle and compares predictions with the original.
    (Common fit/serialize boilerplate is factored into private helpers.)
    """

    def setUp(self):
        # 100 rows of standard-normal data; tests use column 'a' as the
        # feature and 'e' (or labels derived from 'a') as the target.
        self.df = pd.DataFrame(np.random.randn(100, 5), columns=['a', 'b', 'c', 'd', 'e'])
        self.tmp_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tmp_dir)

    def _load_model_json(self, name):
        """Return the parsed model.json written for bundle node `name`."""
        with open("{}/{}.node/model.json".format(self.tmp_dir, name)) as json_data:
            return json.load(json_data)

    def _fitted_linear_regression(self):
        """Fit an mleap LinearRegression mapping feature 'a' to target 'e'."""
        linear_regression = LinearRegression(fit_intercept=True, normalize=False)
        linear_regression.mlinit(input_features='a',
                                 prediction_column='e')
        linear_regression.fit(self.df[['a']], self.df[['e']])
        return linear_regression

    def _binarized_labels(self):
        """Binarize the fed column at 0.0, producing binary labels.

        NOTE(review): the FeatureExtractor is configured for column 'e' but
        fit_transform is fed df[['a']] -- preserved verbatim from the
        original tests; confirm against mleap's Binarizer semantics.
        """
        extract_features = ['e']
        feature_extractor = FeatureExtractor(input_scalars=['e'],
                                             output_vector='extracted_e_output',
                                             output_vector_items=["{}_out".format(x) for x in extract_features])
        binarizer = Binarizer(threshold=0.0)
        binarizer.mlinit(prior_tf=feature_extractor,
                         output_features='e_binary')
        return binarizer.fit_transform(self.df[['a']])

    def _fitted_binary_classifier(self, classifier):
        """Initialize and fit `classifier` on 'a' vs. the binarized labels."""
        classifier.mlinit(input_features='a',
                          prediction_column='e_binary')
        classifier.fit(self.df[['a']], self._binarized_labels())
        return classifier

    def _fitted_multinomial_classifier(self):
        """Fit a 4-class LogisticRegression using quartile labels of 'a'."""
        logistic_regression = LogisticRegression(fit_intercept=True)
        logistic_regression.mlinit(
            input_features='a',
            prediction_column='prediction'
        )
        X = self.df[['a']]
        y = np.array([to_standard_normal_quartile(elem) for elem in X.to_numpy()])
        logistic_regression.fit(X, y)
        return logistic_regression

    def _assert_model_json(self, model, op):
        """Assert the serialized model.json has the expected op and an intercept."""
        self.assertEqual(model['op'], op)
        self.assertIsNotNone(model['attributes']['intercept']['double'])

    def test_linear_regression_serializer(self):
        linear_regression = self._fitted_linear_regression()
        linear_regression.serialize_to_bundle(self.tmp_dir, linear_regression.name)
        model = self._load_model_json(linear_regression.name)
        self._assert_model_json(model, 'linear_regression')

    def test_linear_regression_deserializer(self):
        linear_regression = self._fitted_linear_regression()
        linear_regression.serialize_to_bundle(self.tmp_dir, linear_regression.name)
        node_name = "{}.node".format(linear_regression.name)
        linear_regression_tf = LinearRegression().deserialize_from_bundle(self.tmp_dir, node_name)
        expected = linear_regression.predict(self.df[['a']])
        actual = linear_regression_tf.predict(self.df[['a']])
        for i in range(3):
            self.assertEqual(expected[i], actual[i])

    def test_logistic_regression_serializer(self):
        logistic_regression = self._fitted_binary_classifier(
            LogisticRegression(fit_intercept=True))
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        model = self._load_model_json(logistic_regression.name)
        self._assert_model_json(model, 'logistic_regression')

    def test_logistic_regression_deserializer(self):
        logistic_regression = self._fitted_binary_classifier(
            LogisticRegression(fit_intercept=True))
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        node_name = "{}.node".format(logistic_regression.name)
        logistic_regression_tf = LogisticRegression().deserialize_from_bundle(self.tmp_dir, node_name)
        expected = logistic_regression.predict(self.df[['a']])
        actual = logistic_regression_tf.predict(self.df[['a']])
        for i in range(3):
            self.assertEqual(expected[i], actual[i])

    def test_multinomial_logistic_regression_serializer(self):
        logistic_regression = self._fitted_multinomial_classifier()
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        model = self._load_model_json(logistic_regression.name)
        self.assertEqual(model['op'], 'logistic_regression')
        self.assertEqual(model['attributes']['num_classes']['long'], 4)
        # 4x1 coefficient matrix: one weight per class for the single feature.
        self.assertEqual(len(model['attributes']['coefficient_matrix']['double']), 4)
        self.assertEqual(len(model['attributes']['coefficient_matrix']['shape']['dimensions']), 2)
        self.assertEqual(model['attributes']['coefficient_matrix']['shape']['dimensions'][0]['size'], 4)
        self.assertEqual(model['attributes']['coefficient_matrix']['shape']['dimensions'][1]['size'], 1)
        # Length-4 intercept vector: one intercept per class.
        self.assertEqual(len(model['attributes']['intercept_vector']['double']), 4)
        self.assertEqual(len(model['attributes']['intercept_vector']['shape']['dimensions']), 1)
        self.assertEqual(model['attributes']['intercept_vector']['shape']['dimensions'][0]['size'], 4)

    def test_multinomial_logistic_regression_deserializer(self):
        logistic_regression = self._fitted_multinomial_classifier()
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        node_name = "{}.node".format(logistic_regression.name)
        logistic_regression_tf = LogisticRegression().deserialize_from_bundle(self.tmp_dir, node_name)
        expected = logistic_regression.predict(self.df[['a']])
        actual = logistic_regression_tf.predict(self.df[['a']])
        np.testing.assert_array_equal(expected, actual)

    def test_logistic_regression_cv_serializer(self):
        logistic_regression = self._fitted_binary_classifier(
            LogisticRegressionCV(fit_intercept=True))
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        model = self._load_model_json(logistic_regression.name)
        self._assert_model_json(model, 'logistic_regression')

    def test_logistic_regression_cv_deserializer(self):
        logistic_regression = self._fitted_binary_classifier(
            LogisticRegressionCV(fit_intercept=True))
        logistic_regression.serialize_to_bundle(self.tmp_dir, logistic_regression.name)
        node_name = "{}.node".format(logistic_regression.name)
        logistic_regression_tf = LogisticRegressionCV().deserialize_from_bundle(self.tmp_dir, node_name)
        expected = logistic_regression.predict(self.df[['a']])
        actual = logistic_regression_tf.predict(self.df[['a']])
        for i in range(3):
            self.assertEqual(expected[i], actual[i])
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all deleted sites in a subscription."""
    api_version = "2021-01-01"
    accept = "application/json"

    # Substitute the path parameter into the URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites')
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_location_request(
    location: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists deleted sites for one location."""
    api_version = "2021-01-01"
    accept = "application/json"

    # Substitute both path parameters into the URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites')
    url = _format_url_section(
        template,
        location=_SERIALIZER.url("location", location, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_deleted_web_app_by_location_request(
    location: str,
    deleted_site_id: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single deleted site at a location."""
    api_version = "2021-01-01"
    accept = "application/json"

    # Substitute all three path parameters into the URL template.
    template = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites/{deletedSiteId}')
    url = _format_url_section(
        template,
        location=_SERIALIZER.url("location", location, 'str'),
        deletedSiteId=_SERIALIZER.url("deleted_site_id", deleted_site_id, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
    )

    # Query string and headers.
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class DeletedWebAppsOperations(object):
    """DeletedWebAppsOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2021_01_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.DeletedWebAppCollection"]:
        """Get all deleted apps for a subscription.

        Description for Get all deleted apps for a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedWebAppCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DeletedWebAppCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedWebAppCollection"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers
        # may extend the mapping via the 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: build from the operation's URL template.
            # Later pages: re-issue against the service-supplied next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page; return (link to next page, page items).
            deserialized = self._deserialize("DeletedWebAppCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Run one page request through the pipeline, raising on non-200.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites'}  # type: ignore

    @distributed_trace
    def list_by_location(
        self,
        location: str,
        **kwargs: Any
    ) -> Iterable["_models.DeletedWebAppCollection"]:
        """Get all deleted apps for a subscription at location.

        Description for Get all deleted apps for a subscription at location.

        :param location:
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedWebAppCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_01_01.models.DeletedWebAppCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedWebAppCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # Same first-page/next-link split as in list() above.
            if not next_link:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize("DeletedWebAppCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites'}  # type: ignore

    @distributed_trace
    def get_deleted_web_app_by_location(
        self,
        location: str,
        deleted_site_id: str,
        **kwargs: Any
    ) -> "_models.DeletedSite":
        """Get deleted app for a subscription at location.

        Description for Get deleted app for a subscription at location.

        :param location:
        :type location: str
        :param deleted_site_id: The numeric ID of the deleted app, e.g. 12345.
        :type deleted_site_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeletedSite, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_01_01.models.DeletedSite
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSite"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Single (non-paged) GET of one deleted site.
        request = build_get_deleted_web_app_by_location_request(
            location=location,
            deleted_site_id=deleted_site_id,
            subscription_id=self._config.subscription_id,
            template_url=self.get_deleted_web_app_by_location.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('DeletedSite', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get_deleted_web_app_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/locations/{location}/deletedSites/{deletedSiteId}'}  # type: ignore
| |
"""
Thomson Reuters Data Citation Index service for registry objects.
"""
"""
The TR DCI service relies on a table "registry_object_citations".
The service operates as follows:
1. Get the registry objects to look up from the database.
2. Look up the registry objects in TR DCI, updating each one's
citation data in the database.
Data structures used throughout this module:
(roc = "registry object citation")
roc_list: list
element: dict: a row from the registry_object_citations table.
key: one of the column names in the registry_object_citations table.
special key: "citation_result" is used internally to store
the values returned from the TR DCI service.
value: the value of that column
query db to get ROs
select from registry_object_citations
where service_provider is TRDCI
and last_checked is more than update_frequency days ago
optionally: where data_source_id = ...
in groups of batch_size:
construct query to TRDCI
for each ro, unpack query_terms to determine what to send to TR
send query
receive result
unpack result, update db
Preliminary work for this service implementation was done by Melanie Barlow.
"""
import sys
import json
import urllib.request
import xml.dom.minidom
import xml.sax.saxutils
import pymysql
import pymysql.constants.CLIENT
# The base module contains the BaseService class.
from . import base
class TRDCIService(base.BaseService):
"""Thomson Reuters Data Citation Index service for registry objects.
"""
# service_url: "https://gateway.webofknowledge.com/gateway/Gateway.cgi"
# username: "username"
# password: "password"
# service_id: "DRCI"
# update_frequency: 7
# batch_size: 50
required_options = {'service_url', 'username', 'password',
'service_id', 'update_frequency', 'batch_size'}
# The value of the service_provider column in the database
SERVICE_PROVIDER = "TRDCI"
# The value of the key used to store citation results in
# each element of roc_list.
CITATION_RESULT_KEY = 'citation_result'
    def do_update_citation_data(self):
        """Do the updating of citation data.

        Pipeline: load candidate registry objects from the database, query
        TR DCI for their citations, write the results back, and -- only when
        a portal database is configured -- push citation counts there too.
        """
        roc_list = []
        # NOTE(review): 'client_id' is passed as the data_source_id filter;
        # presumably the two identify the same thing here -- confirm
        # against the callers that populate self._params.
        self._get_ROs_for_checking(roc_list, self._params['client_id'])
        self._fetch_citations(roc_list)
        self._update_citations_in_database(roc_list)
        if 'portal_database_name' in self._params:
            self._update_citation_counts_in_portal_database(roc_list)
# Columns in the registry_object_citations table.
# RO_CITATIONS_COLUMNS = [
# 'id',
# 'registry_object_id',
# 'data_source_id',
# 'service_provider',
# 'query_terms',
# 'citation_data',
# 'last_checked']
# Columns needed to get the links for querying.
# registry_object_id and slug are needed for updating the portal database.
RO_CITATIONS_CHECKING_COLUMNS = [
'id',
'registry_object_id',
'slug',
'query_terms']
def _get_ROs_for_checking(self, roc_list, data_source_id=None):
"""Get all ROs to be looked up.
The roc_list array is updated in situ.
Arguments:
roc_list -- The array to be populated with RO data
from the database.
data_source_id -- A data_source_id to use for searching the database,
or None, if the ROs of all data sources are to be returned.
"""
cur = self._conn.cursor()
# First, compute the time to be used to set the last_checked
# column of the records that get updated. For the purposes of
# this calculation, select the current time, minus one
# hour. This is a simple workaround to allow this program to
# be run regularly at the same time each update_frequency
# days. If, instead, the update of the records were done using
# an UPDATE statement that used e.g., NOW(), for the
# last_checked value, then the next time this module is run
# (i.e., in update_frequency days), those updated records
# would not yet have "expired".
query = 'SELECT SUBDATE(NOW(),INTERVAL 1 HOUR);'
cur.execute(query)
for r in cur:
self._timestamp = r
# From the database:
# select the columns specified in RO_CITATIONS_CHECKING_COLUMNS
# filter on:
# service_provider is the value of SERVICE_PROVIDER
# last_checked is at least last_checked days ago
# if data_source_id was specified, then only select
# records from that data source.
# Put backquotes around the column names.
# Special treatment for registry_object_id, as it appears
# in both of the two tables being joined.
columns_for_query = [
("`" + c + "`").replace(
'registry_object_id',
'roc`.`registry_object_id')
for c in self.RO_CITATIONS_CHECKING_COLUMNS]
columns_for_query = columns_for_query
query = ("SELECT " + ", ".join(columns_for_query) +
" FROM registry_object_citations roc, registry_objects ro" +
" WHERE roc.registry_object_id = ro.registry_object_id" +
" AND `service_provider` = '" +
self.SERVICE_PROVIDER +
"' AND `last_checked` < NOW() - INTERVAL " +
str(self._params['update_frequency']) + " DAY")
if data_source_id is not None:
query += " AND ro.`data_source_id`=" + str(data_source_id)
query += ";"
if self._debug:
print("DEBUG: _get_ROs_for_checking query:", query,
file=sys.stderr)
cur.execute(query)
for r in cur:
# Turn the result tuple into a dict with the column names as keys
roc = {k: v for k, v in
zip(self.RO_CITATIONS_CHECKING_COLUMNS, r)}
roc_list.append(roc)
cur.close()
if self._debug:
print("DEBUG: _get_ROs_for_checking roc_list:",
roc_list, file=sys.stderr)
def _fetch_citations(self, roc_list):
"""Fetch citation data from TR DCI.
Given the registry objects to be looked up, query the TR DCI
service. Update roc_list with the returned citation data.
Arguments:
roc_list -- The array containing details of the records
to be looked up in the TR DCI service.
"""
roc_index = 0
roc_length = len(roc_list)
while roc_index < roc_length:
self._fetch_citations_for_one_batch(
roc_list, roc_index)
roc_index += int(self._params['batch_size'])
if self._debug:
print("DEBUG: _fetch_citations_and_update: " +
"after fetching, roc_list:", roc_list,
file=sys.stderr)
# Template for requests to be sent to TR DCI
TRDCI_REQUEST_TEMPLATE = """
<request xmlns='http://www.isinet.com/xrpc42' src='app.id=ANDS'>
<fn name='LinksAMR.retrieve'>
<list>
<!-- WHO'S REQUESTING -->
<map>
<val name='username'>{0}</val>
<val name='password'>{1}</val>
</map>
<!-- WHAT'S REQUESTED -->
<map>
<list name='{2}'>
<val>timesCitedAllDB</val>
<val>uid</val>
<val>doi</val>
<val>sourceURL</val>
<val>citingArticlesAllDBURL</val>
<val>repositoryLinkURL</val>
</list>
</map>
<!-- LOOKUP DATA -->
{3}
</list>
</fn>
</request>
"""
def _fetch_citations_for_one_batch(self, roc_list, roc_index):
"""Fetch citation data from TR DCI and update the database.
Look up one batch of registry objects in the TR DCI
service. The batch to be looked up starts at index
roc_index and has size batch_size.
roc_list is updated with the results in situ.
Arguments:
roc_list -- The array containing details of the batch of records
to be looked up in the TR DCI service.
roc_index -- The lower index of the batch of records in roc_list.
"""
if self._debug:
print("DEBUG: _fetch_citations_for_one_batch: " +
"roc_index:", roc_index,
file=sys.stderr)
citation_requests = self._create_citation_requests(roc_list,
roc_index)
# Username, password, and service_id are escaped before
# inserting into the template. The search terms are escaped
# already because of the use of the xml.dom.minidom module.
request_XML = self.TRDCI_REQUEST_TEMPLATE.format(
xml.sax.saxutils.escape(self._params['username']),
xml.sax.saxutils.escape(self._params['password']),
xml.sax.saxutils.escape(self._params['service_id']),
citation_requests.toprettyxml()
)
if self._debug:
print("DEBUG: _fetch_citations_for_one_batch: " +
"request_XML:", request_XML,
file=sys.stderr)
# Create request connection object
request_conn = urllib.request.Request(self._params['service_url'])
# Explicitly add a Content-Type header to notify that
# this is an XML request; without this,
# the Python library adds a Content-Type header with
# "application/x-www-form-urlencoded" instead.
request_conn.add_header('Content-Type',
'application/xml;encoding=utf-8')
response = urllib.request.urlopen(request_conn,
request_XML.encode('utf-8'))
result = response.read()
if self._debug:
print("DEBUG: _fetch_citations_for_one_batch: " +
"result:", result,
file=sys.stderr)
# Now update roc_list with the results.
result_DOM = xml.dom.minidom.parseString(result)
for map_element in result_DOM.getElementsByTagName('map'):
# Only worry about map elements that have a name attribute
# beginning with "cite_".
if not (map_element.hasAttribute('name') and
map_element.getAttribute('name').startswith(
self.CITE_BEGINNING)):
continue
if self._debug:
print("DEBUG: _fetch_citations_for_one_batch: " +
"found map element with name element:",
map_element.getAttribute('name'),
file=sys.stderr)
cite_index = int(map_element.getAttribute('name')[
self.CITE_BEGINNING_LENGTH:])
map_dict = dict()
for val_element in map_element.getElementsByTagName('val'):
name_attr = val_element.getAttribute('name')
text_value = self._get_text_of_element(val_element.childNodes)
map_dict[name_attr] = text_value
roc_list[cite_index][self.CITATION_RESULT_KEY] = map_dict
pass
def _get_text_of_element(self, nodelist):
"""Extract the combined text values from nodelist.
This is based on the getText() function given as part of an
example in the xml.dom.minidom documentation in the Python
Library Reference.
Note that TR return both text and CDATA nodes, so two node
types must be tested for.
Arguments:
nodelist -- The array of nodes containing text to be extracted.
Return value:
The text contained in nodelist.
"""
rc = []
for node in nodelist:
if (node.nodeType == node.TEXT_NODE or
node.nodeType == node.CDATA_SECTION_NODE):
rc.append(node.data)
return ''.join(rc)
# Format of the attribute values to use on map elements
# in the request.
CITE_TEMPLATE = 'cite_{0}'
# Like CITE_TEMPLATE, but used to match attribute values
# returned by the TR DCI service.
CITE_BEGINNING = 'cite_'
# Like CITE_TEMPLATE, but used to match attribute values
# returned by the TR DCI service.
CITE_BEGINNING_LENGTH = len(CITE_BEGINNING)
def _create_citation_requests(self, roc_list, roc_index):
"""Convert batch of records into XML needed for the service.
The use of the xml.dom.minidom module ensures that the
necessary escaping (i.e., of &, <, >) is done before
insertion into the query sent to the TR DCI service.
Code based on work done by Melanie Barlow.
Arguments:
roc_list -- The array containing details of the batch of records
to be looked up in the TR DCI service.
roc_index -- The lower index of the batch of records in roc_list.
Return value:
The XML to be included in the query sent to TR DCI.
"""
DOM_implementation = xml.dom.minidom.getDOMImplementation()
map_document = DOM_implementation.createDocument(None, 'map', None)
map_root = map_document.documentElement
cite_upper_bound = min(roc_index + int(self._params['batch_size']),
len(roc_list))
for cite_counter in range(roc_index, cite_upper_bound):
article = roc_list[cite_counter]
map_node = map_document.createElement('map')
map_attribute = map_document.createAttribute('name')
map_attribute.value = self.CITE_TEMPLATE.format(cite_counter)
cite_counter += 1
map_node.setAttributeNode(map_attribute)
map_root.appendChild(map_node)
if self._debug:
print("DEBUG: _create_citation_rquests: " +
"article['query_terms']:", article['query_terms'],
file=sys.stderr)
for query_key, query_value in json.loads(
article['query_terms']).items():
subnode = map_document.createElement(
'list' if query_key == 'authors' else 'val')
subattribute = map_document.createAttribute('name')
subattribute.value = query_key
subnode.setAttributeNode(subattribute)
map_node.appendChild(subnode)
if query_key == 'authors':
itemList = query_value.split('|')
for item in itemList:
print(item)
itemNode = map_document.createElement('val')
itemText = map_document.createTextNode(item)
itemNode.appendChild(itemText)
subnode.appendChild(itemNode)
else:
subText = map_document.createTextNode(
query_value)
subnode.appendChild(subText)
return map_root
# Database query template for updating the registry's
# registry_object_citations table.
ROC_QUERY_TEMPLATE = ("UPDATE registry_object_citations SET " +
" `citation_data` = %s" +
", `status` = %s" +
", `last_checked` = %s" +
" WHERE `id` = %s;")
def _update_citations_in_database(self, roc_list):
"""Update the citation_data column in the database.
Update the citation_data column in the database based
on the citation data received from TR DCI.
Arguments:
roc_list -- The array containing the batch of records
to be updated, with the citation results returned from the
TR DCI service.
"""
cur = self._conn.cursor()
for r in roc_list:
# if self._debug:
# print("DEBUG: _update_citations_in_database: " +
# "query:", query,
# file=sys.stderr)
if ('message' in r[self.CITATION_RESULT_KEY] and
r[self.CITATION_RESULT_KEY]['message'] ==
'No Result Found'):
status = 'NOT_FOUND'
citation_data = ''
else:
status = 'SUCCESS'
citation_data = json.dumps(r[self.CITATION_RESULT_KEY])
cur.execute(self.ROC_QUERY_TEMPLATE, [citation_data, status,
self._timestamp, r['id']])
cur.close()
self._conn.commit()
# Database query template for updating the portal record_stats table.
PORTAL_UPDATE_TEMPLATE = ("UPDATE record_stats SET " +
" `cited` = %s" +
" WHERE `ro_id` = %s;")
# Database query template for inserting into the portal
# record_stats table.
PORTAL_INSERT_TEMPLATE = ("INSERT INTO record_stats " +
"(ro_id,ro_slug,viewed,cited,accessed) " +
"VALUES (" +
"%s,%s,0,%s,0);")
def _update_citation_counts_in_portal_database(self, roc_list):
"""Update the record_stats table of the portal database.
For those records for which we received citation counts
from TR DCI, update the corresponding entry in the
record_stats column in the portal database.
If there is not yet a record for a registry object in the
record_stats table, insert one. Note well, the only other
place in the code that rows are added to record_stats is
in the stat() function in
applications/portal/registry_object/models/_ro.php.
Changes there and here must be synchronized!
Arguments:
roc_list -- The array containing the batch of records
to be updated, with the citation results returned from the
TR DCI service.
"""
portal_database_connection = self.open_portal_db_connection()
try:
cur = portal_database_connection.cursor()
for r in roc_list:
if ('timesCitedAllDB' in r[self.CITATION_RESULT_KEY]):
if self._debug:
print("DEBUG:",
"_update_citation_counts_in_portal_database:",
"r['registry_object_id']:",
r['registry_object_id'],
"timesCitedAllDB:",
r[self.CITATION_RESULT_KEY]
['timesCitedAllDB'],
file=sys.stderr)
cur.execute(self.PORTAL_UPDATE_TEMPLATE,
[r[self.CITATION_RESULT_KEY]
['timesCitedAllDB'],
r['registry_object_id']])
# This use of cur.rowcount relies on the FOUND_ROWS
# setting on the connection. (See the comment in
# open_portal_db_connection() below.)
if cur.rowcount == 0:
# The row is missing, so insert it
cur.execute(self.PORTAL_INSERT_TEMPLATE,
[r['registry_object_id'],
r['slug'],
r[self.CITATION_RESULT_KEY]
['timesCitedAllDB']])
cur.close()
portal_database_connection.commit()
finally:
portal_database_connection.close()
pass
def open_portal_db_connection(self):
"""Establish a connection with the database.
Only pymysql is supported as the database module.
Future work for this function:
* Support other database modules (PostgreSQL, etc.)
* When we do that, only load the one database Python module required,
not all.
Arguments:
params -- The dictionary of parameters, which must include
all those needed to establish the connection.
"""
try:
# Note the use of the FOUND_ROWS setting. This makes
# UPDATE statements return the number of rows matched
# by the WHERE clause, rather than the number of rows
# with changed values. (E.g., updating the value "1" to "1"
# does not "normally" count as an update. With this setting,
# it does.)
return pymysql.connect(
host=self._params['database_host'],
user=self._params['database_user'],
passwd=self._params['database_password'],
db=self._params['portal_database_name'],
client_flag=pymysql.constants.CLIENT.FOUND_ROWS)
except Exception as e:
print("Database Exception:", e)
sys.exit(1)
# Logging functions
def _insert_message_log(self, owner_id, message, status):
"""Insert a log entry into the database's activity_log table.
The activity is specified as "TRDCI".
Arguments:
owner_id -- The owner of the RO. This value is used as the
"data_source_id" column of the entry.
message -- The value to use for the "message" column of the entry.
status -- The value to use for the "status" column of the entry.
"""
cursor = self._conn.cursor()
sql = ("INSERT INTO activity_log "
"(`data_source_id`, `message`, `activity`, `result`) "
"values (%s, %s, %s, %s);")
cursor.execute(sql, (owner_id, message, 'TRDCI', status))
cursor.close()
self._conn.commit()
# This module only defines the service class; it is meant to be imported
# by the service framework, never run directly.
if __name__ == "__main__":
    print('This module can not be executed standalone.')
    sys.exit(1)
| |
import re
import sys
import copy
import socket
from datetime import datetime
from decimal import Decimal
from collections import Mapping, Container
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring
_int_types = (int, long)
class SchemaError(ValueError):
    """
    errors encountered in processing a schema (subclass of :class:`ValueError`)

    Raised when the schema definition itself is malformed, as opposed to
    the data failing to validate against it.
    """
class ValidationError(ValueError):
    """
    validation errors encountered during validation (subclass of
    :class:`ValueError`)

    Base class for errors raised when data does not conform to a schema.
    """
class FieldValidationError(ValidationError):
    """A validation failure tied to one specific field.

    The offending field name and value are exposed via the `fieldname`
    and `value` attributes.
    """

    def __init__(self, message, fieldname, value):
        self.fieldname = fieldname
        self.value = value
        super(FieldValidationError, self).__init__(message)
def _generate_datetime_validator(format_option, dateformat_string):
def validate_format_datetime(validator, fieldname, value, format_option):
try:
datetime.strptime(value, dateformat_string)
except ValueError:
raise FieldValidationError(
"Value %(value)r of field '%(fieldname)s' is not in "
"'%(format_option)s' format" % locals(), fieldname, value)
return validate_format_datetime
# Pre-built validators for the standard date/time format names.
validate_format_date_time = _generate_datetime_validator('date-time',
                                                         '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Check that *value* is a positive number (epoch milliseconds)."""
    numeric_types = _int_types + (float, Decimal)
    if not isinstance(value, numeric_types):
        message = ("Value %(value)r of field '%(fieldname)s' "
                   "is not a number" % locals())
        raise FieldValidationError(message, fieldname, value)
    # "not value > 0" (rather than "value <= 0") also rejects NaN.
    if not value > 0:
        message = ("Value %(value)r of field '%(fieldname)s' "
                   " is not a positive number" % locals())
        raise FieldValidationError(message, fieldname, value)
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Check that *value* is a dotted-quad IPv4 address string."""
    try:
        socket.inet_aton(value)
        # Make sure we expect "X.X.X.X" as socket.inet_aton() converts "1"
        # to "0.0.0.1"
        ip = len(value.split('.')) == 4
    # Narrowed from a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit. inet_aton raises socket.error (an
    # OSError alias) on bad addresses; non-string values raise
    # TypeError/AttributeError on inet_aton()/split().
    except (socket.error, TypeError, AttributeError, ValueError):
        ip = False
    if not ip:
        raise FieldValidationError("Value %(value)r of field '%(fieldname)s' "
                                   "is not a ip-address" % locals(), fieldname,
                                   value)
# Map of built-in format names to their validator functions; copied into
# each SchemaValidator instance unless custom validators are supplied.
DEFAULT_FORMAT_VALIDATORS = {
    'date-time': validate_format_date_time,
    'date': validate_format_date,
    'time': validate_format_time,
    'utc-millisec': validate_format_utc_millisec,
    'ip-address': validate_format_ip_address,
}
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
:param disallow_unknown_properties: defaults to False, set to True to
disallow properties not listed in the schema definition
:param apply_default_to_data: defaults to False, set to True to modify the
data in case the schema definition includes a "default" property
'''
def __init__(self, format_validators=None, required_by_default=True,
blank_by_default=False, disallow_unknown_properties=False,
apply_default_to_data=False):
if format_validators is None:
format_validators = DEFAULT_FORMAT_VALIDATORS.copy()
self._format_validators = format_validators
self.required_by_default = required_by_default
self.blank_by_default = blank_by_default
self.disallow_unknown_properties = disallow_unknown_properties
self.apply_default_to_data = apply_default_to_data
def register_format_validator(self, format_name, format_validator_fun):
self._format_validators[format_name] = format_validator_fun
def validate_type_string(self, val):
return isinstance(val, _str_type)
def validate_type_integer(self, val):
return type(val) in _int_types
def validate_type_number(self, val):
return type(val) in _int_types + (float, Decimal,)
def validate_type_boolean(self, val):
return type(val) == bool
def validate_type_object(self, val):
return isinstance(val, Mapping) or (hasattr(val, 'keys')
and hasattr(val, 'items'))
def validate_type_array(self, val):
return isinstance(val, (list, tuple))
def validate_type_null(self, val):
return val is None
def validate_type_any(self, val):
return True
def _error(self, desc, value, fieldname, **params):
params['value'] = value
params['fieldname'] = fieldname
message = desc % params
raise FieldValidationError(message, fieldname, value)
def _validate_unknown_properties(self, schema, data, fieldname):
schema_properties = set(schema)
data_properties = set(data)
delta = data_properties - schema_properties
if delta:
unknowns = ''
for x in delta:
unknowns += '"%s", ' % x
unknowns = unknowns.rstrip(", ")
raise SchemaError('Unknown properties for field '
'"%(fieldname)s": %(unknowns)s' %
locals())
def validate_type(self, x, fieldname, schema, fieldtype=None):
'''
Validates that the fieldtype specified is correct for the given
data
'''
# We need to know if the field exists or if it's just Null
fieldexists = True
try:
value = x[fieldname]
except KeyError:
fieldexists = False
value = None
if fieldtype and fieldexists:
if isinstance(fieldtype, (list, tuple)):
# Match if type matches any one of the types in the list
datavalid = False
errorlist = []
for eachtype in fieldtype:
try:
self.validate_type(x, fieldname, eachtype, eachtype)
datavalid = True
break
except ValidationError as err:
errorlist.append(err)
if not datavalid:
self._error("Value %(value)r for field '%(fieldname)s' "
"doesn't match any of %(numsubtypes)d "
"subtypes in %(fieldtype)s; "
"errorlist = %(errorlist)r",
value, fieldname, numsubtypes=len(fieldtype),
fieldtype=fieldtype, errorlist=errorlist)
elif isinstance(fieldtype, dict):
try:
self.__validate(fieldname, x, fieldtype)
except ValueError as e:
raise e
else:
try:
type_checker = getattr(self, 'validate_type_%s' %
fieldtype)
except AttributeError:
raise SchemaError("Field type '%s' is not supported." %
fieldtype)
if not type_checker(value):
self._error("Value %(value)r for field '%(fieldname)s' "
"is not of type %(fieldtype)s",
value, fieldname, fieldtype=fieldtype)
def validate_properties(self, x, fieldname, schema, properties=None):
'''
Validates properties of a JSON object by processing the object's
schema recursively
'''
if x.get(fieldname) is not None:
value = x.get(fieldname)
if isinstance(value, dict):
if isinstance(properties, dict):
if self.disallow_unknown_properties:
self._validate_unknown_properties(properties, value,
fieldname)
for eachProp in properties:
self.__validate(eachProp, value,
properties.get(eachProp))
else:
raise SchemaError("Properties definition of field '%s' is "
"not an object" % fieldname)
def validate_items(self, x, fieldname, schema, items=None):
'''
Validates that all items in the list for the given field match the
given schema
'''
if x.get(fieldname) is not None:
value = x.get(fieldname)
if isinstance(value, (list, tuple)):
if isinstance(items, (list, tuple)):
if (not 'additionalItems' in schema and
len(items) != len(value)):
self._error("Length of list %(value)r for field "
"'%(fieldname)s' is not equal to length "
"of schema list", value, fieldname)
else:
for itemIndex in range(len(items)):
try:
self.validate(value[itemIndex],
items[itemIndex])
except FieldValidationError as e:
raise type(e)("Failed to validate field '%s' "
"list schema: %s" %
(fieldname, e), fieldname,
e.value)
elif isinstance(items, dict):
for eachItem in value:
if (self.disallow_unknown_properties and
'properties' in items):
self._validate_unknown_properties(
items['properties'], eachItem, fieldname)
try:
self._validate(eachItem, items)
except FieldValidationError as e:
# a bit of a hack: replace reference to _data
# with 'list item' so error messages make sense
old_error = str(e).replace("field '_data'",
'list item')
raise type(e)("Failed to validate field '%s' list "
"schema: %s" %
(fieldname, old_error), fieldname,
e.value)
else:
raise SchemaError("Properties definition of field '%s' is "
"not a list or an object" % fieldname)
def validate_required(self, x, fieldname, schema, required):
'''
Validates that the given field is present if required is True
'''
# Make sure the field is present
if fieldname not in x and required:
self._error("Required field '%(fieldname)s' is missing",
None, fieldname)
def validate_blank(self, x, fieldname, schema, blank=False):
'''
Validates that the given field is not blank if blank=False
'''
value = x.get(fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error("Value %(value)r for field '%(fieldname)s' cannot be "
"blank'", value, fieldname)
def validate_patternProperties(self, x, fieldname, schema,
patternproperties=None):
if patternproperties is None:
patternproperties = {}
value_obj = x.get(fieldname, {})
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.validate(value, schema)
def validate_additionalItems(self, x, fieldname, schema,
additionalItems=False):
value = x.get(fieldname)
if not isinstance(value, (list, tuple)):
return
if isinstance(additionalItems, bool):
if additionalItems or 'items' not in schema:
return
elif len(value) != len(schema['items']):
self._error("Length of list %(value)r for field "
"'%(fieldname)s' is not equal to length of schema "
"list", value, fieldname)
remaining = value[len(schema['items']):]
if len(remaining) > 0:
self._validate(remaining, {'items': additionalItems})
def validate_additionalProperties(self, x, fieldname, schema,
additionalProperties=None):
'''
Validates additional properties of a JSON object that were not
specifically defined by the properties property
'''
# Shouldn't be validating additionalProperties on non-dicts
value = x.get(fieldname)
if not isinstance(value, dict):
return
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(additionalProperties, bool) and additionalProperties:
return
value = x.get(fieldname)
if isinstance(additionalProperties, (dict, bool)):
properties = schema.get("properties")
if 'patternProperties' in schema:
patterns = schema["patternProperties"].keys()
else:
patterns = []
if properties is None:
properties = {}
if value is None:
value = {}
for eachProperty in value:
if eachProperty not in properties and not \
any(re.match(p, eachProperty) for p in patterns):
# If additionalProperties is the boolean value False
# then we don't accept any additional properties.
if (isinstance(additionalProperties, bool) and not
additionalProperties):
self._error("additional property '%(prop)s' "
"not defined by 'properties' or "
"'patternProperties' are not "
"allowed in field '%(fieldname)s'",
None, fieldname, prop=eachProperty)
self.__validate(eachProperty, value,
additionalProperties)
else:
raise SchemaError("additionalProperties schema definition for "
"field '%s' is not an object" % fieldname)
def validate_dependencies(self, x, fieldname, schema, dependencies=None):
if x.get(fieldname) is not None:
# handle cases where dependencies is a string or list of strings
if isinstance(dependencies, _str_type):
dependencies = [dependencies]
if isinstance(dependencies, (list, tuple)):
for dependency in dependencies:
if dependency not in x:
self._error("Field '%(dependency)s' is required by "
"field '%(fieldname)s'",
None, fieldname, dependency=dependency)
elif isinstance(dependencies, dict):
# NOTE: the version 3 spec is really unclear on what this means
# based on the meta-schema I'm assuming that it should check
# that if a key exists, the appropriate value exists
for k, v in dependencies.items():
if k in x and v not in x:
self._error("Field '%(v)s' is required by field "
"'%(k)s'", None, fieldname, k=k, v=v)
else:
raise SchemaError("'dependencies' must be a string, "
"list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, minimum=None):
'''
Validates that the field is longer than or equal to the minimum
length if specified
'''
exclusive = schema.get('exclusiveMinimum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value < minimum) or
(exclusive and value <= minimum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"less than minimum value: %(minimum)f",
value, fieldname, minimum=minimum)
def validate_maximum(self, x, fieldname, schema, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum
length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
if x.get(fieldname) is not None:
value = x.get(fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value > maximum) or
(exclusive and value >= maximum)):
self._error("Value %(value)r for field '%(fieldname)s' is "
"greater than maximum value: %(maximum)f",
value, fieldname, maximum=maximum)
def validate_maxLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error("Length of value %(value)r for field '%(fieldname)s' "
"must be less than or equal to %(length)d",
value, fieldname, length=length)
def validate_minLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is longer than or equal
to the specified length
'''
value = x.get(fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
self._error("Length of value %(value)r for field '%(fieldname)s' "
"must be greater than or equal to %(length)d",
value, fieldname, length=length)
validate_minItems = validate_minLength
validate_maxItems = validate_maxLength
def validate_format(self, x, fieldname, schema, format_option=None):
'''
Validates the format of primitive data types
'''
value = x.get(fieldname)
format_validator = self._format_validators.get(format_option, None)
if format_validator and value:
format_validator(self, fieldname, value, format_option)
# TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, pattern=None):
'''
Validates that the given field, if a string, matches the given
regular expression.
'''
value = x.get(fieldname)
if isinstance(value, _str_type):
if not re.match(pattern, value):
self._error("Value %(value)r for field '%(fieldname)s' does "
"not match regular expression '%(pattern)s'",
value, fieldname, pattern=pattern)
def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
'''
Validates that all items in an array instance MUST be unique
(contains no two identical values).
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(uniqueItems, bool) and not uniqueItems:
return
values = x.get(fieldname)
if not isinstance(values, (list, tuple)):
return
hashables = set()
unhashables = []
for value in values:
if isinstance(value, (list, dict)):
container, add = unhashables, unhashables.append
else:
container, add = hashables, hashables.add
if value in container:
self._error(
"Value %(value)r for field '%(fieldname)s' is not unique",
value, fieldname)
else:
add(value)
def validate_enum(self, x, fieldname, schema, options=None):
'''
Validates that the value of the field is equal to one of the
specified option values
'''
value = x.get(fieldname)
if value is not None:
if not isinstance(options, Container):
raise SchemaError("Enumeration %r for field '%s' must be a "
"container", (options, fieldname))
if value not in options:
self._error("Value %(value)r for field '%(fieldname)s' is not "
"in the enumeration: %(options)r",
value, fieldname, options=options)
def validate_title(self, x, fieldname, schema, title=None):
if not isinstance(title, (_str_type, type(None))):
raise SchemaError("The title for field '%s' must be a string" %
fieldname)
def validate_description(self, x, fieldname, schema, description=None):
if not isinstance(description, (_str_type, type(None))):
raise SchemaError("The description for field '%s' must be a string"
% fieldname)
def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
value = x.get(fieldname)
if not self.validate_type_number(value):
return
if divisibleBy == 0:
raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
if value % divisibleBy != 0:
self._error("Value %(value)r field '%(fieldname)s' is not "
"divisible by '%(divisibleBy)s'.",
x.get(fieldname), fieldname, divisibleBy=divisibleBy)
def validate_disallow(self, x, fieldname, schema, disallow=None):
'''
Validates that the value of the given field does not match the
disallowed type.
'''
try:
self.validate_type(x, fieldname, schema, disallow)
except ValidationError:
return
self._error("Value %(value)r of type %(disallow)s is disallowed for "
"field '%(fieldname)s'",
x.get(fieldname), fieldname, disallow=disallow)
def validate_default(self, x, fieldname, schema, default=None):
if self.apply_default_to_data and 'default' in schema:
try:
self.validate_type(
x={'_ds': schema['default']},
fieldname='_ds',
schema=schema,
fieldtype=schema['type'] if 'type' in schema else None
)
except FieldValidationError as exc:
raise SchemaError(exc)
if not fieldname in x:
x[fieldname] = schema['default']
def validate(self, data, schema):
'''
Validates a piece of json data against the provided json-schema.
'''
self._validate(data, schema)
def _validate(self, data, schema):
self.__validate("_data", {"_data": data}, schema)
    def __validate(self, fieldname, data, schema):
        """Dispatch every schema attribute to its ``validate_<attr>`` handler.

        Returns ``data`` (possibly mutated in place, e.g. by default-filling).
        """
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError(
                    "Type for field '%s' must be 'dict', got: '%s'" %
                    (fieldname, type(schema).__name__))
            # Copy before injecting implicit attributes so the caller's
            # schema dict is never mutated.
            newschema = copy.copy(schema)
            # Reject obsolete draft attributes with a pointer to the
            # replacement, rather than silently ignoring them.
            if 'optional' in schema:
                raise SchemaError('The "optional" attribute has been replaced'
                                  ' by "required"')
            if 'requires' in schema:
                raise SchemaError('The "requires" attribute has been replaced'
                                  ' by "dependencies"')
            # Fill in instance-level defaults for attributes the schema omits.
            if 'required' not in schema:
                newschema['required'] = self.required_by_default
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default
            # Attributes that carry no validation logic of their own
            # (exclusive* are consumed by the minimum/maximum validators).
            ignored_keys = ['id', 'exclusiveMinimum', 'exclusiveMaximum']
            for schemaprop in newschema:
                if schemaprop.startswith('$') or schemaprop in ignored_keys:
                    continue
                # Reflective dispatch: each schema attribute maps to a
                # validate_<attr> method on this class.
                validatorname = "validate_" + schemaprop
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, schema, newschema[schemaprop])
                else:
                    raise SchemaError('Unknown attribute "%s"' % schemaprop)
        return data
__all__ = ['SchemaValidator', 'FieldValidationError']
| |
# coding: utf-8
# # Extract text of Admit Notes
# #### Start with patient MRN and the Admission Date.
# #### This requires 4 sequential sql pulls:
# 1) Find the PatientEncounterID for the hospital admission.
# 2) Find the set of NoteIDs associated with the ZPatientID (PatientEncounterID is missing from the Notes table).
# 3) Find set of NoteIDs associated with ZPatientID AND date of admission.
# 4) Find the text associated with the NoteIDs.
# After getting the notes, isolate the Psychiatric Admission Note and the Admission H&P.
#
# In[1]:
import base64
import os
import sqlalchemy
import getpass
import sys
import datetime
import pandas as pd
import re
import numpy as np
from scipy import stats
# Build the EDW connection interactively: Windows domain user + prompted password.
# NOTE(review): the password ends up embedded in the connection string in
# memory; do not log connection_str.
userid = getpass.getuser()
print(userid)
pswd = getpass.getpass('Provide password to connect:')
connection_str ="mssql+pymssql://PARTNERS\\" + str(userid) + ":" + pswd + "@PHSEDW.partners.org"
engine = sqlalchemy.create_engine(connection_str)
#does not establish DBAPI connection UNTIL engine.connect called
# Input MRN, Admit Date
# In[4]:
# Load the patient list; one MRN per row in column 'MRN'.
MRNFile=pd.read_csv('EPIC_GetNotesEDW.csv')
MRNFile.head()
MRN = MRNFile['MRN']
#MRN.to_csv('MRNString.csv', index=False)
#MRNAll = open('MRNString.csv', 'r')
#mrnNote = MRNAll.read()
#mrnNote = mrnNote.splitlines()
#AdmitDate = MRNFile['DateAdmit']
# Admission date is hard-coded here for this run; the per-patient
# DateAdmit column above is intentionally disabled.
AdmitDate = '2018-08-24'
#AdmitDate.to_csv('DateAdmit.csv', index=False)
#admitDateNote = open('DateAdmit.csv', 'r')
#dateNote = admitDateNote.read()
#dateNote = dateNote.splitlines()
#Don't need any of above
#only do few patients at time
lengthMRN = len(MRN)
# NOTE(review): AdmitDate is a string here, so this is the string length,
# not a patient count -- lengthAdmitDate is unused below.
lengthAdmitDate = len(AdmitDate)
#test first patient of 1,277 patients
#MRNCurrList = [number for number in range(0,1)]
#dateCurrList = [number for number in range(0,1)]
##After ran script and obtained all notes, 1,255 left (excluded few who were past admits)
# Process a single patient (row index 308) on this run.
MRNCurrList = [number for number in range(308,309)]
#dateCurrList = [number for number in range(0,lengthAdmitDate)]
print(MRN[MRNCurrList])
#print(AdmitDate[dateCurrList])
print(AdmitDate)
# ## Since can't split for loop across multiple cells, will define steps:
# ### 1) Find ZPatient ID (MCL tables do not always have PatientEncounterID for the hospital admission, which would be ideal)
# ### 2) Two inner joins: use ZPatient ID to get Note ID's associated with patient (Note Table). Link NoteIDs to Date of Service (NoteText Table)
# ### 3) Each patient has many notes. Select only McLean (112) and date of admission (AdmitDate). Get rid of duplicate notes (with same NoteID). Each patient will still have multiple notes
# ### 4) Get notes corresponding to Note ID's. Search for CEC Admission Note
# In[5]:
#1) Find ZPatientID and PatientEncounterID
#set up the query;
# Main extraction loop: for each selected patient, resolve the encounter,
# collect NoteIDs for the admission date, pull the note text, and rename
# admission notes to PsychAdmit_/MedAdmit_ files.
# NOTE(review): queries are built with str.format rather than bound
# parameters -- acceptable only because inputs come from a trusted CSV;
# prefer parameterized queries.
for whichPt in MRNCurrList:
    sql_string = """
    SELECT
        ptenchosp.PatientEncounterID,
        id.PatientIdentityID,
        id.PatientID,
        ptenchosp.ExpectedAdmissionDTS
    FROM
        Epic.Encounter.PatientEncounterHospital_McLean AS ptenchosp
    INNER JOIN
        Epic.Patient.Identity_McLean AS id
    ON
        id.PatientID = ptenchosp.PatientID
    WHERE
        id.IdentityTypeID = 112
        AND id.PatientIdentityID = '{}'
        AND CAST(ptenchosp.HospitalAdmitDTS AS date) = '{}'
    ORDER BY
        ptenchosp.HospitalAdmitDTS
    """
    #run the query, inserting the parameters into the query
    with engine.connect() as cxn:
        currMRN = MRN[whichPt]
        currAdmitDate = AdmitDate
        #currAdmitDate = AdmitDate[whichPt]
        print(currMRN)
        print(currAdmitDate)
        PtEncounterID = pd.io.sql.read_sql(sql_string.format(currMRN, currAdmitDate), cxn)
        #print(PtEncounterID)
    #display a warning if there were multiple admissions; try taking this out
    if len(PtEncounterID) > 1:
        # NOTE(review): warn_string is built but never printed.
        warn_string = 'Warning: More than one admission for {} on {}. Using most recent admission on that date.'
        #print(warn_string.format(MRN, AdmitDate))
    # First row = first admission on that date (query is ordered by admit DTS).
    ZPatientID = PtEncounterID.iloc[0]['PatientID']
    print(ZPatientID)
    #pick out the PatientEncounterID
    PtEncounterID = PtEncounterID.iloc[0]['PatientEncounterID'] #Use index 0 for first admission; -1 for last admission
    PtEncounterID = int(PtEncounterID)
    #print(PtEncounterID)
    #2. Two inner joins: use ZPatient ID to get Note ID's associated with patient (Note Table).
    #Link NoteIDs to Date of Service (NoteText Table)
    #set up the query
    sql_string2 = """
    SELECT
        notes.NoteID,
        id.PatientID,
        id.PatientIdentityID,
        id.IdentityTypeID,
        notetext.ContactDTS
    FROM
        Epic.Clinical.Note_McLean AS notes
        INNER JOIN Epic.Patient.Identity_McLean AS id ON id.PatientID = notes.PatientLinkID
        INNER JOIN Epic.Clinical.NoteText_McLean AS notetext ON notes.NoteID = notetext.NoteID
    WHERE
        notes.PatientLinkID = '{}'
    ORDER BY
        notes.NoteID
    """
    #print(sql_string2)
    #run the query, inserting the parameters into the query
    with engine.connect() as cxn:
        NoteID = pd.io.sql.read_sql(sql_string2.format(ZPatientID), cxn)
        #found there were many duplicate NoteID's for some patients
    #3. Convert to dataframe.
    #Next use dataframe Note ID to select McLean notes (112) and date of admission (AdmitDate)
    #Get rid of duplicates (keep first)
    NoteIDFrame = pd.DataFrame(NoteID)
    #get rid of non-McLean notes first
    NoteIDFrame = NoteIDFrame.where(NoteIDFrame['IdentityTypeID'] == 112.0)
    NoteIDFrame['ContactDTS'] = pd.to_datetime(NoteIDFrame['ContactDTS'], unit='s')
    # Keep only notes whose contact date equals the admission date.
    NoteIDFrame = NoteIDFrame.where(NoteIDFrame['ContactDTS'] == AdmitDate)
    NoteIDFrame = NoteIDFrame.dropna()
    NoteIDFrame = NoteIDFrame.drop_duplicates(subset='NoteID', keep='first')
    #sort by Note ID
    NoteIDFrame = NoteIDFrame.sort_values(by='NoteID')
    #renumber indices, drop=True gets rid of old indices
    NoteIDFrame = NoteIDFrame.reset_index(drop=True)
    #print(NoteIDFrame)
    #get list of note ID for patient
    listNoteID = list(NoteIDFrame['NoteID'])
    #determine number of notes for patient that occurred on Day of Admit
    numberNotes = len(listNoteID)
    #print(listNoteID)
    #4) Get Notes corresponding to Note ID's
    #set up the query
    sql_string = """
    SELECT
        NoteTXT
    FROM
        Epic.Clinical.NoteText_McLean
    WHERE
        NoteID = '{}'
    ORDER BY
        LineNBR
    """
    #print(sql_string)
    #run the query, inserting the parameters into the query
    #filename MRN_NoteID
    #search each note for Medical Admission Note and Psychiatric Admission Note in first line
    # Stop after 6 admission notes have been identified for this patient.
    noteCounter = 0
    for patientList in listNoteID:
        if noteCounter < 6:
            with engine.connect() as cxn:
                NoteText = pd.io.sql.read_sql(sql_string.format(patientList), cxn)
                # Concatenate the per-line NoteTXT rows into one string.
                fulltext = NoteText.NoteTXT.str.cat()
                filename = [str(MRN[whichPt]) +'_' + str(patientList) +'.txt']
                filename = "".join(filename)
                #print(filename)
                # NOTE(review): files are opened without a context manager;
                # the read handle below is never closed explicitly.
                f = open(filename, 'w')
                f.write(fulltext)
                f.close()
                f = open(filename, 'r')
                # Only the first line is inspected for the note-type header.
                CECnote = f.readline()
                psychNote = re.findall('McLean Clinical Evaluation Center Psychiatric Admission Note', CECnote)
                medNote = re.findall('CEC Medical Admission Note', CECnote)
                if len(psychNote) > 0:
                    noteCounter = noteCounter + 1
                    psychFileName = ['PsychAdmit_' + str(MRN[whichPt]) + '.txt']
                    psychFileName = "".join(psychFileName)
                    print(psychFileName)
                    os.rename(filename, psychFileName)
                    #f = open(psychFileName, 'w')
                    #f.write(fulltext)
                    #f.close()
                if len(medNote) > 0:
                    noteCounter = noteCounter + 1
                    medFileName = ['MedAdmit_' +str(MRN[whichPt]) +'.txt']
                    medFileName = "".join(medFileName)
                    print(medFileName)
                    os.rename(filename, medFileName)
                    #f = open(medFileName, 'w')
                    #f.write(fulltext)
                    #f.close()
| |
''' graph_controllers
routes related to the graphs
'''
import datetime
import json
from flask import (render_template, request, flash, redirect, url_for, session
, abort)
from mongoengine import *
from application import app
# view controls
from decorators import *
# mongoengine models
from models import *
# some utilities
import utilities
# some constants
import constants
graph_route = '/organizations/<org_label>/projects/<project_label>/graphs'
@app.route(graph_route, defaults={'graph_label': None})
@app.route(graph_route + '/<graph_label>', methods=['GET', 'POST'])
@verification_required
@csrf_protect
def graphs(org_label, project_label, graph_label):
    ''' graphin things
    /organizations/aquaya/projects/water-quality/graphs
        : view a list of all graphs for the project
    /organizations/aquaya/projects/water-quality/graphs?create=true
        : create a new graph config, immediately redirect to editing
    /organizations/aquaya/projects/water-quality/graphs/ph-vs-time
        : view a graph
    /organizations/aquaya/projects/water-quality/graphs/ph-vs-time?edit=true
        : edit a graph; accepts GET or POST
    '''
    user = User.objects(email=session['email'])[0]
    orgs = Organization.objects(label=org_label)
    if not orgs:
        flash('Organization "%s" not found, sorry!' % org_label, 'warning')
        return redirect(url_for('organizations'))
    org = orgs[0]
    # permission-check: user must belong to the org or be an admin
    if org not in user.organizations and not user.admin_rights:
        app.logger.error('%s tried to view a project but was \
            denied for want of admin rights' % session['email'])
        abort(404)
    # find the project
    projects = Project.objects(label=project_label, organization=org)
    if not projects:
        flash('Project "%s" not found, sorry!' % project_label, 'warning')
        return redirect(url_for('organizations', org_label=org.label))
    project = projects[0]
    if request.method == 'POST':
        # we have a graph_label
        graphs = Graph.objects(label=graph_label, project=project)
        if not graphs:
            abort(404)
        graph = graphs[0]
        form_type = request.form.get('form_type', '')
        if form_type == 'info':
            if graph.name != request.form.get('name', ''):
                name = request.form.get('name', '')
                graph.update(set__name = name)
                # regenerate the label so it stays unique within the project
                graphs = Graph.objects(project=project).only('label')
                labels = [g.label for g in graphs]
                graph.update(set__label = utilities.generate_label(name
                    , labels))
                # reload to catch the name change
                graph.reload()
            graph.update(set__description =
                request.form.get('description', ''))
            graph.update(set__graph_type = request.form.get('graph_type', ''))
            # axes specify a header and come in the form 'header_id__4abcd001'
            xaxis = request.form.get('xaxis', '')
            if xaxis:
                xaxis = xaxis.split('header_id__')[1]
                header = Header.objects(id=xaxis)[0]
                graph.update(set__xaxis = header)
            yaxis = request.form.get('yaxis', '')
            if yaxis:
                yaxis = yaxis.split('header_id__')[1]
                header = Header.objects(id=yaxis)[0]
                graph.update(set__yaxis = header)
            # pie chart headers are similar to axes..
            pie_header = request.form.get('pie_header', '')
            if pie_header:
                pie_header = pie_header.split('header_id__')[1]
                header = Header.objects(id=pie_header)[0]
                graph.update(set__pie_header = header)
        elif form_type == 'filters':
            # extract the 'any filters' vs 'all' distinction
            filter_settings = request.form.get('apply_any_filters', '')
            if filter_settings == 'true':
                graph.update(set__apply_any_filters = True)
            else:
                graph.update(set__apply_any_filters = False)
            # attach filter to graph
            requested_filter_ids = request.form.getlist('filters')
            attached_filters = []
            for requested_id in requested_filter_ids:
                prefix, filter_id = requested_id.split('__')
                filters = Filter.objects(id=filter_id)
                if not filters:
                    abort(404)
                attached_filters.append(filters[0])
            graph.update(set__filters = attached_filters)
        elif form_type == 'admin':
            # delete the graph
            name = graph.name
            utilities.delete_graph(graph, session['email'])
            flash('graph "%s" was deleted successfully' % name, 'success')
            return redirect(url_for('graphs', org_label=org.label
                , project_label=project.label))
        else:
            # bad 'form_type'
            abort(404)
        flash('changes saved successfully', 'success')
        return redirect(url_for('graphs', org_label=org.label
            , project_label=project.label, graph_label=graph.label))
    if request.method == 'GET':
        if graph_label:
            graphs = Graph.objects(label=graph_label, project=project)
            if not graphs:
                app.logger.error('%s tried to access a graph that does not \
                    exist' % session['email'])
                flash('Graph "%s" not found, sorry!' % graph_label
                    , 'warning')
                # bugfix: the url parameters belong to url_for, not redirect;
                # passing them to redirect() raised TypeError at runtime
                return redirect(url_for('projects', org_label=org.label
                    , project_label=project.label))
            graph = graphs[0]
            if request.args.get('edit', '') == 'true':
                # valid graph types
                graph_types = ['line', 'scatter', 'bar', 'chart', 'pie']
                available_filters = Filter.objects(project=project)
                return render_template('graph_edit.html', graph=graph
                    , graph_types = graph_types
                    , allowed_graph_types = constants.graph_types
                    , available_filters = available_filters)
            else:
                # render a graph
                data = []
                project_count = None
                filtered_count = None
                if graph.graph_type == 'line':
                    if graph.xaxis and graph.yaxis:
                        data, project_count = (
                            utilities.generate_line_graph_data(graph))
                        filtered_count = len(data)
                    else:
                        flash('define an x-axis and y-axis for plotting'
                            , 'warning')
                elif graph.graph_type == 'pie':
                    if graph.pie_header:
                        data, project_count = (
                            utilities.generate_pie_chart_data(graph))
                        filtered_count = sum([i['data'] for i in json.loads(data)])
                    else:
                        flash('define a column to create this pie chart'
                            , 'warning')
                return render_template('graph.html', graph=graph, data=data
                    , project_count=project_count
                    , filtered_count = filtered_count)
        if request.args.get('create', '') == 'true':
            # create a new graph
            # CSRF validation
            token = request.args.get('token', '')
            if not verify_token(token):
                abort(403)
            try:
                graph_name = 'graph-%s' % utilities.generate_random_string(6)
                new_graph = Graph(
                    creation_time = datetime.datetime.utcnow()
                    , creator = user
                    , label = graph_name.lower()
                    , project = project
                    , name = graph_name
                )
                new_graph.save()
                app.logger.info('graph created by %s' % session['email'])
                flash('graph created; please change the defaults', 'success')
            # narrowed from a bare except so SystemExit/KeyboardInterrupt
            # are not swallowed; still logs and degrades gracefully
            except Exception:
                app.logger.error('graph creation failed for %s' % \
                    session['email'])
                flash('There was an error, sorry :/', 'error')
                # bugfix: keyword was 'project=' while the endpoint is
                # addressed by 'project_label=' everywhere else in this view
                return redirect(url_for('projects', org_label=org.label
                    , project_label=project.label))
            # redirect to the editing screen
            return redirect(url_for('graphs', org_label=org.label
                , project_label=project.label, graph_label=new_graph.label
                , edit='true'))
        # no graph in particular was specified
        graphs = Graph.objects(project=project)
        return render_template('project_graphs.html', project=project
            , graphs=graphs)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Dict, List, TYPE_CHECKING
from tests.integration_tests.base_tests import login
from tests.integration_tests.dashboards.filter_sets.consts import (
DASHBOARD_OWNER_USERNAME,
FILTER_SET_OWNER_USERNAME,
REGULAR_USER,
)
from tests.integration_tests.dashboards.filter_sets.utils import (
call_delete_filter_set,
collect_all_ids,
get_filter_set_by_name,
)
if TYPE_CHECKING:
from flask.testing import FlaskClient
from superset.models.filter_set import FilterSet
def assert_filterset_was_not_deleted(filter_set_dict: Dict[str, Any]) -> None:
    """Fail unless a filter set with this name can still be looked up."""
    found = get_filter_set_by_name(filter_set_dict["name"])
    assert found is not None
def assert_filterset_deleted(filter_set_dict: Dict[str, Any]) -> None:
    """Fail if a filter set with this name can still be looked up."""
    found = get_filter_set_by_name(filter_set_dict["name"])
    assert found is None
class TestDeleteFilterSet:
    """Integration tests for the filter-set DELETE endpoint.

    Each test logs in as a particular role, issues a delete request and
    checks both the HTTP status code and whether the filter set was
    actually removed from storage.
    """

    def test_with_dashboard_exists_filterset_not_exists__200(
        self,
        dashboard_id: int,
        filtersets: Dict[str, List[FilterSet]],
        client: FlaskClient[Any],
    ):
        # arrange: admin session plus an id guaranteed not to exist
        login(client, "admin")
        missing_id = max(collect_all_ids(filtersets)) + 1

        # act
        response = call_delete_filter_set(client, {"id": missing_id}, dashboard_id)

        # assert: deleting a non-existent set on a real dashboard is a no-op
        assert response.status_code == 200

    def test_with_dashboard_not_exists_filterset_not_exists__404(
        self,
        not_exists_dashboard: int,
        filtersets: Dict[str, List[FilterSet]],
        client: FlaskClient[Any],
    ):
        # arrange: admin session plus an id guaranteed not to exist
        login(client, "admin")
        missing_id = max(collect_all_ids(filtersets)) + 1

        # act
        response = call_delete_filter_set(
            client, {"id": missing_id}, not_exists_dashboard
        )

        # assert: unknown dashboard wins -> 404
        assert response.status_code == 404

    def test_with_dashboard_not_exists_filterset_exists__404(
        self,
        not_exists_dashboard: int,
        dashboard_based_filter_set_dict: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")

        # act
        response = call_delete_filter_set(
            client, dashboard_based_filter_set_dict, not_exists_dashboard
        )

        # assert: 404 and the existing filter set is untouched
        assert response.status_code == 404
        assert_filterset_was_not_deleted(dashboard_based_filter_set_dict)

    def test_when_caller_is_admin_and_owner_type_is_user__200(
        self,
        test_users: Dict[str, int],
        user_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")

        # act
        response = call_delete_filter_set(client, user_based_filter_set_dict)

        # assert: admin may delete a user-owned filter set
        assert response.status_code == 200
        assert_filterset_deleted(user_based_filter_set_dict)

    def test_when_caller_is_admin_and_owner_type_is_dashboard__200(
        self,
        test_users: Dict[str, int],
        dashboard_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, "admin")

        # act
        response = call_delete_filter_set(client, dashboard_based_filter_set_dict)

        # assert: admin may delete a dashboard-owned filter set
        assert response.status_code == 200
        assert_filterset_deleted(dashboard_based_filter_set_dict)

    def test_when_caller_is_dashboard_owner_and_owner_is_other_user_403(
        self,
        test_users: Dict[str, int],
        user_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)

        # act
        response = call_delete_filter_set(client, user_based_filter_set_dict)

        # assert: dashboard owner cannot delete another user's filter set
        assert response.status_code == 403
        assert_filterset_was_not_deleted(user_based_filter_set_dict)

    def test_when_caller_is_dashboard_owner_and_owner_type_is_dashboard__200(
        self,
        test_users: Dict[str, int],
        dashboard_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, DASHBOARD_OWNER_USERNAME)

        # act
        response = call_delete_filter_set(client, dashboard_based_filter_set_dict)

        # assert: dashboard owner may delete a dashboard-owned filter set
        assert response.status_code == 200
        assert_filterset_deleted(dashboard_based_filter_set_dict)

    def test_when_caller_is_filterset_owner__200(
        self,
        test_users: Dict[str, int],
        user_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, FILTER_SET_OWNER_USERNAME)

        # act
        response = call_delete_filter_set(client, user_based_filter_set_dict)

        # assert: the owner may always delete their own filter set
        assert response.status_code == 200
        assert_filterset_deleted(user_based_filter_set_dict)

    def test_when_caller_is_regular_user_and_owner_type_is_user__403(
        self,
        test_users: Dict[str, int],
        user_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, REGULAR_USER)

        # act
        response = call_delete_filter_set(client, user_based_filter_set_dict)

        # assert: unrelated users are denied
        assert response.status_code == 403
        assert_filterset_was_not_deleted(user_based_filter_set_dict)

    def test_when_caller_is_regular_user_and_owner_type_is_dashboard__403(
        self,
        test_users: Dict[str, int],
        dashboard_based_filter_set_dict: Dict[str, Any],
        valid_filter_set_data_for_update: Dict[str, Any],
        client: FlaskClient[Any],
    ):
        # arrange
        login(client, REGULAR_USER)

        # act
        response = call_delete_filter_set(client, dashboard_based_filter_set_dict)

        # assert: unrelated users are denied
        assert response.status_code == 403
        assert_filterset_was_not_deleted(dashboard_based_filter_set_dict)
| |
from pandac.PandaModules import Vec3, Vec4, Point3, TextNode, VBase4
from direct.gui.DirectGui import DGG, DirectFrame, DirectButton, DirectLabel, DirectScrolledList, DirectCheckButton
from direct.gui import DirectGuiGlobals
from direct.showbase import DirectObject
from direct.showbase import PythonUtil
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TTDialog
from toontown.parties import PartyGlobals
from toontown.parties import PartyUtils
class PublicPartyGui(DirectFrame):
    """GUI panel listing joinable public parties with their activities.

    The user picks a party from a scrolled list, previews its activities,
    and either starts (joins) it or closes the panel. The result is
    communicated via ``self.doneStatus`` plus the ``doneEvent`` message.
    """
    notify = directNotify.newCategory('PublicPartyGui')

    def __init__(self, doneEvent):
        # doneEvent: messenger event name fired when the user finishes
        # (either by choosing a party or by closing the panel).
        DirectFrame.__init__(self)
        self.doneEvent = doneEvent
        self.gui = loader.loadModel('phase_4/models/parties/publicPartyGUI')
        self.setPos(0.1, 0.0, 0.1)
        # doneStatus is None until a party is chosen; then (shardId, zoneId).
        self.doneStatus = None
        self.activityIconsModel = loader.loadModel('phase_4/models/parties/eventSignIcons')
        self.normalFrameColor = Vec4(130 / 255.0, 174 / 255.0, 249 / 255.0, 1.0)
        self.selectedFrameColor = Vec4(1.0, 1.0, 0.0, 1.0)
        self.load()
        # The source model can be discarded once the widgets are built.
        self.gui.removeNode()
        self.accept('stoppedAsleep', self._close)
        return

    def load(self):
        """Build all static widgets from the loaded GUI model."""
        for backgroundName in ['background', 'parties_background', 'activities_background']:
            background = DirectFrame(parent=self, geom=self.gui.find('**/%s' % backgroundName), relief=None)
        self.titleLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.PartyGateTitle, pos=self.gui.find('**/title_locator').getPos(), scale=0.1)
        self.partyList, self.partyListLabel = self.createPartyListAndLabel('parties', 14)
        self.activityList, self.activityListLabel = self.createListAndLabel('activities', 1)
        pos = self.gui.find('**/startText_locator').getPos()
        self.partyStartButton = DirectButton(parent=self, relief=None, text=TTLocalizer.PartyGateGoToParty, text_align=TextNode.ACenter, text_scale=TTLocalizer.PPGpartyStartButton, text_pos=(pos[0], pos[2]), geom=(self.gui.find('**/startButton_up'),
         self.gui.find('**/startButton_down'),
         self.gui.find('**/startButton_rollover'),
         self.gui.find('**/startButton_inactive')), command=self._startParty)
        self.closeButton = DirectButton(parent=self, relief=None, geom=(self.gui.find('**/cancelButton_up'), self.gui.find('**/cancelButton_down'), self.gui.find('**/cancelButton_rollover')), command=self._close)
        # Fall back to a hard-coded position when the model lacks the locator.
        instructionPos = (0, -0.9)
        if not self.gui.find('**/helpText_locator').isEmpty():
            tempPos = self.gui.find('**/helpText_locator').getPos()
            instructionPos = (tempPos.getX(), tempPos.getZ())
        self.instructionsLabel = DirectLabel(parent=self, relief=None, text=TTLocalizer.PartyGateInstructions, text_align=TextNode.ACenter, text_scale=TTLocalizer.PPGinstructionsLabel, text_pos=instructionPos)
        return

    def createListAndLabel(self, typeString, numItems):
        """Create a scrolled list plus its title label for the given section.

        typeString is 'activities' or 'parties'; it selects both the model
        nodes ('**/<type>ButtonUp_up', ...) and the localized title.
        Returns (DirectScrolledList, DirectLabel).
        """
        list = DirectScrolledList(parent=self, relief=None, incButton_image=(self.gui.find('**/%sButtonDown_up' % typeString),
         self.gui.find('**/%sButtonDown_down' % typeString),
         self.gui.find('**/%sButtonDown_rollover' % typeString),
         self.gui.find('**/%sButtonDown_inactive' % typeString)), incButton_relief=None, decButton_image=(self.gui.find('**/%sButtonUp_up' % typeString),
         self.gui.find('**/%sButtonUp_down' % typeString),
         self.gui.find('**/%sButtonUp_rollover' % typeString),
         self.gui.find('**/%sButtonUp_inactive' % typeString)), decButton_relief=None, itemFrame_pos=self.gui.find('**/%s_locator' % typeString).getPos(), itemFrame_relief=None, numItemsVisible=numItems, forceHeight=0.055)
        strings = {'activities': TTLocalizer.EventsPageHostingTabActivityListTitle,
         'parties': TTLocalizer.PartyGatePartiesListTitle}
        label = DirectLabel(parent=self, relief=None, text=strings[typeString], text_scale=0.06, pos=self.gui.find('**/%sText_locator' % typeString).getPos())
        return (list, label)

    def refresh(self, partyInfoTupleList):
        """Rebuild the party list from fresh server data.

        Each tuple is (shardId, zoneId, numberOfGuests, hostName,
        activityIds, minLeft) -- see the unpacking below.
        """
        PublicPartyGui.notify.debug('refresh : partyInfoTupleList = %s' % partyInfoTupleList)
        self.selectedItem = None
        self.partyList.removeAndDestroyAllItems()
        self.activityList.removeAndDestroyAllItems()
        self.partyStartButton['state'] = DirectGuiGlobals.DISABLED
        sortedList = partyInfoTupleList[:]

        # Python-2-style comparator: order by guest count, then by number
        # of activities (list.sort(cmp, ...) is a Py2 API).
        def cmp(left, right):
            if left[2] < right[2]:
                return -1
            elif left[2] == right[2]:
                if len(left[4]) < len(right[4]):
                    return -1
                elif len(left[4]) == len(right[4]):
                    return 0
                else:
                    return 1
            else:
                return 1

        sortedList.sort(cmp, reverse=True)
        # Rotate the list so the first non-full party appears on top,
        # with full parties wrapped around to the end.
        indexToCut = -1
        for index, partyTuple in enumerate(sortedList):
            numberOfGuests = partyTuple[2]
            if numberOfGuests < PartyGlobals.MaxToonsAtAParty:
                indexToCut = index
                break

        if indexToCut > 0:
            sortedList = sortedList[indexToCut:] + sortedList[:indexToCut]
        for index, partyTuple in enumerate(sortedList):
            shardId = partyTuple[0]
            zoneId = partyTuple[1]
            numberOfGuests = partyTuple[2]
            hostName = partyTuple[3]
            activityIds = partyTuple[4]
            minLeft = partyTuple[5]
            # One row per party: host name button + guest/activity/minutes
            # columns parented to it.
            item = DirectButton(relief=DGG.RIDGE, borderWidth=(0.01, 0.01), frameSize=(-0.01,
             0.45,
             -0.015,
             0.04), frameColor=self.normalFrameColor, text=hostName, text_align=TextNode.ALeft, text_bg=Vec4(0.0, 0.0, 0.0, 0.0), text_scale=0.045, command=self.partyClicked)
            otherInfoWidth = 0.08
            numActivities = len(activityIds)
            PartyUtils.truncateTextOfLabelBasedOnWidth(item, hostName, PartyGlobals.EventsPageGuestNameMaxWidth)
            num = DirectLabel(relief=DGG.RIDGE, borderWidth=(0.01, 0.01), frameSize=(0.0,
             otherInfoWidth,
             -0.015,
             0.04), frameColor=self.normalFrameColor, text='%d' % numberOfGuests, text_align=TextNode.ALeft, text_scale=0.045, text_pos=(0.01, 0, 0), pos=(0.45, 0.0, 0.0))
            num.reparentTo(item)
            item.numLabel = num
            actLabelPos = num.getPos()
            actLabelPos.setX(actLabelPos.getX() + otherInfoWidth)
            actLabel = DirectLabel(relief=DGG.RIDGE, borderWidth=(0.01, 0.01), frameSize=(0.0,
             otherInfoWidth,
             -0.015,
             0.04), frameColor=self.normalFrameColor, text='%d' % numActivities, text_align=TextNode.ALeft, text_scale=0.045, text_pos=(0.01, 0, 0), pos=actLabelPos)
            actLabel.reparentTo(item)
            item.actLabel = actLabel
            minLabelPos = actLabel.getPos()
            minLabelPos.setX(minLabelPos.getX() + otherInfoWidth)
            minLabel = DirectLabel(relief=DGG.RIDGE, borderWidth=(0.01, 0.01), frameSize=(0.0,
             otherInfoWidth,
             -0.015,
             0.04), frameColor=self.normalFrameColor, text='%d' % minLeft, text_align=TextNode.ALeft, text_scale=0.045, text_pos=(0.01, 0, 0), pos=minLabelPos)
            minLabel.reparentTo(item)
            item.minLabel = minLabel
            item['extraArgs'] = [item]
            # Stash the ids on the widget so partyClicked/_startParty can
            # recover them without a separate lookup table.
            item.setPythonTag('shardId', shardId)
            item.setPythonTag('zoneId', zoneId)
            item.setPythonTag('activityIds', activityIds)
            self.partyList.addItem(item)

        return

    def partyClicked(self, item):
        """Highlight the clicked row and show its activities."""
        self.partyStartButton['state'] = DirectGuiGlobals.NORMAL
        # Un-highlight the previously selected row, if any.
        if self.selectedItem is not None:
            self.selectedItem['state'] = DirectGuiGlobals.NORMAL
            self.selectedItem['frameColor'] = self.normalFrameColor
            numLabel = self.selectedItem.numLabel
            if not numLabel.isEmpty():
                numLabel['frameColor'] = self.normalFrameColor
            actLabel = self.selectedItem.actLabel
            if not actLabel.isEmpty():
                actLabel['frameColor'] = self.normalFrameColor
            minLabel = self.selectedItem.minLabel
            if not minLabel.isEmpty():
                minLabel['frameColor'] = self.normalFrameColor
        self.selectedItem = item
        # Disable the row button so it cannot be clicked twice in a row.
        self.selectedItem['state'] = DirectGuiGlobals.DISABLED
        self.selectedItem['frameColor'] = self.selectedFrameColor
        numLabel = self.selectedItem.numLabel
        if not numLabel.isEmpty():
            numLabel['frameColor'] = self.selectedFrameColor
        actLabel = self.selectedItem.actLabel
        if not actLabel.isEmpty():
            actLabel['frameColor'] = self.selectedFrameColor
        minLabel = self.selectedItem.minLabel
        if not minLabel.isEmpty():
            minLabel['frameColor'] = self.selectedFrameColor
        self.fillActivityList(item.getPythonTag('activityIds'))
        return

    def fillActivityList(self, activityIds):
        """Populate the activity list, collapsing duplicates into 'X n'."""
        self.activityList.removeAndDestroyAllItems()
        sortedList = activityIds[:]
        sortedList.sort()
        lastActivityId = -1
        for activityId in sortedList:
            # Duplicates are adjacent after sorting; emit each id once.
            if activityId == lastActivityId:
                continue
            lastActivityId = activityId
            number = sortedList.count(activityId)
            text = TTLocalizer.PartyActivityNameDict[activityId]['generic']
            if number > 1:
                text += ' X %d' % number
            item = DirectLabel(relief=None, text=text, text_align=TextNode.ACenter, text_scale=0.05, text_pos=(0.0, -0.15), geom_scale=0.3, geom_pos=Vec3(0.0, 0.0, 0.07), geom=PartyUtils.getPartyActivityIcon(self.activityIconsModel, PartyGlobals.ActivityIds.getString(activityId)))
            self.activityList.addItem(item)

        return

    def _startParty(self):
        """Report the chosen party's (shardId, zoneId) via doneStatus."""
        if self.selectedItem is None:
            self.partyStartButton['state'] = DirectGuiGlobals.DISABLED
            return
        self.doneStatus = (self.selectedItem.getPythonTag('shardId'), self.selectedItem.getPythonTag('zoneId'))
        messenger.send(self.doneEvent)
        return

    def _close(self):
        """Dismiss the panel without choosing a party."""
        self.doneStatus = None
        messenger.send(self.doneEvent)
        return

    def destroy(self):
        """Tear down models, list items and event hooks."""
        self.activityIconsModel.removeNode()
        del self.activityIconsModel
        self.partyList.removeAndDestroyAllItems()
        # Best-effort: clear widget back-references; the items may already
        # have been destroyed above.
        try:
            for item in self.partyList['items']:
                item.actLabel = None
                item.numLabel = None
                item.minLabel = None

        except:
            pass

        self.activityList.removeAndDestroyAllItems()
        del self.partyList
        del self.activityList
        self.ignoreAll()
        DirectFrame.destroy(self)
        return

    def createPartyListAndLabel(self, typeString, numItems):
        """Like createListAndLabel, plus the rotated column headers
        (toons / activities / minutes left) above the parties list.
        Returns (DirectScrolledList, DirectLabel).
        """
        list = DirectScrolledList(parent=self, relief=None, incButton_image=(self.gui.find('**/%sButtonDown_up' % typeString),
         self.gui.find('**/%sButtonDown_down' % typeString),
         self.gui.find('**/%sButtonDown_rollover' % typeString),
         self.gui.find('**/%sButtonDown_inactive' % typeString)), incButton_relief=None, decButton_image=(self.gui.find('**/%sButtonUp_up' % typeString),
         self.gui.find('**/%sButtonUp_down' % typeString),
         self.gui.find('**/%sButtonUp_rollover' % typeString),
         self.gui.find('**/%sButtonUp_inactive' % typeString)), decButton_relief=None, itemFrame_pos=self.gui.find('**/%s_locator' % typeString).getPos(), itemFrame_relief=None, numItemsVisible=numItems, forceHeight=0.055)
        strings = {'activities': TTLocalizer.EventsPageHostingTabActivityListTitle,
         'parties': TTLocalizer.PartyGatePartiesListTitle}
        hostPos = self.gui.find('**/%sText_locator' % typeString).getPos()
        label = DirectLabel(parent=self, text_align=TextNode.ALeft, relief=None, text=strings[typeString], text_scale=0.06, pos=hostPos)
        # Column headers: prefer the model's locators, fall back to offsets.
        curPos = label.getPos()
        curPos.setX(curPos.getX() + 0.5)
        if not self.gui.find('**/partiesText_locator1').isEmpty():
            curPos = self.gui.find('**/partiesText_locator1').getPos()
        hpr = Point3(0, 0, -40)
        toonsLabel = DirectLabel(parent=self, text_align=TextNode.ALeft, relief=None, text=TTLocalizer.PartyGatesPartiesListToons, text_scale=TTLocalizer.PPGtoonsLabel, pos=curPos, hpr=hpr)
        curPos.setX(curPos.getX() + 0.1)
        if not self.gui.find('**/partiesText_locator2').isEmpty():
            curPos = self.gui.find('**/partiesText_locator2').getPos()
        activitiesLabel = DirectLabel(parent=self, text_align=TextNode.ALeft, relief=None, text=TTLocalizer.PartyGatesPartiesListActivities, text_scale=TTLocalizer.PPGactivitiesLabel, pos=curPos, hpr=hpr)
        curPos.setX(curPos.getX() + 0.1)
        if not self.gui.find('**/partiesText_locator3').isEmpty():
            curPos = self.gui.find('**/partiesText_locator3').getPos()
        minLeftLabel = DirectLabel(parent=self, text_align=TextNode.ALeft, relief=None, text=TTLocalizer.PartyGatesPartiesListMinLeft, text_scale=TTLocalizer.PPGminLeftLabel, pos=curPos, hpr=hpr)
        return (list, label)

    def stash(self):
        # Re-enable the bottom chat cells while this panel is hidden.
        base.setCellsAvailable(base.bottomCells, 1)
        DirectFrame.stash(self)

    def unstash(self):
        # Reserve the bottom chat cells while this panel is visible.
        base.setCellsAvailable(base.bottomCells, 0)
        DirectFrame.unstash(self)
| |
import os
import sys
import getopt
import Common
import AutoGemmParameters
import KernelParameters
################################################################################
# SINC - Kernel Source Includes
################################################################################
class KernelSourceIncludes:
  """Generates AutoGemmKernelSources.h/.cpp.

  The header declares, per kernel variant, the work-group/micro-tile metadata
  and the kernel source string; the .cpp pulls in every generated kernel
  source file.
  """

  ##############################################################################
  # SINC - default constructor
  ##############################################################################
  def __init__(self):
    # Header with extern declarations for each generated kernel.
    self.incFileName = Common.getIncludePath() + "AutoGemmKernelSources.h"
    self.incFile = open(self.incFileName, "w")
    self.incFile.write( Common.getAutoGemmHeader() )
    self.incStr = "#ifndef AUTOGEMM_KERNEL_SOURCE_INCLUDES_H\n"
    self.incStr += "#define AUTOGEMM_KERNEL_SOURCE_INCLUDES_H\n"
    self.incStr += "\n"
    # Translation unit that includes every generated kernel source file.
    self.cppFileName = Common.getIncludePath() + "AutoGemmKernelSources.cpp"
    self.cppFile = open(self.cppFileName, "w")
    self.cppFile.write( Common.getAutoGemmHeader() )
    self.cppStr = "\n"
    self.cppStr += "#include \"%sAutoGemmKernelSources.h\"\n" % Common.getRelativeIncludePath()
    self.cppStr += "#include \"UserGemmKernelSources/UserGemmKernelSourceIncludes.cpp\"\n"

  def addKernel(self, kernel):
    """Emit extern declarations and the source #include for the main kernel
    and its row/col/corner edge variants.

    The four variants were previously emitted by four copy-pasted blocks;
    they are now produced by one loop over the variant names.
    """
    kernelNames = [
        kernel.getName(),
        kernel.getRowName(),
        kernel.getColName(),
        kernel.getCornerName(),
    ]
    for kernelName in kernelNames:
      self.incStr += "extern const unsigned int %s_workGroupNumRows;\n" % kernelName
      self.incStr += "extern const unsigned int %s_workGroupNumCols;\n" % kernelName
      self.incStr += "extern const unsigned int %s_microTileNumRows;\n" % kernelName
      self.incStr += "extern const unsigned int %s_microTileNumCols;\n" % kernelName
      self.incStr += "extern const unsigned int %s_unroll;\n" % kernelName
      self.incStr += "extern const char * const %s_src;\n" % kernelName
      self.cppStr += "#include \"%s%s_src.cpp\"\n" % (Common.getRelativeKernelSourcePath(), kernelName)
    # Write buffered text after each kernel and reset the buffers.
    self.incFile.write( self.incStr )
    self.incStr = ""
    self.cppFile.write( self.cppStr )
    self.cppStr = ""

  def writeToFile(self):
    """Flush any remaining buffered text, close the include guard, and close
    both files."""
    self.incFile.write( self.incStr )
    self.incFile.write( "\n#endif\n" )
    self.incFile.close()
    self.cppFile.write( self.cppStr )
    self.cppFile.close()
################################################################################
# BINC - Kernel Binary Includes
################################################################################
class KernelBinaryIncludes:
  """Generates AutoGemmKernelBinaries.h/.cpp.

  The header declares, per kernel variant, a pointer/size pair for a
  pre-compiled OpenCL binary; the .cpp defines NULL/0 fallbacks for every
  variant that was not pre-compiled.
  """

  ##############################################################################
  # BINC - default constructor
  ##############################################################################
  def __init__(self):
    # Header: extern declarations for each kernel's binary blob and its size.
    self.incFileName = Common.getIncludePath() + "AutoGemmKernelBinaries.h"
    self.incFile = open(self.incFileName, "w")
    self.incFile.write( Common.getAutoGemmHeader() )
    self.incStr = ""
    self.incStr += "#include <cstddef>\n"
    self.incStr += "\n#ifndef AUTOGEMM_KERNEL_BINARIES_H\n"
    self.incStr += "#define AUTOGEMM_KERNEL_BINARIES_H\n"
    self.incStr += "\n"
    # Cpp: fallback definitions, guarded so pre-compiled binaries take over.
    self.cppFileName = Common.getIncludePath() + "AutoGemmKernelBinaries.cpp"
    self.cppFile = open(self.cppFileName, "w")
    self.cppFile.write( Common.getAutoGemmHeader() )
    self.cppStr = ""
    self.cppStr += "#include \"%sAutoGemmKernelBinaries.h\"\n" % Common.getRelativeIncludePath()
    self.cppStr += "\n"
    self.cppStr += "#ifdef AUTOGEMM_USE_PRE_COMPILED_KERNELS\n"
    self.cppStr += "#include \"%sAutoGemmKernelBinariesPreCompiled.h\"\n" % Common.getRelativeKernelBinaryPath()
    self.cppStr += "#endif\n"
    self.cppStr += "\n"

  def addKernel(self, kernel):
    """Emit declarations and guarded fallback definitions for the main kernel
    and its row/col/corner edge variants.

    The four variants were previously emitted by four copy-pasted blocks;
    they are now produced by one loop over the variant names.
    """
    kernelNames = [
        kernel.getName(),
        kernel.getRowName(),
        kernel.getColName(),
        kernel.getCornerName(),
    ]
    for kernelName in kernelNames:
      self.incStr += "extern unsigned char *%s_bin;\n" % kernelName
      self.incStr += "extern size_t %s_binSize;\n" % kernelName
      # KERNEL_<NAME>_BIN_CPP is defined by a pre-compiled binary source file;
      # when absent, define NULL/0 placeholders instead.  The #else branch is
      # intentionally empty (the pre-compiled file already provides the
      # definitions).
      self.cppStr += "#ifndef KERNEL_" + kernelName.upper() + "_BIN_CPP\n"
      self.cppStr += "unsigned char *%s_bin = 0;\n" % kernelName
      # NOTE: the leading space in the size line is preserved byte-for-byte
      # from the original generator output.
      self.cppStr += " size_t %s_binSize = 0;\n" % kernelName
      self.cppStr += "#else\n"
      self.cppStr += "#endif\n"
    # Write buffered text after each kernel and reset the buffers.
    self.incFile.write( self.incStr )
    self.incStr = ""
    self.cppFile.write( self.cppStr )
    self.cppStr = ""

  def writeToFile(self):
    """Flush any remaining buffered text, close the include guard, and close
    both files."""
    self.incFile.write( self.incStr )
    self.incFile.write( "\n#endif\n" )
    self.incFile.close()
    self.cppFile.write( self.cppStr )
    self.cppFile.close()
################################################################################
# CINC - ClKernel Includes
################################################################################
class ClKernelIncludes:
  """Generates AutoGemmClKernels.h/.cpp: one cl_kernel handle per kernel
  variant plus initAutoGemmClKernels(), which releases and NULLs them all."""

  ##############################################################################
  # CINC - default constructor
  ##############################################################################
  def __init__(self):
    self.incName = Common.getIncludePath() + "AutoGemmClKernels.h"
    self.incFile = open(self.incName, "w")
    self.incFile.write(Common.getAutoGemmHeader())
    self.incStr = "".join([
        "#ifndef AUTOGEMM_CL_KERNELS_H\n",
        "#define AUTOGEMM_CL_KERNELS_H\n",
        "#if defined( __APPLE__ ) || defined( __MACOSX )\n",
        "#include <OpenCL/cl.h>\n",
        "#else\n",
        "#include <CL/cl.h>\n",
        "#endif\n",
        "\n",
        "#ifdef __cplusplus\n",
        "extern \"C\" {\n",
        "#endif\n",
        "  void initAutoGemmClKernels(void);\n",
        "#ifdef __cplusplus\n",
        "}\n",
        "#endif\n",
        "\n",
    ])
    self.cppName = Common.getIncludePath() + "AutoGemmClKernels.cpp"
    self.cppFile = open(self.cppName, "w")
    self.cppFile.write(Common.getAutoGemmHeader())
    self.cppStr = "".join([
        "#if defined( __APPLE__ ) || defined( __MACOSX )\n",
        "#include <OpenCL/cl.h>\n",
        "#else\n",
        "#include <CL/cl.h>\n",
        "#endif\n",
        "\n",
    ])
    # Body of initAutoGemmClKernels(); grown per kernel, closed in writeToFile.
    self.initFunction = "".join([
        "extern \"C\" {\n",
        "  void initAutoGemmClKernels(void);\n",
        "}\n",
        "\n",
        "void initAutoGemmClKernels(void) {\n",
    ])
    # cl_kernel handle definitions; also grown per kernel.
    self.defines = ""

  def addKernel(self, kernel):
    """Declare the cl_kernel handle for each variant, define it as NULL, and
    add release-and-reset code to the init function."""
    variantNames = (
        kernel.getName(),
        kernel.getRowName(),
        kernel.getColName(),
        kernel.getCornerName(),
    )
    for name in variantNames:
      self.incStr += "extern cl_kernel %s_clKernel;\n" % name
      self.defines += "cl_kernel %s_clKernel = NULL;\n" % name
      self.initFunction += (
          "  if(%s_clKernel != NULL) {\n" % name
          + "    clReleaseKernel(%s_clKernel);\n" % name
          + "    %s_clKernel = NULL;\n" % name
          + "  }\n"
      )
    # The header is written incrementally; the cpp is assembled in writeToFile.
    self.incFile.write(self.incStr)
    self.incStr = ""

  def writeToFile(self):
    """Finish both files: close the header's include guard, then emit the
    handle definitions followed by the completed init function."""
    self.incFile.write(self.incStr)
    self.incFile.write("\n#endif\n")
    self.incFile.close()
    self.initFunction += "}\n"
    self.cppStr += self.defines + "\n"
    self.defines = ""
    self.cppStr += self.initFunction + "\n"
    self.initFunction = ""
    self.cppFile.write(self.cppStr)
    self.cppFile.close()
################################################################################
# KSBO - Kernel Source Build Options
################################################################################
class KernelSourceBuildOptions:
  """Generates AutoGemmKernelBuildOptionsSource.h/.cpp: the OpenCL compiler
  option string used when each kernel is built from source."""

  ##############################################################################
  # KSBO - default constructor
  ##############################################################################
  def __init__(self):
    self.incName = Common.getIncludePath() + "AutoGemmKernelBuildOptionsSource.h"
    self.incFile = open(self.incName, "w")
    self.incFile.write(Common.getAutoGemmHeader())
    self.incStr = (
        "#ifndef AUTOGEMM_KERNEL_SOURCE_BUILD_OPTIONS_H\n"
        "#define AUTOGEMM_KERNEL_SOURCE_BUILD_OPTIONS_H\n"
        "\n"
    )
    self.cppName = Common.getIncludePath() + "AutoGemmKernelBuildOptionsSource.cpp"
    self.cppFile = open(self.cppName, "w")
    self.cppFile.write(Common.getAutoGemmHeader())
    self.cppStr = "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsSource.h\"\n"

  def addKernel(self, kernel):
    """Declare and define the -cl-std source build-option string for *kernel*."""
    name = kernel.getName()
    self.incStr += "extern const char * const %s_srcBuildOptions;\n" % name
    self.cppStr += "const char * const %s_srcBuildOptions = \"-cl-std=CL%s\";\n" % (
        name, Common.getClCompilerVersion())
    # Write buffered text after each kernel and reset the buffers.
    self.incFile.write(self.incStr)
    self.incStr = ""
    self.cppFile.write(self.cppStr)
    self.cppStr = ""

  def writeToFile(self):
    """Flush remaining text, close the include guard, and close both files."""
    self.incFile.write(self.incStr)
    self.incFile.write("\n#endif\n")
    self.incFile.close()
    self.cppFile.write(self.cppStr)
    self.cppFile.close()
################################################################################
# KBSO - Kernel Binary Build Options
################################################################################
class KernelBinaryBuildOptions:
  """Generates AutoGemmKernelBuildOptionsBinary.h/.cpp: the OpenCL compiler
  option string used when each kernel is built from a pre-compiled binary."""

  ##############################################################################
  # KBSO - default constructor
  ##############################################################################
  def __init__(self):
    self.incName = Common.getIncludePath() + "AutoGemmKernelBuildOptionsBinary.h"
    self.incFile = open(self.incName, "w")
    self.incFile.write(Common.getAutoGemmHeader())
    self.incStr = (
        "#ifndef AUTOGEMM_KERNEL_BINARY_BUILD_OPTIONS_H\n"
        "#define AUTOGEMM_KERNEL_BINARY_BUILD_OPTIONS_H\n"
        "\n"
    )
    self.cppName = Common.getIncludePath() + "AutoGemmKernelBuildOptionsBinary.cpp"
    self.cppFile = open(self.cppName, "w")
    self.cppFile.write(Common.getAutoGemmHeader())
    self.cppStr = "#include \"" + Common.getRelativeIncludePath() + "AutoGemmKernelBuildOptionsBinary.h\"\n"

  def addKernel(self, kernel):
    """Declare and define the -cl-std binary build-option string for *kernel*."""
    name = kernel.getName()
    self.incStr += "extern const char * const %s_binBuildOptions;\n" % name
    self.cppStr += "const char * const %s_binBuildOptions = \"-cl-std=CL%s\";\n" % (
        name, Common.getClCompilerVersion())
    # Write buffered text after each kernel and reset the buffers.
    self.incFile.write(self.incStr)
    self.incStr = ""
    self.cppFile.write(self.cppStr)
    self.cppStr = ""

  def writeToFile(self):
    """Flush remaining text, close the include guard, and close both files."""
    self.incFile.write(self.incStr)
    self.incFile.write("\n#endif\n")
    self.incFile.close()
    self.cppFile.write(self.cppStr)
    self.cppFile.close()
################################################################################
# CPPKE - Cpp Kernel enumeration
################################################################################
class CppKernelEnumeration:
  """Accumulates C++ enumeration tables (kernels, tiles and non-tile
  parameter combinations) per precision and writes
  AutoGemmKernelEnumeration.h."""

  ##############################################################################
  # CPPKE - default constructor
  ##############################################################################
  def __init__(self):
    self.fileName = Common.getIncludePath() + "AutoGemmKernelEnumeration.h"
    # Buffers for the three generated array literals.
    self.kernelStr = ""
    self.tileStr = ""
    self.nonTileStr = ""
    # Row counts for the current precision's arrays.
    self.kernelCount = 0
    self.tileCount = 0
    self.nonTileCount = 0
    self.precision = ""
    self.precisionInitialized = False

  def newPrecision(self, precision):
    """Close the previous precision's arrays (if any), then open new array
    literals for *precision* and reset the row counts."""
    if self.precisionInitialized:
      self.kernelStr += "};\n" + (
          "const unsigned int %sgemmNumKernels = %d;\n\n"
          % (self.precision, self.kernelCount))
      self.tileStr += "};\n" + (
          "const unsigned int %sgemmNumTiles = %d;\n\n"
          % (self.precision, self.tileCount))
      self.nonTileStr += "};\n" + (
          "const unsigned int %sgemmNumNonTiles = %d;\n\n"
          % (self.precision, self.nonTileCount))
    self.precisionInitialized = True
    self.precision = precision
    self.kernelStr += "// order, transA, transB, beta, macroTileNumRows, macroTileNumCols, unroll, mSpill, nSpill\n"
    self.kernelStr += "unsigned int " + precision + "gemmKernelEnumeration[][9] = {\n"
    self.tileStr += "// macroTileNumRows, macroTileNumCols, unroll\n"
    self.tileStr += "unsigned int " + precision + "gemmTileEnumeration[][3] = {\n"
    self.nonTileStr += "// order, transA, transB, beta\n"
    self.nonTileStr += "unsigned int " + precision + "gemmNonTileEnumeration[][4] = {\n"
    self.tileCount = 0
    self.nonTileCount = 0
    self.kernelCount = 0

  def addTile(self, tile):
    """Append one { rows, cols, unroll } row to the tile table."""
    self.tileStr += "  { %3u, %3u, %1u },\n" % (
        tile.macroTileNumRows, tile.macroTileNumCols, tile.unroll)
    self.tileCount += 1

  def addNonTile(self, nonTile):
    """Append one { order, transA, transB, beta } row, encoded as integers
    (order: 1 = column-major; trans: 0 = N, 1 = T, 2 = other/conjugate;
    beta: 1 when positive)."""
    order = 1 if nonTile.order == "clblasColumnMajor" else 0
    transA = 0 if nonTile.transA == "N" else 1 if nonTile.transA == "T" else 2
    transB = 0 if nonTile.transB == "N" else 1 if nonTile.transB == "T" else 2
    beta = 1 if nonTile.beta > 0 else 0
    self.nonTileStr += "  { %1u, %1u, %1u, %1u },\n" % (order, transA, transB, beta)
    self.nonTileCount += 1

  def addKernel(self, kernel):
    """Append all four mSpill/nSpill variants of *kernel* to the kernel
    table (same integer encoding as addNonTile)."""
    order = 1 if kernel.order == "clblasColumnMajor" else 0
    transA = 0 if kernel.transA == "N" else 1 if kernel.transA == "T" else 2
    transB = 0 if kernel.transB == "N" else 1 if kernel.transB == "T" else 2
    beta = 1 if kernel.beta > 0 else 0
    for mSpill in (0, 1):
      for nSpill in (0, 1):
        self.kernelStr += "  { %1u, %1u, %1u, %1u, %3u, %3u, %2u, %1u, %1u },\n" % (
            order, transA, transB, beta,
            kernel.macroTileNumRows, kernel.macroTileNumCols,
            kernel.unroll, mSpill, nSpill)
    self.kernelCount += 4

  def writeToFile(self):
    """Terminate all three arrays, emit their counts, and write the header."""
    self.kernelStr += "};\n"
    self.kernelStr += "const unsigned int %sgemmNumKernels = %d;\n" % (self.precision, self.kernelCount)
    self.tileStr += "};\n"
    self.tileStr += "const unsigned int %sgemmNumTiles = %d;\n" % (self.precision, self.tileCount)
    self.nonTileStr += "};\n"
    self.nonTileStr += "const unsigned int %sgemmNumNonTiles = %d;\n" % (self.precision, self.nonTileCount)
    with open(self.fileName, "w") as incFile:
      incFile.write(Common.getAutoGemmHeader())
      incFile.write(self.tileStr)
      incFile.write("\n\n")
      incFile.write(self.nonTileStr)
      incFile.write("\n\n")
      incFile.write(self.kernelStr)
################################################################################
# Write Includes
################################################################################
def writeIncludes():
  """Generate every AutoGemm include/enumeration file: iterate all
  precisions, orders, transposes, betas and tiles, feeding each fully
  specified kernel to all the include writers."""
  print("AutoGemm.py: Generating include files.")
  if not os.path.exists( Common.getIncludePath() ):
    os.makedirs( Common.getIncludePath() )

  sourceIncludes = KernelSourceIncludes()
  binaryIncludes = KernelBinaryIncludes()
  clIncludes = ClKernelIncludes()
  sourceBuildOptions = KernelSourceBuildOptions()
  binaryBuildOptions = KernelBinaryBuildOptions()
  enumeration = CppKernelEnumeration()
  # Writers that receive every fully specified kernel, in emission order.
  perKernelWriters = (
      sourceIncludes,
      binaryIncludes,
      sourceBuildOptions,
      binaryBuildOptions,
      clIncludes,
      enumeration,
  )

  # One shared parameter object is mutated through every combination.
  kernel = KernelParameters.KernelParameters()
  for precision in AutoGemmParameters.precisions:
    kernel.precision = precision
    enumeration.newPrecision(precision)
    tiles = AutoGemmParameters.getTilesForPrecision(precision)
    for tile in tiles:
      enumeration.addTile(tile)
    for order in AutoGemmParameters.orders:
      kernel.order = order
      for transA in AutoGemmParameters.transposes[precision]:
        kernel.transA = transA
        for transB in AutoGemmParameters.transposes[precision]:
          kernel.transB = transB
          for beta in AutoGemmParameters.betas:
            kernel.beta = beta
            # Record this non-tile combination once per precision.
            enumeration.addNonTile(kernel)
            for tile in tiles:
              kernel.useTile(tile)
              for writer in perKernelWriters:
                writer.addKernel(kernel)

  # Flush everything to disk.
  sourceIncludes.writeToFile()
  binaryIncludes.writeToFile()
  clIncludes.writeToFile()
  sourceBuildOptions.writeToFile()
  binaryBuildOptions.writeToFile()
  enumeration.writeToFile()
################################################################################
# Main
################################################################################
if __name__ == "__main__":
  # Optional single command-line argument: output directory for the
  # generated files; otherwise the current working directory is used.
  if len(sys.argv) == 2:
    Common.setOutputPath(sys.argv[1])
  else:
    print("Warning: No output path specified; default is working directory.")
  writeIncludes()
| |
"""Support for MQTT fans."""
import logging
import voluptuous as vol
from homeassistant.components import fan, mqtt
from homeassistant.components.fan import (
ATTR_SPEED,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
CONF_UNIQUE_ID,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
# Configuration keys specific to the MQTT fan platform: topics and value
# templates for the on/off state, speed and oscillation channels.
CONF_STATE_VALUE_TEMPLATE = "state_value_template"
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
# Configurable MQTT payloads for oscillation and the individual speeds.
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
# Defaults used when the user omits the corresponding option.
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_OPTIMISTIC = False
# Default oscillation payloads and the internal key for its template.
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
OSCILLATION = "oscillation"
# Voluptuous schema for YAML-configured and MQTT-discovered fans; extends the
# shared MQTT read/write schema with the fan-specific topics and payloads
# declared above, plus the common availability/JSON-attribute schemas.
PLATFORM_SCHEMA = (
    mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
        {
            vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
            vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): mqtt.valid_publish_topic,
            vol.Optional(CONF_OSCILLATION_STATE_TOPIC): mqtt.valid_subscribe_topic,
            vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_PAYLOAD_HIGH_SPEED, default=SPEED_HIGH): cv.string,
            vol.Optional(CONF_PAYLOAD_LOW_SPEED, default=SPEED_LOW): cv.string,
            vol.Optional(CONF_PAYLOAD_MEDIUM_SPEED, default=SPEED_MEDIUM): cv.string,
            vol.Optional(CONF_PAYLOAD_OFF_SPEED, default=SPEED_OFF): cv.string,
            vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
            vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
            vol.Optional(
                CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
            ): cv.string,
            vol.Optional(
                CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
            ): cv.string,
            vol.Optional(CONF_SPEED_COMMAND_TOPIC): mqtt.valid_publish_topic,
            vol.Optional(
                CONF_SPEED_LIST,
                default=[SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH],
            ): cv.ensure_list,
            vol.Optional(CONF_SPEED_STATE_TOPIC): mqtt.valid_subscribe_topic,
            vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
            vol.Optional(CONF_UNIQUE_ID): cv.string,
        }
    )
    .extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
    .extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT fan through configuration.yaml."""
    # Register the MQTT reload service before creating the entity so a
    # YAML reload can recreate it.
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT fan dynamically through MQTT discovery."""
    async def async_discover(discovery_payload):
        """Discover and add a MQTT fan."""
        discovery_data = discovery_payload.discovery_data
        try:
            # Validate the raw discovery payload against the platform schema.
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(
                hass, config, async_add_entities, config_entry, discovery_data
            )
        except Exception:
            # Setup failed for any reason: drop the discovery hash so the
            # device can be rediscovered, then propagate the original error.
            clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
            raise
    # Invoke async_discover whenever a new fan discovery message arrives.
    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, "mqtt"), async_discover
    )
async def _async_setup_entity(
    hass, config, async_add_entities, config_entry=None, discovery_data=None
):
    """Set up the MQTT fan."""
    # Shared by both the YAML and the discovery setup paths.
    async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])
class MqttFan(
    MqttAttributes,
    MqttAvailability,
    MqttDiscoveryUpdate,
    MqttEntityDeviceInfo,
    FanEntity,
):
    """A MQTT fan component."""
    def __init__(self, hass, config, config_entry, discovery_data):
        """Initialize the MQTT fan."""
        self.hass = hass
        self._unique_id = config.get(CONF_UNIQUE_ID)
        # Last known on/off, speed and oscillation state.
        self._state = False
        self._speed = None
        self._oscillation = None
        self._supported_features = 0
        self._sub_state = None
        # Populated from the config by _setup_from_config below.
        self._topic = None
        self._payload = None
        self._templates = None
        self._optimistic = None
        self._optimistic_oscillation = None
        self._optimistic_speed = None
        # Load config
        self._setup_from_config(config)
        device_config = config.get(CONF_DEVICE)
        MqttAttributes.__init__(self, config)
        MqttAvailability.__init__(self, config)
        MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
        MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
    async def async_added_to_hass(self):
        """Subscribe to MQTT events."""
        await super().async_added_to_hass()
        await self._subscribe_topics()
    async def discovery_update(self, discovery_payload):
        """Handle updated discovery message."""
        # Re-validate, re-apply the config and re-subscribe in place.
        config = PLATFORM_SCHEMA(discovery_payload)
        self._setup_from_config(config)
        await self.attributes_discovery_update(config)
        await self.availability_discovery_update(config)
        await self.device_info_discovery_update(config)
        await self._subscribe_topics()
        self.async_write_ha_state()
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        self._config = config
        # Map of the six state/command topics; missing ones are None.
        self._topic = {
            key: config.get(key)
            for key in (
                CONF_STATE_TOPIC,
                CONF_COMMAND_TOPIC,
                CONF_SPEED_STATE_TOPIC,
                CONF_SPEED_COMMAND_TOPIC,
                CONF_OSCILLATION_STATE_TOPIC,
                CONF_OSCILLATION_COMMAND_TOPIC,
            )
        }
        self._templates = {
            CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
            ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
            OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
        }
        self._payload = {
            "STATE_ON": config[CONF_PAYLOAD_ON],
            "STATE_OFF": config[CONF_PAYLOAD_OFF],
            "OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
            "OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
            "SPEED_LOW": config[CONF_PAYLOAD_LOW_SPEED],
            "SPEED_MEDIUM": config[CONF_PAYLOAD_MEDIUM_SPEED],
            "SPEED_HIGH": config[CONF_PAYLOAD_HIGH_SPEED],
            "SPEED_OFF": config[CONF_PAYLOAD_OFF_SPEED],
        }
        # Fall back to optimistic (assumed-state) mode for any channel whose
        # state topic is not configured.
        optimistic = config[CONF_OPTIMISTIC]
        self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
        self._optimistic_oscillation = (
            optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
        )
        self._optimistic_speed = (
            optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None
        )
        # `cond and FLAG` evaluates to False (== 0) or FLAG, so each |= adds
        # the feature flag only when its command topic is configured.
        self._supported_features = 0
        self._supported_features |= (
            self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
            and SUPPORT_OSCILLATE
        )
        self._supported_features |= (
            self._topic[CONF_SPEED_COMMAND_TOPIC] is not None and SUPPORT_SET_SPEED
        )
        # Replace missing templates with the identity function so callbacks
        # can apply them unconditionally.
        for key, tpl in list(self._templates.items()):
            if tpl is None:
                self._templates[key] = lambda value: value
            else:
                tpl.hass = self.hass
                self._templates[key] = tpl.async_render_with_possible_json_value
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        topics = {}
        @callback
        @log_messages(self.hass, self.entity_id)
        def state_received(msg):
            """Handle new received MQTT message."""
            # Unrecognized payloads leave the current state unchanged.
            payload = self._templates[CONF_STATE](msg.payload)
            if payload == self._payload["STATE_ON"]:
                self._state = True
            elif payload == self._payload["STATE_OFF"]:
                self._state = False
            self.async_write_ha_state()
        if self._topic[CONF_STATE_TOPIC] is not None:
            topics[CONF_STATE_TOPIC] = {
                "topic": self._topic[CONF_STATE_TOPIC],
                "msg_callback": state_received,
                "qos": self._config[CONF_QOS],
            }
        @callback
        @log_messages(self.hass, self.entity_id)
        def speed_received(msg):
            """Handle new received MQTT message for the speed."""
            payload = self._templates[ATTR_SPEED](msg.payload)
            if payload == self._payload["SPEED_LOW"]:
                self._speed = SPEED_LOW
            elif payload == self._payload["SPEED_MEDIUM"]:
                self._speed = SPEED_MEDIUM
            elif payload == self._payload["SPEED_HIGH"]:
                self._speed = SPEED_HIGH
            elif payload == self._payload["SPEED_OFF"]:
                self._speed = SPEED_OFF
            self.async_write_ha_state()
        if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
            topics[CONF_SPEED_STATE_TOPIC] = {
                "topic": self._topic[CONF_SPEED_STATE_TOPIC],
                "msg_callback": speed_received,
                "qos": self._config[CONF_QOS],
            }
            # Default until the first speed state message arrives.
            self._speed = SPEED_OFF
        @callback
        @log_messages(self.hass, self.entity_id)
        def oscillation_received(msg):
            """Handle new received MQTT message for the oscillation."""
            payload = self._templates[OSCILLATION](msg.payload)
            if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
                self._oscillation = True
            elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
                self._oscillation = False
            self.async_write_ha_state()
        if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
            topics[CONF_OSCILLATION_STATE_TOPIC] = {
                "topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
                "msg_callback": oscillation_received,
                "qos": self._config[CONF_QOS],
            }
            # Default until the first oscillation state message arrives.
            self._oscillation = False
        self._sub_state = await subscription.async_subscribe_topics(
            self.hass, self._sub_state, topics
        )
    async def async_will_remove_from_hass(self):
        """Unsubscribe when removed."""
        self._sub_state = await subscription.async_unsubscribe_topics(
            self.hass, self._sub_state
        )
        await MqttAttributes.async_will_remove_from_hass(self)
        await MqttAvailability.async_will_remove_from_hass(self)
        await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
    @property
    def should_poll(self):
        """No polling needed for a MQTT fan."""
        return False
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def name(self) -> str:
        """Get entity name."""
        return self._config[CONF_NAME]
    @property
    def speed_list(self) -> list:
        """Get the list of available speeds."""
        return self._config[CONF_SPEED_LIST]
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._supported_features
    @property
    def speed(self):
        """Return the current speed."""
        return self._speed
    @property
    def oscillating(self):
        """Return the oscillation state."""
        return self._oscillation
    async def async_turn_on(self, speed: str = None, **kwargs) -> None:
        """Turn on the entity.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._topic[CONF_COMMAND_TOPIC],
            self._payload["STATE_ON"],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if speed:
            await self.async_set_speed(speed)
        # In optimistic mode assume the command succeeded immediately.
        if self._optimistic:
            self._state = True
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off the entity.
        This method is a coroutine.
        """
        mqtt.async_publish(
            self.hass,
            self._topic[CONF_COMMAND_TOPIC],
            self._payload["STATE_OFF"],
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic:
            self._state = False
            self.async_write_ha_state()
    async def async_set_speed(self, speed: str) -> None:
        """Set the speed of the fan.
        This method is a coroutine.
        """
        # Translate the HA speed constant to the configured payload; unknown
        # speeds are forwarded verbatim.
        if speed == SPEED_LOW:
            mqtt_payload = self._payload["SPEED_LOW"]
        elif speed == SPEED_MEDIUM:
            mqtt_payload = self._payload["SPEED_MEDIUM"]
        elif speed == SPEED_HIGH:
            mqtt_payload = self._payload["SPEED_HIGH"]
        elif speed == SPEED_OFF:
            mqtt_payload = self._payload["SPEED_OFF"]
        else:
            mqtt_payload = speed
        mqtt.async_publish(
            self.hass,
            self._topic[CONF_SPEED_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic_speed:
            self._speed = speed
            self.async_write_ha_state()
    async def async_oscillate(self, oscillating: bool) -> None:
        """Set oscillation.
        This method is a coroutine.
        """
        if oscillating is False:
            payload = self._payload["OSCILLATE_OFF_PAYLOAD"]
        else:
            payload = self._payload["OSCILLATE_ON_PAYLOAD"]
        mqtt.async_publish(
            self.hass,
            self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
            payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
        )
        if self._optimistic_oscillation:
            self._oscillation = oscillating
            self.async_write_ha_state()
    @property
    def unique_id(self):
        """Return a unique ID."""
        return self._unique_id
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod, ABCMeta
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.param import Param, Params
from pyspark.ml.param.shared import HasLabelCol, HasPredictionCol, HasRawPredictionCol
from pyspark.ml.util import keyword_only
from pyspark.mllib.common import inherit_doc
__all__ = ['Evaluator', 'BinaryClassificationEvaluator', 'RegressionEvaluator',
'MulticlassClassificationEvaluator']
@inherit_doc
class Evaluator(Params):
    """
    Base class for evaluators that compute metrics from predictions.
    """

    __metaclass__ = ABCMeta

    @abstractmethod
    def _evaluate(self, dataset):
        """
        Evaluates the output.

        :param dataset: a dataset that contains labels/observations and
               predictions
        :return: metric
        """
        raise NotImplementedError()

    def evaluate(self, dataset, params=None):
        """
        Evaluates the output with optional parameters.

        :param dataset: a dataset that contains labels/observations and
               predictions
        :param params: an optional param map that overrides embedded
               params
        :return: metric
        """
        param_map = dict() if params is None else params
        if not isinstance(param_map, dict):
            raise ValueError("Params must be a param map but got %s." % type(param_map))
        if param_map:
            # Overrides are applied to a copy so this instance's embedded
            # params are left untouched.
            return self.copy(param_map)._evaluate(dataset)
        return self._evaluate(dataset)

    def isLargerBetter(self):
        """
        Indicates whether the metric returned by :py:meth:`evaluate` should be maximized
        (True, default) or minimized (False).
        A given evaluator may support multiple metrics which may be maximized or minimized.
        """
        return True
@inherit_doc
class JavaEvaluator(Evaluator, JavaWrapper):
    """
    Base class for :py:class:`Evaluator`s that wrap Java/Scala
    implementations.
    """

    __metaclass__ = ABCMeta

    def _evaluate(self, dataset):
        """
        Evaluates the output.
        :param dataset: a dataset that contains labels/observations and predictions.
        :return: evaluation metric
        """
        # Push the Python-side params to the wrapped Java object first so
        # overrides supplied via a param map take effect on the JVM side.
        self._transfer_params_to_java()
        return self._java_obj.evaluate(dataset._jdf)

    def isLargerBetter(self):
        # Defer to the Java implementation, which knows whether the
        # currently selected metric should be maximized or minimized.
        self._transfer_params_to_java()
        return self._java_obj.isLargerBetter()
@inherit_doc
class BinaryClassificationEvaluator(JavaEvaluator, HasLabelCol, HasRawPredictionCol):
    """
    Evaluator for binary classification, which expects two input
    columns: rawPrediction and label.

    >>> from pyspark.mllib.linalg import Vectors
    >>> scoreAndLabels = map(lambda x: (Vectors.dense([1.0 - x[0], x[0]]), x[1]),
    ...    [(0.1, 0.0), (0.1, 1.0), (0.4, 0.0), (0.6, 0.0), (0.6, 1.0), (0.6, 1.0), (0.8, 1.0)])
    >>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = BinaryClassificationEvaluator(rawPredictionCol="raw")
    >>> evaluator.evaluate(dataset)
    0.70...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "areaUnderPR"})
    0.83...
    """

    # a placeholder to make it appear in the generated doc
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (areaUnderROC|areaUnderPR)")

    @keyword_only
    def __init__(self, rawPredictionCol="rawPrediction", labelCol="label",
                 metricName="areaUnderROC"):
        """
        __init__(self, rawPredictionCol="rawPrediction", labelCol="label", \
                 metricName="areaUnderROC")
        """
        super(BinaryClassificationEvaluator, self).__init__()
        # Companion JVM evaluator that this Python wrapper delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.BinaryClassificationEvaluator", self.uid)
        #: param for metric name in evaluation (areaUnderROC|areaUnderPR)
        # Re-created here with ``self`` as parent so it is bound to this
        # instance (the class-level attribute is only a doc placeholder).
        self.metricName = Param(self, "metricName",
                                "metric name in evaluation (areaUnderROC|areaUnderPR)")
        self._setDefault(rawPredictionCol="rawPrediction", labelCol="label",
                         metricName="areaUnderROC")
        # ``@keyword_only`` stashes the caller's keyword args on the method.
        kwargs = self.__init__._input_kwargs
        self._set(**kwargs)

    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        self._paramMap[self.metricName] = value
        return self

    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)

    @keyword_only
    def setParams(self, rawPredictionCol="rawPrediction", labelCol="label",
                  metricName="areaUnderROC"):
        """
        setParams(self, rawPredictionCol="rawPrediction", labelCol="label", \
                  metricName="areaUnderROC")
        Sets params for binary classification evaluator.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class RegressionEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol):
    """
    Evaluator for Regression, which expects two input
    columns: prediction and label.

    >>> scoreAndLabels = [(-28.98343821, -27.0), (20.21491975, 21.5),
    ...   (-25.98418959, -22.0), (30.69731842, 33.0), (74.69283752, 71.0)]
    >>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["raw", "label"])
    ...
    >>> evaluator = RegressionEvaluator(predictionCol="raw")
    >>> evaluator.evaluate(dataset)
    2.842...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "r2"})
    0.993...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "mae"})
    2.649...
    """
    # Because we will maximize evaluation value (ref: `CrossValidator`),
    # when we evaluate a metric that is needed to minimize (e.g., `"rmse"`, `"mse"`, `"mae"`),
    # we take and output the negative of this metric.
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation (mse|rmse|r2|mae)")

    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="rmse"):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="rmse")
        """
        super(RegressionEvaluator, self).__init__()
        # Companion JVM evaluator that this Python wrapper delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.RegressionEvaluator", self.uid)
        #: param for metric name in evaluation (mse|rmse|r2|mae)
        # Re-created here with ``self`` as parent so it is bound to this
        # instance (the class-level attribute is only a doc placeholder).
        self.metricName = Param(self, "metricName",
                                "metric name in evaluation (mse|rmse|r2|mae)")
        self._setDefault(predictionCol="prediction", labelCol="label",
                         metricName="rmse")
        # ``@keyword_only`` stashes the caller's keyword args on the method.
        kwargs = self.__init__._input_kwargs
        self._set(**kwargs)

    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        self._paramMap[self.metricName] = value
        return self

    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)

    @keyword_only
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="rmse"):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="rmse")
        Sets params for regression evaluator.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
@inherit_doc
class MulticlassClassificationEvaluator(JavaEvaluator, HasLabelCol, HasPredictionCol):
    """
    Evaluator for Multiclass Classification, which expects two input
    columns: prediction and label.
    >>> scoreAndLabels = [(0.0, 0.0), (0.0, 1.0), (0.0, 0.0),
    ...     (1.0, 0.0), (1.0, 1.0), (1.0, 1.0), (1.0, 1.0), (2.0, 2.0), (2.0, 0.0)]
    >>> dataset = sqlContext.createDataFrame(scoreAndLabels, ["prediction", "label"])
    ...
    >>> evaluator = MulticlassClassificationEvaluator(predictionCol="prediction")
    >>> evaluator.evaluate(dataset)
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "precision"})
    0.66...
    >>> evaluator.evaluate(dataset, {evaluator.metricName: "recall"})
    0.66...
    """
    # a placeholder to make it appear in the generated doc
    metricName = Param(Params._dummy(), "metricName",
                       "metric name in evaluation "
                       "(f1|precision|recall|weightedPrecision|weightedRecall)")

    @keyword_only
    def __init__(self, predictionCol="prediction", labelCol="label",
                 metricName="f1"):
        """
        __init__(self, predictionCol="prediction", labelCol="label", \
                 metricName="f1")
        """
        super(MulticlassClassificationEvaluator, self).__init__()
        # Companion JVM evaluator that this Python wrapper delegates to.
        self._java_obj = self._new_java_obj(
            "org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator", self.uid)
        # param for metric name in evaluation (f1|precision|recall|weightedPrecision|weightedRecall)
        # Re-created here with ``self`` as parent so it is bound to this
        # instance (the class-level attribute is only a doc placeholder).
        self.metricName = Param(self, "metricName",
                                "metric name in evaluation"
                                " (f1|precision|recall|weightedPrecision|weightedRecall)")
        self._setDefault(predictionCol="prediction", labelCol="label",
                         metricName="f1")
        # ``@keyword_only`` stashes the caller's keyword args on the method.
        kwargs = self.__init__._input_kwargs
        self._set(**kwargs)

    def setMetricName(self, value):
        """
        Sets the value of :py:attr:`metricName`.
        """
        self._paramMap[self.metricName] = value
        return self

    def getMetricName(self):
        """
        Gets the value of metricName or its default value.
        """
        return self.getOrDefault(self.metricName)

    @keyword_only
    def setParams(self, predictionCol="prediction", labelCol="label",
                  metricName="f1"):
        """
        setParams(self, predictionCol="prediction", labelCol="label", \
                  metricName="f1")
        Sets params for multiclass classification evaluator.
        """
        kwargs = self.setParams._input_kwargs
        return self._set(**kwargs)
if __name__ == "__main__":
    # Run the module doctests against a local two-core Spark context.
    import doctest
    import sys

    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    sc = SparkContext("local[2]", "ml.evaluation tests")
    sqlContext = SQLContext(sc)
    globs['sc'] = sc
    globs['sqlContext'] = sqlContext
    try:
        (failure_count, test_count) = doctest.testmod(
            globs=globs, optionflags=doctest.ELLIPSIS)
    finally:
        # Always release the SparkContext, even if doctest itself raises.
        sc.stop()
    if failure_count:
        # Use sys.exit: the ``exit`` builtin is injected by the ``site``
        # module and is not guaranteed to be available.
        sys.exit(-1)
| |
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import tempfile
import fixtures
import six
from neutron.common import constants
from neutron.tests import base
from neutron.tests.common import helpers as c_helpers
from neutron.tests.common import net_helpers
class ConfigDict(base.AttributeDict):
    """AttributeDict that recursively converts nested dicts on update."""

    def update(self, other):
        # Convert nested plain dicts before merging so attribute-style
        # access works at every depth of the stored configuration.
        self.convert_to_attr_dict(other)
        super(ConfigDict, self).update(other)

    def convert_to_attr_dict(self, other):
        """Convert nested dicts to AttributeDict.

        :param other: dictionary to be directly modified.
        """
        for key, value in six.iteritems(other):
            if isinstance(value, dict):
                if not isinstance(value, base.AttributeDict):
                    other[key] = base.AttributeDict(value)
                # Recurse into the *stored* value: when a fresh AttributeDict
                # copy was just created above, recursing into the original
                # ``value`` would convert its nested dicts without the result
                # being visible through ``other`` (the copy is shallow), so
                # dicts nested two or more levels deep stayed plain dicts.
                self.convert_to_attr_dict(other[key])
class ConfigFileFixture(fixtures.Fixture):
    """A fixture that knows how to translate configurations to files.

    :param base_filename: the filename to use on disk.
    :param config: a ConfigDict instance.
    :param temp_dir: an existing temporary directory to use for storage.
    """

    def __init__(self, base_filename, config, temp_dir):
        super(ConfigFileFixture, self).__init__()
        self.base_filename = base_filename
        self.config = config
        self.temp_dir = temp_dir

    def _setUp(self):
        parser = self.dict_to_config_parser(self.config)
        # Need to randomly generate a unique folder to put the file in
        self.filename = os.path.join(self.temp_dir, self.base_filename)
        with open(self.filename, 'w') as config_file:
            parser.write(config_file)
            config_file.flush()

    def dict_to_config_parser(self, config_dict):
        """Build a config parser populated from ``config_dict``."""
        parser = six.moves.configparser.SafeConfigParser()
        for section, options in six.iteritems(config_dict):
            # The DEFAULT section is implicit and must not be added.
            if section != 'DEFAULT':
                parser.add_section(section)
            for name, value in six.iteritems(options):
                parser.set(section, name, value)
        return parser
class ConfigFixture(fixtures.Fixture):
    """A fixture that holds an actual Neutron configuration.

    Note that 'self.config' is intended to only be updated once, during
    the constructor, so if this fixture is re-used (setUp is called twice),
    then the dynamic configuration values won't change. The correct usage
    is initializing a new instance of the class.
    """

    def __init__(self, temp_dir, base_filename):
        super(ConfigFixture, self).__init__()
        self.config = ConfigDict()
        self.temp_dir = temp_dir
        self.base_filename = base_filename

    def _setUp(self):
        file_fixture = ConfigFileFixture(
            self.base_filename, self.config, self.temp_dir)
        self.useFixture(file_fixture)
        # Expose the generated file's path for consumers of this fixture.
        self.filename = file_fixture.filename
class NeutronConfigFixture(ConfigFixture):
    """Generate a neutron.conf wired to the given DB and rabbitmq settings."""

    def __init__(self, temp_dir, connection, rabbitmq_environment):
        super(NeutronConfigFixture, self).__init__(
            temp_dir, base_filename='neutron.conf')
        self.config.update({
            'DEFAULT': {
                'host': self._generate_host(),
                'state_path': self._generate_state_path(temp_dir),
                'lock_path': '$state_path/lock',
                'bind_port': self._generate_port(),
                'api_paste_config': self._generate_api_paste(),
                'policy_file': self._generate_policy_json(),
                'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin',
                'service_plugins': ('neutron.services.l3_router.'
                                    'l3_router_plugin.L3RouterPlugin'),
                'rabbit_userid': rabbitmq_environment.user,
                'rabbit_password': rabbitmq_environment.password,
                'rabbit_hosts': '127.0.0.1',
                'rabbit_virtual_host': rabbitmq_environment.vhost,
                'auth_strategy': 'noauth',
                'verbose': 'True',
                'debug': 'True',
            },
            'database': {
                'connection': connection,
            }
        })

    def _generate_host(self):
        # Random host name so concurrent test runs don't collide.
        return base.get_rand_name(prefix='host-')

    def _generate_state_path(self, temp_dir):
        # Assume that temp_dir will be removed by the caller
        self.state_path = tempfile.mkdtemp(prefix='state_path', dir=temp_dir)
        return self.state_path

    def _generate_port(self):
        """Get a free TCP port from the Operating System and return it.

        This might fail if some other process occupies this port after this
        function finished but before the neutron-server process started.
        """
        return str(net_helpers.get_free_namespace_port(
            constants.PROTO_NAME_TCP))

    def _generate_api_paste(self):
        # Reuse the sample api-paste.ini shipped with the tree.
        return c_helpers.find_sample_file('api-paste.ini')

    def _generate_policy_json(self):
        # Reuse the sample policy.json shipped with the tree.
        return c_helpers.find_sample_file('policy.json')
class ML2ConfigFixture(ConfigFixture):
    """Generate an ml2_conf.ini using VLAN tenant networks over OVS."""

    def __init__(self, temp_dir):
        super(ML2ConfigFixture, self).__init__(
            temp_dir, base_filename='ml2_conf.ini')
        self.config.update({
            'ml2': {
                'tenant_network_types': 'vlan',
                'mechanism_drivers': 'openvswitch',
            },
            'ml2_type_vlan': {
                'network_vlan_ranges': 'physnet1:1000:2999',
            },
            'ml2_type_gre': {
                'tunnel_id_ranges': '1:1000',
            },
            'ml2_type_vxlan': {
                'vni_ranges': '1001:2000',
            },
            'ovs': {
                'enable_tunneling': 'False',
                'local_ip': '127.0.0.1',
                'bridge_mappings': self._generate_bridge_mappings(),
                'integration_bridge': self._generate_integration_bridge(),
            },
            'securitygroup': {
                'firewall_driver': ('neutron.agent.linux.iptables_firewall.'
                                    'OVSHybridIptablesFirewallDriver'),
            }
        })

    def _generate_bridge_mappings(self):
        # Random bridge name, capped at the kernel device-name length limit.
        return ('physnet1:%s' %
                base.get_rand_name(
                    prefix='br-eth',
                    max_length=constants.DEVICE_NAME_MAX_LEN))

    def _generate_integration_bridge(self):
        # Random bridge name, capped at the kernel device-name length limit.
        return base.get_rand_name(prefix='br-int',
                                  max_length=constants.DEVICE_NAME_MAX_LEN)
class L3ConfigFixture(ConfigFixture):
    """Generate an l3_agent.ini bound to the given integration bridge."""

    def __init__(self, temp_dir, integration_bridge):
        super(L3ConfigFixture, self).__init__(
            temp_dir, base_filename='l3_agent.ini')
        self.config.update({
            'DEFAULT': {
                'l3_agent_manager': ('neutron.agent.l3_agent.'
                                     'L3NATAgentWithStateReport'),
                'interface_driver': ('neutron.agent.linux.interface.'
                                     'OVSInterfaceDriver'),
                'ovs_integration_bridge': integration_bridge,
                'external_network_bridge': self._generate_external_bridge(),
                'debug': 'True',
                'verbose': 'True',
                'test_namespace_suffix': self._generate_namespace_suffix(),
            }
        })

    def _generate_external_bridge(self):
        # Random bridge name, capped at the kernel device-name length limit.
        return base.get_rand_name(prefix='br-ex',
                                  max_length=constants.DEVICE_NAME_MAX_LEN)

    def _generate_namespace_suffix(self):
        # Random suffix so namespaces from parallel runs don't clash.
        return base.get_rand_name(prefix='test')
| |
"""
kombu.transport.pyamqplib
=========================
amqplib transport.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
import socket
try:
from ssl import SSLError
except ImportError:
class SSLError(Exception): # noqa
pass
from amqplib import client_0_8 as amqp
from amqplib.client_0_8 import transport
from amqplib.client_0_8.channel import Channel as _Channel
from amqplib.client_0_8.exceptions import AMQPConnectionException
from amqplib.client_0_8.exceptions import AMQPChannelException
from kombu.transport import base
DEFAULT_PORT = 5672
# amqplib's handshake mistakenly identifies as protocol version 1191,
# this breaks in RabbitMQ tip, which no longer falls back to
# 0-8 for unknown ids.
transport.AMQP_PROTOCOL_HEADER = "AMQP\x01\x01\x08\x00"
class Connection(amqp.Connection):  # pragma: no cover

    def _dispatch_basic_return(self, channel, args, msg):
        # Decode the fields of a ``basic.return`` frame (a message the
        # broker bounced back as unroutable).
        reply_code = args.read_short()
        reply_text = args.read_shortstr()
        exchange = args.read_shortstr()
        routing_key = args.read_shortstr()
        exc = AMQPChannelException(reply_code, reply_text, (50, 60))
        if channel.events["basic_return"]:
            # Hand the bounced message to registered callbacks instead of
            # raising, so applications can decide how to handle it.
            for callback in channel.events["basic_return"]:
                callback(exc, exchange, routing_key, msg)
        else:
            raise exc

    def __init__(self, *args, **kwargs):
        super(Connection, self).__init__(*args, **kwargs)
        # Route method (60, 50) == basic.return through the dispatcher
        # above instead of amqplib's default handling.
        self._method_override = {(60, 50): self._dispatch_basic_return}

    def drain_events(self, allowed_methods=None, timeout=None):
        """Wait for an event on any channel."""
        # NOTE(review): ``allowed_methods`` is accepted but not forwarded
        # to ``wait_multi`` -- confirm whether that is intentional.
        return self.wait_multi(self.channels.values(), timeout=timeout)

    def wait_multi(self, channels, allowed_methods=None, timeout=None):
        """Wait for an event on a channel."""
        chanmap = dict((chan.channel_id, chan) for chan in channels)
        chanid, method_sig, args, content = self._wait_multiple(
            chanmap.keys(), allowed_methods, timeout=timeout)

        channel = chanmap[chanid]

        if content \
                and channel.auto_decode \
                and hasattr(content, 'content_encoding'):
            # Best-effort body decode; failures leave the raw body intact.
            try:
                content.body = content.body.decode(content.content_encoding)
            except Exception:
                pass

        # Overrides (e.g. basic.return) win over the channel's standard
        # method map.
        amqp_method = self._method_override.get(method_sig) or \
            channel._METHOD_MAP.get(method_sig, None)

        if amqp_method is None:
            raise Exception('Unknown AMQP method (%d, %d)' % method_sig)

        if content is None:
            return amqp_method(channel, args)
        else:
            return amqp_method(channel, args, content)

    def read_timeout(self, timeout=None):
        # Read a single method, temporarily lowering the socket timeout
        # and restoring the previous value afterwards.
        if timeout is None:
            return self.method_reader.read_method()
        sock = self.transport.sock
        prev = sock.gettimeout()
        sock.settimeout(timeout)
        try:
            try:
                return self.method_reader.read_method()
            except SSLError, exc:
                # http://bugs.python.org/issue10272
                if "timed out" in str(exc):
                    raise socket.timeout()
                raise
        finally:
            sock.settimeout(prev)

    def _wait_multiple(self, channel_ids, allowed_methods, timeout=None):
        # First, serve any method already queued on one of the channels.
        for channel_id in channel_ids:
            method_queue = self.channels[channel_id].method_queue
            for queued_method in method_queue:
                method_sig = queued_method[0]
                # (20, 40) == channel.close is always delivered.
                if (allowed_methods is None) \
                        or (method_sig in allowed_methods) \
                        or (method_sig == (20, 40)):
                    method_queue.remove(queued_method)
                    method_sig, args, content = queued_method
                    return channel_id, method_sig, args, content

        # Nothing queued, need to wait for a method from the peer
        read_timeout = self.read_timeout
        channels = self.channels
        wait = self.wait
        while 1:
            channel, method_sig, args, content = read_timeout(timeout)

            if (channel in channel_ids) \
                    and ((allowed_methods is None) \
                        or (method_sig in allowed_methods) \
                        or (method_sig == (20, 40))):
                return channel, method_sig, args, content

            # Not the channel and/or method we were looking for. Queue
            # this method for later
            channels[channel].method_queue.append((method_sig, args, content))

            #
            # If we just queued up a method for channel 0 (the Connection
            # itself) it's probably a close method in reaction to some
            # error, so deal with it right away.
            #
            if channel == 0:
                wait()

    def channel(self, channel_id=None):
        # EAFP: reuse an existing channel when possible, otherwise create
        # a new one with the requested id.
        try:
            return self.channels[channel_id]
        except KeyError:
            return Channel(self, channel_id)
class Message(base.Message):
    """A message received by the broker.

    .. attribute:: body

        The message body.

    .. attribute:: delivery_tag

        The message delivery tag, uniquely identifying this message.

    .. attribute:: channel

        The channel instance the message was received on.

    """

    def __init__(self, channel, msg, **kwargs):
        # Flatten the amqplib message's properties into the keyword
        # arguments expected by kombu's base Message class.
        props = msg.properties
        super(Message, self).__init__(channel,
                                      body=msg.body,
                                      delivery_tag=msg.delivery_tag,
                                      content_type=props.get("content_type"),
                                      content_encoding=props.get("content_encoding"),
                                      delivery_info=msg.delivery_info,
                                      properties=msg.properties,
                                      headers=props.get("application_headers"),
                                      **kwargs)
class Channel(_Channel, base.StdChannel):
    Message = Message
    # Class-level registry of ``basic_return`` callbacks, consulted by
    # Connection._dispatch_basic_return.
    events = {"basic_return": []}

    def __init__(self, *args, **kwargs):
        # Consumer tags registered with no_ack=True, so higher layers know
        # which deliveries must not be acknowledged.
        self.no_ack_consumers = set()
        super(Channel, self).__init__(*args, **kwargs)

    def prepare_message(self, message_data, priority=None,
                        content_type=None, content_encoding=None, headers=None,
                        properties=None):
        """Encapsulate data into a AMQP message."""
        # NOTE(review): ``**properties`` raises TypeError when properties
        # is None -- confirm callers always pass a dict.
        return amqp.Message(message_data, priority=priority,
                            content_type=content_type,
                            content_encoding=content_encoding,
                            application_headers=headers,
                            **properties)

    def message_to_python(self, raw_message):
        """Convert encoded message body back to a Python value."""
        return self.Message(self, raw_message)

    def close(self):
        # Always drop the connection reference, even if close() fails.
        try:
            super(Channel, self).close()
        finally:
            self.connection = None

    def basic_consume(self, *args, **kwargs):
        consumer_tag = super(Channel, self).basic_consume(*args, **kwargs)
        # NOTE(review): assumes "no_ack" is always passed by keyword; a
        # positional or missing argument raises KeyError here -- confirm.
        if kwargs["no_ack"]:
            self.no_ack_consumers.add(consumer_tag)
        return consumer_tag

    def basic_cancel(self, consumer_tag, **kwargs):
        # Forget the tag whether or not it was registered as no-ack.
        self.no_ack_consumers.discard(consumer_tag)
        return super(Channel, self).basic_cancel(consumer_tag, **kwargs)
class Transport(base.Transport):
    """kombu transport backed by amqplib's 0-8 client."""

    Connection = Connection

    default_port = DEFAULT_PORT

    # it's very annoying that amqplib sometimes raises AttributeError
    # if the connection is lost, but nothing we can do about that here.
    connection_errors = (AMQPConnectionException,
                         socket.error,
                         IOError,
                         OSError,
                         AttributeError)
    channel_errors = (AMQPChannelException, )

    def __init__(self, client, **kwargs):
        self.client = client
        # Allow the port to be overridden per transport instance.
        self.default_port = kwargs.get("default_port") or self.default_port

    def create_channel(self, connection):
        return connection.channel()

    def drain_events(self, connection, **kwargs):
        return connection.drain_events(**kwargs)

    def establish_connection(self):
        """Establish connection to the AMQP broker."""
        conninfo = self.client
        # Fill in any connection parameters the client left unset.
        for name, default_value in self.default_connection_params.items():
            if not getattr(conninfo, name, None):
                setattr(conninfo, name, default_value)
        conn = self.Connection(host=conninfo.host,
                               userid=conninfo.userid,
                               password=conninfo.password,
                               login_method=conninfo.login_method,
                               virtual_host=conninfo.virtual_host,
                               insist=conninfo.insist,
                               ssl=conninfo.ssl,
                               connect_timeout=conninfo.connect_timeout)
        conn.client = self.client
        return conn

    def close_connection(self, connection):
        """Close the AMQP broker connection."""
        # Break the back-reference before closing.
        connection.client = None
        connection.close()

    def verify_connection(self, connection):
        # amqplib sets ``channels`` to None once the connection is closed.
        return connection.channels is not None

    @property
    def default_connection_params(self):
        return {"userid": "guest", "password": "guest",
                "port": self.default_port,
                "hostname": "localhost", "login_method": "AMQPLAIN"}
| |
import functools
from django import http
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.views.decorators.vary import vary_on_headers
import commonware.log
import session_csrf
from mobility.decorators import mobilized
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from abuse.models import send_abuse_report
from amo import messages, urlresolvers
from amo.forms import AbuseForm
from amo.models import manual_order
from amo.urlresolvers import reverse
from reviews.models import Review
from translations.query import order_by_translation
from versions.models import Version
from .decorators import addon_view_factory
from .models import Addon
log = commonware.log.getLogger('z.addons')
addon_view = addon_view_factory(qs=Addon.objects.valid)
addon_unreviewed_view = addon_view_factory(qs=Addon.objects.unreviewed)
addon_disabled_view = addon_view_factory(qs=Addon.objects.valid_and_disabled)
def author_addon_clicked(f):
    """Decorator redirecting clicks on "Other add-ons by author"."""
    @functools.wraps(f)
    def wrapper(request, *args, **kwargs):
        selected = request.GET.get('addons-author-addons-select', None)
        if selected:
            try:
                addon_id = int(selected)
            except ValueError:
                return http.HttpResponseBadRequest('Invalid add-on ID.')
            return http.HttpResponsePermanentRedirect(reverse(
                'addons.detail', args=[addon_id]))
        # No selection made: fall through to the wrapped view.
        return f(request, *args, **kwargs)
    return wrapper
@addon_disabled_view
def addon_detail(request, addon):
    """Add-ons details page dispatcher."""
    if addon.is_deleted:
        raise http.Http404
    if addon.is_disabled:
        return render(request, 'addons/impala/disabled.html',
                      {'addon': addon}, status=404)
    if addon.is_webapp():
        # Apps don't deserve AMO detail pages.
        raise http.Http404

    if addon.type not in request.APP.types:
        # Redirect to an app that supports this type.
        supported = [app for app in amo.APP_USAGE if addon.type in app.types]
        if not supported:
            raise http.Http404
        prefixer = urlresolvers.get_url_prefix()
        prefixer.app = supported[0].short
        return http.HttpResponsePermanentRedirect(reverse(
            'addons.detail', args=[addon.slug]))

    # addon needs to have a version and be valid for this app.
    if not addon.current_version:
        raise http.Http404
    return extension_detail(request, addon)
@vary_on_headers('X-Requested-With')
def extension_detail(request, addon):
    """Extensions details page."""
    # If current version is incompatible with this app, redirect.
    comp_apps = addon.compatible_apps
    if comp_apps and request.APP not in comp_apps:
        prefixer = urlresolvers.get_url_prefix()
        # NOTE(review): ``dict.keys()[0]`` is Python 2 only; under Python 3
        # this would need ``list(comp_apps)[0]``.
        prefixer.app = comp_apps.keys()[0].short
        return redirect('addons.detail', addon.slug, permanent=True)

    # Addon recommendations.
    recommended = Addon.objects.listed(request.APP).filter(
        recommended_for__addon=addon)[:6]

    ctx = {
        'addon': addon,
        'src': request.GET.get('src', 'dp-btn-primary'),
        'version_src': request.GET.get('src', 'dp-btn-version'),
        'tags': addon.tags.not_blacklisted(),
        'recommendations': recommended,
        'reviews': Review.objects.valid().filter(addon=addon, is_latest=True),
        'get_replies': Review.get_replies,
        'abuse_form': AbuseForm(request=request),
    }

    # details.html just returns the top half of the page for speed. The bottom
    # does a lot more queries we don't want on the initial page load.
    if request.is_ajax():
        # Other add-ons/apps from the same author(s).
        ctx['author_addons'] = addon.authors_other_addons(app=request.APP)[:6]
        return render(request, 'addons/impala/details-more.html', ctx)
    else:
        if addon.is_webapp():
            ctx['search_placeholder'] = 'apps'
        return render(request, 'addons/impala/details.html', ctx)
@mobilized(extension_detail)
def extension_detail(request, addon):
    """Mobile rendering of the extension details page."""
    return render(request, 'addons/mobile/details.html', {'addon': addon})
class BaseFilter(object):
    """
    Filters help generate querysets for add-on listings.

    You have to define ``opts`` on the subclass as a sequence of (key, title)
    pairs. The key is used in GET parameters and the title can be used in the
    view.

    The chosen filter field is combined with the ``base`` queryset using
    the ``key`` found in request.GET. ``default`` should be a key in ``opts``
    that's used if nothing good is found in request.GET.
    """

    def __init__(self, request, base, key, default, model=Addon):
        # ``extras`` is an optional subclass attribute holding additional
        # (key, title) pairs that are selectable but not part of ``opts``.
        self.opts_dict = dict(self.opts)
        self.extras_dict = dict(self.extras) if hasattr(self, 'extras') else {}
        self.request = request
        self.base_queryset = base
        self.key = key
        self.model = model
        self.field, self.title = self.options(self.request, key, default)
        self.qs = self.filter(self.field)

    def options(self, request, key, default):
        """Get the (option, title) pair we want according to the request."""
        if key in request.GET and (request.GET[key] in self.opts_dict or
                                   request.GET[key] in self.extras_dict):
            opt = request.GET[key]
        else:
            opt = default
        if opt in self.opts_dict:
            title = self.opts_dict[opt]
        else:
            title = self.extras_dict[opt]
        return opt, title

    def all(self):
        """Get a full mapping of {option: queryset}."""
        return dict((field, self.filter(field)) for field in dict(self.opts))

    def filter(self, field):
        """Get the queryset for the given field."""
        # NOTE: the local name shadows the ``filter`` builtin.
        filter = self._filter(field) & self.base_queryset
        order = getattr(self, 'order_%s' % field, None)
        if order:
            return order(filter)
        return filter

    def _filter(self, field):
        # Dispatch to the matching ``filter_<field>`` method below.
        return getattr(self, 'filter_%s' % field)()

    def filter_featured(self):
        ids = self.model.featured_random(self.request.APP, self.request.LANG)
        return manual_order(self.model.objects, ids, 'addons.id')

    def filter_price(self):
        return self.model.objects.order_by('addonpremium__price__price', 'id')

    def filter_free(self):
        # Addon querysets are scoped per app; other models are not.
        if self.model == Addon:
            return self.model.objects.top_free(self.request.APP, listed=False)
        else:
            return self.model.objects.top_free(listed=False)

    def filter_paid(self):
        # Addon querysets are scoped per app; other models are not.
        if self.model == Addon:
            return self.model.objects.top_paid(self.request.APP, listed=False)
        else:
            return self.model.objects.top_paid(listed=False)

    def filter_popular(self):
        return (self.model.objects.order_by('-weekly_downloads')
                .with_index(addons='downloads_type_idx'))

    def filter_downloads(self):
        # Alias: "downloads" sorts the same way as "popular".
        return self.filter_popular()

    def filter_users(self):
        return (self.model.objects.order_by('-average_daily_users')
                .with_index(addons='adus_type_idx'))

    def filter_created(self):
        return (self.model.objects.order_by('-created')
                .with_index(addons='created_type_idx'))

    def filter_updated(self):
        return (self.model.objects.order_by('-last_updated')
                .with_index(addons='last_updated_type_idx'))

    def filter_rating(self):
        return (self.model.objects.order_by('-bayesian_rating')
                .with_index(addons='rating_type_idx'))

    def filter_hotness(self):
        return self.model.objects.order_by('-hotness')

    def filter_name(self):
        # Names are translated, so ordering goes through the translations
        # machinery rather than a plain column sort.
        return order_by_translation(self.model.objects.all(), 'name')
class ESBaseFilter(BaseFilter):
    """BaseFilter that uses elasticsearch."""

    def __init__(self, request, base, key, default):
        super(ESBaseFilter, self).__init__(request, base, key, default)

    def filter(self, field):
        """Order the base queryset by the elasticsearch sort key for ``field``."""
        sort_fields = {
            'name': 'name_sort',
            'created': '-created',
            'updated': '-last_updated',
            'popular': '-weekly_downloads',
            'users': '-average_daily_users',
            'rating': '-bayesian_rating',
        }
        return self.base_queryset.order_by(sort_fields[field])
class HomepageFilter(BaseFilter):
    """Filter used for the homepage add-on listings."""
    opts = (('featured', _lazy(u'Featured')),
            ('popular', _lazy(u'Popular')),
            ('new', _lazy(u'Recently Added')),
            ('updated', _lazy(u'Recently Updated')))

    # 'new' sorts by creation date; reuse the base implementation.
    filter_new = BaseFilter.filter_created
# Define a placeholder home response until we can hunt down all the places
# it is called from and remove them.
def home(request):
    """Placeholder homepage view; always returns the literal body 'home'."""
    return http.HttpResponse('home')
@addon_view
def eula(request, addon, file_id=None):
    """Show the add-on's EULA, optionally for a specific file's version."""
    if not addon.eula:
        return http.HttpResponseRedirect(addon.get_url_path())
    version = (get_object_or_404(addon.versions, files__id=file_id)
               if file_id else addon.current_version)
    return render(request, 'addons/eula.html',
                  {'addon': addon, 'version': version})
@addon_view
def privacy(request, addon):
    """Show the add-on's privacy policy, or bounce back if it has none."""
    if addon.privacy_policy:
        return render(request, 'addons/privacy.html', {'addon': addon})
    return http.HttpResponseRedirect(addon.get_url_path())
@addon_view
def developers(request, addon, page):
    """Render the developers page for an add-on.

    ``page`` selects the page flavour and the (src, contribution_src)
    tracking pair used when not overridden by ``?src=``.
    """
    if 'version' in request.GET:
        qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
        version = get_list_or_404(qs, version=request.GET['version'])[0]
    else:
        version = addon.current_version
    if 'src' in request.GET:
        contribution_src = src = request.GET['src']
    else:
        page_srcs = {
            'developers': ('developers', 'meet-developers'),
            'installed': ('meet-the-developer-post-install', 'post-download'),
            'roadblock': ('meetthedeveloper_roadblock', 'roadblock'),
        }
        # Download src and contribution_src are different.
        # NOTE(review): ``page_srcs.get(page)`` returns None for an unknown
        # page, making the unpacking raise TypeError -- confirm URL routing
        # restricts ``page`` to the keys above.
        src, contribution_src = page_srcs.get(page)
    return render(request, 'addons/impala/developers.html',
                  {'addon': addon, 'page': page, 'src': src,
                   'contribution_src': contribution_src,
                   'version': version})
@addon_view
def license(request, addon, version=None):
    """Show the license for a given (or the current) version of the add-on."""
    if version is None:
        version = addon.current_version
    else:
        qs = addon.versions.filter(files__status__in=amo.VALID_STATUSES)
        version = get_list_or_404(qs, version=version)[0]
    if not (version and version.license):
        raise http.Http404
    return render(request, 'addons/impala/license.html',
                  dict(addon=addon, version=version))
def license_redirect(request, version):
    """Permanently redirect a bare version pk to its canonical license URL."""
    version = get_object_or_404(Version, pk=version)
    return redirect(version.license_url(), permanent=True)
@session_csrf.anonymous_csrf_exempt
@addon_view
def report_abuse(request, addon):
    """Render the abuse-report form; on a valid POST, file the report."""
    form = AbuseForm(request.POST or None, request=request)
    if request.method != "POST" or not form.is_valid():
        return render(request, 'addons/report_abuse_full.html',
                      {'addon': addon, 'abuse_form': form})
    send_abuse_report(request, addon, form.cleaned_data['text'])
    messages.success(request, _('Abuse reported.'))
    return http.HttpResponseRedirect(addon.get_url_path())
| |
#!python
"""
AndroidTabs
===========
AndroidTabs tries to reproduce the behaviour of Android Tabs.
It allows you to create your own custom tabbed panel
with an animated tab indicator in an easy way.
Just create your tabs that must inherit from AndroidTabsBase
and add them to an AndroidTabs instance.
class MyTab(BoxLayout, AndroidTabsBase):
pass
android_tabs = AndroidTabs()
for n in range(1,6):
tab = MyTab(text='Tab %s' % n)
tab.add_widget(Button(text='Button %s' % n))
android_tabs.add_widget(tab)
"""
from kivy.app import App
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.behaviors import ToggleButtonBehavior
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.anchorlayout import AnchorLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.carousel import Carousel
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.scrollview import ScrollView
from kivy.graphics import Color, Rectangle
from kivy.utils import boundary
from kivy.properties import (
ObjectProperty,
NumericProperty,
VariableListProperty,
StringProperty,
AliasProperty,
BooleanProperty,
BoundedNumericProperty,
ReferenceListProperty
)
class AndroidTabsException(Exception):
    '''Error raised when the AndroidTabs widget API is misused.'''
class AndroidTabsLabel(ToggleButtonBehavior, Label):
    '''
    Clickable label shown in the tab bar for a single tab.
    Pressing it loads the associated tab slide in the carousel.
    '''
    text_color_normal = VariableListProperty([1, 1, 1, .6])
    '''
    Text color of the label when it is not selected.
    '''
    text_color_active = VariableListProperty([1])
    '''
    Text color of the label when it is selected.
    '''
    # The AndroidTabsBase instance this label represents.
    tab = ObjectProperty(None)
    # The AndroidTabsBar hosting this label; assigned when the tab is
    # added to the AndroidTabs widget.
    tab_bar = ObjectProperty(None)
    def __init__(self, **kwargs):
        super(AndroidTabsLabel, self).__init__(**kwargs)
        # Minimum width needed to show the text; refreshed from the
        # rendered texture in on_texture.
        self.min_space = 0
    def on_release(self):
        # if the label is selected load the relative tab from carousel
        if self.state == 'down':
            self.tab_bar.parent.carousel.load_slide(self.tab)
    def on_texture(self, widget, texture):
        # just save the minimum width of the label based of the content
        if texture:
            self.width = texture.width
            self.min_space = self.width
    def _trigger_update_tab_indicator(self):
        # update the position and size of the indicator
        # when the label changes size or position
        if self.state == 'down':
            self.tab_bar.update_indicator(self.x, self.width)
class AndroidTabsBase(Widget):
    '''
    AndroidTabsBase allows you to create a tab.
    You must create a new class that inherits
    from AndroidTabsBase.
    In this way you have total control over the
    views of your tabbed panel.
    '''
    text = StringProperty('')
    '''
    It will be the label text of the tab.
    '''
    tab_label = ObjectProperty(None)
    '''
    It is the label object reference of the tab.
    '''
    def __init__(self, **kwargs):
        # Create the label BEFORE calling super().__init__ so that the
        # on_text handler (which may fire while kwargs are applied) can
        # safely write to self.tab_label.
        self.tab_label = AndroidTabsLabel(tab=self)
        super(AndroidTabsBase, self).__init__(**kwargs)
    def on_text(self, widget, text):
        # set the label text (keep the tab-bar label in sync)
        self.tab_label.text = self.text
class AndroidTabsMain(BoxLayout):
    '''
    Box layout that contains the carousel, giving users a handle on
    the slide area of the tabbed panel.
    '''
class AndroidTabsCarousel(Carousel):
    '''Carousel subclass used as the slide container of AndroidTabs.'''
class AndroidTabsScrollView(ScrollView):
    '''
    ScrollView variant that keeps the scroll effects consistent when
    scroll_x / scroll_y are set programmatically.
    '''
    def goto(self, scroll_x, scroll_y):
        '''Set scroll_x / scroll_y and update the matching effect values.'''
        def _sync(effect, fraction):
            # Mirror the new scroll fraction into the kinetic effect so
            # it does not fight the manual assignment.
            if effect:
                effect.value = (effect.max + effect.min) * fraction
        if scroll_x is not None:
            self.scroll_x = scroll_x
            _sync(self.effect_x, scroll_x)
        if scroll_y is not None:
            self.scroll_y = scroll_y
            _sync(self.effect_y, scroll_y)
class AndroidTabsBar(BoxLayout):
    '''
    Box layout hosting the scrollable row of tab labels and the animated
    tab indicator.  It resizes the labels when needed, autoscrolls the
    bar while swiping and drives the indicator animation.
    '''
    target = ObjectProperty(None, allownone=True)
    '''
    Is the carousel reference of the next tab / slide.
    When you go from "Tab A" to "Tab B", "Tab B" will be the
    target tab / slide of the carousel.
    '''
    def get_rect_instruction(self):
        # The indicator is the Rectangle instruction drawn in the
        # layout's canvas.after (see the <AndroidTabs> kv rule).
        for i in self.layout.canvas.after.children:
            if isinstance(i, Rectangle):
                return i
    indicator = AliasProperty(
        get_rect_instruction,
        cache=True)
    '''
    Is the Rectangle instruction reference
    of the tab indicator.
    '''
    def get_last_scroll_x(self):
        return self.scrollview.scroll_x
    last_scroll_x = AliasProperty(
        get_last_scroll_x,
        bind=('target', ),
        cache=True)
    '''
    scroll_x of the tab bar at the moment the current swipe target was
    set (cached, re-read when ``target`` changes); used as the starting
    point of the autoscroll animation.
    '''
    def __init__(self, **kwargs):
        # Callable clock event that coalesces label resize/move requests
        # into a single _update_tab_bar call on the next frame.
        self._trigger_update_tab_bar = Clock.schedule_once(
            self._update_tab_bar, 0)
        super(AndroidTabsBar, self).__init__(**kwargs)
    def _update_tab_bar(self, *args):
        '''Resize the tab labels to sensibly fill the bar.

        If the labels' natural widths overflow the bar they keep their
        minimum widths (the bar scrolls); otherwise they are stretched
        proportionally, or get an equal share when every label fits in
        width / len(tabs).
        '''
        width, tabs = self.scrollview.width, self.layout.children
        tabs_widths = [t.min_space for t in tabs if t.min_space]
        tabs_space = float(sum(tabs_widths))
        if not tabs_space:
            return
        ratio = width / tabs_space
        use_ratio = True in (width / len(tabs) < w for w in tabs_widths)
        for t in tabs:
            t.width = t.min_space if tabs_space > width \
                else t.min_space * ratio if use_ratio is True \
                else width / len(tabs)
    def update_indicator(self, x, w):
        # Move and resize the indicator rectangle under the active label.
        self.indicator.pos = (x, 0)
        self.indicator.size = (w, self.indicator.size[1])
    def tab_bar_autoscroll(self, target, step):
        '''Scroll the bar a fraction ``step`` toward centering ``target``.'''
        bound_left = self.center_x
        bound_right = self.layout.width - bound_left
        dt = target.center_x - bound_left
        sx, sy = self.scrollview.convert_distance_to_scroll(dt, 0)
        # last scroll x of the tab bar
        lsx = self.last_scroll_x
        # determine scroll direction
        scroll_is_late = lsx < sx
        # distance to run
        dst = abs(lsx - sx) * step
        if not dst:
            return
        if scroll_is_late and target.center_x > bound_left:
            x = lsx + dst
        elif not scroll_is_late and target.center_x < bound_right:
            x = lsx - dst
        else:
            # BUG FIX: the target label is already within the visible
            # bounds, so no scrolling is needed.  The original code fell
            # through here with `x` unbound and crashed with
            # UnboundLocalError on the boundary() call below.
            return
        x = boundary(x, 0.0, 1.0)
        self.scrollview.goto(x, None)
    def android_animation(self, carousel, offset):
        '''Drive autoscroll and the indicator while the carousel swipes.'''
        # Animate only while a swipe is actually in progress.
        if offset != 0 and abs(offset) < carousel.width:
            forward = offset < 0
            offset = abs(offset)
            step = offset / float(carousel.width)
            distance = abs(offset - carousel.width)
            threshold = self.parent.anim_threshold
            breakpoint = carousel.width - (carousel.width * threshold)
            traveled = distance / breakpoint if breakpoint else 0
            break_step = 1.0 - traveled
            indicator_animation = self.parent.tab_indicator_anim
            skip_slide = carousel.slides[carousel._skip_slide] \
                if carousel._skip_slide is not None else None
            next_slide = carousel.next_slide \
                if forward else carousel.previous_slide
            self.target = skip_slide if skip_slide else next_slide
            if not self.target:
                return
            a = carousel.current_slide.tab_label
            b = self.target.tab_label
            self.tab_bar_autoscroll(b, step)
            if not indicator_animation:
                return
            # Up to the threshold the indicator stretches from the current
            # label toward the target; past it, it shrinks onto the target.
            if step <= threshold:
                if forward:
                    gap_w = abs((a.x + a.width) - (b.x + b.width))
                    w_step = a.width + (gap_w * step)
                    x_step = a.x
                else:
                    gap = abs((a.x - b.x))
                    x_step = a.x - gap * step
                    w_step = a.width + gap * step
            else:
                if forward:
                    x_step = a.x + abs((a.x - b.x)) * break_step
                    gap_w = abs((a.x + a.width) - (b.x + b.width))
                    ind_width = a.width + gap_w * threshold
                    gap_w = ind_width - b.width
                    w_step = ind_width - (gap_w * break_step)
                else:
                    x_step = a.x - abs((a.x - b.x)) * threshold
                    x_step = x_step - abs(x_step - b.x) * break_step
                    ind_width = (a.x + a.width) - x_step if threshold else a.width
                    gap_w = ind_width - b.width
                    w_step = ind_width - (gap_w * break_step)
                    w_step = w_step if w_step + x_step <= a.x + a.width \
                        else ind_width
            self.update_indicator(x_step, w_step)
class AndroidTabs(AnchorLayout):
    '''
    The AndroidTabs class.
    You can use it to create your own custom tabbed panel.
    '''
    default_tab = NumericProperty(0)
    '''
    Index of the default tab. Default to 0.
    '''
    tab_bar_height = NumericProperty('48dp')
    '''
    Height of the tab bar.
    '''
    tab_indicator_anim = BooleanProperty(True)
    '''
    Tab indicator animation. Default to True.
    If you do not want animation set it to False.
    '''
    tab_indicator_height = NumericProperty('2dp')
    '''
    Height of the tab indicator.
    '''
    tab_indicator_color = VariableListProperty([1])
    '''
    Color of the tab indicator.
    '''
    anim_duration = NumericProperty(0.2)
    '''
    Duration of the slide animation. Default to 0.2.
    '''
    anim_threshold = BoundedNumericProperty(
        0.8, min=0.0, max=1.0,
        errorhandler=lambda x: 0.0 if x < 0.0 else 1.0)
    '''
    Animation threshold allow you to change
    the tab indicator animation effect.
    Default to 0.8.
    '''
    def on_carousel_index(self, carousel, index):
        # when the index of the carousel change, update
        # tab indicator, select the current tab and reset threshold data.
        current_tab_label = carousel.current_slide.tab_label
        if current_tab_label.state == 'normal':
            # _do_press toggles the label without firing on_release, so
            # it does not loop back into the carousel.
            current_tab_label._do_press()
        self.tab_bar.update_indicator(
            current_tab_label.x,
            current_tab_label.width)
    def add_widget(self, widget):
        # You can add only subclass of AndroidTabsBase.
        # The first two children (added by the kv rule) are the internal
        # AndroidTabsMain and AndroidTabsBar; anything added after that
        # must be a tab and is routed into the carousel and the tab bar.
        if len(self.children) >= 2:
            if not issubclass(widget.__class__, AndroidTabsBase):
                raise AndroidTabsException(
                    'AndroidTabs accept only subclass of AndroidTabsBase')
            widget.tab_label.tab_bar = self.tab_bar
            self.tab_bar.layout.add_widget(widget.tab_label)
            self.carousel.add_widget(widget)
            return
        return super(AndroidTabs, self).add_widget(widget)
    def remove_widget(self, widget):
        # You can remove only subclass of AndroidTabsBase.
        if not issubclass(widget.__class__, AndroidTabsBase):
            raise AndroidTabsException(
                'AndroidTabs can remove only subclass of AndroidTabBase')
        # Tabs live inside the carousel; remove both the label and the
        # slide.
        if widget.parent.parent == self.carousel:
            self.tab_bar.layout.remove_widget(widget.tab_label)
            self.carousel.remove_widget(widget)
# Default kv rules for the AndroidTabs widget tree.
# BUG FIX: the label's `color` rule previously used "self.state is 'down'",
# comparing a string by identity; that relies on CPython string interning
# and raises a SyntaxWarning on Python 3.8+.  It now uses '=='.
Builder.load_string('''
#:import DampedScrollEffect kivy.effects.dampedscroll.DampedScrollEffect
<AndroidTabsLabel>:
    size_hint: None, 1
    halign: 'center'
    padding: '12dp', 0
    group: 'tabs'
    allow_no_selection: False
    text_color_normal: 1, 1, 1, .6
    text_color_active: 1, 1, 1, 1
    color: self.text_color_active if self.state == 'down' \\
        else self.text_color_normal
    on_x: self._trigger_update_tab_indicator()
    on_width: self._trigger_update_tab_indicator()
<AndroidTabsScrollView>:
    size_hint: 1, 1
    do_scroll_y: False
    bar_color: 0, 0, 0, 0
    bar_inactive_color: 0, 0, 0, 0
    bar_width: 0
    effect_cls: DampedScrollEffect
<AndroidTabs>:
    carousel: carousel
    tab_bar: tab_bar
    anchor_y: 'top'
    AndroidTabsMain:
        padding: 0, tab_bar.height, 0, 0
        AndroidTabsCarousel:
            id: carousel
            anim_move_duration: root.anim_duration
            on_index: root.on_carousel_index(*args)
            on__offset: tab_bar.android_animation(*args)
            on_slides: self.index = root.default_tab
            on_slides: root.on_carousel_index(self, 0)
    AndroidTabsBar:
        id: tab_bar
        carousel: carousel
        scrollview: scrollview
        layout: layout
        size_hint: 1, None
        height: root.tab_bar_height
        AndroidTabsScrollView:
            id: scrollview
            on_width: tab_bar._trigger_update_tab_bar()
            GridLayout:
                id: layout
                rows: 1
                size_hint: None, 1
                width: self.minimum_width
                on_width: tab_bar._trigger_update_tab_bar()
                canvas.after:
                    Color:
                        rgba: root.tab_indicator_color
                    Rectangle:
                        pos: self.pos
                        size: 0, root.tab_indicator_height
''')
kvdemo = '''
#:import get_color_from_hex kivy.utils.get_color_from_hex
<AndroidTabsBar>:
canvas.before:
Color:
rgba: get_color_from_hex('#03A9F4')
Rectangle:
pos: self.pos
size: self.size
# you can add a bit of shade if you want
Color:
rgba: 0,0,0,.3
Rectangle:
pos: self.pos[0], self.pos[1] - 1
size: self.size[0], 1
Color:
rgba: 0,0,0,.2
Rectangle:
pos: self.pos[0], self.pos[1] - 2
size: self.size[0], 1
Color:
rgba: 0,0,0,.05
Rectangle:
pos: self.pos[0], self.pos[1] - 3
size: self.size[0], 1
<MyTab>:
Button:
text: root.text
'''
if __name__ == '__main__':
    class MyTab(BoxLayout, AndroidTabsBase):
        '''Minimal demo tab: a box layout that is also a tab.'''

    class Example(App):
        '''Small demo application showing five tabs.'''
        def build(self):
            Builder.load_string(kvdemo)
            android_tabs = AndroidTabs()
            for number in range(1, 6):
                android_tabs.add_widget(MyTab(text='TAB %s' % number))
            return android_tabs

    Example().run()
| |
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import re
import os
import socket
from powerline.lib.url import urllib_read
from powerline.lib.threaded import ThreadedSegment, KwThreadedSegment
from powerline.lib.monotonic import monotonic
from powerline.lib.humanize_bytes import humanize_bytes
from powerline.segments import with_docstring
from powerline.theme import requires_segment_info
@requires_segment_info
def hostname(pl, segment_info, only_if_ssh=False, exclude_domain=False):
    '''Return the current hostname.

    :param bool only_if_ssh:
        only return the hostname if currently in an SSH session
    :param bool exclude_domain:
        return the hostname without domain if there is one
    '''
    if only_if_ssh and not segment_info['environ'].get('SSH_CLIENT'):
        return None
    name = socket.gethostname()
    return name.split('.')[0] if exclude_domain else name
def _external_ip(query_url='http://ipv4.icanhazip.com/'):
    # Fetch the URL and strip the trailing newline/whitespace.
    response = urllib_read(query_url)
    return response.strip()
class ExternalIpSegment(ThreadedSegment):
    '''Threaded segment that periodically looks up the external IP.'''
    # Re-query the service every five minutes.
    interval = 300
    def set_state(self, query_url='http://ipv4.icanhazip.com/', **kwargs):
        self.query_url = query_url
        super(ExternalIpSegment, self).set_state(**kwargs)
    def update(self, old_ip):
        # The previous value is irrelevant; always fetch a fresh address.
        return _external_ip(query_url=self.query_url)
    def render(self, ip, **kwargs):
        if ip:
            return [{'contents': ip, 'divider_highlight_group': 'background:divider'}]
        return None
external_ip = with_docstring(ExternalIpSegment(),
'''Return external IP address.
:param str query_url:
URI to query for IP address, should return only the IP address as a text string
Suggested URIs:
* http://ipv4.icanhazip.com/
* http://ipv6.icanhazip.com/
* http://icanhazip.com/ (returns IPv6 address if available, else IPv4)
Divider highlight group used: ``background:divider``.
''')
# 'netifaces' is an optional dependency: without it internal_ip degrades
# to a stub that always returns None.
try:
    import netifaces
except ImportError:
    def internal_ip(pl, interface='auto', ipv=4):
        # netifaces is unavailable, so nothing can be detected.
        return None
else:
    # Relative priority of interface-name prefixes for auto-detection:
    # higher scores are preferred; negative scores effectively exclude
    # loopback and virtual bridges.
    _interface_starts = {
        'eth': 10,  # Regular ethernet adapters : eth1
        'enp': 10,  # Regular ethernet adapters, Gentoo : enp2s0
        'ath': 9,  # Atheros WiFi adapters : ath0
        'wlan': 9,  # Other WiFi adapters : wlan1
        'wlp': 9,  # Other WiFi adapters, Gentoo : wlp5s0
        'teredo': 1,  # miredo interface : teredo
        'lo': -10,  # Loopback interface : lo
        'docker': -5,  # Docker bridge interface : docker0
        'vmnet': -5,  # VMWare bridge interface : vmnet1
        'vboxnet': -5,  # VirtualBox bridge interface : vboxnet0
    }
    # Splits an interface name into its alphabetic prefix and trailing
    # number (if any).
    _interface_start_re = re.compile(r'^([a-z]+?)(\d|$)')
    def _interface_key(interface):
        # Sort key: prefix score * 100 minus the interface number, so
        # lower-numbered interfaces of the same type win.  Unknown
        # prefixes score 500; names that do not match at all score 0.
        match = _interface_start_re.match(interface)
        if match:
            try:
                base = _interface_starts[match.group(1)] * 100
            except KeyError:
                base = 500
            if match.group(2):
                return base - int(match.group(2))
            else:
                return base
        else:
            return 0
    def internal_ip(pl, interface='auto', ipv=4):
        if interface == 'auto':
            try:
                # Pick the highest-priority interface per _interface_key.
                interface = next(iter(sorted(netifaces.interfaces(), key=_interface_key, reverse=True)))
            except StopIteration:
                pl.info('No network interfaces found')
                return None
        addrs = netifaces.ifaddresses(interface)
        try:
            return addrs[netifaces.AF_INET6 if ipv == 6 else netifaces.AF_INET][0]['addr']
        except (KeyError, IndexError):
            # The interface has no address of the requested family.
            return None
internal_ip = with_docstring(internal_ip,
'''Return internal IP address
Requires ``netifaces`` module to work properly.
:param str interface:
    Interface on which IP will be checked. Use ``auto`` to automatically
    detect interface. In this case interfaces with lower numbers will be
    preferred over interfaces with similar names. Order of preference based on
    names:
    #. ``eth`` and ``enp`` followed by number or the end of string.
    #. ``ath``, ``wlan`` and ``wlp`` followed by number or the end of string.
    #. ``teredo`` followed by number or the end of string.
    #. Any other interface that is not ``lo*``.
    #. ``lo`` followed by number or the end of string.
:param int ipv:
    4 or 6 for ipv4 and ipv6 respectively, depending on which IP address you
    need exactly.
''')
# Prefer psutil for interface byte counters (portable); fall back to
# reading the Linux sysfs statistics files when psutil is missing.
try:
    import psutil
    def _get_bytes(interface):
        # Return (bytes_received, bytes_sent) for the interface, or None
        # if psutil does not know about it.
        try:
            io_counters = psutil.net_io_counters(pernic=True)
        except AttributeError:
            # Older psutil releases used a different function name.
            io_counters = psutil.network_io_counters(pernic=True)
        if_io = io_counters.get(interface)
        if not if_io:
            return None
        return if_io.bytes_recv, if_io.bytes_sent
    def _get_interfaces():
        # Yield (name, bytes_received, bytes_sent) for every interface
        # that has counters.
        try:
            io_counters = psutil.net_io_counters(pernic=True)
        except AttributeError:
            io_counters = psutil.network_io_counters(pernic=True)
        for interface, data in io_counters.items():
            if data:
                yield interface, data.bytes_recv, data.bytes_sent
except ImportError:
    def _get_bytes(interface):
        # Linux-only fallback: read cumulative counters from sysfs.
        with open('/sys/class/net/{interface}/statistics/rx_bytes'.format(interface=interface), 'rb') as file_obj:
            rx = int(file_obj.read())
        with open('/sys/class/net/{interface}/statistics/tx_bytes'.format(interface=interface), 'rb') as file_obj:
            tx = int(file_obj.read())
        return (rx, tx)
    def _get_interfaces():
        for interface in os.listdir('/sys/class/net'):
            x = _get_bytes(interface)
            if x is not None:
                yield interface, x[0], x[1]
class NetworkLoadSegment(KwThreadedSegment):
    '''Compute download/upload rates for a network interface.

    Samples are shared per interface name: each update keeps the
    previous and the latest ``(timestamp, (rx, tx))`` pair so that a
    rate can be derived in render_one.
    '''
    # Sample storage shared by all render calls, keyed by interface name.
    interfaces = {}
    # Strips the numeric suffix from an interface name (eth0 -> eth).
    replace_num_pat = re.compile(r'[a-zA-Z]+')
    @staticmethod
    def key(interface='auto', **kwargs):
        return interface
    def compute_state(self, interface):
        '''Take a new byte-counter sample for *interface*.

        Resolves ``'auto'`` via the default route in /proc/net/route,
        falling back to the most active non-virtual interface.  Returns
        a copy of the per-interface sample dict.
        '''
        if interface == 'auto':
            proc_exists = getattr(self, 'proc_exists', None)
            if proc_exists is None:
                proc_exists = self.proc_exists = os.path.exists('/proc/net/route')
            if proc_exists:
                # Look for default interface in routing table
                with open('/proc/net/route', 'rb') as f:
                    for line in f.readlines():
                        parts = line.split()
                        if len(parts) > 1:
                            iface, destination = parts[:2]
                            # The default route has an all-zero destination.
                            if not destination.replace(b'0', b''):
                                interface = iface.decode('utf-8')
                                break
            if interface == 'auto':
                # Choose interface with most total activity, excluding some
                # well known interface names
                interface, total = 'eth0', -1
                for name, rx, tx in _get_interfaces():
                    base = self.replace_num_pat.match(name)
                    if None in (base, rx, tx) or base.group() in ('lo', 'vmnet', 'sit'):
                        continue
                    activity = rx + tx
                    if activity > total:
                        total = activity
                        interface = name
        try:
            idata = self.interfaces[interface]
            try:
                idata['prev'] = idata['last']
            except KeyError:
                pass
        except KeyError:
            idata = {}
            if self.run_once:
                # No update thread: take the two samples needed for a
                # rate right here, `interval` seconds apart.
                idata['prev'] = (monotonic(), _get_bytes(interface))
                self.shutdown_event.wait(self.interval)
            self.interfaces[interface] = idata
        idata['last'] = (monotonic(), _get_bytes(interface))
        return idata.copy()
    def render_one(self, idata, recv_format='DL {value:>8}', sent_format='UL {value:>8}', suffix='B/s', si_prefix=False, **kwargs):
        '''Render the receive and send rates from the last two samples.

        Returns None until two valid samples exist.  Gradient levels are
        added when ``recv_max`` / ``sent_max`` are passed in kwargs.
        '''
        if not idata or 'prev' not in idata:
            return None
        t1, b1 = idata['prev']
        t2, b2 = idata['last']
        measure_interval = t2 - t1
        if None in (b1, b2):
            return None
        r = []
        # Select the format string explicitly instead of the original
        # fragile locals()[key + '_format'] introspection.
        for i, rate_key, fmt in ((0, 'recv', recv_format), (1, 'sent', sent_format)):
            try:
                value = (b2[i] - b1[i]) / measure_interval
            except ZeroDivisionError:
                self.warn('Measure interval zero.')
                value = 0
            max_key = rate_key + '_max'
            is_gradient = max_key in kwargs
            hl_groups = ['network_load_' + rate_key, 'network_load']
            if is_gradient:
                # Gradient variants take precedence over the plain groups.
                hl_groups[:0] = (group + '_gradient' for group in hl_groups)
            r.append({
                'contents': fmt.format(value=humanize_bytes(value, suffix, si_prefix)),
                'divider_highlight_group': 'network_load:divider',
                'highlight_groups': hl_groups,
            })
            if is_gradient:
                # Renamed from `max`, which shadowed the builtin.
                max_value = kwargs[max_key]
                if value >= max_value:
                    r[-1]['gradient_level'] = 100
                else:
                    r[-1]['gradient_level'] = value * 100.0 / max_value
        return r
network_load = with_docstring(NetworkLoadSegment(),
'''Return the network load.
Uses the ``psutil`` module if available for multi-platform compatibility,
falls back to reading
:file:`/sys/class/net/{interface}/statistics/{rx,tx}_bytes`.
:param str interface:
Network interface to measure (use the special value "auto" to have powerline
try to auto-detect the network interface).
:param str suffix:
String appended to each load string.
:param bool si_prefix:
Use SI prefix, e.g. MB instead of MiB.
:param str recv_format:
Format string that determines how download speed should look like. Receives
``value`` as argument.
:param str sent_format:
Format string that determines how upload speed should look like. Receives
``value`` as argument.
:param float recv_max:
Maximum number of received bytes per second. Is only used to compute
gradient level.
:param float sent_max:
Maximum number of sent bytes per second. Is only used to compute gradient
level.
Divider highlight group used: ``network_load:divider``.
Highlight groups used: ``network_load_sent_gradient`` (gradient) or ``network_load_recv_gradient`` (gradient) or ``network_load_gradient`` (gradient), ``network_load_sent`` or ``network_load_recv`` or ``network_load``.
''')
| |
# Copyright (c) 2010 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Scheduler Service
"""
from oslo.config import cfg
from oslo import messaging
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova.conductor.tasks import live_migrate
from nova import exception
from nova import manager
from nova.objects import instance as instance_obj
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import periodic_task
from nova import quota
from nova.scheduler import utils as scheduler_utils
LOG = logging.getLogger(__name__)
# Config options controlling which scheduler driver is loaded and how
# often its periodic tasks run.
scheduler_driver_opts = [
    cfg.StrOpt('scheduler_driver',
               default='nova.scheduler.filter_scheduler.FilterScheduler',
               help='Default driver to use for the scheduler'),
    cfg.IntOpt('scheduler_driver_task_period',
               default=60,
               help='How often (in seconds) to run periodic tasks in '
                    'the scheduler driver of your choice. '
                    'Please note this is likely to interact with the value '
                    'of service_down_time, but exactly how they interact '
                    'will depend on your choice of scheduler driver.'),
]
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
# Global quota engine, used to roll back and expire reservations.
QUOTAS = quota.QUOTAS
class SchedulerManager(manager.Manager):
    """Chooses a host to run instances on.

    Served over RPC as version 2.9; a v3.0 interface is provided by
    _SchedulerManagerV3Proxy, registered as an additional endpoint.
    """
    target = messaging.Target(version='2.9')
    def __init__(self, scheduler_driver=None, *args, **kwargs):
        # The driver is configurable; fall back to the scheduler_driver
        # config option when none is passed explicitly.
        if not scheduler_driver:
            scheduler_driver = CONF.scheduler_driver
        self.driver = importutils.import_object(scheduler_driver)
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        super(SchedulerManager, self).__init__(service_name='scheduler',
                                               *args, **kwargs)
        # Expose the v3.0 RPC interface alongside the 2.x one.
        self.additional_endpoints.append(_SchedulerManagerV3Proxy(self))
    def create_volume(self, context, volume_id, snapshot_id,
                      reservations=None, image_id=None):
        # Function removed in RPC API 2.3; kept as a no-op so that older
        # clients calling it do not error out.
        pass
    @messaging.expected_exceptions(exception.NoValidHost,
                                   exception.ComputeServiceUnavailable,
                                   exception.InvalidHypervisorType,
                                   exception.UnableToMigrateToSelf,
                                   exception.DestinationHypervisorTooOld,
                                   exception.InvalidLocalStorage,
                                   exception.InvalidSharedStorage,
                                   exception.MigrationPreCheckError)
    def live_migration(self, context, instance, dest,
                       block_migration, disk_over_commit, pclm):
        """Schedule a live migration, recording failures on the instance.

        Expected scheduling failures keep the instance's current
        vm_state (only the task state is cleared); any other exception
        puts the instance into ERROR.  The exception is always
        re-raised.
        """
        try:
            self._schedule_live_migration(context, instance, dest,
                block_migration, disk_over_commit, pclm)
        except (exception.NoValidHost,
                exception.ComputeServiceUnavailable,
                exception.InvalidHypervisorType,
                exception.UnableToMigrateToSelf,
                exception.DestinationHypervisorTooOld,
                exception.InvalidLocalStorage,
                exception.InvalidSharedStorage,
                exception.MigrationPreCheckError) as ex:
            # Known scheduling errors: notify without forcing ERROR state.
            request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
            }
            with excutils.save_and_reraise_exception():
                self._set_vm_state_and_notify('live_migration',
                    dict(vm_state=instance['vm_state'],
                         task_state=None,
                         expected_task_state=task_states.MIGRATING,),
                    context, ex, request_spec)
        except Exception as ex:
            # Anything unexpected is a hard failure: flag the instance.
            request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
            }
            with excutils.save_and_reraise_exception():
                self._set_vm_state_and_notify('live_migration',
                    {'vm_state': vm_states.ERROR},
                    context, ex, request_spec)
    def _schedule_live_migration(self, context, instance, dest,
                                 block_migration, disk_over_commit, pclm):
        # Thin wrapper that builds and executes the conductor task.
        task = live_migrate.LiveMigrationTask(context, instance,
            dest, block_migration, disk_over_commit, pclm)
        return task.execute()
    def run_instance(self, context, request_spec, admin_password,
                     injected_files, requested_networks, is_first_time,
                     filter_properties, legacy_bdm_in_spec=True):
        """Tries to call schedule_run_instance on the driver.
        Sets instance vm_state to ERROR on exceptions
        """
        instance_uuids = request_spec['instance_uuids']
        with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                         'schedule', *instance_uuids):
            try:
                return self.driver.schedule_run_instance(context,
                    request_spec, admin_password, injected_files,
                    requested_networks, is_first_time, filter_properties,
                    legacy_bdm_in_spec)
            except exception.NoValidHost as ex:
                # don't re-raise: "no host" is an expected outcome and is
                # fully handled by the notification below.
                self._set_vm_state_and_notify('run_instance',
                    {'vm_state': vm_states.ERROR,
                     'task_state': None},
                    context, ex, request_spec)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify('run_instance',
                        {'vm_state': vm_states.ERROR,
                         'task_state': None},
                        context, ex, request_spec)
    def prep_resize(self, context, image, request_spec, filter_properties,
                    instance, instance_type, reservations):
        """Select a destination and ask compute to prepare the resize.

        NOTE(review): the original docstring claimed this calls
        schedule_prep_resize on the driver; the code actually uses
        select_destinations and then calls compute's prep_resize.
        Sets instance vm_state to ACTIVE on NoValidHost, and to ERROR on
        other exceptions (rolling back quota reservations either way).
        """
        instance_uuid = instance['uuid']
        with compute_utils.EventReporter(context, conductor_api.LocalAPI(),
                                         'schedule', instance_uuid):
            try:
                request_spec['num_instances'] = len(
                    request_spec['instance_uuids'])
                hosts = self.driver.select_destinations(
                    context, request_spec, filter_properties)
                host_state = hosts[0]
                scheduler_utils.populate_filter_properties(filter_properties,
                                                           host_state)
                # context is not serializable
                filter_properties.pop('context', None)
                (host, node) = (host_state['host'], host_state['nodename'])
                # Hydrate a full Instance object so the compute RPC call
                # has the attributes it expects.
                attrs = ['metadata', 'system_metadata', 'info_cache',
                         'security_groups']
                inst_obj = instance_obj.Instance._from_db_object(
                    context, instance_obj.Instance(), instance,
                    expected_attrs=attrs)
                self.compute_rpcapi.prep_resize(
                    context, image, inst_obj, instance_type, host,
                    reservations, request_spec=request_spec,
                    filter_properties=filter_properties, node=node)
            except exception.NoValidHost as ex:
                # No destination found: restore the previous vm_state
                # rather than erroring the instance.
                vm_state = instance.get('vm_state', vm_states.ACTIVE)
                self._set_vm_state_and_notify('prep_resize',
                    {'vm_state': vm_state,
                     'task_state': None},
                    context, ex, request_spec)
                if reservations:
                    QUOTAS.rollback(context, reservations)
            except Exception as ex:
                with excutils.save_and_reraise_exception():
                    self._set_vm_state_and_notify('prep_resize',
                        {'vm_state': vm_states.ERROR,
                         'task_state': None},
                        context, ex, request_spec)
                    if reservations:
                        QUOTAS.rollback(context, reservations)
    def _set_vm_state_and_notify(self, method, updates, context, ex,
                                 request_spec):
        # Shared helper: persist the state change and emit the scheduler
        # error notification.
        scheduler_utils.set_vm_state_and_notify(
            context, 'scheduler', method, updates, ex, request_spec, self.db)
    # NOTE(hanlind): This method can be removed in v3.0 of the RPC API.
    def show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.
        :param context: security context
        :param host: hostname
        :returns:
            example format is below::
                {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
                D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                    'vcpus_used': 12, 'memory_mb_used': 10240,
                    'local_gb_used': 64}
        """
        # Getting compute node info and related instances info
        service_ref = self.db.service_get_by_compute_host(context, host)
        instance_refs = self.db.instance_get_all_by_host(context,
                                                         service_ref['host'])
        # Getting total available/used resource
        compute_ref = service_ref['compute_node'][0]
        resource = {'vcpus': compute_ref['vcpus'],
                    'memory_mb': compute_ref['memory_mb'],
                    'local_gb': compute_ref['local_gb'],
                    'vcpus_used': compute_ref['vcpus_used'],
                    'memory_mb_used': compute_ref['memory_mb_used'],
                    'local_gb_used': compute_ref['local_gb_used']}
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}
        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            vcpus = [i['vcpus'] for i in instance_refs
                     if i['project_id'] == project_id]
            mem = [i['memory_mb'] for i in instance_refs
                   if i['project_id'] == project_id]
            root = [i['root_gb'] for i in instance_refs
                    if i['project_id'] == project_id]
            ephemeral = [i['ephemeral_gb'] for i in instance_refs
                         if i['project_id'] == project_id]
            usage[project_id] = {'vcpus': sum(vcpus),
                                 'memory_mb': sum(mem),
                                 'root_gb': sum(root),
                                 'ephemeral_gb': sum(ephemeral)}
        return {'resource': resource, 'usage': usage}
    @periodic_task.periodic_task
    def _expire_reservations(self, context):
        # Periodically release quota reservations that were never
        # committed or rolled back.
        QUOTAS.expire(context)
    @periodic_task.periodic_task(spacing=CONF.scheduler_driver_task_period,
                                 run_immediately=True)
    def _run_periodic_tasks(self, context):
        # Delegate periodic work to the configured driver.
        self.driver.run_periodic_tasks(context)
    # NOTE(russellb) This method can be removed in 3.0 of this API. It is
    # deprecated in favor of the method in the base API.
    def get_backdoor_port(self, context):
        return self.backdoor_port
    # NOTE(hanlind): This method can be removed in v4.0 of the RPC API.
    @messaging.expected_exceptions(exception.NoValidHost)
    def select_hosts(self, context, request_spec, filter_properties):
        """Returns host(s) best suited for this request_spec
        and filter_properties.
        """
        dests = self.driver.select_destinations(context, request_spec,
                                                filter_properties)
        hosts = [dest['host'] for dest in dests]
        return jsonutils.to_primitive(hosts)
    @messaging.expected_exceptions(exception.NoValidHost)
    def select_destinations(self, context, request_spec, filter_properties):
        """Returns destinations(s) best suited for this request_spec and
        filter_properties.
        The result should be a list of dicts with 'host', 'nodename' and
        'limits' as keys.
        """
        dests = self.driver.select_destinations(context, request_spec,
                                                filter_properties)
        return jsonutils.to_primitive(dests)
class _SchedulerManagerV3Proxy(object):
    '''Adapter exposing SchedulerManager under the v3.0 RPC interface.

    Every method is a pure pass-through to the wrapped manager.
    '''
    target = messaging.Target(version='3.0')

    def __init__(self, manager):
        self.manager = manager

    def select_destinations(self, ctxt, request_spec, filter_properties):
        return self.manager.select_destinations(
            ctxt, request_spec=request_spec,
            filter_properties=filter_properties)

    def run_instance(self, ctxt, request_spec, admin_password,
                     injected_files, requested_networks, is_first_time,
                     filter_properties, legacy_bdm_in_spec):
        kwargs = dict(
            request_spec=request_spec,
            admin_password=admin_password,
            injected_files=injected_files,
            requested_networks=requested_networks,
            is_first_time=is_first_time,
            filter_properties=filter_properties,
            legacy_bdm_in_spec=legacy_bdm_in_spec,
        )
        return self.manager.run_instance(ctxt, **kwargs)

    def prep_resize(self, ctxt, instance, instance_type, image,
                    request_spec, filter_properties, reservations):
        kwargs = dict(
            instance=instance,
            instance_type=instance_type,
            image=image,
            request_spec=request_spec,
            filter_properties=filter_properties,
            reservations=reservations,
        )
        return self.manager.prep_resize(ctxt, **kwargs)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import load
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import saver
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
def _run_signature(session, meta_graph_def, inputs, signature_key):
  """Feed `inputs` through the named signature and return fetched outputs."""
  signature = meta_graph_def.signature_def[signature_key]
  # The caller must supply exactly the inputs the signature declares.
  assert set(inputs.keys()) == set(signature.inputs.keys())
  # Resolve each declared input/output name to its tensor in the graph.
  feed_dict = {
      session.graph.get_tensor_by_name(signature.inputs[arg_name].name): value
      for arg_name, value in inputs.items()
  }
  fetches = {
      output_name: session.graph.get_tensor_by_name(output_tensor_info.name)
      for output_name, output_tensor_info in signature.outputs.items()
  }
  return session.run(fetches, feed_dict=feed_dict)
def _import_and_infer(
    save_dir,
    inputs,
    signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
  """Import a SavedModel into a TF 1.x-style graph and run `signature_key`."""
  graph = ops.Graph()
  with graph.as_default(), session_lib.Session() as session:
    # loader.load returns the MetaGraphDef for the serving tag set; it is
    # needed by _run_signature to resolve signature tensor names.
    model = loader.load(session, [tag_constants.SERVING], save_dir)
    return _run_signature(session, model, inputs, signature_key)
class SaveTest(test.TestCase, parameterized.TestCase):
  """Tests for `save.save`: signatures, variables, assets, and options."""

  def test_method_save_signature(self):
    """A function with an input signature can be saved and served."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, root.f)
    self.assertEqual({"output_0": 2.}, _import_and_infer(save_dir, {"x": 1.}))

  def test_method_save_list_func(self):
    """A function using switch_case control flow saves and serves correctly."""
    root = tracking.AutoTrackable()

    @def_function.function
    def case_fn(x):
      branch_index = constant_op.constant(1)
      branches = [lambda: x, lambda: x + 1]
      case_out = control_flow_ops.switch_case(branch_index, branches)
      return case_out

    root.f = def_function.function(
        lambda x: 2. * case_fn(x),
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, root.f)
    self.assertEqual({"output_0": 4.}, _import_and_infer(save_dir, {"x": 1.}))

  def test_method_save_concrete(self):
    """Concrete functions can be exported under custom signature keys."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda z: {"out": 2. * z})
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root, save_dir, {
            "non_default_key":
                root.f.get_concrete_function(
                    tensor_spec.TensorSpec(None, dtypes.float32))
        })
    self.assertEqual({"out": 2.},
                     _import_and_infer(
                         save_dir, {"z": 1.}, signature_key="non_default_key"))

  def test_method_save_annotated_function(self):
    """A function carrying type annotations of unknown types still saves."""
    # This test is only meaningful with Python 3 because Python 2's
    # inspect.getargspec doesn't save annotations.
    root = tracking.AutoTrackable()

    class UnknownType(object):  # pylint: disable=unused-variable
      pass

    def annotated_function(z):
      return {"out": 2. * z}

    # Same effect as annotating function like the following.
    # def annotated_function("z": UnknownType) -> UnknownType:
    # This is a workaround since Python 2 does not support annotations and
    # our presubmit linter catches it.
    annotated_function.__annotations__ = {
        "z": UnknownType,
        "return": UnknownType
    }
    root.f = def_function.function(annotated_function)
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root, save_dir, {
            "non_default_key":
                root.f.get_concrete_function(
                    tensor_spec.TensorSpec(None, dtypes.float32))
        })
    self.assertEqual({"out": 2.},
                     _import_and_infer(
                         save_dir, {"z": 1.}, signature_key="non_default_key"))

  def test_unsaveable_func_graph(self):
    """Saving surfaces the reason a nested graph was marked unsaveable."""
    root = module.Module()

    @def_function.function(input_signature=[])
    def nested_f():
      ops.get_default_graph().mark_as_unsaveable("ERROR MSG")
      return 1

    @def_function.function(input_signature=[])
    def f():
      return nested_f()

    root.f = f
    with self.assertRaisesRegex(ValueError, "ERROR MSG"):
      save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))

  def test_untracked_variable_useful_message(self):
    """The error for an untracked captured variable names that variable."""
    root = module.Module()
    v = variables.Variable(1., name="some_unique_name")

    @def_function.function(input_signature=[])
    def f():
      return v.read_value()

    root.f = f
    with self.assertRaisesRegex(AssertionError, "some_unique_name"):
      save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))

  def test_version_information_included(self):
    """The saved proto records the TensorFlow version and git version."""
    root = tracking.AutoTrackable()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    saved_model_proto = loader_impl.parse_saved_model(save_dir)
    self.assertEqual(
        versions.__version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version)
    self.assertEqual(
        versions.__git_version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version)

  def test_non_concrete_error(self):
    """Passing a signature-less, non-concrete function is rejected."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: 2. * x)
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(ValueError, "Expected a TensorFlow function"):
      save.save(root, save_dir, root.f)

  def test_captures_unreachable_variable(self):
    """Saving fails when a captured variable is not reachable from root."""
    root = tracking.AutoTrackable()
    unreachable_variable = variables.Variable([5.0, 2.0])
    root.reachable_variable = variables.Variable([1.0, 3.0])

    @def_function.function
    def increase_variable(x):
      return 2 * unreachable_variable * x + root.reachable_variable

    root.f = increase_variable
    self.assertAllEqual([101.0, 83.0],
                        root.f(constant_op.constant([10.0, 20.0])).numpy())
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(KeyError, "not reachable from root"):
      save.save(root, save_dir)

  def test_nested_inputs(self):
    """Functions taking nested (list) input structures can be traced."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x[0],
        input_signature=([
            tensor_spec.TensorSpec(None, dtypes.float32),
            tensor_spec.TensorSpec(None, dtypes.float32)
        ],))
    root.f([constant_op.constant(1.), constant_op.constant(1.)])

  def test_nested_outputs(self):
    """Signatures reject functions whose outputs are nested tuples."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(ValueError, "non-Tensor value"):
      save.save(root, save_dir, to_save)

  def test_nested_dict_outputs(self):
    """Signatures reject dict outputs containing nested structures."""
    root = util.Checkpoint(
        f=def_function.function(lambda x: {  # pylint: disable=g-long-lambda
            "a": 2. * x,
            "b": (3. * x, 4. * x)
        }))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(ValueError, "non-Tensor value"):
      save.save(root, save_dir, to_save)

  def test_variable(self):
    """Variables captured by the exported function are saved and restored."""
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, to_save)
    self.assertAllEqual({"output_0": 12.},
                        _import_and_infer(save_dir, {"x": 2.}))

  def test_single_function_default_signature(self):
    """A lone traced function becomes the default serving signature."""
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3., input_signature=())
    model.f()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
    self.assertAllClose({"output_0": 3.}, _import_and_infer(save_dir, {}))

  def test_single_function_no_signature(self):
    """Saving succeeds even when no signature can be inferred."""
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3.)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)

  def test_find_default_save_function(self):
    """An object's _default_save_signature is used when none is passed."""

    class ObjWithDefaultSignature(util.Checkpoint):

      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
      ])
      def _default_save_signature(self, x):
        return x + x + 1

    obj = ObjWithDefaultSignature()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(obj, save_dir)
    self.assertAllClose({"output_0": 7.},
                        _import_and_infer(save_dir, {"x": 3.}))

  def test_docstring(self):
    """A tf.Module method with an input signature round-trips through save."""

    class Adder(module.Module):

      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
      ])
      def add(self, x):
        return x + x + 1.

    to_save = Adder()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 7.},
                        _import_and_infer(save_dir, {"x": 3.}))

  def test_datastructures(self):
    """Variables held inside tracked lists and dicts are saved."""

    class HasDatastructures(util.Checkpoint):

      def __init__(self):
        self.a = [1.]
        self.a.append(variables.Variable(2.))
        self.b = {"a": variables.Variable(3.)}

      @def_function.function(input_signature=[
          tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
      ])
      def add(self, x):
        return x + math_ops.add_n(self.a) + self.b["a"]

    to_save = HasDatastructures()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 10.},
                        _import_and_infer(save_dir, {"x": 4.}))

  def test_default_attr_stripping(self):
    """Default-valued op attrs (T, Tout) are stripped from saved functions."""

    class Complex(util.Checkpoint):

      @def_function.function(input_signature=[])
      def __call__(self):
        return math_ops.complex(
            constant_op.constant(1.), constant_op.constant(2.), name="complex")

    to_save = Complex()
    to_save()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    graph = ops.Graph()
    with graph.as_default(), self.session(graph) as session:
      loader.load(session, [tag_constants.SERVING], save_dir)
      func, = [f for name, f in graph._functions.items() if "call" in name]
      complex_node, = [
          node for node in func.definition.node_def if node.op == "Complex"
      ]
      self.assertNotIn("T", complex_node.attr)
      self.assertNotIn("Tout", complex_node.attr)

  def test_signature_attribute_reserved(self):
    """Assigning to `signatures` is reserved and rejected at save time."""
    root = util.Checkpoint(signatures=variables.Variable(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegex(ValueError, "del obj.signatures"):
      save.save(root, save_dir)
    del root.signatures
    save.save(root, save_dir)

  def test_function_with_captured_dataset(self):
    """Functions iterating a captured tf.data.Dataset can be saved."""
    if test_util.is_gpu_available():
      self.skipTest("Currently broken when a GPU is available.")

    class HasDataset(module.Module):

      def __init__(self):
        super(HasDataset, self).__init__()
        self.dataset = (dataset_ops.Dataset.range(5).map(lambda x: x**2))

      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int64)
        for element in self.dataset:
          current_sum += x * element
        return current_sum

    root = HasDataset()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        signatures=root.__call__.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.int64)))
    self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)},
                        _import_and_infer(save_dir, {"x": 3}))

  def test_variable_args_cannot_be_used_as_signature(self):
    """tf.Variable-typed signature inputs cannot be exported."""

    @def_function.function(input_signature=[
        resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)
    ])
    def f(unused_v):
      return 1

    root = tracking.AutoTrackable()
    root.f = f.get_concrete_function()
    with self.assertRaisesRegex(ValueError,
                                "tf.Variable inputs cannot be exported"):
      save.save(
          root,
          os.path.join(self.get_temp_dir(), "saved_model"),
          signatures=root.f)

  def test_export_correct_output_shapes(self):
    """Asserts that nodes are exported with the correct number of output shapes.
    After backpropagation rewrite, functions are rewritten with additional
    outputs. When exporting to SavedModel, the shapes of the additional outputs
    were incorrectly added to the FunctionDef proto (b/133666530).
    """
    obj = tracking.AutoTrackable()
    obj.v = variables.Variable(2.)

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    def f(x):
      return (math_ops.multiply(obj.v, x), math_ops.multiply(obj.v,
                                                             (x + 1)), None)

    obj.f = f

    @def_function.function(
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    def g(x):
      return obj.f(x)[1]

    obj.g = g
    # After the following lines, the concrete functions of obj.g and obj.f are
    # rewritten with many extra outputs.
    with backprop.GradientTape():
      obj.g(constant_op.constant(3.0))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(obj, save_dir, signatures={"g": obj.g})
    graph_def = loader_impl.parse_saved_model(save_dir).meta_graphs[0].graph_def

    def assert_correct_number_of_output_shapes(node):
      # Only the original (pre-rewrite) outputs should appear in the export:
      # f has two tensor outputs, g has one.
      if node.op == "StatefulPartitionedCall":
        fn_name = node.attr["f"].func.name
        if fn_name.startswith("__inference_f"):
          self.assertLen(node.attr["_output_shapes"].list.shape, 2)
        if fn_name.startswith("__inference_g"):
          self.assertLen(node.attr["_output_shapes"].list.shape, 1)

    for f in graph_def.library.function:
      if (f.signature.name.startswith("__inference_f") or
          f.signature.name.startswith("__inference_g")):
        for node in f.node_def:
          assert_correct_number_of_output_shapes(node)

  def test_save_cached_variable(self):
    """Variables with a caching_device can be saved from a 1.x session."""
    with ops.Graph().as_default(), session_lib.Session() as session:
      obj = tracking.AutoTrackable()
      obj.v = variables.Variable(2., caching_device=lambda op: op.device)
      obj.w = variables.Variable(3.)
      session.run([obj.v.initializer, obj.w.initializer])

      @def_function.function(input_signature=[])
      def f():
        return obj.v + obj.w

      obj.f = f
      save_dir = os.path.join(self.get_temp_dir(), "saved_model")
      save.save(obj, save_dir, signatures=obj.f)
      self.assertAllClose({"output_0": 5}, _import_and_infer(save_dir, {}))

  @parameterized.named_parameters(
      ("_SaveDevices_ExportMetaGraph",
       save_options.VariablePolicy.SAVE_VARIABLE_DEVICES, True),
      ("_DiscardDevices_ExportMetaGraph", save_options.VariablePolicy.NONE,
       True), ("_SaveDevices_Save",
               save_options.VariablePolicy.SAVE_VARIABLE_DEVICES, False),
      ("_DiscardDevices_Save", save_options.VariablePolicy.NONE, False))
  def test_save_variable_devices(self, save_devices, meta_graph_only):
    """Variable device placement is kept or discarded per VariablePolicy."""
    context._reset_context()
    cpus = context.context().list_physical_devices("CPU")
    if len(cpus) == 1:
      # Split the single physical CPU into two logical devices so variables
      # can be placed on distinct devices below.
      context.context().set_logical_device_configuration(
          cpus[0], [
              context.LogicalDeviceConfiguration(),
              context.LogicalDeviceConfiguration()
          ])
    context.ensure_initialized()
    root = tracking.AutoTrackable()
    with ops.device("CPU:0"):
      root.v0 = variables.Variable(1., name="v0")
    with ops.device("CPU:1"):
      root.v1 = variables.Variable(1., name="v1")
    options = save_options.SaveOptions(
        experimental_variable_policy=save_devices)
    file_name = os.path.join(self.get_temp_dir(), "saved_model")
    if meta_graph_only:
      save.export_meta_graph(obj=root, filename=file_name, options=options)
    else:
      save.save(obj=root, export_dir=file_name, options=options)
    meta = None
    if meta_graph_only:
      meta = meta_graph.read_meta_graph_file(file_name)
    else:
      meta = loader_impl.parse_saved_model(file_name).meta_graphs[0]
    # Check devices in meta graph nodes.
    graph_def = meta.graph_def
    v0 = next((n for n in graph_def.node if n.name == "v0"), None)
    v1 = next((n for n in graph_def.node if n.name == "v1"), None)
    self.assertIsNotNone(v0)
    self.assertIsNotNone(v1)
    if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
    else:
      self.assertEmpty(v0.device)
      self.assertEmpty(v1.device)
    # Check devices in object graph nodes.
    object_graph_def = meta.object_graph_def
    v0 = next((n.variable
               for n in object_graph_def.nodes
               if n.HasField("variable") and n.variable.name == "v0"), None)
    v1 = next((n.variable
               for n in object_graph_def.nodes
               if n.HasField("variable") and n.variable.name == "v1"), None)
    self.assertIsNotNone(v0)
    self.assertIsNotNone(v1)
    if save_devices == save_options.VariablePolicy.SAVE_VARIABLE_DEVICES:
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
    else:
      self.assertEmpty(v0.device)
      self.assertEmpty(v1.device)

  @parameterized.named_parameters(
      ("_ExpandDistributedVariables",
       save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES),
      ("_DiscardDistributedVariables", save_options.VariablePolicy.NONE))
  def test_expand_distributed_variables(self, expand_strategy):
    """Distributed variables expand to per-replica nodes when requested."""
    context._reset_context()
    cpus = context.context().list_physical_devices("CPU")
    if len(cpus) == 1:
      # Two logical devices are needed for a two-replica MirroredStrategy.
      context.context().set_logical_device_configuration(
          cpus[0], [
              context.LogicalDeviceConfiguration(),
              context.LogicalDeviceConfiguration()
          ])
    context.ensure_initialized()
    file_name = os.path.join(self.get_temp_dir(), "saved_model.pb")
    with mirrored_strategy.MirroredStrategy(["CPU:0", "CPU:1"]).scope():
      root = tracking.AutoTrackable()
      root.v = variables.Variable([1., 1.], name="v")

      @def_function.function(input_signature=[])
      def f():
        root.v.assign([2., 2.])

      root.f = f
      save.export_meta_graph(
          obj=root,
          filename=file_name,
          options=save_options.SaveOptions(
              experimental_variable_policy=expand_strategy))
    graph_def = meta_graph.read_meta_graph_file(file_name).graph_def
    v0 = next((n for n in graph_def.node if n.name == "v"), None)
    v1 = next((n for n in graph_def.node if n.name == "v/replica_1"), None)
    self.assertIsNotNone(v0)
    saved_function = next((f for f in graph_def.library.function
                           if "inference_f_" in f.signature.name), None)
    self.assertIsNotNone(saved_function)
    if (expand_strategy ==
        save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES):
      self.assertIsNotNone(v1)
      # experimental_save_variable_devices should have been automatically set.
      self.assertIn("CPU:0", v0.device)
      self.assertIn("CPU:1", v1.device)
      self.assertLen(saved_function.signature.input_arg, 2)
    else:
      self.assertIsNone(v1)
      self.assertEmpty(v0.device)
      self.assertLen(saved_function.signature.input_arg, 1)

  def test_expand_distributed_variables_not_allowed(self):
    """save.save rejects the EXPAND_DISTRIBUTED_VARIABLES policy."""
    root = tracking.AutoTrackable()
    with self.assertRaisesRegex(NotImplementedError,
                                "not implemented in saved_model.save"):
      save.save(
          obj=root,
          export_dir="",
          options=save_options.SaveOptions(
              experimental_variable_policy=save_options.VariablePolicy
              .EXPAND_DISTRIBUTED_VARIABLES))

  def test_save_uninitialized_variable(self):
    """Uninitialized variables save, but loading them back fails for now."""
    root = tracking.AutoTrackable()
    root.uninitialized_variable = resource_variable_ops.UninitializedVariable(
        name="uninitialized_variable", dtype=dtypes.float32)
    root.initialized_variable = variables.Variable(
        1.0, name="initialized_variable")
    # TODO(b/149594077): Python loading does not work now partly because it
    # shouldn't, as the public API and semantics of uninitialized variables
    # are not properly defined, and officially supporting loading would end up
    # defining semantics "by usage." We should only allow loading once the API
    # is made official.
    export_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, export_dir)
    with self.assertRaisesRegex(FileNotFoundError,
                                "Key uninitialized_variable"):
      load.load(export_dir)
    with ops.Graph().as_default(), session_lib.Session() as session:
      # The final ValueError here (with "no variables to save") is confusing,
      # but errors upstream give the user the correct information (a
      # NotFoundError stating that the uninitalized_variable was not found in
      # the checkpoint).
      with self.assertRaises(ValueError):
        loader.load(session, [tag_constants.SERVING], export_dir)
class VariablePolicyEnumTest(test.TestCase):
  """Tests conversion and naming rules of `save_options.VariablePolicy`."""

  def testFromObj(self):
    """`from_obj` accepts None, enum members, and case-insensitive strings."""
    conversions = [
        (None, save_options.VariablePolicy.NONE),
        (save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
         save_options.VariablePolicy.SAVE_VARIABLE_DEVICES),
        (save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
         save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES),
        ("save_variable_devices",
         save_options.VariablePolicy.SAVE_VARIABLE_DEVICES),
        ("SaVe_VaRiAbLe_DeViCeS",
         save_options.VariablePolicy.SAVE_VARIABLE_DEVICES),
        ("expand_distributed_variables",
         save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES),
        ("eXpAnD_dIsTrIbUtEd_VaRiAbLeS",
         save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES),
    ]
    for obj, expected in conversions:
      self.assertEqual(expected, save_options.VariablePolicy.from_obj(obj))
    # Anything else (bad strings, non-strings) is rejected.
    for invalid in ["not_a_valid_value", 2.0, []]:
      with self.assertRaisesRegex(ValueError, "Invalid VariablePolicy value"):
        save_options.VariablePolicy.from_obj(invalid)

  def testNamingConvention(self):
    """Enforces names are uppercase versions of values."""
    for policy in save_options.VariablePolicy:
      if policy == save_options.VariablePolicy.NONE:
        # NONE is the one member without a string value.
        self.assertIsNone(policy.value)
        continue
      self.assertEqual(policy.name, policy.name.upper())
      self.assertEqual(policy.value, policy.value.lower())
      self.assertEqual(policy.name, policy.value.upper())
class SavingOptionsTest(test.TestCase):
  """Tests for `save_options.SaveOptions` and related save-time checks."""

  def testOpNameSpace(self):
    """_verify_ops rejects namespaced ops unless their namespace is listed."""
    # TODO(kathywu): Add test that saves out SavedModel with a custom op when
    # the ">" character is allowed in op names.
    graph_def = graph_pb2.GraphDef()
    text_format.Parse("node { name: 'A' op: 'Test>CustomOp' }", graph_def)
    with self.assertRaisesRegex(
        ValueError, "Attempted to save ops from non-whitelisted namespaces"):
      save._verify_ops(graph_def, [])
    save._verify_ops(graph_def, ["Test"])
    # Test with multiple carrots in op name.
    text_format.Parse("node { name: 'A' op: 'Test>>A>CustomOp' }", graph_def)
    with self.assertRaisesRegex(
        ValueError, "Attempted to save ops from non-whitelisted namespaces"):
      save._verify_ops(graph_def, [])
    save._verify_ops(graph_def, ["Test"])

  def test_save_debug_info_enabled(self):
    """save_debug_info=True writes a GraphDebugInfo containing op traces."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        root.f,
        options=save_options.SaveOptions(save_debug_info=True))
    debug_info_file_name = os.path.join(save_dir, "debug",
                                        "saved_model_debug_info.pb")
    self.assertTrue(os.path.exists(debug_info_file_name))
    debug_info = graph_debug_info_pb2.GraphDebugInfo()
    with open(debug_info_file_name, "rb") as f:
      debug_info.ParseFromString(f.read())
    # Verify that there is a trace for DEBUG_INFO_OP just to ensure that
    # function debug info tracing is nominally functioning.
    found_op = False
    for key in debug_info.traces.keys():
      if key.startswith("DEBUG_INFO_OP@"):
        found_op = True
        break
    self.assertTrue(found_op, "Did not find DEBUG_INFO_OP in trace")

  def test_save_debug_info_disabled(self):
    """save_debug_info=False writes no debug info file."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        root.f,
        options=save_options.SaveOptions(save_debug_info=False))
    debug_info_file_name = os.path.join(save_dir, "debug",
                                        "saved_model_debug_info.pb")
    self.assertFalse(os.path.exists(debug_info_file_name))

  def test_function_aliases(self):
    """function_aliases map alias names onto saved concrete function names."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    options = save_options.SaveOptions(function_aliases={
        "my_func": root.f,
    })
    save.save(root, save_dir, root.f, options=options)
    # NOTE: reaches into def_function internals to find the single traced
    # concrete function, then checks its name was recorded as an alias key.
    function_cache = list(root.f._stateful_fn._function_cache.all_values())
    function_aliases = loader_impl.parse_saved_model(
        save_dir).meta_graphs[0].meta_info_def.function_aliases
    self.assertLen(function_cache, 1)
    self.assertEqual(function_cache[0].name.decode("utf-8"),
                     list(function_aliases.keys())[0])

  def test_accepts_io_device(self):
    """experimental_io_device defaults to None and accepts a device string."""
    options = save_options.SaveOptions()
    self.assertIsNone(options.experimental_io_device)
    options = save_options.SaveOptions(experimental_io_device="/job:localhost")
    self.assertEqual("/job:localhost", options.experimental_io_device)

  def test_accepts_variable_policy(self):
    """experimental_variable_policy accepts enums and strings, rejects junk."""
    options = save_options.SaveOptions()
    self.assertEqual(save_options.VariablePolicy.NONE,
                     options.experimental_variable_policy)
    # VariablePolicy instances.
    options = save_options.SaveOptions(experimental_variable_policy=save_options
                                       .VariablePolicy.SAVE_VARIABLE_DEVICES)
    self.assertEqual(save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
                     options.experimental_variable_policy)
    options = save_options.SaveOptions(
        experimental_variable_policy=save_options.VariablePolicy
        .EXPAND_DISTRIBUTED_VARIABLES)
    self.assertEqual(save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
                     options.experimental_variable_policy)
    # String conversions.
    options = save_options.SaveOptions(
        experimental_variable_policy="save_variable_devices")
    self.assertEqual(save_options.VariablePolicy.SAVE_VARIABLE_DEVICES,
                     options.experimental_variable_policy)
    options = save_options.SaveOptions(
        experimental_variable_policy="expand_distributed_variables")
    self.assertEqual(save_options.VariablePolicy.EXPAND_DISTRIBUTED_VARIABLES,
                     options.experimental_variable_policy)
    with self.assertRaisesRegex(ValueError, "Invalid VariablePolicy value"):
      options = save_options.SaveOptions(
          experimental_variable_policy="not_a_valid_value")
class AssetTests(test.TestCase):
  """Tests saving models that depend on external asset files."""

  def setUp(self):
    super(AssetTests, self).setUp()
    # A small vocabulary file used as the asset in each test below.
    self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
    with open(self._vocab_path, "w") as f:
      f.write("alpha\nbeta\ngamma\n")

  def test_asset_path_returned(self):
    """Asset paths reported by signatures track the SavedModel's location."""
    root = tracking.AutoTrackable()
    root.path = tracking.Asset(self._vocab_path)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    root.get_asset = def_function.function(lambda: root.path.asset_path)
    save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    file_io.rename(save_dir, second_dir)
    imported_path = _import_and_infer(second_dir, {})["output_0"]
    self.assertIn(
        compat.as_str_any(second_dir), compat.as_str_any(imported_path))

  def test_table(self):
    """A lookup table's vocab file is copied into the SavedModel's assets."""
    initializer = lookup_ops.TextFileInitializer(
        self._vocab_path,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
    root = util.Checkpoint(
        table=lookup_ops.HashTable(initializer, default_value=-1))
    root.table_user = def_function.function(
        root.table.lookup,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
    self.assertEqual(
        2, self.evaluate(root.table_user(constant_op.constant("gamma"))))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    # Deleting the original vocab file proves the SavedModel relies on its
    # own copy of the asset rather than the source path.
    file_io.delete_file(self._vocab_path)
    self.assertAllClose({"output_0": [2, 0]},
                        _import_and_infer(save_dir,
                                          {"keys": ["gamma", "alpha"]}))
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    # Asset paths should track the location the SavedModel is loaded from.
    file_io.rename(save_dir, second_dir)
    self.assertAllClose({"output_0": [2, 1]},
                        _import_and_infer(second_dir,
                                          {"keys": ["gamma", "beta"]}))

  def test_untracked_table_useful_message(self):
    """Saving fails with a message naming the untracked HashTable."""
    root = module.Module()
    initializer = lookup_ops.TextFileInitializer(
        self._vocab_path,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
    table = lookup_ops.HashTable(initializer, default_value=-1)
    root.table_user = def_function.function(
        table.lookup,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
    root.table_user(constant_op.constant("gamma"))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    # assertRaisesRegex (not the deprecated assertRaisesRegexp alias, removed
    # in Python 3.12) for consistency with the rest of this file.
    with self.assertRaisesRegex(AssertionError, "HashTable"):
      save.save(root, save_dir)

  def test_unused_asset(self):
    """Assets attached to the object are saved even if no function uses them."""
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.asset = tracking.Asset(self._vocab_path)
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, export_dir)
    self.assertAllClose({"output_0": [0.2]},
                        _import_and_infer(export_dir, {"x": [0.1]}))

  def test_sensible_function_building_exception(self):
    """Calling save.save from inside a tf.function raises a clear error."""
    root = util.Checkpoint(v=variables.Variable(2.))
    root.f = def_function.function(
        lambda x: 2. * root.v,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")

    @def_function.function
    def _calls_save():
      save.save(root, export_dir)

    with self.assertRaisesRegex(AssertionError, "tf.function"):
      _calls_save()
class ExportMetaGraphTests(test.TestCase):
  """Tests for `save.export_meta_graph` with TF 1.x-style import and run."""

  def test_export_meta_graph(self):
    """Exported meta graph signatures can be imported and run in a Session."""
    root = tracking.AutoTrackable()
    root.variable = resource_variable_ops.UninitializedVariable(
        name="some_variable", dtype=dtypes.float32)

    @def_function.function(input_signature=[tensor_spec.TensorSpec(None)])
    def multiply_var(x):
      return root.variable * x

    @def_function.function(input_signature=[tensor_spec.TensorSpec([])])
    def update(y):
      root.variable.assign_add(y)
      # TODO(b/150393409): All functions exported as signatures must have at
      # least one output.
      return 0

    @def_function.function(input_signature=[])
    def initialize():
      root.variable.assign(1.0)
      # TODO(b/150393409): All functions exported as signatures must have at
      # least one output.
      return 0

    save_path = os.path.join(self.get_temp_dir(), "meta_graph.pb")
    save.export_meta_graph(
        root,
        save_path,
        signatures={
            "multiply_var": multiply_var,
            "initialize": initialize,
            "update": update
        })
    with ops.Graph().as_default(), session_lib.Session() as session:
      saver.import_meta_graph(save_path)
      meta_graph_def = meta_graph.read_meta_graph_file(save_path)
      # Initialize variable to 1
      _run_signature(session, meta_graph_def, {}, "initialize")
      out = _run_signature(session, meta_graph_def, {"x": 3}, "multiply_var")
      self.assertAllEqual(out, {"output_0": 3})
      # Adds 2 to the variable. Variable is now 3
      _run_signature(session, meta_graph_def, {"y": 2}, "update")
      out = _run_signature(session, meta_graph_def, {"x": 4}, "multiply_var")
      self.assertAllEqual(out, {"output_0": 12})
# Module entry point: run all test cases above under the TF test runner.
if __name__ == "__main__":
  test.main()
| |
"""Test case implementation"""
import sys
import difflib
import pprint
import re
import unittest
import warnings
from unittest2 import result
from unittest2.util import (
safe_repr, safe_str, strclass,
unorderable_list_difference
)
from unittest2.compatibility import wraps
# NOTE: unittest's traceback machinery hides frames from modules that set
# __unittest = True, keeping these internals out of failure reports.
__unittest = True

# Message appended when a long diff is truncated in a failure message;
# '%s' is filled with the length of the omitted diff.
DIFF_OMITTED = ('\nDiff is %s characters long. '
                'Set self.maxDiff to None to see it.')
class SkipTest(Exception):
    """Signal that the current test should be skipped.

    Prefer TestResult.skip() or one of the skipping decorators over
    raising this exception directly.
    """
class _ExpectedFailure(Exception):
"""
Raise this when a test is expected to fail.
This is an implementation detail.
"""
def __init__(self, exc_info):
# can't use super because Python 2.4 exceptions are old style
Exception.__init__(self)
self.exc_info = exc_info
class _UnexpectedSuccess(Exception):
"""
The test was supposed to fail, but it didn't!
"""
def _id(obj):
return obj
def skip(reason):
    """Return a decorator that unconditionally skips a test.

    Test classes are merely flagged; plain test functions are replaced
    by a wrapper that raises SkipTest when invoked.
    """
    def decorator(test_item):
        is_test_class = (isinstance(test_item, type)
                         and issubclass(test_item, TestCase))
        if not is_test_class:
            @wraps(test_item)
            def skip_wrapper(*args, **kwargs):
                raise SkipTest(reason)
            test_item = skip_wrapper
        # These attributes are inspected by TestCase.run().
        test_item.__unittest_skip__ = True
        test_item.__unittest_skip_why__ = reason
        return test_item
    return decorator
def skipIf(condition, reason):
    """
    Skip a test when the condition evaluates true.
    """
    if not condition:
        return _id
    return skip(reason)
def skipUnless(condition, reason):
    """
    Skip a test when the condition evaluates false.
    """
    if condition:
        return _id
    return skip(reason)
def expectedFailure(func):
    """Mark *func* as expected to fail.

    Any exception from the test is reported as an expected failure;
    a clean run is reported as an unexpected success.
    """
    @wraps(func)
    def expected_failure_wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
        except Exception:
            raise _ExpectedFailure(sys.exc_info())
        raise _UnexpectedSuccess
    return expected_failure_wrapper
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case, expected_regexp=None):
self.expected = expected
self.failureException = test_case.failureException
self.expected_regexp = expected_regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
try:
exc_name = self.expected.__name__
except AttributeError:
exc_name = str(self.expected)
raise self.failureException(
"%s not raised" % (exc_name,))
if not issubclass(exc_type, self.expected):
# let unexpected exceptions pass through
return False
self.exception = exc_value # store for later retrieval
if self.expected_regexp is None:
return True
expected_regexp = self.expected_regexp
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if not expected_regexp.search(str(exc_value)):
raise self.failureException('"%s" does not match "%s"' %
(expected_regexp.pattern, str(exc_value)))
return True
class _TypeEqualityDict(object):
def __init__(self, testcase):
self.testcase = testcase
self._store = {}
def __setitem__(self, key, value):
self._store[key] = value
def __getitem__(self, key):
value = self._store[key]
if isinstance(value, basestring):
return getattr(self.testcase, value)
return value
def get(self, key, default=None):
if key in self._store:
return self[key]
return default
class TestCase(unittest.TestCase):
    """A class whose instances are single test cases.

    By default, the test code itself should be placed in a method named
    'runTest'.

    If the fixture may be used for many test cases, create as
    many test methods as are needed. When instantiating such a TestCase
    subclass, specify in the constructor arguments the name of the test method
    that the instance is to execute.

    Test authors should subclass TestCase for their own tests. Construction
    and deconstruction of the test's environment ('fixture') can be
    implemented by overriding the 'setUp' and 'tearDown' methods respectively.

    If it is necessary to override the __init__ method, the base class
    __init__ method must always be called. It is important that subclasses
    should not change the signature of their __init__ method, since instances
    of the classes are instantiated automatically by parts of the framework
    in order to be run.
    """

    # This attribute determines which exception will be raised when
    # the instance's assertion methods fail; test methods raising this
    # exception will be deemed to have 'failed' rather than 'errored'
    failureException = AssertionError

    # This attribute sets the maximum length of a diff in failure messages
    # by assert methods using difflib. It is looked up as an instance attribute
    # so can be configured by individual tests if required.
    maxDiff = 80*8

    # This attribute determines whether long messages (including repr of
    # objects used in assert methods) will be printed on failure in *addition*
    # to any explicit message passed.
    longMessage = True

    # Attribute used by TestSuite for classSetUp
    _classSetupFailed = False

    def __init__(self, methodName='runTest'):
        """Create an instance of the class that will use the named test
        method when executed. Raises a ValueError if the instance does
        not have a method with the specified name.
        """
        self._testMethodName = methodName
        self._resultForDoCleanups = None
        try:
            testMethod = getattr(self, methodName)
        except AttributeError:
            raise ValueError("no such test method in %s: %s" % \
                  (self.__class__, methodName))
        self._testMethodDoc = testMethod.__doc__
        self._cleanups = []

        # Map types to custom assertEqual functions that will compare
        # instances of said type in more detail to generate a more useful
        # error message.
        self._type_equality_funcs = _TypeEqualityDict(self)
        self.addTypeEqualityFunc(dict, 'assertDictEqual')
        self.addTypeEqualityFunc(list, 'assertListEqual')
        self.addTypeEqualityFunc(tuple, 'assertTupleEqual')
        self.addTypeEqualityFunc(set, 'assertSetEqual')
        self.addTypeEqualityFunc(frozenset, 'assertSetEqual')
        self.addTypeEqualityFunc(unicode, 'assertMultiLineEqual')

    def addTypeEqualityFunc(self, typeobj, function):
        """Add a type specific assertEqual style function to compare a type.

        This method is for use by TestCase subclasses that need to register
        their own type equality functions to provide nicer error messages.

        Args:
            typeobj: The data type to call this function on when both values
                    are of the same type in assertEqual().
            function: The callable taking two arguments and an optional
                    msg= argument that raises self.failureException with a
                    useful error message when the two arguments are not equal.
        """
        self._type_equality_funcs[typeobj] = function

    def addCleanup(self, function, *args, **kwargs):
        """Add a function, with arguments, to be called when the test is
        completed. Functions added are called on a LIFO basis and are
        called after tearDown on test failure or success.

        Cleanup items are called even if setUp fails (unlike tearDown)."""
        self._cleanups.append((function, args, kwargs))

    def setUp(self):
        "Hook method for setting up the test fixture before exercising it."

    @classmethod
    def setUpClass(cls):
        "Hook method for setting up class fixture before running tests in the class."

    @classmethod
    def tearDownClass(cls):
        "Hook method for deconstructing the class fixture after running all tests in the class."

    def tearDown(self):
        "Hook method for deconstructing the test fixture after testing it."

    def countTestCases(self):
        # Each TestCase instance represents exactly one test.
        return 1

    def defaultTestResult(self):
        # Result object used when run() is called without an explicit result.
        return result.TestResult()

    def shortDescription(self):
        """Returns a one-line description of the test, or None if no
        description has been provided.

        The default implementation of this method returns the first line of
        the specified test method's docstring.
        """
        doc = self._testMethodDoc
        return doc and doc.split("\n")[0].strip() or None

    def id(self):
        """Return the fully-qualified identifier of this test."""
        return "%s.%s" % (strclass(self.__class__), self._testMethodName)

    def __eq__(self, other):
        # Two test cases compare equal when they are the same class running
        # the same test method.
        if type(self) is not type(other):
            return NotImplemented

        return self._testMethodName == other._testMethodName

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((type(self), self._testMethodName))

    def __str__(self):
        return "%s (%s)" % (self._testMethodName, strclass(self.__class__))

    def __repr__(self):
        return "<%s testMethod=%s>" % \
               (strclass(self.__class__), self._testMethodName)

    def _addSkip(self, result, reason):
        """Record a skip on *result*, degrading gracefully for result
        objects that predate the addSkip API."""
        addSkip = getattr(result, 'addSkip', None)
        if addSkip is not None:
            addSkip(self, reason)
        else:
            warnings.warn("Use of a TestResult without an addSkip method is deprecated",
                          DeprecationWarning, 2)
            result.addSuccess(self)

    def run(self, result=None):
        """Run the test, collecting the outcome into *result*.

        Handles skip markers, expected failures/unexpected successes, and
        always runs cleanups; creates (and starts/stops) a default result
        when none is supplied.
        """
        orig_result = result
        if result is None:
            result = self.defaultTestResult()
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()

        self._resultForDoCleanups = result
        result.startTest(self)

        testMethod = getattr(self, self._testMethodName)

        if (getattr(self.__class__, "__unittest_skip__", False) or
            getattr(testMethod, "__unittest_skip__", False)):
            # If the class or method was skipped.
            try:
                skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                            or getattr(testMethod, '__unittest_skip_why__', ''))
                self._addSkip(result, skip_why)
            finally:
                result.stopTest(self)
            return
        try:
            success = False
            try:
                self.setUp()
            except SkipTest, e:
                self._addSkip(result, str(e))
            except Exception:
                result.addError(self, sys.exc_info())
            else:
                try:
                    testMethod()
                except self.failureException:
                    result.addFailure(self, sys.exc_info())
                except _ExpectedFailure, e:
                    # Raised by the expectedFailure decorator wrapper.
                    addExpectedFailure = getattr(result, 'addExpectedFailure', None)
                    if addExpectedFailure is not None:
                        addExpectedFailure(self, e.exc_info)
                    else:
                        warnings.warn("Use of a TestResult without an addExpectedFailure method is deprecated",
                                      DeprecationWarning)
                        result.addSuccess(self)
                except _UnexpectedSuccess:
                    addUnexpectedSuccess = getattr(result, 'addUnexpectedSuccess', None)
                    if addUnexpectedSuccess is not None:
                        addUnexpectedSuccess(self)
                    else:
                        warnings.warn("Use of a TestResult without an addUnexpectedSuccess method is deprecated",
                                      DeprecationWarning)
                        result.addFailure(self, sys.exc_info())
                except SkipTest, e:
                    self._addSkip(result, str(e))
                except Exception:
                    result.addError(self, sys.exc_info())
                else:
                    success = True

                # tearDown only runs when setUp succeeded; its failure turns
                # a passing test into an error.
                try:
                    self.tearDown()
                except Exception:
                    result.addError(self, sys.exc_info())
                    success = False

            # Cleanups run regardless of setUp/test/tearDown outcome.
            cleanUpSuccess = self.doCleanups()
            success = success and cleanUpSuccess
            if success:
                result.addSuccess(self)
        finally:
            result.stopTest(self)
            if orig_result is None:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()

    def doCleanups(self):
        """Execute all cleanup functions. Normally called for you after
        tearDown."""
        result = self._resultForDoCleanups
        ok = True
        # LIFO order: most recently added cleanup runs first.
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop(-1)
            try:
                function(*args, **kwargs)
            except Exception:
                ok = False
                result.addError(self, sys.exc_info())
        return ok

    def __call__(self, *args, **kwds):
        # Calling a TestCase instance runs the test.
        return self.run(*args, **kwds)

    def debug(self):
        """Run the test without collecting errors in a TestResult"""
        self.setUp()
        getattr(self, self._testMethodName)()
        self.tearDown()
        while self._cleanups:
            function, args, kwargs = self._cleanups.pop(-1)
            function(*args, **kwargs)

    def skipTest(self, reason):
        """Skip this test."""
        raise SkipTest(reason)

    def fail(self, msg=None):
        """Fail immediately, with the given message."""
        raise self.failureException(msg)

    def assertFalse(self, expr, msg=None):
        "Fail the test if the expression is true."
        if expr:
            msg = self._formatMessage(msg, "%s is not False" % safe_repr(expr))
            raise self.failureException(msg)

    def assertTrue(self, expr, msg=None):
        """Fail the test unless the expression is true."""
        if not expr:
            msg = self._formatMessage(msg, "%s is not True" % safe_repr(expr))
            raise self.failureException(msg)

    def _formatMessage(self, msg, standardMsg):
        """Honour the longMessage attribute when generating failure messages.

        If longMessage is False this means:
        * Use only an explicit message if it is provided
        * Otherwise use the standard message for the assert

        If longMessage is True:
        * Use the standard message
        * If an explicit message is provided, plus ' : ' and the explicit message
        """
        if not self.longMessage:
            return msg or standardMsg
        if msg is None:
            return standardMsg
        try:
            return '%s : %s' % (standardMsg, msg)
        except UnicodeDecodeError:
            # Mixed byte/unicode messages: fall back to safe conversions.
            return '%s : %s' % (safe_str(standardMsg), safe_str(msg))

    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
        """Fail unless an exception of class excClass is thrown
        by callableObj when invoked with arguments args and keyword
        arguments kwargs. If a different type of exception is
        thrown, it will not be caught, and the test case will be
        deemed to have suffered an error, exactly as for an
        unexpected exception.

        If called with callableObj omitted or None, will return a
        context object used like this::

            with self.assertRaises(SomeException):
                do_something()

        The context manager keeps a reference to the exception as
        the 'exception' attribute. This allows you to inspect the
        exception after the assertion::

            with self.assertRaises(SomeException) as cm:
                do_something()
            the_exception = cm.exception
            self.assertEqual(the_exception.error_code, 3)
        """
        if callableObj is None:
            return _AssertRaisesContext(excClass, self)
        try:
            callableObj(*args, **kwargs)
        except excClass:
            return

        if hasattr(excClass,'__name__'):
            excName = excClass.__name__
        else:
            excName = str(excClass)
        raise self.failureException, "%s not raised" % excName

    def _getAssertEqualityFunc(self, first, second):
        """Get a detailed comparison function for the types of the two args.

        Returns: A callable accepting (first, second, msg=None) that will
        raise a failure exception if first != second with a useful human
        readable error message for those types.
        """
        #
        # NOTE(gregory.p.smith): I considered isinstance(first, type(second))
        # and vice versa. I opted for the conservative approach in case
        # subclasses are not intended to be compared in detail to their super
        # class instances using a type equality func. This means testing
        # subtypes won't automagically use the detailed comparison. Callers
        # should use their type specific assertSpamEqual method to compare
        # subclasses if the detailed comparison is desired and appropriate.
        # See the discussion in http://bugs.python.org/issue2578.
        #
        if type(first) is type(second):
            asserter = self._type_equality_funcs.get(type(first))
            if asserter is not None:
                return asserter

        return self._baseAssertEqual

    def _baseAssertEqual(self, first, second, msg=None):
        """The default assertEqual implementation, not type specific."""
        if not first == second:
            standardMsg = '%s != %s' % (safe_repr(first), safe_repr(second))
            msg = self._formatMessage(msg, standardMsg)
            raise self.failureException(msg)

    def assertEqual(self, first, second, msg=None):
        """Fail if the two objects are unequal as determined by the '=='
        operator.
        """
        assertion_func = self._getAssertEqualityFunc(first, second)
        assertion_func(first, second, msg=msg)

    def assertNotEqual(self, first, second, msg=None):
        """Fail if the two objects are equal as determined by the '=='
        operator.
        """
        if not first != second:
            msg = self._formatMessage(msg, '%s == %s' % (safe_repr(first),
                                                         safe_repr(second)))
            raise self.failureException(msg)

    def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """Fail if the two objects are unequal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        between the two objects is more than the given delta.

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        If the two objects compare equal then they will automatically
        compare almost equal.
        """
        if first == second:
            # shortcut
            return
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")

        if delta is not None:
            if abs(first - second) <= delta:
                return

            standardMsg = '%s != %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7

            if round(abs(second-first), places) == 0:
                return

            standardMsg = '%s != %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)
        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)

    def assertNotAlmostEqual(self, first, second, places=None, msg=None, delta=None):
        """Fail if the two objects are equal as determined by their
        difference rounded to the given number of decimal places
        (default 7) and comparing to zero, or by comparing that the
        between the two objects is less than the given delta.

        Note that decimal places (from zero) are usually not the same
        as significant digits (measured from the most signficant digit).

        Objects that are equal automatically fail.
        """
        if delta is not None and places is not None:
            raise TypeError("specify delta or places not both")
        if delta is not None:
            if not (first == second) and abs(first - second) > delta:
                return
            standardMsg = '%s == %s within %s delta' % (safe_repr(first),
                                                        safe_repr(second),
                                                        safe_repr(delta))
        else:
            if places is None:
                places = 7
            if not (first == second) and round(abs(second-first), places) != 0:
                return
            standardMsg = '%s == %s within %r places' % (safe_repr(first),
                                                         safe_repr(second),
                                                         places)

        msg = self._formatMessage(msg, standardMsg)
        raise self.failureException(msg)

    # Synonyms for assertion methods

    # The plurals are undocumented. Keep them that way to discourage use.
    # Do not add more. Do not remove.
    # Going through a deprecation cycle on these would annoy many people.
    assertEquals = assertEqual
    assertNotEquals = assertNotEqual
    assertAlmostEquals = assertAlmostEqual
    assertNotAlmostEquals = assertNotAlmostEqual
    assert_ = assertTrue

    # These fail* assertion method names are pending deprecation and will
    # be a DeprecationWarning in 3.2; http://bugs.python.org/issue2578
    def _deprecate(original_func):
        # Wrap an assertion method so use of its legacy alias warns.
        def deprecated_func(*args, **kwargs):
            warnings.warn(
                ('Please use %s instead.' % original_func.__name__),
                PendingDeprecationWarning, 2)
            return original_func(*args, **kwargs)
        return deprecated_func

    failUnlessEqual = _deprecate(assertEqual)
    failIfEqual = _deprecate(assertNotEqual)
    failUnlessAlmostEqual = _deprecate(assertAlmostEqual)
    failIfAlmostEqual = _deprecate(assertNotAlmostEqual)
    failUnless = _deprecate(assertTrue)
    failUnlessRaises = _deprecate(assertRaises)
    failIf = _deprecate(assertFalse)

    def assertSequenceEqual(self, seq1, seq2,
                            msg=None, seq_type=None, max_diff=80*8):
        """An equality assertion for ordered sequences (like lists and tuples).

        For the purposes of this function, a valid ordered sequence type is one
        which can be indexed, has a length, and has an equality operator.

        Args:
            seq1: The first sequence to compare.
            seq2: The second sequence to compare.
            seq_type: The expected datatype of the sequences, or None if no
                    datatype should be enforced.
            msg: Optional message to use on failure instead of a list of
                    differences.
            max_diff: Maximum size off the diff, larger diffs are not shown
        """
        if seq_type is not None:
            seq_type_name = seq_type.__name__
            if not isinstance(seq1, seq_type):
                raise self.failureException('First sequence is not a %s: %s'
                                            % (seq_type_name, safe_repr(seq1)))
            if not isinstance(seq2, seq_type):
                raise self.failureException('Second sequence is not a %s: %s'
                                            % (seq_type_name, safe_repr(seq2)))
        else:
            seq_type_name = "sequence"

        differing = None
        try:
            len1 = len(seq1)
        except (TypeError, NotImplementedError):
            differing = 'First %s has no length. Non-sequence?' % (
                    seq_type_name)

        if differing is None:
            try:
                len2 = len(seq2)
            except (TypeError, NotImplementedError):
                differing = 'Second %s has no length. Non-sequence?' % (
                        seq_type_name)

        if differing is None:
            if seq1 == seq2:
                return

            seq1_repr = repr(seq1)
            seq2_repr = repr(seq2)
            if len(seq1_repr) > 30:
                seq1_repr = seq1_repr[:30] + '...'
            if len(seq2_repr) > 30:
                seq2_repr = seq2_repr[:30] + '...'
            elements = (seq_type_name.capitalize(), seq1_repr, seq2_repr)
            differing = '%ss differ: %s != %s\n' % elements

            # Walk the common prefix to locate the first differing element.
            for i in xrange(min(len1, len2)):
                try:
                    item1 = seq1[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of first %s\n' %
                                 (i, seq_type_name))
                    break

                try:
                    item2 = seq2[i]
                except (TypeError, IndexError, NotImplementedError):
                    differing += ('\nUnable to index element %d of second %s\n' %
                                 (i, seq_type_name))
                    break

                if item1 != item2:
                    differing += ('\nFirst differing element %d:\n%s\n%s\n' %
                                 (i, item1, item2))
                    break
            else:
                # Common prefix is equal: report extra elements, if any.
                if (len1 == len2 and seq_type is None and
                    type(seq1) != type(seq2)):
                    # The sequences are the same, but have differing types.
                    return

                if len1 > len2:
                    differing += ('\nFirst %s contains %d additional '
                                  'elements.\n' % (seq_type_name, len1 - len2))
                    try:
                        differing += ('First extra element %d:\n%s\n' %
                                      (len2, seq1[len2]))
                    except (TypeError, IndexError, NotImplementedError):
                        differing += ('Unable to index element %d '
                                      'of first %s\n' % (len2, seq_type_name))
                elif len1 < len2:
                    differing += ('\nSecond %s contains %d additional '
                                  'elements.\n' % (seq_type_name, len2 - len1))
                    try:
                        differing += ('First extra element %d:\n%s\n' %
                                      (len1, seq2[len1]))
                    except (TypeError, IndexError, NotImplementedError):
                        differing += ('Unable to index element %d '
                                      'of second %s\n' % (len1, seq_type_name))
        standardMsg = differing
        diffMsg = '\n' + '\n'.join(
            difflib.ndiff(pprint.pformat(seq1).splitlines(),
                          pprint.pformat(seq2).splitlines()))

        standardMsg = self._truncateMessage(standardMsg, diffMsg)
        msg = self._formatMessage(msg, standardMsg)
        self.fail(msg)

    def _truncateMessage(self, message, diff):
        # Append the diff only when it fits within maxDiff; otherwise note
        # its length so the user can raise the limit.
        max_diff = self.maxDiff
        if max_diff is None or len(diff) <= max_diff:
            return message + diff
        return message + (DIFF_OMITTED % len(diff))

    def assertListEqual(self, list1, list2, msg=None):
        """A list-specific equality assertion.

        Args:
            list1: The first list to compare.
            list2: The second list to compare.
            msg: Optional message to use on failure instead of a list of
                    differences.

        """
        self.assertSequenceEqual(list1, list2, msg, seq_type=list)

    def assertTupleEqual(self, tuple1, tuple2, msg=None):
        """A tuple-specific equality assertion.

        Args:
            tuple1: The first tuple to compare.
            tuple2: The second tuple to compare.
            msg: Optional message to use on failure instead of a list of
                    differences.
        """
        self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)

    def assertSetEqual(self, set1, set2, msg=None):
        """A set-specific equality assertion.

        Args:
            set1: The first set to compare.
            set2: The second set to compare.
            msg: Optional message to use on failure instead of a list of
                    differences.

        assertSetEqual uses ducktyping to support
        different types of sets, and is optimized for sets specifically
        (parameters must support a difference method).
        """
        try:
            difference1 = set1.difference(set2)
        except TypeError, e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError, e:
            self.fail('first argument does not support set difference: %s' % e)

        try:
            difference2 = set2.difference(set1)
        except TypeError, e:
            self.fail('invalid type when attempting set difference: %s' % e)
        except AttributeError, e:
            self.fail('second argument does not support set difference: %s' % e)

        if not (difference1 or difference2):
            return

        lines = []
        if difference1:
            lines.append('Items in the first set but not the second:')
            for item in difference1:
                lines.append(repr(item))
        if difference2:
            lines.append('Items in the second set but not the first:')
            for item in difference2:
                lines.append(repr(item))

        standardMsg = '\n'.join(lines)
        self.fail(self._formatMessage(msg, standardMsg))

    def assertIn(self, member, container, msg=None):
        """Just like self.assertTrue(a in b), but with a nicer default message."""
        if member not in container:
            standardMsg = '%s not found in %s' % (safe_repr(member),
                                                  safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertNotIn(self, member, container, msg=None):
        """Just like self.assertTrue(a not in b), but with a nicer default message."""
        if member in container:
            standardMsg = '%s unexpectedly found in %s' % (safe_repr(member),
                                                           safe_repr(container))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIs(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is b), but with a nicer default message."""
        if expr1 is not expr2:
            standardMsg = '%s is not %s' % (safe_repr(expr1), safe_repr(expr2))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNot(self, expr1, expr2, msg=None):
        """Just like self.assertTrue(a is not b), but with a nicer default message."""
        if expr1 is expr2:
            standardMsg = 'unexpectedly identical: %s' % (safe_repr(expr1),)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertDictEqual(self, d1, d2, msg=None):
        """A dict-specific equality assertion with a diff-based message."""
        self.assert_(isinstance(d1, dict), 'First argument is not a dictionary')
        self.assert_(isinstance(d2, dict), 'Second argument is not a dictionary')

        if d1 != d2:
            standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
            diff = ('\n' + '\n'.join(difflib.ndiff(
                           pprint.pformat(d1).splitlines(),
                           pprint.pformat(d2).splitlines())))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertDictContainsSubset(self, expected, actual, msg=None):
        """Checks whether actual is a superset of expected."""
        missing = []
        mismatched = []
        for key, value in expected.iteritems():
            if key not in actual:
                missing.append(key)
            elif value != actual[key]:
                mismatched.append('%s, expected: %s, actual: %s' %
                                  (safe_repr(key), safe_repr(value),
                                   safe_repr(actual[key])))

        if not (missing or mismatched):
            return

        standardMsg = ''
        if missing:
            standardMsg = 'Missing: %s' % ','.join(safe_repr(m) for m in
                                                    missing)
        if mismatched:
            if standardMsg:
                standardMsg += '; '
            standardMsg += 'Mismatched values: %s' % ','.join(mismatched)

        self.fail(self._formatMessage(msg, standardMsg))

    def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
        """An unordered sequence specific comparison. It asserts that
        expected_seq and actual_seq contain the same elements. It is
        the equivalent of::

            self.assertEqual(sorted(expected_seq), sorted(actual_seq))

        Raises with an error message listing which elements of expected_seq
        are missing from actual_seq and vice versa if any.

        Asserts that each element has the same count in both sequences.
        Example:
            - [0, 1, 1] and [1, 0, 1] compare equal.
            - [0, 0, 1] and [0, 1] compare unequal.
        """
        try:
            expected = sorted(expected_seq)
            actual = sorted(actual_seq)
        except TypeError:
            # Unsortable items (example: set(), complex(), ...)
            expected = list(expected_seq)
            actual = list(actual_seq)
            missing, unexpected = unorderable_list_difference(
                expected, actual, ignore_duplicate=False
            )
        else:
            return self.assertSequenceEqual(expected, actual, msg=msg)

        errors = []
        if missing:
            errors.append('Expected, but missing:\n    %s' %
                           safe_repr(missing))
        if unexpected:
            errors.append('Unexpected, but present:\n    %s' %
                           safe_repr(unexpected))
        if errors:
            standardMsg = '\n'.join(errors)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertMultiLineEqual(self, first, second, msg=None):
        """Assert that two multi-line strings are equal."""
        self.assert_(isinstance(first, basestring), (
                'First argument is not a string'))
        self.assert_(isinstance(second, basestring), (
                'Second argument is not a string'))

        if first != second:
            standardMsg = '%s != %s' % (safe_repr(first, True), safe_repr(second, True))
            diff = '\n' + ''.join(difflib.ndiff(first.splitlines(True),
                                                second.splitlines(True)))
            standardMsg = self._truncateMessage(standardMsg, diff)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertLess(self, a, b, msg=None):
        """Just like self.assertTrue(a < b), but with a nicer default message."""
        if not a < b:
            standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertLessEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a <= b), but with a nicer default message."""
        if not a <= b:
            standardMsg = '%s not less than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertGreater(self, a, b, msg=None):
        """Just like self.assertTrue(a > b), but with a nicer default message."""
        if not a > b:
            standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertGreaterEqual(self, a, b, msg=None):
        """Just like self.assertTrue(a >= b), but with a nicer default message."""
        if not a >= b:
            standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNone(self, obj, msg=None):
        """Same as self.assertTrue(obj is None), with a nicer default message."""
        if obj is not None:
            standardMsg = '%s is not None' % (safe_repr(obj),)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsNotNone(self, obj, msg=None):
        """Included for symmetry with assertIsNone."""
        if obj is None:
            standardMsg = 'unexpectedly None'
            self.fail(self._formatMessage(msg, standardMsg))

    def assertIsInstance(self, obj, cls, msg=None):
        """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
        default message."""
        if not isinstance(obj, cls):
            standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertNotIsInstance(self, obj, cls, msg=None):
        """Included for symmetry with assertIsInstance."""
        if isinstance(obj, cls):
            standardMsg = '%s is an instance of %r' % (safe_repr(obj), cls)
            self.fail(self._formatMessage(msg, standardMsg))

    def assertRaisesRegexp(self, expected_exception, expected_regexp,
                           callable_obj=None, *args, **kwargs):
        """Asserts that the message in a raised exception matches a regexp.

        Args:
            expected_exception: Exception class expected to be raised.
            expected_regexp: Regexp (re pattern object or string) expected
                    to be found in error message.
            callable_obj: Function to be called.
            args: Extra args.
            kwargs: Extra kwargs.
        """
        if callable_obj is None:
            return _AssertRaisesContext(expected_exception, self, expected_regexp)
        try:
            callable_obj(*args, **kwargs)
        except expected_exception, exc_value:
            if isinstance(expected_regexp, basestring):
                expected_regexp = re.compile(expected_regexp)
            if not expected_regexp.search(str(exc_value)):
                raise self.failureException('"%s" does not match "%s"' %
                                            (expected_regexp.pattern, str(exc_value)))
        else:
            if hasattr(expected_exception, '__name__'):
                excName = expected_exception.__name__
            else:
                excName = str(expected_exception)
            raise self.failureException, "%s not raised" % excName

    def assertRegexpMatches(self, text, expected_regexp, msg=None):
        """Fail the test unless the text matches the regular expression."""
        if isinstance(expected_regexp, basestring):
            expected_regexp = re.compile(expected_regexp)
        if not expected_regexp.search(text):
            msg = msg or "Regexp didn't match"
            msg = '%s: %r not found in %r' % (msg, expected_regexp.pattern, text)
            raise self.failureException(msg)

    def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
        """Fail the test if the text matches the regular expression."""
        if isinstance(unexpected_regexp, basestring):
            unexpected_regexp = re.compile(unexpected_regexp)
        match = unexpected_regexp.search(text)
        if match:
            msg = msg or "Regexp matched"
            msg = '%s: %r matches %r in %r' % (msg,
                                               text[match.start():match.end()],
                                               unexpected_regexp.pattern,
                                               text)
            raise self.failureException(msg)
class FunctionTestCase(TestCase):
    """A test case wrapping a free-standing test function.

    Useful for slipping pre-existing test functions into the unittest
    framework, optionally with set-up and tidy-up callables. As with
    TestCase, the tidy-up ('tearDown') function runs whenever the
    set-up ('setUp') function completed successfully.
    """

    def __init__(self, testFunc, setUp=None, tearDown=None, description=None):
        super(FunctionTestCase, self).__init__()
        self._setUpFunc = setUp
        self._tearDownFunc = tearDown
        self._testFunc = testFunc
        self._description = description

    def setUp(self):
        if self._setUpFunc is not None:
            self._setUpFunc()

    def tearDown(self):
        if self._tearDownFunc is not None:
            self._tearDownFunc()

    def runTest(self):
        self._testFunc()

    def id(self):
        # The wrapped function's name identifies the test.
        return self._testFunc.__name__

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        # Equal only when every configured callable and the description match.
        return self._setUpFunc == other._setUpFunc and \
            self._tearDownFunc == other._tearDownFunc and \
            self._testFunc == other._testFunc and \
            self._description == other._description

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((type(self), self._setUpFunc, self._tearDownFunc,
                     self._testFunc, self._description))

    def __str__(self):
        return "%s (%s)" % (strclass(self.__class__),
                            self._testFunc.__name__)

    def __repr__(self):
        return "<%s testFunc=%s>" % (strclass(self.__class__),
                                     self._testFunc)

    def shortDescription(self):
        if self._description is not None:
            return self._description
        doc = self._testFunc.__doc__
        if not doc:
            return None
        return doc.split("\n")[0].strip() or None
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import page as gcdc_page
from google.protobuf import empty_pb2 # type: ignore
from .base import PagesTransport, DEFAULT_CLIENT_INFO
from .grpc import PagesGrpcTransport
class PagesGrpcAsyncIOTransport(PagesTransport):
    """gRPC AsyncIO backend transport for Pages.
    Service for managing
    [Pages][google.cloud.dialogflow.cx.v3beta1.Page].
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Class-level annotations/defaults; each instance re-binds its own
    # channel and stub cache in __init__.
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        # Channel/SSL state is resolved below depending on which arguments
        # were supplied; start from a clean slate.
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # No channel was supplied above, so build one with the credentials
        # the base transport just resolved.
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def list_pages(
        self,
    ) -> Callable[[page.ListPagesRequest], Awaitable[page.ListPagesResponse]]:
        r"""Return a callable for the list pages method over gRPC.
        Returns the list of all pages in the specified flow.
        Returns:
            Callable[[~.ListPagesRequest],
                    Awaitable[~.ListPagesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_pages" not in self._stubs:
            self._stubs["list_pages"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Pages/ListPages",
                request_serializer=page.ListPagesRequest.serialize,
                response_deserializer=page.ListPagesResponse.deserialize,
            )
        return self._stubs["list_pages"]
    @property
    def get_page(self) -> Callable[[page.GetPageRequest], Awaitable[page.Page]]:
        r"""Return a callable for the get page method over gRPC.
        Retrieves the specified page.
        Returns:
            Callable[[~.GetPageRequest],
                    Awaitable[~.Page]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_page" not in self._stubs:
            self._stubs["get_page"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Pages/GetPage",
                request_serializer=page.GetPageRequest.serialize,
                response_deserializer=page.Page.deserialize,
            )
        return self._stubs["get_page"]
    @property
    def create_page(
        self,
    ) -> Callable[[gcdc_page.CreatePageRequest], Awaitable[gcdc_page.Page]]:
        r"""Return a callable for the create page method over gRPC.
        Creates a page in the specified flow.
        Returns:
            Callable[[~.CreatePageRequest],
                    Awaitable[~.Page]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_page" not in self._stubs:
            self._stubs["create_page"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Pages/CreatePage",
                request_serializer=gcdc_page.CreatePageRequest.serialize,
                response_deserializer=gcdc_page.Page.deserialize,
            )
        return self._stubs["create_page"]
    @property
    def update_page(
        self,
    ) -> Callable[[gcdc_page.UpdatePageRequest], Awaitable[gcdc_page.Page]]:
        r"""Return a callable for the update page method over gRPC.
        Updates the specified page.
        Returns:
            Callable[[~.UpdatePageRequest],
                    Awaitable[~.Page]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_page" not in self._stubs:
            self._stubs["update_page"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Pages/UpdatePage",
                request_serializer=gcdc_page.UpdatePageRequest.serialize,
                response_deserializer=gcdc_page.Page.deserialize,
            )
        return self._stubs["update_page"]
    @property
    def delete_page(
        self,
    ) -> Callable[[page.DeletePageRequest], Awaitable[empty_pb2.Empty]]:
        r"""Return a callable for the delete page method over gRPC.
        Deletes the specified page.
        Returns:
            Callable[[~.DeletePageRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_page" not in self._stubs:
            self._stubs["delete_page"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.cx.v3beta1.Pages/DeletePage",
                request_serializer=page.DeletePageRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_page"]
    def close(self):
        # Closing the channel invalidates every stub created from it.
        return self.grpc_channel.close()
__all__ = ("PagesGrpcAsyncIOTransport",)
| |
from pgmpy.exceptions import CardinalityError
from pgmpy.factors import Factor
from pgmpy.models import MarkovModel
from pgmpy.tests import help_functions as hf
import numpy as np
import unittest
class TestMarkovModelCreation(unittest.TestCase):
    """Construction-time behaviour of MarkovModel: nodes, edges, self-loops."""
    def setUp(self):
        # A fresh, empty model for every test.
        self.graph = MarkovModel()
    def test_class_init_without_data(self):
        self.assertIsInstance(self.graph, MarkovModel)
    def test_class_init_with_data_string(self):
        self.g = MarkovModel([('a', 'b'), ('b', 'c')])
        self.assertListEqual(sorted(self.g.nodes()), ['a', 'b', 'c'])
        self.assertListEqual(
            hf.recursive_sorted(self.g.edges()), [['a', 'b'], ['b', 'c']])
    def test_class_init_with_data_nonstring(self):
        self.g = MarkovModel([(1, 2), (2, 3)])
    def test_add_node_string(self):
        self.graph.add_node('a')
        self.assertListEqual(self.graph.nodes(), ['a'])
    def test_add_node_nonstring(self):
        self.graph.add_node(1)
    def test_add_nodes_from_string(self):
        self.graph.add_nodes_from(['a', 'b', 'c', 'd'])
        self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c', 'd'])
    def test_add_nodes_from_non_string(self):
        self.graph.add_nodes_from([1, 2, 3, 4])
    def test_add_edge_string(self):
        self.graph.add_edge('d', 'e')
        self.assertListEqual(sorted(self.graph.nodes()), ['d', 'e'])
        self.assertListEqual(
            hf.recursive_sorted(self.graph.edges()), [['d', 'e']])
        # Adding an edge between existing nodes must not disturb others.
        self.graph.add_nodes_from(['a', 'b', 'c'])
        self.graph.add_edge('a', 'b')
        self.assertListEqual(
            hf.recursive_sorted(self.graph.edges()), [['a', 'b'], ['d', 'e']])
    def test_add_edge_nonstring(self):
        self.graph.add_edge(1, 2)
    def test_add_edge_selfloop(self):
        # Self-loops are invalid in a Markov network.
        self.assertRaises(ValueError, self.graph.add_edge, 'a', 'a')
    def test_add_edges_from_string(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertListEqual(sorted(self.graph.nodes()), ['a', 'b', 'c'])
        self.assertListEqual(
            hf.recursive_sorted(self.graph.edges()), [['a', 'b'], ['b', 'c']])
        self.graph.add_nodes_from(['d', 'e', 'f'])
        self.graph.add_edges_from([('d', 'e'), ('e', 'f')])
        self.assertListEqual(
            sorted(self.graph.nodes()), ['a', 'b', 'c', 'd', 'e', 'f'])
        self.assertListEqual(
            hf.recursive_sorted(self.graph.edges()),
            hf.recursive_sorted([('a', 'b'), ('b', 'c'),
                                 ('d', 'e'), ('e', 'f')]))
    def test_add_edges_from_nonstring(self):
        self.graph.add_edges_from([(1, 2), (2, 3)])
    def test_add_edges_from_self_loop(self):
        self.assertRaises(ValueError, self.graph.add_edges_from, [('a', 'a')])
    def test_number_of_neighbors(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertEqual(len(self.graph.neighbors('b')), 2)
    def tearDown(self):
        del self.graph
class TestMarkovModelMethods(unittest.TestCase):
    """Conversions and queries on a populated MarkovModel."""
    def setUp(self):
        self.graph = MarkovModel()
    def test_factor_graph(self):
        from pgmpy.models import FactorGraph
        phi1 = Factor(['Alice', 'Bob'], [3, 2], np.random.rand(6))
        phi2 = Factor(['Bob', 'Charles'], [3, 2], np.random.rand(6))
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
        self.graph.add_factors(phi1, phi2)
        factor_graph = self.graph.to_factor_graph()
        self.assertIsInstance(factor_graph, FactorGraph)
        # The conversion adds one phi_* node per factor.
        self.assertListEqual(
            sorted(factor_graph.nodes()),
            ['Alice', 'Bob', 'Charles', 'phi_Alice_Bob', 'phi_Bob_Charles'])
        self.assertListEqual(
            hf.recursive_sorted(factor_graph.edges()),
            [['Alice', 'phi_Alice_Bob'], ['Bob', 'phi_Alice_Bob'],
             ['Bob', 'phi_Bob_Charles'], ['Charles', 'phi_Bob_Charles']])
        self.assertListEqual(factor_graph.get_factors(), [phi1, phi2])
    def test_factor_graph_raises_error(self):
        # A model without factors cannot be turned into a factor graph.
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles')])
        self.assertRaises(ValueError, self.graph.to_factor_graph)
    def test_junction_tree(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
                                   ('d', 'a')])
        phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
        phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
        phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
        phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
        self.graph.add_factors(phi1, phi2, phi3, phi4)
        junction_tree = self.graph.to_junction_tree()
        self.assertListEqual(hf.recursive_sorted(junction_tree.nodes()),
                             [['a', 'b', 'd'], ['b', 'c', 'd']])
        self.assertEqual(len(junction_tree.edges()), 1)
    def test_markov_blanket(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertListEqual(self.graph.markov_blanket('a'), ['b'])
        self.assertListEqual(
            sorted(self.graph.markov_blanket('b')), ['a', 'c'])
    def test_local_independencies(self):
        from pgmpy.independencies import Independencies
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        independencies = self.graph.get_local_independecies()
        self.assertIsInstance(independencies, Independencies)
        self.assertEqual(len(independencies.get_independencies()), 2)
        # Render the assertions deterministically, sorted by first event.
        rendered = [str(assertion) + '\n'
                    for assertion in sorted(independencies.get_independencies(),
                                            key=lambda x: list(x.event1))]
        self.assertEqual(''.join(rendered), 'a _|_ c | b\nc _|_ a | b\n')
    def test_bayesian_model(self):
        from pgmpy.models import BayesianModel
        import networkx as nx
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
                                   ('d', 'a')])
        phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
        phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
        phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
        phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
        self.graph.add_factors(phi1, phi2, phi3, phi4)
        bm = self.graph.to_bayesian_model()
        self.assertIsInstance(bm, BayesianModel)
        self.assertListEqual(sorted(bm.nodes()), ['a', 'b', 'c', 'd'])
        # The moralized/underlying undirected graph must be chordal.
        self.assertTrue(nx.is_chordal(bm.to_undirected()))
    def tearDown(self):
        del self.graph
class TestUndirectedGraphFactorOperations(unittest.TestCase):
    """Attaching, removing and normalising factors on a MarkovModel."""
    def setUp(self):
        self.graph = MarkovModel()
    def test_add_factor_raises_error(self):
        self.graph.add_edges_from([('Alice', 'Bob'), ('Bob', 'Charles'),
                                   ('Charles', 'Debbie'), ('Debbie', 'Alice')])
        # 'John' is not a node of the model, so attaching must fail.
        factor = Factor(['Alice', 'Bob', 'John'], [2, 2, 2], np.random.rand(8))
        self.assertRaises(ValueError, self.graph.add_factors, factor)
    def test_add_single_factor(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi = Factor(['a', 'b'], [2, 2], range(4))
        self.graph.add_factors(phi)
        self.assertListEqual(self.graph.get_factors(), [phi])
    def test_add_multiple_factors(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.assertListEqual(self.graph.get_factors(), [phi1, phi2])
    def test_remove_single_factor(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.remove_factors(phi1)
        self.assertListEqual(self.graph.get_factors(), [phi2])
    def test_remove_multiple_factors(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.remove_factors(phi1, phi2)
        self.assertListEqual(self.graph.get_factors(), [])
    def test_partition_function(self):
        self.graph.add_nodes_from(['a', 'b', 'c'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.graph.add_edges_from([('a', 'b'), ('b', 'c')])
        self.assertEqual(self.graph.get_partition_function(), 22.0)
    def test_partition_function_raises_error(self):
        # Node 'd' is covered by no factor, so Z cannot be computed.
        self.graph.add_nodes_from(['a', 'b', 'c', 'd'])
        phi1 = Factor(['a', 'b'], [2, 2], range(4))
        phi2 = Factor(['b', 'c'], [2, 2], range(4))
        self.graph.add_factors(phi1, phi2)
        self.assertRaises(ValueError, self.graph.get_partition_function)
    def tearDown(self):
        del self.graph
class TestUndirectedGraphTriangulation(unittest.TestCase):
    """Tests for MarkovModel.triangulate with heuristics H1-H6.

    Fixes two defects in the original suite:
    * ``test_triangulation_h5_inplace`` and ``test_triangulation_h6_inplace``
      passed ``heuristic='H4'`` (copy/paste error), so H5/H6 were never
      exercised in-place.
    * All ``*_create_new`` tests passed ``inplace=True`` although they assert
      on the *returned* graph ``H``; they now pass ``inplace=False`` so a new
      triangulated graph is actually created, matching the test names.
    The repeated 4-cycle fixture is factored into a helper.
    """
    def setUp(self):
        self.graph = MarkovModel()
    def _build_cycle_with_factors(self):
        # Shared fixture: the 4-cycle a-b-c-d-a with one factor per edge.
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
                                   ('d', 'a')])
        phi1 = Factor(['a', 'b'], [2, 3], np.random.rand(6))
        phi2 = Factor(['b', 'c'], [3, 4], np.random.rand(12))
        phi3 = Factor(['c', 'd'], [4, 5], np.random.rand(20))
        phi4 = Factor(['d', 'a'], [5, 2], np.random.random(10))
        self.graph.add_factors(phi1, phi2, phi3, phi4)
    def test_check_clique(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])
        self.assertTrue(self.graph.check_clique(['a', 'b', 'c']))
    def test_is_triangulated(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'a')])
        self.assertTrue(self.graph.is_triangulated())
    def test_triangulation_h1_inplace(self):
        self._build_cycle_with_factors()
        self.graph.triangulate(heuristic='H1', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'c'], ['a', 'd'],
                              ['b', 'c'], ['c', 'd']])
    def test_triangulation_h2_inplace(self):
        self._build_cycle_with_factors()
        self.graph.triangulate(heuristic='H2', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'c'], ['a', 'd'],
                              ['b', 'c'], ['c', 'd']])
    def test_triangulation_h3_inplace(self):
        self._build_cycle_with_factors()
        self.graph.triangulate(heuristic='H3', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h4_inplace(self):
        self._build_cycle_with_factors()
        self.graph.triangulate(heuristic='H4', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h5_inplace(self):
        self._build_cycle_with_factors()
        # BUG FIX: was heuristic='H4' (copy/paste); this test targets H5.
        self.graph.triangulate(heuristic='H5', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h6_inplace(self):
        self._build_cycle_with_factors()
        # BUG FIX: was heuristic='H4' (copy/paste); this test targets H6.
        self.graph.triangulate(heuristic='H6', inplace=True)
        self.assertTrue(self.graph.is_triangulated())
        self.assertListEqual(hf.recursive_sorted(self.graph.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_cardinality_mismatch_raises_error(self):
        self.graph.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'),
                                   ('d', 'a')])
        factor_list = [Factor(edge, [2, 2], np.random.rand(4)) for edge in
                       self.graph.edges()]
        self.graph.add_factors(*factor_list)
        # Conflicting cardinality for 'b' (2 above vs 3 here).
        self.graph.add_factors(Factor(['a', 'b'], [2, 3], np.random.rand(6)))
        self.assertRaises(CardinalityError, self.graph.triangulate)
    def test_triangulation_h1_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H1', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'c'], ['a', 'd'],
                              ['b', 'c'], ['c', 'd']])
    def test_triangulation_h2_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H2', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'c'], ['a', 'd'],
                              ['b', 'c'], ['c', 'd']])
    def test_triangulation_h3_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H3', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h4_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H4', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h5_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H5', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def test_triangulation_h6_create_new(self):
        self._build_cycle_with_factors()
        H = self.graph.triangulate(heuristic='H6', inplace=False)
        self.assertListEqual(hf.recursive_sorted(H.edges()),
                             [['a', 'b'], ['a', 'd'], ['b', 'c'],
                              ['b', 'd'], ['c', 'd']])
    def tearDown(self):
        del self.graph
| |
# informix.py
# Copyright (C) 2005,2006, 2007, 2008 Michael Bayer mike_mp@zzzcomputing.com
#
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import datetime
from sqlalchemy import sql, schema, exceptions, pool, util
from sqlalchemy.sql import compiler
from sqlalchemy.engine import default
from sqlalchemy import types as sqltypes
# for offset
class informix_cursor(object):
    """DB-API cursor wrapper that emulates OFFSET for Informix.

    Informix (7.x) has no OFFSET clause, so :meth:`offset` discards the
    first ``n`` rows client-side and adjusts ``rowcount`` accordingly.
    All other attribute access is delegated to the real cursor.
    """
    def __init__(self, con):
        self.__cursor = con.cursor()
        self.rowcount = 0
    def offset(self, n):
        """Skip the first ``n`` rows of the result and fix up ``rowcount``."""
        if n > 0:
            # fetchmany is delegated to the underlying cursor via __getattr__.
            self.fetchmany(n)
            self.rowcount = self.__cursor.rowcount - n
            if self.rowcount < 0:
                self.rowcount = 0
        else:
            self.rowcount = self.__cursor.rowcount
    def execute(self, sql, params):
        # informixdb rejects a None parameter set; normalise to an empty list.
        if params is None or len(params) == 0:
            params = []
        return self.__cursor.execute(sql, params)
    def __getattr__(self, name):
        if name not in ('offset', '__cursor', 'rowcount', '__del__', 'execute'):
            return getattr(self.__cursor, name)
        # BUG FIX: previously this fell through and implicitly returned None
        # for the excluded names, which broke hasattr()/getattr() semantics.
        # __getattr__ must raise AttributeError for attributes it cannot serve.
        raise AttributeError(name)
class InfoNumeric(sqltypes.Numeric):
    """Informix NUMERIC; emits precision/length only when precision is set."""
    def get_col_spec(self):
        if self.precision:
            return "NUMERIC(%s, %s)" % (self.precision, self.length)
        return 'NUMERIC'
class InfoInteger(sqltypes.Integer):
    """Informix INTEGER column type."""
    def get_col_spec(self):
        return "INTEGER"
class InfoSmallInteger(sqltypes.Smallinteger):
    """Informix SMALLINT column type."""
    def get_col_spec(self):
        return "SMALLINT"
class InfoDate(sqltypes.Date):
    """Informix DATE column type."""
    def get_col_spec(self):
        return "DATE"
class InfoDateTime(sqltypes.DateTime):
    """Informix DATETIME YEAR TO SECOND (no sub-second precision)."""
    def get_col_spec(self):
        return "DATETIME YEAR TO SECOND"
    def bind_processor(self, dialect):
        def process(value):
            # The column stores whole seconds only; truncate microseconds.
            if value is not None and value.microsecond:
                value = value.replace(microsecond=0)
            return value
        return process
class InfoTime(sqltypes.Time):
    """Informix DATETIME HOUR TO SECOND used as a TIME type."""
    def get_col_spec(self):
        return "DATETIME HOUR TO SECOND"
    def bind_processor(self, dialect):
        def process(value):
            # Whole seconds only; truncate microseconds.
            if value is not None and value.microsecond:
                value = value.replace(microsecond=0)
            return value
        return process
    def result_processor(self, dialect):
        def process(value):
            # The driver may hand back a full datetime; keep only the time.
            if isinstance(value, datetime.datetime):
                return value.time()
            return value
        return process
class InfoText(sqltypes.String):
    """Text type rendered as a bounded VARCHAR(255) on Informix."""
    def get_col_spec(self):
        return "VARCHAR(255)"
class InfoString(sqltypes.String):
    """Informix VARCHAR(length)."""
    def get_col_spec(self):
        return "VARCHAR(%(length)s)" % {'length': self.length}
    def bind_processor(self, dialect):
        def process(value):
            # Informix does not distinguish '' from NULL; send NULL explicitly.
            if value == '':
                return None
            return value
        return process
class InfoChar(sqltypes.CHAR):
    """Informix fixed-width CHAR(length)."""
    def get_col_spec(self):
        return "CHAR(%(length)s)" % {'length': self.length}
class InfoBinary(sqltypes.Binary):
    """Binary data stored in an Informix BYTE column."""
    def get_col_spec(self):
        return "BYTE"
class InfoBoolean(sqltypes.Boolean):
    """Boolean emulated as SMALLINT (0/1) on Informix."""
    default_type = 'NUM'
    def get_col_spec(self):
        return "SMALLINT"
    def result_processor(self, dialect):
        def process(value):
            # NULL stays NULL; anything else collapses to True/False.
            if value is None:
                return None
            return bool(value)
        return process
    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            if value is True:
                return 1
            if value is False:
                return 0
            # Non-bool truthy/falsy values collapse to a plain bool.
            return bool(value)
        return process
# Map generic SQLAlchemy types to the Informix-specific implementations above;
# consumed by InfoDialect.type_descriptor via sqltypes.adapt_type().
colspecs = {
    sqltypes.Integer : InfoInteger,
    sqltypes.Smallinteger : InfoSmallInteger,
    sqltypes.Numeric : InfoNumeric,
    sqltypes.Float : InfoNumeric,
    sqltypes.DateTime : InfoDateTime,
    sqltypes.Date : InfoDate,
    sqltypes.Time: InfoTime,
    sqltypes.String : InfoString,
    sqltypes.Binary : InfoBinary,
    sqltypes.Boolean : InfoBoolean,
    sqltypes.Text : InfoText,
    sqltypes.CHAR: InfoChar,
    }
# Map Informix syscolumns.coltype codes to SQLAlchemy types (used for
# table reflection).
#
# BUG FIX: the original dict listed SMALLFLOAT under key 3 — a duplicate of
# FLOAT — so the entry was silently discarded and SMALLFLOAT columns could
# never be reflected.  Per the Informix system-catalog documentation,
# SMALLFLOAT's coltype code is 4.
ischema_names = {
    0  : InfoString,        # CHAR
    1  : InfoSmallInteger,  # SMALLINT
    2  : InfoInteger,       # INT
    3  : InfoNumeric,       # FLOAT
    4  : InfoNumeric,       # SMALLFLOAT
    5  : InfoNumeric,       # DECIMAL
    6  : InfoInteger,       # SERIAL
    7  : InfoDate,          # DATE
    8  : InfoNumeric,       # MONEY
    10 : InfoDateTime,      # DATETIME
    11 : InfoBinary,        # BYTE
    12 : InfoText,          # TEXT
    13 : InfoString,        # VARCHAR
    15 : InfoString,        # NCHAR
    16 : InfoString,        # NVARCHAR
    17 : InfoInteger,       # INT8
    18 : InfoInteger,       # SERIAL8
    43 : InfoString,        # LVARCHAR
    # NOTE(review): the original had two -1 entries (BLOB then CLOB); the
    # CLOB/InfoText mapping won, and that behaviour is preserved here.
    # Whether BLOB should instead map to InfoBinary under a distinct code
    # needs confirmation against the informixdb driver.
    -1 : InfoText,          # CLOB (also reported for BLOB)
    }
def descriptor():
    """Describe this dialect's connection arguments for the engine registry.

    Returns a dict with the dialect name, a human-readable description, and
    the list of (argname, label, default) connection argument triples.
    """
    arguments = [
        ('dsn', 'Data Source Name', None),
        ('user', 'Username', None),
        ('password', 'Password', None),
    ]
    return {
        'name': 'informix',
        'description': 'Informix',
        'arguments': arguments,
    }
class InfoExecutionContext(default.DefaultExecutionContext):
    """Execution context exposing Informix cursor diagnostics.

    informixdb cursor.sqlerrd layout:
      0 - estimated number of rows returned
      1 - serial value after insert or ISAM error code
      2 - number of rows processed
      3 - estimated cost
      4 - offset of the error into the SQL statement
      5 - rowid after insert
    """
    def post_exec(self):
        # After an INSERT with no ids recorded, recover the generated SERIAL
        # value from sqlerrd[1]; otherwise apply a compiled-in OFFSET (set by
        # InfoCompiler.visit_select) by skipping rows on the cursor.
        if getattr(self.compiled, "isinsert", False) and self.last_inserted_ids() is None:
            self._last_inserted_ids = [self.cursor.sqlerrd[1],]
        elif hasattr( self.compiled , 'offset' ):
            self.cursor.offset( self.compiled.offset )
        super(InfoExecutionContext, self).post_exec()

    def create_cursor( self ):
        # informix_cursor is presumably a project-local wrapper defined
        # elsewhere in this module -- NOTE(review): confirm, it is not
        # visible in this chunk.
        return informix_cursor( self.connection.connection )
class InfoDialect(default.DefaultDialect):
    """SQLAlchemy dialect for Informix via the informixdb DBAPI.

    Targets Informix 7.31-era servers (qmark paramstyle, 18-character
    identifier limit, no named constraints).
    """
    default_paramstyle = 'qmark'
    # for informix 7.31
    max_identifier_length = 18

    def __init__(self, use_ansi=True,**kwargs):
        self.use_ansi = use_ansi
        default.DefaultDialect.__init__(self, **kwargs)

    def dbapi(cls):
        # Imported lazily so the module can be imported without the driver.
        import informixdb
        return informixdb
    dbapi = classmethod(dbapi)

    def is_disconnect(self, e):
        # Heuristic: match driver error text to detect a dropped connection.
        if isinstance(e, self.dbapi.OperationalError):
            return 'closed the connection' in str(e) or 'connection not open' in str(e)
        else:
            return False

    def do_begin(self , connect ):
        # Wait on locks rather than erroring out immediately.
        cu = connect.cursor()
        cu.execute( 'SET LOCK MODE TO WAIT' )
        #cu.execute( 'SET ISOLATION TO REPEATABLE READ' )

    def type_descriptor(self, typeobj):
        return sqltypes.adapt_type(typeobj, colspecs)

    def create_connect_args(self, url):
        # DSN is "database@host" when a host is given, else just the database.
        if url.host:
            dsn = '%s@%s' % ( url.database , url.host )
        else:
            dsn = url.database
        if url.username:
            opt = { 'user':url.username , 'password': url.password }
        else:
            opt = {}
        return ([dsn,], opt )

    def create_execution_context(self , *args, **kwargs):
        return InfoExecutionContext(self, *args, **kwargs)

    def oid_column_name(self,column):
        return "rowid"

    def table_names(self, connection, schema):
        # NOTE(review): `schema` is ignored; returns every table on the server.
        s = "select tabname from systables"
        return [row[0] for row in connection.execute(s)]

    def has_table(self, connection, table_name,schema=None):
        cursor = connection.execute("""select tabname from systables where tabname=?""", table_name.lower() )
        return bool( cursor.fetchone() is not None )

    def reflecttable(self, connection, table, include_columns):
        # Resolve the owning user first: Informix permits same-named tables
        # under different owners, so the owner disambiguates the catalog query.
        c = connection.execute ("select distinct OWNER from systables where tabname=?", table.name.lower() )
        rows = c.fetchall()
        if not rows :
            raise exceptions.NoSuchTableError(table.name)
        else:
            if table.owner is not None:
                if table.owner.lower() in [r[0] for r in rows]:
                    owner = table.owner.lower()
                else:
                    raise exceptions.AssertionError("Specified owner %s does not own table %s"%(table.owner, table.name))
            else:
                if len(rows)==1:
                    owner = rows[0][0]
                else:
                    raise exceptions.AssertionError("There are multiple tables with name %s in the schema, you must specifie owner"%table.name)
        # Columns: syscolumns joined to systables, with an outer join to
        # sysdefaults to pick up per-column default values.
        c = connection.execute ("""select colname , coltype , collength , t3.default , t1.colno from syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3
            where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=?
            and t3.tabid = t2.tabid and t3.colno = t1.colno
            order by t1.colno""", table.name.lower(), owner )
        rows = c.fetchall()
        if not rows:
            raise exceptions.NoSuchTableError(table.name)
        for name , colattr , collength , default , colno in rows:
            name = name.lower()
            if include_columns and name not in include_columns:
                continue
            # in 7.31, coltype = 0x000: low byte is the column type, the
            # next bit is the NOT NULL flag (1 = not null, 0 = null).
            nullable , coltype = divmod( colattr , 256 )
            if coltype not in ( 0 , 13 ) and default:
                # Non-character defaults arrive as "<kind> <value>"; keep the value.
                default = default.split()[-1]
            if coltype == 0 or coltype == 13: # char , varchar
                coltype = ischema_names.get(coltype, InfoString)(collength)
                if default:
                    default = "'%s'" % default
            elif coltype == 5: # decimal
                # collength packs precision in the high byte, scale in the low;
                # scale 255 means "floating" -- treated as 0 here.
                precision , scale = ( collength & 0xFF00 ) >> 8 , collength & 0xFF
                if scale == 255:
                    scale = 0
                coltype = InfoNumeric(precision, scale)
            else:
                try:
                    coltype = ischema_names[coltype]
                except KeyError:
                    util.warn("Did not recognize type '%s' of column '%s'" %
                              (coltype, name))
                    coltype = sqltypes.NULLTYPE
            colargs = []
            if default is not None:
                colargs.append(schema.PassiveDefault(sql.text(default)))
            table.append_column(schema.Column(name, coltype, nullable = (nullable == 0), *colargs))
        # FK: walk sysconstraints/sysreferences to find referring and
        # referred columns, grouping multi-column FKs by constraint name.
        c = connection.execute("""select t1.constrname as cons_name , t1.constrtype as cons_type ,
            t4.colname as local_column , t7.tabname as remote_table ,
            t6.colname as remote_column
            from sysconstraints as t1 , systables as t2 ,
            sysindexes as t3 , syscolumns as t4 ,
            sysreferences as t5 , syscolumns as t6 , systables as t7 ,
            sysconstraints as t8 , sysindexes as t9
            where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'R'
            and t3.tabid = t2.tabid and t3.idxname = t1.idxname
            and t4.tabid = t2.tabid and t4.colno = t3.part1
            and t5.constrid = t1.constrid and t8.constrid = t5.primary
            and t6.tabid = t5.ptabid and t6.colno = t9.part1 and t9.idxname = t8.idxname
            and t7.tabid = t5.ptabid""", table.name.lower(), owner )
        rows = c.fetchall()
        fks = {}
        for cons_name, cons_type, local_column, remote_table, remote_column in rows:
            try:
                fk = fks[cons_name]
            except KeyError:
                fk = ([], [])
                fks[cons_name] = fk
            refspec = ".".join([remote_table, remote_column])
            # Autoload the referenced table so the FK target exists in metadata.
            schema.Table(remote_table, table.metadata, autoload=True, autoload_with=connection)
            if local_column not in fk[0]:
                fk[0].append(local_column)
            if refspec not in fk[1]:
                fk[1].append(refspec)
        # NOTE(review): dict.iteritems() is Python 2 only.
        for name, value in fks.iteritems():
            table.append_constraint(schema.ForeignKeyConstraint(value[0], value[1] , None ))
        # PK: primary-key columns come from the constraint's backing index.
        c = connection.execute("""select t1.constrname as cons_name , t1.constrtype as cons_type ,
            t4.colname as local_column
            from sysconstraints as t1 , systables as t2 ,
            sysindexes as t3 , syscolumns as t4
            where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? and t1.constrtype = 'P'
            and t3.tabid = t2.tabid and t3.idxname = t1.idxname
            and t4.tabid = t2.tabid and t4.colno = t3.part1""", table.name.lower(), owner )
        rows = c.fetchall()
        for cons_name, cons_type, local_column in rows:
            table.primary_key.add( table.c[local_column] )
class InfoCompiler(compiler.DefaultCompiler):
    """Statement compiler for the Informix dialect.

    Emulates LIMIT/OFFSET via Informix's FIRST clause plus cursor-side row
    skipping, and injects ORDER BY columns into the select list (Informix
    requires ordered-by columns to be selected).

    NOTE(review): the original docstring referred to "Oracle databases" --
    apparently copy-pasted from the Oracle dialect; this class clearly
    targets Informix.
    """
    def __init__(self, *args, **kwargs):
        # Limit/offset of the statement being compiled; the offset is read
        # back by InfoExecutionContext.post_exec() via `self.compiled.offset`.
        self.limit = 0
        self.offset = 0
        compiler.DefaultCompiler.__init__( self , *args, **kwargs )

    def default_from(self):
        # Informix requires a FROM clause; select constants from a 1-row scan.
        return " from systables where tabname = 'systables' "

    def get_select_precolumns( self , select ):
        s = select._distinct and "DISTINCT " or ""
        # only has limit
        if select._limit:
            off = select._offset or 0
            # FIRST fetches limit+offset rows; the leading `off` rows are
            # skipped afterwards on the cursor (see InfoExecutionContext).
            s += " FIRST %s " % ( select._limit + off )
        else:
            s += ""
        return s

    def visit_select(self, select):
        if select._offset:
            self.offset = select._offset
            self.limit = select._limit or 0
        # the column in order by clause must in select too
        def __label( c ):
            try:
                return c._label.lower()
            except:
                # NOTE(review): bare except also swallows programming errors.
                return ''
        # TODO: dont modify the original select, generate a new one
        a = [ __label(c) for c in select._raw_columns ]
        for c in select._order_by_clause.clauses:
            if ( __label(c) not in a ) and getattr( c , 'name' , '' ) != 'oid':
                select.append_column( c )
        return compiler.DefaultCompiler.visit_select(self, select)

    def limit_clause(self, select):
        # Limit is emitted as FIRST in get_select_precolumns, not as a suffix.
        return ""

    def visit_function( self , func ):
        # Map ANSI date/time functions onto their Informix spellings.
        if func.name.lower() == 'current_date':
            return "today"
        elif func.name.lower() == 'current_time':
            return "CURRENT HOUR TO SECOND"
        elif func.name.lower() in ( 'current_timestamp' , 'now' ):
            return "CURRENT YEAR TO SECOND"
        else:
            return compiler.DefaultCompiler.visit_function( self , func )

    def visit_clauselist(self, list):
        try:
            # Drop the pseudo 'oid' column; clauses lacking .name raise here,
            # in which case the list is used unfiltered.
            li = [ c for c in list.clauses if c.name != 'oid' ]
        except:
            li = [ c for c in list.clauses ]
        return ', '.join([s for s in [self.process(c) for c in li] if s is not None])
class InfoSchemaGenerator(compiler.SchemaGenerator):
    """DDL generator with Informix 7.31 workarounds.

    Uses SERIAL for the first autoincrement integer primary key, and strips
    constraint names (7.31 does not support naming them inline).
    """
    def get_column_specification(self, column, first_pk=False):
        colspec = self.preparer.format_column(column)
        # The first integer, non-FK, autoincrement primary key becomes SERIAL;
        # `has_serial` latches so only one SERIAL column is emitted per table.
        if column.primary_key and len(column.foreign_keys)==0 and column.autoincrement and \
           isinstance(column.type, sqltypes.Integer) and not getattr( self , 'has_serial' , False ) and first_pk:
            colspec += " SERIAL"
            self.has_serial = True
        else:
            colspec += " " + column.type.dialect_impl(self.dialect, _for_ddl=column).get_col_spec()
        default = self.get_column_default_string(column)
        if default is not None:
            colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def post_create_table(self, table):
        # Reset the SERIAL latch so the next table may define its own SERIAL.
        if hasattr( self , 'has_serial' ):
            del self.has_serial
        return ''

    def visit_primary_key_constraint(self, constraint):
        # for informix 7.31 not support constraint name
        name = constraint.name
        constraint.name = None
        super(InfoSchemaGenerator, self).visit_primary_key_constraint(constraint)
        constraint.name = name

    def visit_unique_constraint(self, constraint):
        # for informix 7.31 not support constraint name
        name = constraint.name
        constraint.name = None
        super(InfoSchemaGenerator, self).visit_unique_constraint(constraint)
        constraint.name = name

    def visit_foreign_key_constraint( self , constraint ):
        # Named FKs are deferred to ALTER TABLE (use_alter); unnamed ones inline.
        if constraint.name is not None:
            constraint.use_alter = True
        else:
            super( InfoSchemaGenerator , self ).visit_foreign_key_constraint( constraint )

    def define_foreign_key(self, constraint):
        # for informix 7.31 not support constraint name
        if constraint.use_alter:
            name = constraint.name
            constraint.name = None
            self.append( "CONSTRAINT " )
            super(InfoSchemaGenerator, self).define_foreign_key(constraint)
            constraint.name = name
            if name is not None:
                # Informix places the CONSTRAINT name after the FK definition.
                self.append( " CONSTRAINT " + name )
        else:
            super(InfoSchemaGenerator, self).define_foreign_key(constraint)

    def visit_index(self, index):
        # Skip single-column indexes on FK columns -- presumably because the
        # FK already provides an index. NOTE(review): confirm this assumption.
        if len( index.columns ) == 1 and index.columns[0].foreign_key:
            return
        super(InfoSchemaGenerator, self).visit_index(index)
class InfoIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier preparer that uses ' as the quote char and never quotes."""

    def __init__(self, dialect):
        compiler.IdentifierPreparer.__init__(self, dialect, initial_quote="'")

    def _requires_quotes(self, value):
        # Identifiers are never quoted by this dialect.
        return False
class InfoSchemaDropper(compiler.SchemaDropper):
    """Schema dropper that only drops foreign keys that have a name."""

    def drop_foreignkey(self, constraint):
        # Unnamed FK constraints cannot be addressed in DROP DDL; skip them.
        if constraint.name is None:
            return
        super(InfoSchemaDropper, self).drop_foreignkey(constraint)
# Module-level hooks consumed by SQLAlchemy's dialect loader.
dialect = InfoDialect
# NOTE(review): SingletonThreadPool keeps one connection per thread, which
# suggests the driver's connections are not shareable across threads -- confirm.
poolclass = pool.SingletonThreadPool
dialect.statement_compiler = InfoCompiler
dialect.schemagenerator = InfoSchemaGenerator
dialect.schemadropper = InfoSchemaDropper
dialect.preparer = InfoIdentifierPreparer
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.utils.data
import numpy as np
import random
import argparse
import os
import pickle
import importlib
import time
from CustomSummaryWriter import *
from models import MLP1, Lin
from utils import *
if __name__ == '__main__':
    # Training entry point: parse CLI arguments, seed RNGs, and load the data.
    start_time = time.time()
    parser = argparse.ArgumentParser()
    parser.add_argument('--no_cuda', default=False, action='store_true',
                        help='disables CUDA')
    parser.add_argument('--no_BN', default=False, action='store_true',
                        help='disables BatchNorm')
    parser.add_argument('--no_ES', default=False, action='store_true',
                        help='disable Early Stopping')
    parser.add_argument('--make_linear', default=False, action='store_true',
                        help='do not apply activation function')
    parser.add_argument('--NTK_style', default=False, action='store_true',
                        help='use NTK-style model parametrization')
    parser.add_argument('--max_epochs', type=int, default=1,
                        help='max number of epochs (default: 1')  # NOTE(review): help text is missing its closing ")"
    parser.add_argument('--dataset', type=str, default='MNIST', help='dataset')
    parser.add_argument('--dataset_dir', type=str, default='./data', help='dataset directory')
    parser.add_argument('--normalize_pixelwise', default=False, action='store_true',
                        help='do pixelwise data normalization')
    parser.add_argument('--model_type', type=str, default='MLP1', choices=['Lin', 'MLP1'],
                        help='model type (architecture)')
    parser.add_argument('--init_distrib', type=str, default='uniform',
                        choices=['uniform', 'normal'],
                        help='probability distribution for parameter initialization; gets overwritten if NTK-style is chosen')
    parser.add_argument('--no_bias', default=False, action='store_true',
                        help='no bias in the layers')
    parser.add_argument('--base_width', type=int, default=56,
                        help='number of units in the hidden layer in the baseline model (default: 56)')
    parser.add_argument('--width', type=int, default=56,
                        help='number of units in the hidden layer in the given (sparse) model (default: 56)')
    parser.add_argument('--nwtf_cl', type=int, default=0,
                        help='number of weights to freeze in cl layer')
    parser.add_argument('--nwtf_fc', type=int, default=0,
                        help='number of weights to freeze in fc layer')
    parser.add_argument('--lr', type=float, default=0.1, help='learning rate')
    parser.add_argument('--mbs', type=int, default=100, help='mini-batch size')
    parser.add_argument('--train_subset_size', type=int, default=0,
                        help='number of samples if training on a subset of the original train set')
    parser.add_argument('--seed', type=int, default=888, help='random seed')
    parser.add_argument('--output_dir', default='default_dir', type=str,
                        help='folder name for saving experiment outputs')
    args= parser.parse_args()
    #print('Experiment arguments summary:')
    #print(vars(args))
    # ==== device configuration
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')
    # Seed torch and numpy for reproducibility.
    # NOTE(review): the `random` module is imported but never seeded here.
    seed=args.seed
    torch.manual_seed(seed)
    np.random.seed(seed)
    # ========== training and dataset hyper-params ==========
    # =======================================================
    dataset = args.dataset
    dataset_dir = args.dataset_dir
    normalize_pixelwise= args.normalize_pixelwise
    train_subset_size= args.train_subset_size
    no_ES=args.no_ES
    learning_rate = args.lr
    model_type=args.model_type
    ckpt_every= 25
    max_epochs= args.max_epochs # cut-off value for train loop
    # ========== load dataset ==========
    # ==================================
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    if train_subset_size>0: # training on a subset
        # Load the entire subset as one batch; it is re-wrapped into a
        # shuffling DataLoader further below before training starts.
        train_batch_size= train_subset_size
    else: # training on original whole train set
        train_batch_size= args.mbs
    test_batch_size = 1000
    train_loader, test_loader, input_size, num_classes =\
        load_dataset(dataset, dataset_dir, train_batch_size, test_batch_size, kwargs)
    batch_size= args.mbs
    # ========== model hyper-params ==========
    # ========================================
    do_batch_norm = not args.no_BN
    init_distrib= args.init_distrib
    make_linear= args.make_linear
    add_bias= not args.no_bias
    NTK_style= args.NTK_style
    NTK_tag='_NTK_style' if NTK_style else ''
    base_width= args.base_width
    width= args.width
    # Keys of the two trainable layers ('fc' hidden, 'cl' classifier).
    lkeys= ['fc', 'cl']
    # sparsity
    nwtf_fc=args.nwtf_fc
    nwtf_cl=args.nwtf_cl
    # Connectivity ratio relative to the baseline width; <1 means sparse.
    ctvt_total=(base_width/width) if width>0 else 1
    sparse= ctvt_total<1
    output_dir=args.output_dir
    writer= CustomSummaryWriter(output_dir)
    # ========== set up model ==========
    # ==================================
    if model_type=='Lin':
        model = Lin(input_size, num_classes, init_distrib, add_bias).to(device)
    else:
        model = MLP1(input_size, num_classes, width, base_width, init_distrib,
                     nwtf_fc, nwtf_cl,
                     do_batch_norm, make_linear, NTK_style, add_bias).to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    criterion = nn.CrossEntropyLoss()
    if sparse:
        # ==== get smask from model ====
        # Boolean masks of the frozen (zeroed) weights per layer; used in the
        # training loop to zero their gradients after backward().
        smask={}
        for lkey in lkeys:
            smask[lkey]= model._modules['layers'][lkey].weight==0
    # ======== save initial model checkpoint
    start_epoch=0
    state= {'epoch': start_epoch, 'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(), 'args': args}
    save_name= f'{output_dir}_init'
    sshproc = save_checkpoint(state, save_name)
    # ============= train ==============
    # ==================================
    train_loss=1 #init
    epoch=start_epoch+1 #init
    best_test_acc=0 #init
    patience= 20
    test_acc_tracker=list(np.zeros(2*patience)) # keep a list of test acc over past some eps
    model.train()
    if train_subset_size>0:
        # Materialize the single-batch subset and re-wrap it in a shuffling
        # mini-batch loader for the actual training loop.
        images_, labels_ = next(iter(train_loader))
        new_train_set = torch.utils.data.TensorDataset(images_, labels_)
        train_loader = torch.utils.data.DataLoader(new_train_set, batch_size=batch_size, shuffle=True)
    # NOTE(review): with epoch starting at 1 and a strict `<`, this runs
    # max_epochs-1 epochs -- confirm that is the intended counting.
    while epoch<max_epochs:
        loss_sum, total, correct = 0, 0, 0
        for i, (images, labels) in enumerate(train_loader):
            images= images.reshape(-1, input_size).to(device)
            if normalize_pixelwise: images= pixelwise_normalization(images)
            labels = labels.to(device)
            # ==== forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            # Accumulate sample-weighted loss so the epoch average is correct
            # even when the last mini-batch is smaller.
            loss_sum += len(images)*loss.item()
            _, predicted = torch.max(outputs.data, 1)
            correct += (predicted==labels).cpu().sum().item()
            total += len(images)
            # ==== backward and optimize
            optimizer.zero_grad()
            loss.backward()
            if sparse: # apply smask to gradients
                for lkey in lkeys:
                    if smask[lkey] is not None: # smask is None if layer is not sparsified
                        # Zero the gradients of frozen weights so SGD leaves them at 0.
                        layer= model._modules['layers'][lkey]
                        layer.weight.grad[ smask[lkey] ] = 0
            optimizer.step()
        # === epoch completed ===
        train_loss = loss_sum/total
        train_acc = correct/total
        # ======== save model checkpoint
        if epoch%ckpt_every==0:
            state= {'epoch': epoch, 'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(), 'args': args}
            save_name = f'{output_dir}_epoch_{epoch}'
            sshproc = save_checkpoint(state, save_name)
        # ======== evaluate ========
        test_acc, test_loss = evaluate(model, test_loader, normalize_pixelwise, input_size, device, criterion)
        # ======== write to TB and stats file
        # (saves both to tb event files and a separate dict called "stats") every epoch
        writer.add_scalars('acc', {'test': test_acc, 'train': train_acc},
                           global_step=epoch, walltime=time.time()-start_time )
        writer.add_scalars('loss', {'test': test_loss, 'train': train_loss},
                           global_step=epoch, walltime=time.time()-start_time )
        # ======== Early Stopping routine
        # Compare the mean test accuracy of the last `patience` epochs against
        # the `patience` epochs before them; stop when it degrades.
        if not no_ES:
            test_acc_tracker.append(test_acc)
            _=test_acc_tracker.pop(0)
            prev_avg_acc=np.mean(test_acc_tracker[:patience])
            curr_avg_acc=np.mean(test_acc_tracker[patience:])
            if curr_avg_acc<prev_avg_acc and epoch>(2*patience):
                print(f'>>> Early Stopping: epoch {epoch}')
                print(f'* current avg: {curr_avg_acc}')
                print(f'* previous avg: {prev_avg_acc}')
                print(f'(no improvement over past {patience} epochs)')
                break
        # ==== remember best test acc and save checkpoint
        is_best= test_acc > best_test_acc
        best_test_acc= max(test_acc, best_test_acc)
        if is_best:
            state= {'epoch': epoch, 'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(), 'args': args}
            save_name= f'{output_dir}_best'
            sshproc= save_checkpoint(state, save_name)
        epoch+=1
    writer.close() # close current event file
    # ========== save final model checkpoint =============
    # ====================================================
    state= {'epoch': epoch, 'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(), 'args': args}
    save_name = f'{output_dir}_final'
    print(f'Saving checkpoint as {save_name}')
    sshproc= save_checkpoint(state, save_name)
    # Wait only for the final checkpoint transfer; earlier save_checkpoint
    # processes are apparently left to finish in the background.
    sshproc.wait()
| |
import warnings
import numpy as np
from numpy import asarray_chkfinite, single
from misc import LinAlgError, _datacopied
from lapack import get_lapack_funcs
__all__ = ['qz']
# numpy dtype.char codes considered double precision; used to decide whether
# a real input upcasts to 'D' (complex128) or 'F' (complex64) in qz().
_double_precision = ['i','l','d']
def _select_function(sort, typ):
if typ in ['F','D']:
if callable(sort):
#assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y: (np.real(x/y) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y: (np.real(x/y) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y: (abs(x/y) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y: (abs(x/y) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
elif typ in ['f','d']:
if callable(sort):
#assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
else: # to avoid an error later
raise ValueError("dtype %s not understood" % typ)
return sfunction
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
       overwrite_b=False):
    """
    QZ decompostion for generalized eigenvalues of a pair of matrices.

    The QZ, or generalized Schur, decomposition for a pair of N x N
    nonsymmetric matrices (A,B) is::

        (A,B) = (Q*AA*Z', Q*BB*Z')

    where AA, BB is in generalized Schur form if BB is upper-triangular
    with non-negative diagonal and AA is upper-triangular, or for real QZ
    decomposition (``output='real'``) block upper triangular with 1x1
    and 2x2 blocks. In this case, the 1x1 blocks correspond to real
    generalized eigenvalues and 2x2 blocks are 'standardized' by making
    the corresponding elements of BB have the form::

        [ a 0 ]
        [ 0 b ]

    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
    conjugate pair of generalized eigenvalues. If (``output='complex'``) or
    A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
    Q and Z are unitary matrices.

    Parameters
    ----------
    A : array_like, shape (N,N)
        2-D array to decompose.
    B : array_like, shape (N,N)
        2-D array to decompose.
    output : {'real','complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        NOTE: THIS INPUT IS DISABLED FOR NOW, IT DOESN'T WORK WELL ON WINDOWS.
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue x = (alphar + alphai*1j)/beta.
        For complex matrix pairs or output='complex', the sort function
        takes two complex arguments (alpha, beta). The eigenvalue
        x = (alpha/beta).
        Alternatively, string parameters may be used:
            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
        Defaults to None (no sorting).

    Returns
    -------
    AA : ndarray, shape (N,N)
        Generalized Schur form of A.
    BB : ndarray, shape (N,N)
        Generalized Schur form of B.
    Q : ndarray, shape (N,N)
        The left Schur vectors.
    Z : ndarray, shape (N,N)
        The right Schur vectors.
    sdim : int, optional
        If sorting was requested, a fifth return value will contain the
        number of eigenvalues for which the sort condition was True.

    Notes
    -----
    Q is transposed versus the equivalent function in Matlab.

    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import linalg
    >>> np.random.seed(1234)
    >>> A = np.arange(9).reshape((3, 3))
    >>> B = np.random.randn(3, 3)
    >>> AA, BB, Q, Z = linalg.qz(A, B)
    >>> AA
    array([[-13.40928183,  -4.62471562,   1.09215523],
           [  0.        ,   0.        ,   1.22805978],
           [  0.        ,   0.        ,   0.31973817]])
    >>> BB
    array([[ 0.33362547, -1.37393632,  0.02179805],
           [ 0.        ,  1.68144922,  0.74683866],
           [ 0.        ,  0.        ,  0.9258294 ]])
    >>> Q
    array([[ 0.14134727, -0.97562773,  0.16784365],
           [ 0.49835904, -0.07636948, -0.86360059],
           [ 0.85537081,  0.20571399,  0.47541828]])
    >>> Z
    array([[-0.24900855, -0.51772687,  0.81850696],
           [-0.79813178,  0.58842606,  0.12938478],
           [-0.54861681, -0.6210585 , -0.55973739]])

    """
    if sort is not None:
        # Disabled due to segfaults on win32, see ticket 1717.
        raise ValueError("The 'sort' input of qz() has to be None (will "
                         " change when this functionality is made more robust).")
    if output not in ['real', 'complex', 'r', 'c']:
        raise ValueError("argument must be 'real', or 'complex'")
    a1 = asarray_chkfinite(A)
    b1 = asarray_chkfinite(B)
    a_m, a_n = a1.shape
    b_m, b_n = b1.shape
    # Validate shapes with an explicit check: the original used `assert`,
    # which is silently stripped under `python -O`.
    if not (a_m == a_n == b_m == b_n):
        raise ValueError("Array dimensions must be square and agree")
    # Upcast each input to the matching complex type for a complex QZ,
    # choosing single vs double precision from the input dtype.
    typa = a1.dtype.char
    if output in ['complex', 'c'] and typa not in ['F','D']:
        if typa in _double_precision:
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if output in ['complex', 'c'] and typb not in ['F','D']:
        if typb in _double_precision:
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'
    # If asarray_chkfinite already made a copy, LAPACK may overwrite freely.
    overwrite_a = overwrite_a or (_datacopied(a1, A))
    overwrite_b = overwrite_b or (_datacopied(b1, B))
    gges, = get_lapack_funcs(('gges',), (a1, b1))
    if lwork is None or lwork == -1:
        # Workspace query: LAPACK reports the optimal lwork in work[0].
        result = gges(lambda x: None, a1, b1, lwork=-1)
        # Fixed: `.astype(np.int)` -- np.int is deprecated/removed in modern
        # NumPy; converting the scalar with builtin int() is equivalent.
        lwork = int(result[-2][0].real)
    if sort is None:
        sort_t = 0
        sfunction = lambda x: None
    else:
        sort_t = 1
        sfunction = _select_function(sort, typa)
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
                  overwrite_b=overwrite_b, sort_t=sort_t)
    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of gges" % -info)
    elif info > 0 and info <= a_n:
        # Fixed: the original wrote `... % info-1`, which parses as
        # ("..." % info) - 1 because % binds tighter than binary minus,
        # raising TypeError whenever this warning path was reached.
        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
                      "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be correct "
                      "for J=%d,...,N" % (info - 1), UserWarning)
    elif info == a_n + 1:
        raise LinAlgError("Something other than QZ iteration failed")
    elif info == a_n + 2:
        raise LinAlgError("After reordering, roundoff changed values of some "
                          "complex eigenvalues so that leading eigenvalues in the "
                          "Generalized Schur form no longer satisfy sort=True. "
                          "This could also be caused due to scaling.")
    elif info == a_n + 3:
        raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")
    # gges result layout:
    #   real:    AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
    #   complex: AA, BB, sdim, alpha,         beta, vsl, vsr, work, info
    if sort_t == 0:
        return result[0], result[1], result[-4], result[-3]
    else:
        return result[0], result[1], result[-4], result[-3], result[2]
| |
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""File wrangling."""
import fnmatch
import ntpath
import os
import os.path
import posixpath
import re
import sys
from coverage import env
from coverage.backward import unicode_class
from coverage.misc import contract, CoverageException, join_regex, isolate_module
os = isolate_module(os)
def set_relative_directory():
    """Set the directory that `relative_filename` will be relative to."""
    global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
    # Absolute, case-normalized current directory with a trailing separator,
    # so prefix tests in relative_filename() match whole path components.
    RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
    # Previously canonicalized results may be stale now; reset the memo.
    CANONICAL_FILENAME_CACHE = {}
def relative_directory():
    """Return the directory that `relative_filename` is relative to.

    This is the module-level RELATIVE_DIR set by `set_relative_directory`.
    """
    return RELATIVE_DIR
@contract(returns='unicode')
def relative_filename(filename):
    """Return the relative form of `filename`.

    The file name will be relative to the current directory when the
    `set_relative_directory` was called.
    """
    normalized = os.path.normcase(filename)
    if normalized.startswith(RELATIVE_DIR):
        # Strip the base-directory prefix while keeping the original casing.
        filename = filename[len(RELATIVE_DIR):]
    return unicode_filename(filename)
@contract(returns='unicode')
def canonical_filename(filename):
    """Return a canonical file name for `filename`.

    An absolute path with no redundant components and normalized case.

    Results are memoized in CANONICAL_FILENAME_CACHE under the name as
    passed in.  (The previous version rebound `filename` while resolving
    relative names and then cached under the *resolved* name, so relative
    inputs missed the cache and re-did the sys.path search on every call.)
    """
    if filename not in CANONICAL_FILENAME_CACHE:
        candidate = filename
        # Resolve a relative name against the cwd and the sys.path entries.
        if not os.path.isabs(candidate):
            for path in [os.curdir] + sys.path:
                if path is None:
                    continue
                attempt = os.path.join(path, candidate)
                if os.path.exists(attempt):
                    candidate = attempt
                    break
        # Key by the original argument so later identical calls hit the cache.
        CANONICAL_FILENAME_CACHE[filename] = abs_file(candidate)
    return CANONICAL_FILENAME_CACHE[filename]
def flat_rootname(filename):
    """A base for a flat file name to correspond to this file.

    Useful for writing files about the code where you want all the files in
    the same directory, but need to differentiate same-named files from
    different directories.

    For example, the file a/b/c.py will return 'a_b_c_py'
    """
    # Drop any drive specifier, then flatten separators, dots and colons.
    _, tail = ntpath.splitdrive(filename)
    return re.sub(r"[\\/.:]", "_", tail)
if env.WINDOWS:
    # Memo of input path -> case-corrected path, and of directory -> listing,
    # since actual_path() recurses over every path component.
    _ACTUAL_PATH_CACHE = {}
    _ACTUAL_PATH_LIST_CACHE = {}

    def actual_path(path):
        """Get the actual path of `path`, including the correct case."""
        if env.PY2 and isinstance(path, unicode_class):
            path = path.encode(sys.getfilesystemencoding())
        if path in _ACTUAL_PATH_CACHE:
            return _ACTUAL_PATH_CACHE[path]
        head, tail = os.path.split(path)
        if not tail:
            # This means head is the drive spec: normalize it.
            actpath = head.upper()
        elif not head:
            actpath = tail
        else:
            # Fix the directory's case recursively, then look for the entry
            # in its listing whose normalized name matches `tail`.
            head = actual_path(head)
            if head in _ACTUAL_PATH_LIST_CACHE:
                files = _ACTUAL_PATH_LIST_CACHE[head]
            else:
                try:
                    files = os.listdir(head)
                except OSError:
                    # Unreadable or missing directory: no correction possible.
                    files = []
                _ACTUAL_PATH_LIST_CACHE[head] = files
            normtail = os.path.normcase(tail)
            for f in files:
                if os.path.normcase(f) == normtail:
                    tail = f
                    break
            actpath = os.path.join(head, tail)
        _ACTUAL_PATH_CACHE[path] = actpath
        return actpath
else:
    def actual_path(filename):
        """The actual path for non-Windows platforms."""
        return filename
if env.PY2:
    @contract(returns='unicode')
    def unicode_filename(filename):
        """Return a Unicode version of `filename`."""
        if isinstance(filename, str):
            # Decode byte strings with the filesystem encoding, replacing
            # undecodable bytes rather than raising.
            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
            filename = filename.decode(encoding, "replace")
        return filename
else:
    @contract(filename='unicode', returns='unicode')
    def unicode_filename(filename):
        """Return a Unicode version of `filename`."""
        # On Python 3 all file names are already text.
        return filename
@contract(returns='unicode')
def abs_file(filename):
    """Return the absolute normalized form of `filename`."""
    # Expand ~user and $VARS, resolve symlinks, then absolutize.
    path = os.path.expanduser(filename)
    path = os.path.expandvars(path)
    path = os.path.realpath(path)
    path = os.path.abspath(path)
    # Correct the on-disk case (Windows) and ensure a text result.
    path = actual_path(path)
    return unicode_filename(path)
# Module-level state: the base directory used by relative_filename() and the
# canonical_filename() memo; both are (re)initialized by set_relative_directory().
RELATIVE_DIR = None
CANONICAL_FILENAME_CACHE = None
set_relative_directory()
def isabs_anywhere(filename):
    """Is `filename` an absolute path on any OS?"""
    # Absolute by either POSIX or Windows rules counts.
    return posixpath.isabs(filename) or ntpath.isabs(filename)
def prep_patterns(patterns):
    """Prepare the file patterns for use in a `FnmatchMatcher`.

    If a pattern starts with a wildcard, it is used as a pattern
    as-is.  If it does not start with a wildcard, then it is made
    absolute with the current directory.

    If `patterns` is None, an empty list is returned.
    """
    return [
        p if p.startswith(("*", "?")) else abs_file(p)
        for p in (patterns or [])
    ]
class TreeMatcher(object):
    """Matches file paths that live inside any of a set of directory trees."""

    def __init__(self, directories):
        self.dirs = list(directories)

    def __repr__(self):
        return "<TreeMatcher %r>" % self.dirs

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.dirs

    def match(self, fpath):
        """Does `fpath` indicate a file in one of our trees?"""
        for directory in self.dirs:
            if not fpath.startswith(directory):
                continue
            if fpath == directory:
                # Exact hit on the tree root itself.
                return True
            # Require a separator right after the prefix so that "/a/bc"
            # does not count as inside the tree rooted at "/a/b".
            if fpath[len(directory)] == os.sep:
                return True
        return False
class ModuleMatcher(object):
    """Matches dotted module names that live inside any of a set of packages."""

    def __init__(self, module_names):
        self.modules = list(module_names)

    def __repr__(self):
        return "<ModuleMatcher %r>" % (self.modules)

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.modules

    def match(self, module_name):
        """Does `module_name` indicate a module in one of our packages?"""
        if not module_name:
            return False
        for package in self.modules:
            if not module_name.startswith(package):
                continue
            if module_name == package:
                return True
            # Require a dot right after the prefix so that "pkgx" does not
            # count as inside package "pkg".
            if module_name[len(package)] == '.':
                return True
        return False
class FnmatchMatcher(object):
    """A matcher for files by file name pattern."""

    def __init__(self, pats):
        self.pats = list(pats)
        # fnmatch is platform-specific. On Windows, it does the Windows thing
        # of treating / and \ as equivalent. But on other platforms, we need to
        # take care of that ourselves.
        translated = [fnmatch.translate(p) for p in pats]
        translated = [p.replace(r"\/", r"[\\/]") for p in translated]
        if env.WINDOWS:
            # Windows is also case-insensitive. BTW: the regex docs say that
            # flags like (?i) have to be at the beginning, but fnmatch puts
            # them at the end, and having two there seems to work fine.
            translated = [p + "(?i)" for p in translated]
        self.re = re.compile(join_regex(translated))

    def __repr__(self):
        return "<FnmatchMatcher %r>" % self.pats

    def info(self):
        """A list of strings for displaying when dumping state."""
        return self.pats

    def match(self, fpath):
        """Does `fpath` match one of our file name patterns?"""
        return self.re.match(fpath) is not None
def sep(s):
    """Find the path separator used in this string, or os.sep if none."""
    # The first slash or backslash in the string decides; a string with
    # no separator at all falls back to the platform default.
    found = re.search(r"[\\/]", s)
    return found.group(0) if found else os.sep
class PathAliases(object):
    """A collection of aliases for paths.

    When combining data files from remote machines, often the paths to source
    code are different, for example, due to OS differences, or because of
    serialized checkouts on continuous integration machines.

    A `PathAliases` object tracks a list of pattern/result pairs, and can
    map a path through those aliases to produce a unified path.

    """
    def __init__(self):
        # Each entry is (compiled regex, result, pattern_sep, result_sep).
        self.aliases = []

    def add(self, pattern, result):
        """Add the `pattern`/`result` pair to the list of aliases.

        `pattern` is an `fnmatch`-style pattern.  `result` is a simple
        string.  When mapping paths, if a path starts with a match against
        `pattern`, then that match is replaced with `result`.  This models
        isomorphic source trees being rooted at different places on two
        different machines.

        `pattern` can't end with a wildcard component, since that would
        match an entire tree, and not just its root.

        Raises `CoverageException` if `pattern` ends with a wildcard.

        """
        # The pattern can't end with a wildcard component.
        pattern = pattern.rstrip(r"\/")
        if pattern.endswith("*"):
            raise CoverageException("Pattern must not end with wildcards.")
        pattern_sep = sep(pattern)

        # The pattern is meant to match a filepath.  Let's make it absolute
        # unless it already is, or is meant to match any prefix.
        if not pattern.startswith('*') and not isabs_anywhere(pattern):
            pattern = abs_file(pattern)
        pattern += pattern_sep

        # Make a regex from the pattern.  fnmatch always adds a \Z to
        # match the whole string, which we don't want.
        # NOTE(review): assumes fnmatch.translate() on this Python version
        # emits a pattern containing r'\Z(' — confirm when upgrading Python.
        regex_pat = fnmatch.translate(pattern).replace(r'\Z(', '(')

        # We want */a/b.py to match on Windows too, so change slash to match
        # either separator.
        regex_pat = regex_pat.replace(r"\/", r"[\\/]")
        # We want case-insensitive matching, so add that flag.
        regex = re.compile(r"(?i)" + regex_pat)

        # Normalize the result: it must end with a path separator.
        result_sep = sep(result)
        result = result.rstrip(r"\/") + result_sep
        self.aliases.append((regex, result, pattern_sep, result_sep))

    def map(self, path):
        """Map `path` through the aliases.

        `path` is checked against all of the patterns.  The first pattern to
        match is used to replace the root of the path with the result root.
        Only one pattern is ever used.  If no patterns match, `path` is
        returned unchanged.

        The separator style in the result is made to match that of the result
        in the alias.

        Returns the mapped path.  If a mapping has happened, this is a
        canonical path.  If no mapping has happened, it is the original value
        of `path` unchanged.

        """
        for regex, result, pattern_sep, result_sep in self.aliases:
            m = regex.match(path)
            if m:
                # Replace the matched prefix, then normalize the separators
                # of the remainder to the result's style.
                new = path.replace(m.group(0), result)
                if pattern_sep != result_sep:
                    new = new.replace(pattern_sep, result_sep)
                new = canonical_filename(new)
                return new
        return path
def find_python_files(dirname):
    """Yield all of the importable Python files in `dirname`, recursively.

    To be importable, the files have to be in a directory with a app.py,
    except for `dirname` itself, which isn't required to have one.  The
    assumption is that `dirname` was specified directly, so the user knows
    best, but sub-directories are checked for a app.py to be sure we only
    find the importable files.

    """
    # Only names that end in .py/.pyw and contain none of the funny
    # characters that usually mean editor junk.
    py_file_rx = re.compile(r"^[^.#~!$@%^&*()+=,]+\.pyw?$")

    at_root = True
    for dirpath, dirnames, filenames in os.walk(dirname):
        if not at_root and 'app.py' not in filenames:
            # A directory without app.py isn't importable, and neither is
            # anything below it: prune the walk here.
            del dirnames[:]
            continue
        at_root = False
        for fname in filenames:
            if py_file_rx.match(fname):
                yield os.path.join(dirpath, fname)
| |
# -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline, \
guess_decode, guess_decode_from_terminal, terminal_encoding, \
UnclosingTextIOWrapper
from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
from pygments.lexers.special import TextLexer
from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
load_formatter_from_file, get_formatter_for_filename, find_formatter_class
from pygments.formatters.terminal import TerminalFormatter
from pygments.formatters.terminal256 import Terminal256Formatter
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
# Template for the command-line help text; the six %s slots are all filled
# with argv[0] (the program name) by main().
# Fix: the -P example's reST inline literal was unterminated (missing the
# closing ``), which rendered the help text incorrectly.
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
          [-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>]

       %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
       %s -L [<which> ...]
       %s -N <filename>
       %s -H <type> <name>
       %s -h | -V

Highlight the input file and write the result to <outfile>.

If no input file is given, use stdin, if -o is not given, use stdout.

If -s is passed, lexing will be done in "streaming" mode, reading and
highlighting one line at a time.  This will only work properly with
lexers that have no constructs spanning multiple lines!

<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin).  If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).

Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name.  If no output file is given,
the terminal formatter will be used by default.

The additional option -x allows custom lexers and formatters to be
loaded from a .py file relative to the current working directory. For
example, ``-l ./customlexer.py -x``. By default, this option expects a
file with a class named CustomLexer or CustomFormatter; you can also
specify your own class name with a colon (``-l ./lexer.py:MyLexer``).
Users should be very careful not to use this option with untrusted files,
because it will import and run them.

With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.

The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.

With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).

The -O, -P and -F options can be given multiple times.

With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.

The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.

The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.

The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".

The -s option processes lines one at a time until EOF, rather than
waiting to process the entire file.  This only works for stdin, and
is intended for streaming input such as you get from 'tail -f'.
Example usage: "tail -f sql.log | pygmentize -s -l sql"

The -v option prints a detailed traceback on unhandled exceptions,
which is useful for debugging and bug reports.

The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str.strip():
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=', 1)
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
    """Turn "name:opt=val" filter specs into (name, options-dict) pairs."""
    if not f_strs:
        return []
    filters = []
    for f_str in f_strs:
        name, colon, options = f_str.partition(':')
        if colon:
            # Everything after the first ':' is an option string.
            filters.append((name, _parse_options([options])))
        else:
            filters.append((f_str, {}))
    return filters
def _print_help(what, name):
    """Print the docstring of one lexer/formatter/filter.

    Returns 0 on success, 1 if the named object could not be found.
    """
    try:
        if what == 'lexer':
            cls = get_lexer_by_name(name)
            print("Help on the %s lexer:" % cls.name)
        elif what == 'formatter':
            cls = find_formatter_class(name)
            print("Help on the %s formatter:" % cls.name)
        elif what == 'filter':
            cls = find_filter_class(name)
            print("Help on the %s filter:" % name)
        else:
            # Unknown kind: nothing to print, but not an error either.
            return 0
        print(dedent(cls.__doc__))
        return 0
    except (AttributeError, ValueError):
        print("%s not found!" % what, file=sys.stderr)
        return 1
def _print_list(what):
    """List all known objects of one kind: lexer, formatter, filter or style."""
    if what == 'lexer':
        print()
        print("Lexers:")
        print("~~~~~~~")
        rows = sorted(
            (', '.join(names) + ':', fullname,
             '(filenames ' + ', '.join(exts) + ')' if exts else '')
            for fullname, names, exts, _ in get_all_lexers()
        )
        for row in rows:
            print(('* %s\n %s %s') % row)

    elif what == 'formatter':
        print()
        print("Formatters:")
        print("~~~~~~~~~~~")
        rows = sorted(
            (', '.join(cls.aliases) + ':', docstring_headline(cls),
             '(filenames ' + ', '.join(cls.filenames) + ')' if cls.filenames else '')
            for cls in get_all_formatters()
        )
        for row in rows:
            print(('* %s\n %s %s') % row)

    elif what == 'filter':
        print()
        print("Filters:")
        print("~~~~~~~~")
        for name in get_all_filters():
            cls = find_filter_class(name)
            print("* " + name + ':')
            print(" %s" % docstring_headline(cls))

    elif what == 'style':
        print()
        print("Styles:")
        print("~~~~~~~")
        for name in get_all_styles():
            cls = get_style_by_name(name)
            print("* " + name + ':')
            print(" %s" % docstring_headline(cls))
def main_inner(popts, args, usage):
    """Do the real work of the command line, after getopt parsing.

    `popts` is the (option, argument) pair list from getopt, `args` the
    remaining positional arguments, and `usage` the expanded usage text.
    Returns a shell exit code: 0 on success, 1 on errors, 2 on usage errors.
    """
    opts = {}
    O_opts = []
    P_opts = []
    F_opts = []
    # -O/-P/-F may be given several times, so collect them into lists;
    # for every other option only the last occurrence survives in `opts`.
    for opt, arg in popts:
        if opt == '-O':
            O_opts.append(arg)
        elif opt == '-P':
            P_opts.append(arg)
        elif opt == '-F':
            F_opts.append(arg)
        opts[opt] = arg

    if opts.pop('-h', None) is not None:
        print(usage)
        return 0

    if opts.pop('-V', None) is not None:
        print('Pygments version %s, (c) 2006-2020 by Georg Brandl.' % __version__)
        return 0

    # handle ``pygmentize -L``
    L_opt = opts.pop('-L', None)
    if L_opt is not None:
        if opts:
            print(usage, file=sys.stderr)
            return 2

        # print version
        main(['', '-V'])
        if not args:
            args = ['lexer', 'formatter', 'filter', 'style']
        for arg in args:
            # accept plural forms ("styles") as well
            _print_list(arg.rstrip('s'))
        return 0

    # handle ``pygmentize -H``
    H_opt = opts.pop('-H', None)
    if H_opt is not None:
        if opts or len(args) != 2:
            print(usage, file=sys.stderr)
            return 2

        what, name = args  # pylint: disable=unbalanced-tuple-unpacking
        if what not in ('lexer', 'formatter', 'filter'):
            print(usage, file=sys.stderr)
            return 2

        return _print_help(what, name)

    # parse -O options
    parsed_opts = _parse_options(O_opts)
    opts.pop('-O', None)

    # parse -P options
    for p_opt in P_opts:
        try:
            name, value = p_opt.split('=', 1)
        except ValueError:
            # a bare "-P name" acts as a boolean flag
            parsed_opts[p_opt] = True
        else:
            parsed_opts[name] = value
    opts.pop('-P', None)

    # encodings: 'inencoding'/'outencoding' override a plain 'encoding'
    inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
    outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))

    # handle ``pygmentize -N``
    infn = opts.pop('-N', None)
    if infn is not None:
        lexer = find_lexer_class_for_filename(infn)
        if lexer is None:
            lexer = TextLexer

        print(lexer.aliases[0])
        return 0

    # handle ``pygmentize -S``
    S_opt = opts.pop('-S', None)
    a_opt = opts.pop('-a', None)
    if S_opt is not None:
        f_opt = opts.pop('-f', None)
        if not f_opt:
            print(usage, file=sys.stderr)
            return 2
        if opts or args:
            print(usage, file=sys.stderr)
            return 2

        try:
            parsed_opts['style'] = S_opt
            fmter = get_formatter_by_name(f_opt, **parsed_opts)
        except ClassNotFound as err:
            print(err, file=sys.stderr)
            return 1

        print(fmter.get_style_defs(a_opt or ''))
        return 0

    # if no -S is given, -a is not allowed
    if a_opt is not None:
        print(usage, file=sys.stderr)
        return 2

    # parse -F options
    F_opts = _parse_filters(F_opts)
    opts.pop('-F', None)

    allow_custom_lexer_formatter = False
    # -x: allow custom (eXternal) lexers and formatters
    if opts.pop('-x', None) is not None:
        allow_custom_lexer_formatter = True

    # select lexer
    lexer = None

    # given by name?
    lexername = opts.pop('-l', None)
    if lexername:
        # custom lexer, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in lexername:
            try:
                filename = None
                name = None
                if ':' in lexername:
                    filename, name = lexername.rsplit(':', 1)

                    if '.py' in name:
                        # This can happen on Windows: If the lexername is
                        # C:\lexer.py -- return to normal load path in that case
                        name = None

                if filename and name:
                    lexer = load_lexer_from_file(filename, name,
                                                 **parsed_opts)
                else:
                    lexer = load_lexer_from_file(lexername, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                lexer = get_lexer_by_name(lexername, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    # read input code
    code = None

    if args:
        if len(args) > 1:
            print(usage, file=sys.stderr)
            return 2

        if '-s' in opts:
            print('Error: -s option not usable when input file specified',
                  file=sys.stderr)
            return 2

        infn = args[0]
        try:
            with open(infn, 'rb') as infp:
                code = infp.read()
        except Exception as err:
            print('Error: cannot read infile:', err, file=sys.stderr)
            return 1
        if not inencoding:
            code, inencoding = guess_decode(code)

        # do we have to guess the lexer?
        if not lexer:
            try:
                lexer = get_lexer_for_filename(infn, code, **parsed_opts)
            except ClassNotFound as err:
                if '-g' in opts:
                    try:
                        lexer = guess_lexer(code, **parsed_opts)
                    except ClassNotFound:
                        lexer = TextLexer(**parsed_opts)
                else:
                    print('Error:', err, file=sys.stderr)
                    return 1
            except OptionError as err:
                print('Error:', err, file=sys.stderr)
                return 1

    elif '-s' not in opts:  # treat stdin as full file (-s support is later)
        # read code from terminal, always in binary mode since we want to
        # decode ourselves and be tolerant with it
        code = sys.stdin.buffer.read()  # use .buffer to get a binary stream
        if not inencoding:
            code, inencoding = guess_decode_from_terminal(code, sys.stdin)
            # else the lexer will do the decoding
        if not lexer:
            try:
                lexer = guess_lexer(code, **parsed_opts)
            except ClassNotFound:
                lexer = TextLexer(**parsed_opts)

    else:  # -s option needs a lexer with -l
        if not lexer:
            print('Error: when using -s a lexer has to be selected with -l',
                  file=sys.stderr)
            return 2

    # process filters
    for fname, fopts in F_opts:
        try:
            lexer.add_filter(fname, **fopts)
        except ClassNotFound as err:
            print('Error:', err, file=sys.stderr)
            return 1

    # select formatter
    outfn = opts.pop('-o', None)
    fmter = opts.pop('-f', None)
    if fmter:
        # custom formatter, located relative to user's cwd
        if allow_custom_lexer_formatter and '.py' in fmter:
            try:
                filename = None
                name = None
                if ':' in fmter:
                    # Same logic as above for custom lexer
                    filename, name = fmter.rsplit(':', 1)

                    if '.py' in name:
                        name = None

                if filename and name:
                    fmter = load_formatter_from_file(filename, name,
                                                     **parsed_opts)
                else:
                    fmter = load_formatter_from_file(fmter, **parsed_opts)
            except ClassNotFound as err:
                print('Error:', err, file=sys.stderr)
                return 1
        else:
            try:
                fmter = get_formatter_by_name(fmter, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1

    if outfn:
        if not fmter:
            try:
                fmter = get_formatter_for_filename(outfn, **parsed_opts)
            except (OptionError, ClassNotFound) as err:
                print('Error:', err, file=sys.stderr)
                return 1
        try:
            outfile = open(outfn, 'wb')
        except Exception as err:
            print('Error: cannot open outfile:', err, file=sys.stderr)
            return 1
    else:
        if not fmter:
            # pick a terminal formatter matching the terminal's capabilities
            if '256' in os.environ.get('TERM', ''):
                fmter = Terminal256Formatter(**parsed_opts)
            else:
                fmter = TerminalFormatter(**parsed_opts)
        outfile = sys.stdout.buffer

    # determine output encoding if not explicitly selected
    if not outencoding:
        if outfn:
            # output file? use lexer encoding for now (can still be None)
            fmter.encoding = inencoding
        else:
            # else use terminal encoding
            fmter.encoding = terminal_encoding(sys.stdout)

    # provide coloring under Windows, if possible
    if not outfn and sys.platform in ('win32', 'cygwin') and \
       fmter.name in ('Terminal', 'Terminal256'):  # pragma: no cover
        # unfortunately colorama doesn't support binary streams on Py3
        outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
        fmter.encoding = None
        try:
            import colorama.initialise
        except ImportError:
            pass
        else:
            outfile = colorama.initialise.wrap_stream(
                outfile, convert=None, strip=None, autoreset=False, wrap=True)

    # When using the LaTeX formatter and the option `escapeinside` is
    # specified, we need a special lexer which collects escaped text
    # before running the chosen language lexer.
    escapeinside = parsed_opts.get('escapeinside', '')
    if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
        left = escapeinside[0]
        right = escapeinside[1]
        lexer = LatexEmbeddedLexer(left, right, lexer)

    # ... and do it!
    if '-s' not in opts:
        # process whole input as per normal...
        try:
            highlight(code, lexer, fmter, outfile)
        finally:
            if outfn:
                outfile.close()
        return 0
    else:
        # line by line processing of stdin (eg: for 'tail -f')...
        try:
            while 1:
                line = sys.stdin.buffer.readline()
                if not line:
                    break
                if not inencoding:
                    line = guess_decode_from_terminal(line, sys.stdin)[0]
                highlight(line, lexer, fmter, outfile)
                if hasattr(outfile, 'flush'):
                    outfile.flush()
            return 0
        except KeyboardInterrupt:  # pragma: no cover
            return 0
        finally:
            if outfn:
                outfile.close()
def main(args=sys.argv):
    """
    Main command line entry point.

    Returns a shell exit code (0 success, 1 error, 2 usage error).
    NOTE: the `args=sys.argv` default is bound once at import time;
    pass an explicit argv list to override it.
    """
    usage = USAGE % ((args[0],) * 6)

    try:
        popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:vhVHgsx")
    except getopt.GetoptError:
        print(usage, file=sys.stderr)
        return 2

    try:
        return main_inner(popts, args, usage)
    except Exception:
        if '-v' in dict(popts):
            # verbose mode: re-raise so the full traceback is shown
            print(file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print('An unhandled exception occurred while highlighting.',
                  file=sys.stderr)
            print('Please report the whole traceback to the issue tracker at',
                  file=sys.stderr)
            print('<https://github.com/pygments/pygments/issues>.',
                  file=sys.stderr)
            print('*' * 65, file=sys.stderr)
            print(file=sys.stderr)
            raise
        import traceback
        info = traceback.format_exception(*sys.exc_info())
        msg = info[-1].strip()
        if len(info) >= 3:
            # extract relevant file and position info
            msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
        print(file=sys.stderr)
        print('*** Error while highlighting:', file=sys.stderr)
        print(msg, file=sys.stderr)
        print('*** If this is a bug you want to report, please rerun with -v.',
              file=sys.stderr)
        return 1
| |
"""Compressed Block Sparse Row matrix format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats, spmatrix
from .sputils import (isshape, getdtype, to_native, upcast, get_index_dtype,
check_shape)
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
bsr_matmat, bsr_transpose, bsr_sort_indices,
bsr_tocsr)
class bsr_matrix(_cs_matrix, _minmax_mixin):
    """Block Sparse Row matrix

    This can be instantiated in several ways:
        bsr_matrix(D, [blocksize=(R,C)])
            where D is a dense matrix or 2-D ndarray.

        bsr_matrix(S, [blocksize=(R,C)])
            with another sparse matrix S (equivalent to S.tobsr())

        bsr_matrix((M, N), [blocksize=(R,C), dtype])
            to construct an empty matrix with shape (M, N)
            dtype is optional, defaulting to dtype='d'.

        bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
            where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``

        bsr_matrix((data, indices, indptr), [shape=(M, N)])
            is the standard BSR representation where the block column
            indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
            and their corresponding block values are stored in
            ``data[ indptr[i]: indptr[i+1] ]``.  If the shape parameter is not
            supplied, the matrix dimensions are inferred from the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of stored values, including explicit zeros
    data
        Data array of the matrix
    indices
        BSR format index array
    indptr
        BSR format index pointer array
    blocksize
        Block size of the matrix
    has_sorted_indices
        Whether indices are sorted

    Notes
    -----
    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    **Summary of BSR format**

    The Block Compressed Row (BSR) format is very similar to the Compressed
    Sparse Row (CSR) format.  BSR is appropriate for sparse matrices with dense
    sub matrices like the last example below.  Block matrices often arise in
    vector-valued finite element discretizations.  In such cases, BSR is
    considerably more efficient than CSR and CSC for many sparse arithmetic
    operations.

    **Blocksize**

    The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
    That is, R and C must satisfy the relationship ``M % R = 0`` and
    ``N % C = 0``.

    If no blocksize is specified, a simple heuristic is applied to determine
    an appropriate blocksize.

    Examples
    --------
    >>> from scipy.sparse import bsr_matrix
    >>> bsr_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3 ,4, 5, 6])
    >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
    >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
    array([[1, 1, 0, 0, 2, 2],
           [1, 1, 0, 0, 2, 2],
           [0, 0, 0, 0, 3, 3],
           [0, 0, 0, 0, 3, 3],
           [4, 4, 5, 5, 6, 6],
           [4, 4, 5, 5, 6, 6]])

    """
    # Format identifier used by the scipy.sparse machinery (e.g. _formats).
    format = 'bsr'
    def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
        """Dispatch on the type of `arg1`: sparse matrix, shape tuple,
        (data, ij) pair, (data, indices, indptr) triple, or dense array."""
        _data_matrix.__init__(self)

        if isspmatrix(arg1):
            # Another sparse matrix: copy or convert to BSR.
            if isspmatrix_bsr(arg1) and copy:
                arg1 = arg1.copy()
            else:
                arg1 = arg1.tobsr(blocksize=blocksize)
            self._set_self(arg1)

        elif isinstance(arg1,tuple):
            if isshape(arg1):
                # it's a tuple of matrix dimensions (M,N)
                self._shape = check_shape(arg1)
                M,N = self.shape
                # process blocksize
                if blocksize is None:
                    blocksize = (1,1)
                else:
                    if not isshape(blocksize):
                        raise ValueError('invalid blocksize=%s' % blocksize)
                    blocksize = tuple(blocksize)
                self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))

                R,C = blocksize
                if (M % R) != 0 or (N % C) != 0:
                    raise ValueError('shape must be multiple of blocksize')

                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
                self.indices = np.zeros(0, dtype=idx_dtype)
                self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)

            elif len(arg1) == 2:
                # (data,(row,col)) format
                from .coo import coo_matrix
                self._set_self(coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize))

            elif len(arg1) == 3:
                # (data,indices,indptr) format
                (data, indices, indptr) = arg1

                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                maxval = 1
                if shape is not None:
                    maxval = max(shape)
                if blocksize is not None:
                    maxval = max(maxval, max(blocksize))
                idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
                self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
                self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                self.data = np.array(data, copy=copy, dtype=getdtype(dtype, data))
            else:
                raise ValueError('unrecognized bsr_matrix constructor usage')
        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except Exception:
                raise ValueError("unrecognized form for"
                                 " %s_matrix constructor" % self.format)
            from .coo import coo_matrix
            arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
            self._set_self(arg1)

        if shape is not None:
            self._shape = check_shape(shape)
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    M = len(self.indptr) - 1
                    N = self.indices.max() + 1
                except Exception:
                    raise ValueError('unable to infer matrix dimensions')
                else:
                    R,C = self.blocksize
                    self._shape = check_shape((M*R,N*C))

        if self.shape is None:
            if shape is None:
                # TODO infer shape here
                raise ValueError('need to infer shape')
            else:
                self._shape = check_shape(shape)

        if dtype is not None:
            # Honor an explicit dtype request without an unnecessary copy.
            self.data = self.data.astype(dtype, copy=False)

        self.check_format(full_check=False)
    def check_format(self, full_check=True):
        """Check whether the matrix format is valid.

        *Parameters*:
            full_check:
                True  - rigorous check, O(N) operations : default
                False - basic check, O(1) operations

        """
        M,N = self.shape
        R,C = self.blocksize

        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                 % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                 % self.indices.dtype.name)

        # Normalize index dtypes and byte order.
        idx_dtype = get_index_dtype((self.indices, self.indptr))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

        # check array shapes
        if self.indices.ndim != 1 or self.indptr.ndim != 1:
            raise ValueError("indices, and indptr should be 1-D")
        if self.data.ndim != 3:
            raise ValueError("data should be 3-D")

        # check index pointer
        if (len(self.indptr) != M//R + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                             (len(self.indptr), M//R + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")

        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")

        self.prune()

        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N//C:
                    raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
                if self.indices.min() < 0:
                    raise ValueError("column index values must be >= 0")
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")

        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices(check_first=False)
    def _get_blocksize(self):
        # The block shape (R, C) is carried implicitly by the trailing two
        # dimensions of the 3-D `data` array.
        return self.data.shape[1:]
    # Read-only property: the (R, C) block shape of the matrix.
    blocksize = property(fget=_get_blocksize)
    def getnnz(self, axis=None):
        # Total stored scalar entries: indptr[-1] blocks of R*C scalars each
        # (explicit zeros included).  Per-axis counts are not implemented.
        if axis is not None:
            raise NotImplementedError("getnnz over an axis is not implemented "
                                      "for BSR format")
        R,C = self.blocksize
        return int(self.indptr[-1] * R * C)

    getnnz.__doc__ = spmatrix.getnnz.__doc__
    def __repr__(self):
        # Human-readable summary including shape, dtype, nnz and blocksize.
        format = _formats[self.getformat()][1]
        return ("<%dx%d sparse matrix of type '%s'\n"
                "\twith %d stored elements (blocksize = %dx%d) in %s format>" %
                (self.shape + (self.dtype.type, self.nnz) + self.blocksize +
                 (format,)))
    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            raise ValueError("k exceeds matrix dimensions")
        R, C = self.blocksize
        # Pre-fill the k-th diagonal with zeros; the C routine adds in the
        # stored values.
        y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
                                  self.indptr, self.indices,
                                  np.ravel(self.data), y)
        return y

    diagonal.__doc__ = spmatrix.diagonal.__doc__
##########################
# NotImplemented methods #
##########################
    def __getitem__(self,key):
        # Indexing is not supported for BSR; convert to another format
        # (e.g. CSR) first if element access is needed.
        raise NotImplementedError
    def __setitem__(self,key,val):
        # Item assignment is not supported for BSR.
        raise NotImplementedError
######################
# Arithmetic methods #
######################
    # Deprecated alias for the * operator, kept for backward compatibility.
    @np.deprecate(message="BSR matvec is deprecated in SciPy 0.19.0. "
                          "Use * operator instead.")
    def matvec(self, other):
        """Multiply matrix by vector."""
        return self * other
    # Deprecated alias for the * operator, kept for backward compatibility.
    @np.deprecate(message="BSR matmat is deprecated in SciPy 0.19.0. "
                          "Use * operator instead.")
    def matmat(self, other):
        """Multiply this sparse matrix by other matrix."""
        return self * other
    def _add_dense(self, other):
        # Delegate dense addition to the COO representation (no data copy).
        return self.tocoo(copy=False)._add_dense(other)
    def _mul_vector(self, other):
        # Sparse-matrix @ dense-vector product via the C routine bsr_matvec.
        M,N = self.shape
        R,C = self.blocksize

        result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))

        bsr_matvec(M//R, N//C, R, C,
                   self.indptr, self.indices, self.data.ravel(),
                   other, result)

        return result
    def _mul_multivector(self,other):
        # Sparse-matrix @ dense-matrix product: one matvec per column vector,
        # all done in the C routine bsr_matvecs.
        R,C = self.blocksize
        M,N = self.shape
        n_vecs = other.shape[1]  # number of column vectors

        result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))

        bsr_matvecs(M//R, N//C, n_vecs, R, C,
                    self.indptr, self.indices, self.data.ravel(),
                    other.ravel(), result.ravel())

        return result
    def _mul_sparse_matrix(self, other):
        # Sparse @ sparse product.  `other` is converted to a compatible BSR
        # blocksize, an upper bound on the result's block count is computed,
        # then the C routine bsr_matmat fills the result arrays.
        M, K1 = self.shape
        K2, N = other.shape
        R,n = self.blocksize

        # convert to this format
        if isspmatrix_bsr(other):
            C = other.blocksize[1]
        else:
            C = 1

        from .csr import isspmatrix_csr

        if isspmatrix_csr(other) and n == 1:
            other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
        else:
            other = other.tobsr(blocksize=(n,C))

        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices))

        # Upper bound on the number of blocks in the product.
        bnnz = csr_matmat_maxnnz(M//R, N//C,
                                 self.indptr.astype(idx_dtype),
                                 self.indices.astype(idx_dtype),
                                 other.indptr.astype(idx_dtype),
                                 other.indices.astype(idx_dtype))

        # Re-pick the index dtype now that the output size is known.
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=bnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(bnnz, dtype=idx_dtype)
        data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))

        bsr_matmat(bnnz, M//R, N//C, R, C, n,
                   self.indptr.astype(idx_dtype),
                   self.indices.astype(idx_dtype),
                   np.ravel(self.data),
                   other.indptr.astype(idx_dtype),
                   other.indices.astype(idx_dtype),
                   np.ravel(other.data),
                   indptr,
                   indices,
                   data)

        data = data.reshape(-1,R,C)

        # TODO eliminate zeros

        return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix into Block Sparse Row Format.
With copy=False, the data/indices may be shared between this
matrix and the resultant bsr_matrix.
If blocksize=(R, C) is provided, it will be used for determining
block size of the bsr_matrix.
"""
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
    def tocsr(self, copy=False):
        # Expand every R x C dense block into individual scalar entries and
        # hand them to the bsr->csr C routine.
        M, N = self.shape
        R, C = self.blocksize
        nnz = self.nnz
        # index dtype must be able to address both the nnz entries and N columns
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype))
        bsr_tocsr(M // R,  # n_brow
                  N // C,  # n_bcol
                  R, C,
                  self.indptr.astype(idx_dtype, copy=False),
                  self.indices.astype(idx_dtype, copy=False),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csr import csr_matrix
        return csr_matrix((data, indices, indptr), shape=self.shape)
    tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
return self.tocsr(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
    def tocoo(self, copy=True):
        """Convert this matrix to COOrdinate format.

        When copy=False the data array will be shared between
        this matrix and the resultant coo_matrix.
        """
        M,N = self.shape
        R,C = self.blocksize
        indptr_diff = np.diff(self.indptr)  # number of blocks in each block-row
        if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
            # Check for potential overflow
            indptr_diff_limited = indptr_diff.astype(np.intp)
            if np.any(indptr_diff_limited != indptr_diff):
                raise ValueError("Matrix too big to convert")
            indptr_diff = indptr_diff_limited
        # top-left row index of each stored block, one entry per block...
        row = (R * np.arange(M//R)).repeat(indptr_diff)
        # ...expanded to one entry per scalar within the block...
        row = row.repeat(R*C).reshape(-1,R,C)
        # ...then offset by the within-block row position
        row += np.tile(np.arange(R).reshape(-1,1), (1,C))
        row = row.reshape(-1)
        # analogous construction for column indices, from the block indices
        col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
        col += np.tile(np.arange(C), (R,1))
        col = col.reshape(-1)
        data = self.data.reshape(-1)
        if copy:
            data = data.copy()
        from .coo import coo_matrix
        return coo_matrix((data,(row,col)), shape=self.shape)
def toarray(self, order=None, out=None):
return self.tocoo(copy=False).toarray(order=order, out=out)
toarray.__doc__ = spmatrix.toarray.__doc__
    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        R, C = self.blocksize
        M, N = self.shape
        NBLK = self.nnz//(R*C)  # number of stored blocks
        if self.nnz == 0:
            # no stored blocks: return an empty transpose with swapped blocksize
            return bsr_matrix((N, M), blocksize=(C, R),
                              dtype=self.dtype, copy=copy)
        indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
        indices = np.empty(NBLK, dtype=self.indices.dtype)
        # each R x C block becomes a C x R block in the transpose
        data = np.empty((NBLK, C, R), dtype=self.data.dtype)
        bsr_transpose(M//R, N//C, R, C,
                      self.indptr, self.indices, self.data.ravel(),
                      indptr, indices, data.ravel())
        return bsr_matrix((data, indices, indptr),
                          shape=(N, M), copy=copy)
    transpose.__doc__ = spmatrix.transpose.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
    def eliminate_zeros(self):
        """Remove zero elements in-place."""
        if not self.nnz:
            return  # nothing to do
        R,C = self.blocksize
        M,N = self.shape
        mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks
        nonzero_blocks = mask.nonzero()[0]
        # compact the surviving blocks to the front of the data array
        self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
        # modifies self.indptr and self.indices *in place*
        _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
                                         self.indices, mask)
        self.prune()
    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together

        This is an *in place* operation
        """
        if self.has_canonical_format:
            return
        self.sort_indices()
        R, C = self.blocksize
        M, N = self.shape
        # port of _sparsetools.csr_sum_duplicates
        n_row = M // R
        nnz = 0  # running count of merged (deduplicated) blocks
        row_end = 0
        for i in range(n_row):
            jj = row_end
            row_end = self.indptr[i+1]
            while jj < row_end:
                j = self.indices[jj]
                # NOTE(review): x is a view into self.data, so += below mutates
                # the stored block in place; that block is later overwritten or
                # compacted away, so the net result appears correct — confirm.
                x = self.data[jj]
                jj += 1
                # fold any consecutive duplicates of block-column j into x
                while jj < row_end and self.indices[jj] == j:
                    x += self.data[jj]
                    jj += 1
                # write the merged block at the next compacted slot
                self.indices[nnz] = j
                self.data[nnz] = x
                nnz += 1
            self.indptr[i+1] = nnz
        self.prune()  # nnz may have changed
        self.has_canonical_format = True
def sort_indices(self):
"""Sort the indices of this matrix *in place*
"""
if self.has_sorted_indices:
return
R,C = self.blocksize
M,N = self.shape
bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
    def _binopt(self, other, op, in_shape=None, out_shape=None):
        """Apply the binary operation fn to two sparse matrices."""
        # Ideally we'd take the GCDs of the blocksize dimensions
        # and explode self and other to match.
        other = self.__class__(other, blocksize=self.blocksize)
        # e.g. bsr_plus_bsr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)
        R,C = self.blocksize
        # worst case: the two sparsity patterns do not overlap at all
        max_bnnz = len(self.data) + len(other.data)
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=max_bnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(max_bnnz, dtype=idx_dtype)
        # comparison operators yield boolean-valued results
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(R*C*max_bnnz, dtype=np.bool_)
        else:
            data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
        fn(self.shape[0]//R, self.shape[1]//C, R, C,
           self.indptr.astype(idx_dtype),
           self.indices.astype(idx_dtype),
           self.data,
           other.indptr.astype(idx_dtype),
           other.indices.astype(idx_dtype),
           np.ravel(other.data),
           indptr,
           indices,
           data)
        # trim the over-allocated output down to the blocks actually produced
        actual_bnnz = indptr[-1]
        indices = indices[:actual_bnnz]
        data = data[:R*C*actual_bnnz]
        # copy when more than half the buffer was wasted, to release memory
        if actual_bnnz < max_bnnz/2:
            indices = indices.copy()
            data = data.copy()
        data = data.reshape(-1,R,C)
        return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
# # to remove redudancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
    """Check whether *x* is an instance of bsr_matrix.

    Parameters
    ----------
    x
        object to check for being a bsr matrix

    Returns
    -------
    bool
        True if x is a bsr matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(bsr_matrix([[5]]))
    True

    >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(csr_matrix([[5]]))
    False
    """
    return isinstance(x, bsr_matrix)
| |
import unittest
from django.core.exceptions import FieldError
from django.db import IntegrityError, connection, transaction
from django.db.models import CharField, Count, F, IntegerField, Max
from django.db.models.functions import Abs, Concat, Lower
from django.test import TestCase
from django.test.utils import register_lookup
from .models import A, B, Bar, D, DataPoint, Foo, RelatedPoint, UniqueNumber
class SimpleTest(TestCase):
    """Basic row-count behaviour of QuerySet.update()."""

    @classmethod
    def setUpTestData(cls):
        cls.a1 = A.objects.create()
        cls.a2 = A.objects.create()
        for _ in range(20):
            B.objects.create(a=cls.a1)
            D.objects.create(a=cls.a1)

    def test_nonempty_update(self):
        """
        update() reports the correct number of rows for a nonempty queryset.
        """
        rows = self.a1.b_set.update(y=100)
        self.assertEqual(rows, 20)
        self.assertEqual(B.objects.filter(y=100).count(), 20)

    def test_empty_update(self):
        """
        update() reports zero rows for an empty queryset.
        """
        rows = self.a2.b_set.update(y=100)
        self.assertEqual(rows, 0)
        self.assertEqual(B.objects.filter(y=100).count(), 0)

    def test_nonempty_update_with_inheritance(self):
        """
        update() reports the correct number of rows when the update only
        touches a base table of an inherited model.
        """
        rows = self.a1.d_set.update(y=100)
        self.assertEqual(rows, 20)
        self.assertEqual(D.objects.filter(y=100).count(), 20)

    def test_empty_update_with_inheritance(self):
        """
        update() reports zero rows for an empty queryset when the update
        only touches a base table of an inherited model.
        """
        rows = self.a2.d_set.update(y=100)
        self.assertEqual(rows, 0)
        self.assertEqual(D.objects.filter(y=100).count(), 0)

    def test_foreign_key_update_with_id(self):
        """
        update() accepts the <field>_id attname for foreign key fields.
        """
        rows = self.a1.d_set.update(a_id=self.a2)
        self.assertEqual(rows, 20)
        self.assertEqual(self.a2.d_set.count(), 20)
class AdvancedTests(TestCase):
    """QuerySet.update() beyond simple row counts: multi-object updates,
    FK and to_field updates, annotated querysets, and error conditions."""
    @classmethod
    def setUpTestData(cls):
        cls.d0 = DataPoint.objects.create(name="d0", value="apple")
        cls.d2 = DataPoint.objects.create(name="d2", value="banana")
        cls.d3 = DataPoint.objects.create(name="d3", value="banana")
        cls.r1 = RelatedPoint.objects.create(name="r1", data=cls.d3)
    def test_update(self):
        """
        Objects are updated by first filtering the candidates into a queryset
        and then calling the update() method. It executes immediately and
        returns the number of rows matched.
        """
        resp = DataPoint.objects.filter(value="apple").update(name="d1")
        self.assertEqual(resp, 1)
        resp = DataPoint.objects.filter(value="apple")
        self.assertEqual(list(resp), [self.d0])
    def test_update_multiple_objects(self):
        """
        We can update multiple objects at once.
        """
        resp = DataPoint.objects.filter(value='banana').update(value='pineapple')
        self.assertEqual(resp, 2)
        self.assertEqual(DataPoint.objects.get(name="d2").value, 'pineapple')
    def test_update_fk(self):
        """
        Foreign key fields can also be updated, although you can only update
        the object referred to, not anything inside the related object.
        """
        resp = RelatedPoint.objects.filter(name="r1").update(data=self.d0)
        self.assertEqual(resp, 1)
        resp = RelatedPoint.objects.filter(data__name="d0")
        self.assertEqual(list(resp), [self.r1])
    def test_update_multiple_fields(self):
        """
        Multiple fields can be updated at once
        """
        resp = DataPoint.objects.filter(value="apple").update(
            value="fruit", another_value="peach")
        self.assertEqual(resp, 1)
        d = DataPoint.objects.get(name="d0")
        self.assertEqual(d.value, 'fruit')
        self.assertEqual(d.another_value, 'peach')
    def test_update_all(self):
        """
        In the rare case you want to update every instance of a model, update()
        is also a manager method.
        """
        self.assertEqual(DataPoint.objects.update(value='thing'), 3)
        resp = DataPoint.objects.values('value').distinct()
        self.assertEqual(list(resp), [{'value': 'thing'}])
    def test_update_slice_fail(self):
        """
        We do not support update on already sliced query sets.
        """
        method = DataPoint.objects.all()[:2].update
        msg = 'Cannot update a query once a slice has been taken.'
        with self.assertRaisesMessage(AssertionError, msg):
            method(another_value='another thing')
    def test_update_respects_to_field(self):
        """
        Update of an FK field which specifies a to_field works.
        """
        a_foo = Foo.objects.create(target='aaa')
        b_foo = Foo.objects.create(target='bbb')
        bar = Bar.objects.create(foo=a_foo)
        # foo_id stores the to_field value ('target'), not the pk
        self.assertEqual(bar.foo_id, a_foo.target)
        bar_qs = Bar.objects.filter(pk=bar.pk)
        self.assertEqual(bar_qs[0].foo_id, a_foo.target)
        bar_qs.update(foo=b_foo)
        self.assertEqual(bar_qs[0].foo_id, b_foo.target)
    def test_update_m2m_field(self):
        # Many-to-many fields cannot be updated via update().
        msg = (
            'Cannot update model field '
            '<django.db.models.fields.related.ManyToManyField: m2m_foo> '
            '(only non-relations and foreign keys permitted).'
        )
        with self.assertRaisesMessage(FieldError, msg):
            Bar.objects.update(m2m_foo='whatever')
    def test_update_transformed_field(self):
        # Updates can reference registered transforms of the field itself.
        A.objects.create(x=5)
        A.objects.create(x=-6)
        with register_lookup(IntegerField, Abs):
            A.objects.update(x=F('x__abs'))
        self.assertCountEqual(A.objects.values_list('x', flat=True), [5, 6])
    def test_update_annotated_queryset(self):
        """
        Update of a queryset that's been annotated.
        """
        # Trivial annotated update
        qs = DataPoint.objects.annotate(alias=F('value'))
        self.assertEqual(qs.update(another_value='foo'), 3)
        # Update where annotation is used for filtering
        qs = DataPoint.objects.annotate(alias=F('value')).filter(alias='apple')
        self.assertEqual(qs.update(another_value='foo'), 1)
        # Update where annotation is used in update parameters
        qs = DataPoint.objects.annotate(alias=F('value'))
        self.assertEqual(qs.update(another_value=F('alias')), 3)
        # Update where aggregation annotation is used in update parameters
        qs = DataPoint.objects.annotate(max=Max('value'))
        msg = (
            'Aggregate functions are not allowed in this query '
            '(another_value=Max(Col(update_datapoint, update.DataPoint.value))).'
        )
        with self.assertRaisesMessage(FieldError, msg):
            qs.update(another_value=F('max'))
    def test_update_annotated_multi_table_queryset(self):
        """
        Update of a queryset that's been annotated and involves multiple tables.
        """
        # Trivial annotated update
        qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
        self.assertEqual(qs.update(value='Foo'), 3)
        # Update where annotation is used for filtering
        qs = DataPoint.objects.annotate(related_count=Count('relatedpoint'))
        self.assertEqual(qs.filter(related_count=1).update(value='Foo'), 1)
        # Update where aggregation annotation is used in update parameters
        qs = RelatedPoint.objects.annotate(max=Max('data__value'))
        msg = 'Joined field references are not permitted in this query'
        with self.assertRaisesMessage(FieldError, msg):
            qs.update(name=F('max'))
    def test_update_with_joined_field_annotation(self):
        # Annotations that traverse a join cannot be used as update values.
        msg = 'Joined field references are not permitted in this query'
        with register_lookup(CharField, Lower):
            for annotation in (
                F('data__name'),
                F('data__name__lower'),
                Lower('data__name'),
                Concat('data__name', 'data__value'),
            ):
                with self.subTest(annotation=annotation):
                    with self.assertRaisesMessage(FieldError, msg):
                        RelatedPoint.objects.annotate(
                            new_name=annotation,
                        ).update(name=F('new_name'))
@unittest.skipUnless(
    connection.vendor == 'mysql',
    'UPDATE...ORDER BY syntax is supported on MySQL/MariaDB',
)
class MySQLUpdateOrderByTest(TestCase):
    """Update field with a unique constraint using an ordered queryset."""

    @classmethod
    def setUpTestData(cls):
        for value in (1, 2):
            UniqueNumber.objects.create(number=value)

    def test_order_by_update_on_unique_constraint(self):
        orderings = (
            ('-number', 'id'),
            (F('number').desc(), 'id'),
            (F('number') * -1, 'id'),
        )
        for ordering in orderings:
            with self.subTest(ordering=ordering), transaction.atomic():
                count = UniqueNumber.objects.order_by(*ordering).update(
                    number=F('number') + 1,
                )
                self.assertEqual(count, 2)

    def test_order_by_update_on_unique_constraint_annotation(self):
        # Ordering by annotations is omitted because they cannot be resolved in
        # .update().
        with self.assertRaises(IntegrityError):
            UniqueNumber.objects.annotate(
                number_inverse=F('number').desc(),
            ).order_by('number_inverse').update(
                number=F('number') + 1,
            )
| |
""" Abstractions for handling resources via Amazon Web Services (AWS) API
The intention of these utilities is to allow other infrastructure to
interact with AWS without having to understand AWS APIs. Additionally,
this module provides helper functions for the most common queries required
to manipulate and test a DC/OS cluster, which would be otherwise cumbersome
to do with AWS API calls only
BotoWrapper: AWS credentials and region bound to various helper methods
CfStack: Generic representation of a CloudFormation stack
DcosCfStack: Represents DC/OS in a simple deployment
DcosZenCfStack: Represents DC/OS deployed from a zen template
MasterStack: thin wrapper for master stack in a zen template
  PrivateAgentStack: thin wrapper for private agent stack in a zen template
PublicAgentStack: thin wrapper for public agent stack in a zen template
BareClusterCfStack: Represents a homogeneous cluster of hosts with a specific AMI
"""
import logging
import time
import boto3
import pkg_resources
import retrying
from botocore.exceptions import ClientError
from test_util.helpers import Host, retry_boto_rate_limits, SshInfo
log = logging.getLogger(__name__)  # module-level logger shared by the helpers below
def template_by_instance_type(instance_type):
    """Return the VPC cluster CloudFormation template text appropriate for
    *instance_type*: the EBS-only template for c4/t2/m4 families, the
    general template otherwise.
    """
    family = instance_type.split('.')[0]
    if family in ('c4', 't2', 'm4'):
        resource_name = 'templates/vpc-ebs-only-cluster-template.json'
    else:
        resource_name = 'templates/vpc-cluster-template.json'
    return pkg_resources.resource_string('test_util', resource_name).decode('utf-8')
def param_dict_to_aws_format(user_parameters):
    """Convert a plain mapping into the list-of-dicts parameter structure
    expected by the CloudFormation API; keys and values are cast to str.
    """
    formatted = []
    for key, value in user_parameters.items():
        formatted.append({'ParameterKey': str(key), 'ParameterValue': str(value)})
    return formatted
@retry_boto_rate_limits
def instances_to_hosts(instances):
    """Map boto EC2 instance objects to Host(private_ip, public_ip) tuples."""
    hosts = []
    for instance in instances:
        hosts.append(Host(instance.private_ip_address, instance.public_ip_address))
    return hosts
def fetch_stack(stack_name, boto_wrapper):
    """Return the most specific wrapper for an existing CloudFormation stack.

    Inspects the stack's resources to decide whether it is a Zen DC/OS
    stack, a basic DC/OS stack, or a bare (VPC) cluster stack.
    """
    # fixed typo in log message: 'Attemping' -> 'Attempting'
    log.debug('Attempting to fetch AWS Stack: {}'.format(stack_name))
    stack = boto_wrapper.resource('cloudformation').Stack(stack_name)
    for resource in stack.resource_summaries.all():
        if resource.logical_resource_id == 'MasterStack':
            log.debug('Using Zen DC/OS Cloudformation interface')
            return DcosZenCfStack(stack_name, boto_wrapper)
        if resource.logical_resource_id == 'MasterServerGroup':
            log.debug('Using Basic DC/OS Cloudformation interface')
            return DcosCfStack(stack_name, boto_wrapper)
    # no DC/OS-specific resources found; fall back to the bare cluster wrapper
    log.debug('Using VPC Cloudformation interface')
    return BareClusterCfStack(stack_name, boto_wrapper)
class BotoWrapper():
    """Binds AWS credentials and a region to a boto3 session and exposes
    helper methods for the EC2 and CloudFormation operations used here."""
    def __init__(self, region, aws_access_key_id, aws_secret_access_key):
        self.region = region
        self.session = boto3.session.Session(
            aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)

    def client(self, name):
        """Return a low-level boto3 client for service *name* in this region."""
        return self.session.client(service_name=name, region_name=self.region)

    def resource(self, name):
        """Return a high-level boto3 resource for service *name* in this region."""
        return self.session.resource(service_name=name, region_name=self.region)

    def create_key_pair(self, key_name):
        """Returns private key of newly generated pair
        """
        log.info('Creating KeyPair: {}'.format(key_name))
        key = self.client('ec2').create_key_pair(KeyName=key_name)
        return key['KeyMaterial']

    def delete_key_pair(self, key_name):
        """Delete the named EC2 key pair."""
        log.info('Deleting KeyPair: {}'.format(key_name))
        self.resource('ec2').KeyPair(key_name).delete()

    def create_stack(self, name, parameters, template_url=None, template_body=None, deploy_timeout=60):
        """Pulls template and checks user params versus template params.
        Does simple casting of strings or numbers
        Starts stack creation if validation is successful

        Exactly one of template_url / template_body must be provided.
        (Fixed docstring typo 'temlate' and assert-message typo 'tempate_body'.)
        """
        log.info('Requesting AWS CloudFormation: {}'.format(name))
        args = {
            'StackName': name,
            'DisableRollback': True,
            'TimeoutInMinutes': deploy_timeout,
            'Capabilities': ['CAPABILITY_IAM'],
            # this python API only accepts data in string format; cast as string here
            # so that we may pass parameters directly from yaml (which parses numbers as non-strings)
            'Parameters': param_dict_to_aws_format(parameters)}
        if template_body is not None:
            assert template_url is None, 'template_body and template_url cannot be supplied simultaneously'
            args['TemplateBody'] = template_body
        else:
            assert template_url is not None, 'template_url must be set if template_body is not provided'
            args['TemplateURL'] = template_url
        return self.resource('cloudformation').create_stack(**args)

    def create_vpc_tagged(self, cidr, name_tag):
        """Create a VPC with the given CIDR, wait until available, tag it,
        and return its id."""
        ec2 = self.client('ec2')
        log.info('Creating new VPC...')
        vpc_id = ec2.create_vpc(CidrBlock=cidr, InstanceTenancy='default')['Vpc']['VpcId']
        ec2.get_waiter('vpc_available').wait(VpcIds=[vpc_id])
        ec2.create_tags(Resources=[vpc_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        log.info('Created VPC with ID: {}'.format(vpc_id))
        return vpc_id

    def create_internet_gateway_tagged(self, vpc_id, name_tag):
        """Create an internet gateway, attach it to *vpc_id*, tag it, and
        return its id."""
        ec2 = self.client('ec2')
        log.info('Creating new InternetGateway...')
        gateway_id = ec2.create_internet_gateway()['InternetGateway']['InternetGatewayId']
        ec2.attach_internet_gateway(InternetGatewayId=gateway_id, VpcId=vpc_id)
        ec2.create_tags(Resources=[gateway_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        log.info('Created internet gateway with ID: {}'.format(gateway_id))
        return gateway_id

    def create_subnet_tagged(self, vpc_id, cidr, name_tag):
        """Create a subnet in *vpc_id*, tag it, wait until available, and
        return its id."""
        ec2 = self.client('ec2')
        log.info('Creating new Subnet...')
        subnet_id = ec2.create_subnet(VpcId=vpc_id, CidrBlock=cidr)['Subnet']['SubnetId']
        ec2.create_tags(Resources=[subnet_id], Tags=[{'Key': 'Name', 'Value': name_tag}])
        ec2.get_waiter('subnet_available').wait(SubnetIds=[subnet_id])
        log.info('Created subnet with ID: {}'.format(subnet_id))
        return subnet_id

    def delete_subnet(self, subnet_id):
        log.info('Deleting subnet: {}'.format(subnet_id))
        self.client('ec2').delete_subnet(SubnetId=subnet_id)

    def delete_internet_gateway(self, gateway_id):
        # a gateway must be detached from all VPCs before it can be deleted
        ig = self.resource('ec2').InternetGateway(gateway_id)
        for vpc in ig.attachments:
            vpc_id = vpc['VpcId']
            log.info('Detaching gateway {} from vpc {}'.format(gateway_id, vpc_id))
            ig.detach_from_vpc(VpcId=vpc_id)
        log.info('Deleting internet gateway: {}'.format(gateway_id))
        ig.delete()

    def delete_vpc(self, vpc_id):
        log.info('Deleting vpc: {}'.format(vpc_id))
        self.client('ec2').delete_vpc(VpcId=vpc_id)

    @retry_boto_rate_limits
    def get_auto_scaling_instances(self, asg_physical_resource_id):
        """ Returns instance objects as described here:
        http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#instance
        """
        ec2 = self.resource('ec2')
        return [ec2.Instance(i['InstanceId']) for asg in self.client('autoscaling').
                describe_auto_scaling_groups(
                    AutoScalingGroupNames=[asg_physical_resource_id])
                ['AutoScalingGroups'] for i in asg['Instances']]
class CfStack:
    """Generic wrapper around a boto CloudFormation Stack resource with
    rate-limit-safe describe calls and status-polling helpers."""
    def __init__(self, stack_name, boto_wrapper):
        self.boto_wrapper = boto_wrapper
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_name)
    def wait_for_status_change(self, state_1, state_2, wait_before_poll_min, timeout=60 * 60):
        """
        Note: Do not use unwrapped boto waiter class, it has very poor error handling

        Stacks can have one of the following statuses. See:
        http://boto3.readthedocs.io/en/latest/reference/
        services/cloudformation.html#CloudFormation.Client.describe_stacks

        CREATE_IN_PROGRESS, CREATE_FAILED, CREATE_COMPLETE
        ROLLBACK_IN_PROGRESS, ROLLBACK_FAILED, ROLLBACK_COMPLETE
        DELETE_IN_PROGRESS, DELETE_FAILED, DELETE_COMPLETE
        UPDATE_IN_PROGRESS, UPDATE_COMPLETE_CLEANUP_IN_PROGRESS
        UPDATE_COMPLETE, UPDATE_ROLLBACK_IN_PROGRESS
        UPDATE_ROLLBACK_FAILED, UPDATE_ROLLBACK_COMPLETE
        UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS
        """
        log.info('Waiting for status to change from {} to {}'.format(state_1, state_2))
        log.info('Sleeping for {} minutes before polling'.format(wait_before_poll_min))
        time.sleep(60 * wait_before_poll_min)

        # poll every 10s until the target state is reached or `timeout` elapses;
        # any exception (including the unexpected-state one below) aborts the retry
        @retrying.retry(wait_fixed=10 * 1000,
                        stop_max_delay=timeout * 1000,
                        retry_on_result=lambda res: res is False,
                        retry_on_exception=lambda ex: False)
        def wait_loop():
            stack_details = self.get_stack_details()
            stack_status = stack_details['StackStatus']
            if stack_status == state_2:
                return True
            if stack_status != state_1:
                # unexpected state: dump details and events to aid debugging
                log.error('Stack Details: {}'.format(stack_details))
                for event in self.get_stack_events():
                    log.error('Stack Events: {}'.format(event))
                raise Exception('StackStatus changed unexpectedly to: {}'.format(stack_status))
            log.info('Continuing to wait...')
            return False
        wait_loop()
    def wait_for_complete(self, wait_before_poll_min=0):
        """Block until the stack reaches the *_COMPLETE form of its current
        *_IN_PROGRESS status; no-op if already complete."""
        status = self.get_stack_details()['StackStatus']
        if status.endswith('_COMPLETE'):
            return
        elif status.endswith('_IN_PROGRESS'):
            self.wait_for_status_change(
                status, status.replace('IN_PROGRESS', 'COMPLETE'),
                wait_before_poll_min)
        else:
            raise Exception('AWS Stack has entered unexpected state: {}'.format(status))
    @retry_boto_rate_limits
    def get_stack_details(self):
        # describe_stacks returns a list; this stack id matches exactly one
        details = self.boto_wrapper.client('cloudformation').describe_stacks(
            StackName=self.stack.stack_id)['Stacks'][0]
        log.debug('Stack details: {}'.format(details))
        return details
    @retry_boto_rate_limits
    def get_stack_events(self):
        log.debug('Requesting stack events')
        return self.boto_wrapper.client('cloudformation').describe_stack_events(
            StackName=self.stack.stack_id)['StackEvents']
    def get_parameter(self, param):
        """Returns param if in stack parameters, else returns None
        """
        # NOTE(review): despite the docstring, a missing key raises KeyError
        # rather than returning None — confirm intended contract.
        for p in self.stack.parameters:
            if p['ParameterKey'] == param:
                return p['ParameterValue']
        raise KeyError('Key not found in template parameters: {}. Parameters: {}'.
                       format(param, self.stack.parameters))
    def delete(self):
        stack_id = self.stack.stack_id
        log.info('Deleting stack: {}'.format(stack_id))
        # boto stacks become unusable after deletion (e.g. status/info checks) if name-based
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_id)
        self.stack.delete()
        log.info('Delete successfully initiated for {}'.format(stack_id))
class CleanupS3BucketMixin:
    """Mixin for stacks that own an Exhibitor S3 bucket: the bucket must be
    emptied before the stack (and the bucket itself) can be deleted."""
    def delete_exhibitor_s3_bucket(self):
        """ A non-empty S3 bucket cannot be deleted, so check to
        see if it should be emptied first. If its non-empty, but
        has more than one item, error out as the bucket is perhaps
        not an exhibitor bucket and the user should be alerted
        """
        try:
            bucket = self.boto_wrapper.resource('s3').Bucket(
                self.stack.Resource('ExhibitorS3Bucket').physical_resource_id)
        except ClientError:
            log.exception('Bucket could not be fetched')
            log.warning('S3 bucket not found when expected during delete, moving on...')
            return
        log.info('Starting bucket {} deletion'.format(bucket))
        all_objects = list(bucket.objects.all())
        obj_count = len(all_objects)
        if obj_count == 1:
            all_objects[0].delete()
        elif obj_count > 1:
            # BUG FIX: the original raised via `'...' + obj_count`, which
            # concatenated str with int and crashed with TypeError instead of
            # the intended message; also fixed the 'Expected on item' typo.
            raise Exception(
                'Expected one item in Exhibitor S3 bucket but found: {}'.format(obj_count))
        log.info('Trying deleting bucket {} itself'.format(bucket))
        bucket.delete()
    def delete(self):
        # empty and remove the bucket first, then let the stack deletion proceed
        self.delete_exhibitor_s3_bucket()
        super().delete()
class DcosCfStack(CleanupS3BucketMixin, CfStack):
    """ This abstraction will work for a simple DC/OS template.
    A simple template has its exhibitor bucket and auto scaling groups
    for each of the master, public agent, and private agent groups
    """
    @classmethod
    def create(cls, stack_name: str, template_url: str, public_agents: int, private_agents: int,
               admin_location: str, key_pair_name: str, boto_wrapper: BotoWrapper):
        """Start creation of a simple DC/OS stack; returns (stack, ssh_info)."""
        parameters = {
            'KeyName': key_pair_name,
            'AdminLocation': admin_location,
            'PublicSlaveInstanceCount': str(public_agents),
            'SlaveInstanceCount': str(private_agents)}
        stack = boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
        # Use stack_name as the binding identifier. At time of implementation,
        # stack.stack_name returns stack_id if Stack was created with ID
        return cls(stack.stack_id, boto_wrapper), SSH_INFO['coreos']

    def _asg_instances(self, logical_id):
        # yield the EC2 instances of the auto scaling group with this logical id
        yield from self.boto_wrapper.get_auto_scaling_instances(
            self.stack.Resource(logical_id).physical_resource_id)

    @property
    def master_instances(self):
        yield from self._asg_instances('MasterServerGroup')

    @property
    def private_agent_instances(self):
        yield from self._asg_instances('SlaveServerGroup')

    @property
    def public_agent_instances(self):
        yield from self._asg_instances('PublicSlaveServerGroup')

    def get_master_ips(self):
        return instances_to_hosts(self.master_instances)

    def get_private_agent_ips(self):
        return instances_to_hosts(self.private_agent_instances)

    def get_public_agent_ips(self):
        return instances_to_hosts(self.public_agent_instances)
class MasterStack(CleanupS3BucketMixin, CfStack):
    """Thin wrapper for the master sub-stack of a zen template."""
    @property
    def instances(self):
        asg_id = self.stack.Resource('MasterServerGroup').physical_resource_id
        yield from self.boto_wrapper.get_auto_scaling_instances(asg_id)
class PrivateAgentStack(CfStack):
    """Thin wrapper for the private agent sub-stack of a zen template."""
    @property
    def instances(self):
        asg_id = self.stack.Resource('PrivateAgentServerGroup').physical_resource_id
        yield from self.boto_wrapper.get_auto_scaling_instances(asg_id)
class PublicAgentStack(CfStack):
    """Thin wrapper for the public agent sub-stack of a zen template."""
    @property
    def instances(self):
        asg_id = self.stack.Resource('PublicAgentServerGroup').physical_resource_id
        yield from self.boto_wrapper.get_auto_scaling_instances(asg_id)
class DcosZenCfStack(CfStack):
    """Zen stacks are stacks that have the masters, infra, public agents, and private
    agents split into resources stacks under one zen stack
    """
    @classmethod
    def create(cls, stack_name, boto_wrapper, template_url,
               public_agents, private_agents, key_pair_name,
               private_agent_type, public_agent_type, master_type,
               gateway, vpc, private_subnet, public_subnet):
        """Start creation of a zen stack; returns (stack, ssh_info).

        The SSH login info is derived from the OS token embedded in the
        template URL's filename.
        """
        parameters = {
            'KeyName': key_pair_name,
            'Vpc': vpc,
            'InternetGateway': gateway,
            'MasterInstanceType': master_type,
            'PublicAgentInstanceCount': public_agents,
            'PublicAgentInstanceType': public_agent_type,
            'PublicSubnet': public_subnet,
            'PrivateAgentInstanceCount': private_agents,
            'PrivateAgentInstanceType': private_agent_type,
            'PrivateSubnet': private_subnet}
        stack = boto_wrapper.create_stack(stack_name, parameters, template_url=template_url)
        os_string = None
        try:
            # e.g. '.../zen-el7-foo.json' -> 'el7'
            os_string = template_url.split('/')[-1].split('.')[-2].split('-')[0]
            ssh_info = CF_OS_SSH_INFO[os_string]
        except (KeyError, IndexError):
            log.critical('Unexpected template URL: {}'.format(template_url))
            if os_string is not None:
                log.critical('No SSH info for OS string: {}'.format(os_string))
            raise
        return cls(stack.stack_id, boto_wrapper), ssh_info
    @property
    def master_stack(self):
        """MasterStack wrapper for the nested master resource stack."""
        return MasterStack(
            self.stack.Resource('MasterStack').physical_resource_id, self.boto_wrapper)
    @property
    def private_agent_stack(self):
        """PrivateAgentStack wrapper for the nested private agent stack."""
        return PrivateAgentStack(
            self.stack.Resource('PrivateAgentStack').physical_resource_id, self.boto_wrapper)
    @property
    def public_agent_stack(self):
        """PublicAgentStack wrapper for the nested public agent stack."""
        return PublicAgentStack(
            self.stack.Resource('PublicAgentStack').physical_resource_id, self.boto_wrapper)
    @property
    def infrastructure(self):
        """CfStack wrapper for the nested infrastructure stack."""
        return CfStack(self.stack.Resource('Infrastructure').physical_resource_id, self.boto_wrapper)
    def delete(self):
        log.info('Starting deletion of Zen CF stack')
        # boto stacks become unusable after deletion (e.g. status/info checks) if name-based
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(self.stack.stack_id)
        # These resources might have failed to create or been removed prior; catch
        # their failures and log them out
        for s in [self.infrastructure, self.master_stack, self.private_agent_stack,
                  self.public_agent_stack]:
            try:
                s.delete()
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only operational errors should be logged
            except Exception:
                log.exception('Delete encountered an error!')
        super().delete()
    @property
    def master_instances(self):
        yield from self.master_stack.instances
    @property
    def private_agent_instances(self):
        yield from self.private_agent_stack.instances
    @property
    def public_agent_instances(self):
        yield from self.public_agent_stack.instances
    def get_master_ips(self):
        return instances_to_hosts(self.master_instances)
    def get_private_agent_ips(self):
        return instances_to_hosts(self.private_agent_instances)
    def get_public_agent_ips(self):
        return instances_to_hosts(self.public_agent_instances)
class BareClusterCfStack(CfStack):
    """CloudFormation stack of a homogeneous group of hosts (no DC/OS)."""
    @classmethod
    def create(cls, stack_name, instance_type, instance_os, instance_count,
               admin_location, key_pair_name, boto_wrapper):
        """Launch a bare cluster, resolving the AMI from the OS name.
        Returns (stack, ssh_info) for the chosen OS."""
        ami = OS_AMIS[instance_os][boto_wrapper.region]
        stack = cls.create_from_ami(
            stack_name,
            instance_type,
            ami,
            instance_count,
            admin_location,
            key_pair_name,
            boto_wrapper,
        )
        return stack, OS_SSH_INFO[instance_os]

    @classmethod
    def create_from_ami(cls, stack_name, instance_type, instance_ami, instance_count,
                        admin_location, key_pair_name, boto_wrapper):
        """Launch a bare cluster from an explicit AMI id."""
        body = template_by_instance_type(instance_type)
        parameters = {
            'KeyPair': key_pair_name,
            'AllowAccessFrom': admin_location,
            'ClusterSize': instance_count,
            'InstanceType': instance_type,
            'AmiCode': instance_ami,
        }
        stack = boto_wrapper.create_stack(stack_name, parameters, template_body=body)
        return cls(stack.stack_id, boto_wrapper)

    def delete(self):
        # boto stacks become unusable after deletion (e.g. status/info checks) if name-based
        stack_id = self.stack.stack_id
        self.stack = self.boto_wrapper.resource('cloudformation').Stack(stack_id)
        self.stack.delete()

    @property
    def instances(self):
        asg_id = self.stack.Resource('BareServerAutoScale').physical_resource_id
        yield from self.boto_wrapper.get_auto_scaling_instances(asg_id)

    def get_host_ips(self):
        return instances_to_hosts(self.instances)
# Default SSH login user and home directory for each OS family.
SSH_INFO = {
    'centos': SshInfo(
        user='centos',
        home_dir='/home/centos',
    ),
    'coreos': SshInfo(
        user='core',
        home_dir='/home/core',
    ),
    'debian': SshInfo(
        user='admin',
        home_dir='/home/admin',
    ),
    'rhel': SshInfo(
        user='ec2-user',
        home_dir='/home/ec2-user',
    ),
    'ubuntu': SshInfo(
        user='ubuntu',
        home_dir='/home/ubuntu',
    ),
}
# Maps the OS labels accepted by BareClusterCfStack.create to SSH credentials.
OS_SSH_INFO = {
    'cent-os-7': SSH_INFO['centos'],
    'cent-os-7-dcos-prereqs': SSH_INFO['centos'],
    'coreos': SSH_INFO['coreos'],
    'debian-8': SSH_INFO['debian'],
    'rhel-7': SSH_INFO['rhel'],
    'ubuntu-16-04': SSH_INFO['ubuntu'],
}
# SSH credentials keyed by the OS labels used in CloudFormation templates.
CF_OS_SSH_INFO = {
    'el7': SSH_INFO['centos'],
    'coreos': SSH_INFO['coreos']
}
# Per-region AMI IDs for each supported OS label.
# NOTE(review): AMI IDs are region-scoped, yet 'coreos' repeats the same ID
# across regions ('ap-northeast-1'/'ap-southeast-1' and
# 'ap-southeast-2'/'sa-east-1') — looks like a copy/paste slip; verify.
OS_AMIS = {
    'cent-os-7': {'ap-northeast-1': 'ami-965345f8',
                  'ap-southeast-1': 'ami-332de750',
                  'ap-southeast-2': 'ami-c80320ab',
                  'eu-central-1': 'ami-1548ae7a',
                  'eu-west-1': 'ami-2ea92f5d',
                  'sa-east-1': 'ami-2921ad45',
                  'us-east-1': 'ami-fa9b9390',
                  'us-west-1': 'ami-12b3ce72',
                  'us-west-2': 'ami-edf11b8d'},
    'cent-os-7-dcos-prereqs': {'ap-northeast-1': 'ami-5942133e',
                               'ap-southeast-1': 'ami-83ea59e0',
                               'ap-southeast-2': 'ami-7f393b1c',
                               'eu-central-1': 'ami-9e13c7f1',
                               'eu-west-1': 'ami-41b89327',
                               'sa-east-1': 'ami-6d600101',
                               'us-east-1': 'ami-84862092',
                               'us-west-1': 'ami-794f1619',
                               'us-west-2': 'ami-4953df29'},
    'coreos': {'ap-northeast-1': 'ami-84e0c7ea',
               'ap-southeast-1': 'ami-84e0c7ea',
               'ap-southeast-2': 'ami-f35b0590',
               'eu-central-1': 'ami-fdd4c791',
               'eu-west-1': 'ami-55d20b26',
               'sa-east-1': 'ami-f35b0590',
               'us-east-1': 'ami-37bdc15d',
               'us-west-1': 'ami-27553a47',
               'us-west-2': 'ami-00ebfc61'},
    'debian-8': {'ap-northeast-1': 'ami-fe54f3fe',
                 'ap-southeast-1': 'ami-60989c32',
                 'ap-southeast-2': 'ami-07e3993d',
                 'eu-central-1': 'ami-b092aaad',
                 'eu-west-1': 'ami-0ed89d79',
                 'sa-east-1': 'ami-a5bd3fb8',
                 'us-east-1': 'ami-8b9a63e0',
                 'us-west-1': 'ami-a5d621e1',
                 'us-west-2': 'ami-3d56520d'},
    'rhel-7': {'ap-northeast-1': 'ami-35556534',
               'ap-southeast-1': 'ami-941031c6',
               'ap-southeast-2': 'ami-83e08db9',
               'eu-central-1': 'ami-e25e6cff',
               'eu-west-1': 'ami-8cff51fb',
               'sa-east-1': 'ami-595ce844',
               'us-east-1': 'ami-a8d369c0',
               'us-west-1': 'ami-33cdd876',
               'us-west-2': 'ami-99bef1a9'},
    'ubuntu-16-04': {'ap-northeast-1': 'ami-0919cd68',
                     'ap-southeast-1': 'ami-42934921',
                     'ap-southeast-2': 'ami-623c0d01',
                     'eu-central-1': 'ami-a9a557c6',
                     'eu-west-1': 'ami-643d4217',
                     'sa-east-1': 'ami-60bd2d0c',
                     'us-east-1': 'ami-2ef48339',
                     'us-west-1': 'ami-a9a8e4c9',
                     'us-west-2': 'ami-746aba14'}
}
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
from openstackclient.identity.v3 import role_assignment
from openstackclient.tests import fakes
from openstackclient.tests.identity.v3 import fakes as identity_fakes
class TestRoleAssignment(identity_fakes.TestIdentityv3):
    """Common base for role-assignment command tests."""

    def setUp(self):
        # Currently adds nothing beyond the base fixture setup.
        super(TestRoleAssignment, self).setUp()
class TestRoleAssignmentList(TestRoleAssignment):
    """Tests for the 'role assignment list' command and its filters."""

    def setUp(self):
        # Bug fix: this previously called super(TestRoleAssignment, ...),
        # which skipped TestRoleAssignment.setUp() in the MRO. Name our
        # own class so the full setUp chain runs.
        super(TestRoleAssignmentList, self).setUp()

        # Get a shortcut to the UserManager Mock
        self.users_mock = self.app.client_manager.identity.users
        self.users_mock.reset_mock()

        # Get a shortcut to the GroupManager Mock
        self.groups_mock = self.app.client_manager.identity.groups
        self.groups_mock.reset_mock()

        # Get a shortcut to the DomainManager Mock
        self.domains_mock = self.app.client_manager.identity.domains
        self.domains_mock.reset_mock()

        # Get a shortcut to the ProjectManager Mock
        self.projects_mock = self.app.client_manager.identity.projects
        self.projects_mock.reset_mock()

        # Get a shortcut to the RoleManager Mock
        self.roles_mock = self.app.client_manager.identity.roles
        self.roles_mock.reset_mock()

        # Get a shortcut to the RoleAssignmentManager Mock
        self.role_assignments_mock = (
            self.app.client_manager.identity.role_assignments)
        self.role_assignments_mock.reset_mock()

        # Get the command object to test
        self.cmd = role_assignment.ListRoleAssignment(self.app, None)

    def test_role_assignment_list_no_filters(self):
        """With no filters, all assignments are listed unfiltered."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID),
                loaded=True,
            ),
        ]

        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=None,
            group=None,
            effective=False,
            role=None,
            user=None,
            project=None)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            identity_fakes.project_id,
            ''
        ), (identity_fakes.role_id,
            '',
            identity_fakes.group_id,
            identity_fakes.project_id,
            ''
            ),)
        self.assertEqual(tuple(data), datalist)

    def test_role_assignment_list_user(self):
        """--user filters the listing by a resolved user."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID),
                loaded=True,
            ),
        ]

        arglist = [
            '--user', identity_fakes.user_name
        ]
        verifylist = [
            ('user', identity_fakes.user_name),
            ('group', None),
            ('domain', None),
            ('project', None),
            ('role', None),
            ('effective', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=None,
            user=self.users_mock.get(),
            group=None,
            project=None,
            role=None,
            effective=False)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            '',
            identity_fakes.domain_id
        ), (identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            identity_fakes.project_id,
            ''
            ),)
        self.assertEqual(tuple(data), datalist)

    def test_role_assignment_list_group(self):
        """--group filters the listing by a resolved group."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_GROUP_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID),
                loaded=True,
            ),
        ]

        arglist = [
            '--group', identity_fakes.group_name
        ]
        verifylist = [
            ('user', None),
            ('group', identity_fakes.group_name),
            ('domain', None),
            ('project', None),
            ('role', None),
            ('effective', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=None,
            group=self.groups_mock.get(),
            effective=False,
            project=None,
            role=None,
            user=None)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            '',
            identity_fakes.group_id,
            '',
            identity_fakes.domain_id
        ), (identity_fakes.role_id,
            '',
            identity_fakes.group_id,
            identity_fakes.project_id,
            ''
            ),)
        self.assertEqual(tuple(data), datalist)

    def test_role_assignment_list_domain(self):
        """--domain filters the listing by a resolved domain."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_GROUP_ID),
                loaded=True,
            ),
        ]

        arglist = [
            '--domain', identity_fakes.domain_name
        ]
        verifylist = [
            ('user', None),
            ('group', None),
            ('domain', identity_fakes.domain_name),
            ('project', None),
            ('role', None),
            ('effective', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=self.domains_mock.get(),
            group=None,
            effective=False,
            project=None,
            role=None,
            user=None)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            '',
            identity_fakes.domain_id
        ), (identity_fakes.role_id,
            '',
            identity_fakes.group_id,
            '',
            identity_fakes.domain_id
            ),)
        self.assertEqual(tuple(data), datalist)

    def test_role_assignment_list_project(self):
        """--project filters the listing by a resolved project."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_GROUP_ID),
                loaded=True,
            ),
        ]

        arglist = [
            '--project', identity_fakes.project_name
        ]
        verifylist = [
            ('user', None),
            ('group', None),
            ('domain', None),
            ('project', identity_fakes.project_name),
            ('role', None),
            ('effective', False),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=None,
            group=None,
            effective=False,
            project=self.projects_mock.get(),
            role=None,
            user=None)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            identity_fakes.project_id,
            ''
        ), (identity_fakes.role_id,
            '',
            identity_fakes.group_id,
            identity_fakes.project_id,
            ''
            ),)
        self.assertEqual(tuple(data), datalist)

    def test_role_assignment_list_effective(self):
        """--effective is forwarded to the API call."""
        self.role_assignments_mock.list.return_value = [
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_PROJECT_ID_AND_USER_ID),
                loaded=True,
            ),
            fakes.FakeResource(
                None,
                copy.deepcopy(
                    identity_fakes.ASSIGNMENT_WITH_DOMAIN_ID_AND_USER_ID),
                loaded=True,
            ),
        ]

        arglist = ['--effective']
        verifylist = [
            ('user', None),
            ('group', None),
            ('domain', None),
            ('project', None),
            ('role', None),
            ('effective', True),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)

        # DisplayCommandBase.take_action() returns two tuples
        columns, data = self.cmd.take_action(parsed_args)

        self.role_assignments_mock.list.assert_called_with(
            domain=None,
            group=None,
            effective=True,
            project=None,
            role=None,
            user=None)

        collist = ('Role', 'User', 'Group', 'Project', 'Domain')
        self.assertEqual(columns, collist)
        datalist = ((
            identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            identity_fakes.project_id,
            ''
        ), (identity_fakes.role_id,
            identity_fakes.user_id,
            '',
            '',
            identity_fakes.domain_id,
            ),)
        self.assertEqual(tuple(data), datalist)
| |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import datetime
from enum import Enum
import six
from cryptography import utils
from cryptography.hazmat.primitives.asymmetric import dsa, ec, rsa
from cryptography.x509.extensions import Extension, ExtensionType
from cryptography.x509.name import Name
# Lower bound used to validate certificate/CRL timestamps.
_UNIX_EPOCH = datetime.datetime(1970, 1, 1)
class Version(Enum):
    """X.509 certificate versions.

    Values are the RFC 5280 integer encodings (version number minus
    one), hence v1 == 0 and v3 == 2.
    """

    v1 = 0
    v3 = 2
def load_pem_x509_certificate(data, backend):
    """Load a PEM-encoded X.509 certificate via *backend*."""
    return backend.load_pem_x509_certificate(data)
def load_der_x509_certificate(data, backend):
    """Load a DER-encoded X.509 certificate via *backend*."""
    return backend.load_der_x509_certificate(data)
def load_pem_x509_csr(data, backend):
    """Load a PEM-encoded certificate signing request via *backend*."""
    return backend.load_pem_x509_csr(data)
def load_der_x509_csr(data, backend):
    """Load a DER-encoded certificate signing request via *backend*."""
    return backend.load_der_x509_csr(data)
def load_pem_x509_crl(data, backend):
    """Load a PEM-encoded certificate revocation list via *backend*."""
    return backend.load_pem_x509_crl(data)
def load_der_x509_crl(data, backend):
    """Load a DER-encoded certificate revocation list via *backend*."""
    return backend.load_der_x509_crl(data)
class InvalidVersion(Exception):
    """Raised for an unsupported certificate version.

    The raw version value is kept on ``parsed_version`` for callers.
    """

    def __init__(self, msg, parsed_version):
        super(InvalidVersion, self).__init__(msg)
        self.parsed_version = parsed_version
@six.add_metaclass(abc.ABCMeta)
class Certificate(object):
    """Abstract interface implemented by backend-specific certificates."""

    @abc.abstractmethod
    def fingerprint(self, algorithm):
        """
        Returns bytes using digest passed.
        """

    @abc.abstractproperty
    def serial_number(self):
        """
        Returns certificate serial number
        """

    @abc.abstractproperty
    def version(self):
        """
        Returns the certificate version
        """

    @abc.abstractmethod
    def public_key(self):
        """
        Returns the public key
        """

    @abc.abstractproperty
    def not_valid_before(self):
        """
        Not before time (represented as UTC datetime)
        """

    @abc.abstractproperty
    def not_valid_after(self):
        """
        Not after time (represented as UTC datetime)
        """

    @abc.abstractproperty
    def issuer(self):
        """
        Returns the issuer name object.
        """

    @abc.abstractproperty
    def subject(self):
        """
        Returns the subject name object.
        """

    @abc.abstractproperty
    def signature_hash_algorithm(self):
        """
        Returns a HashAlgorithm corresponding to the type of the digest signed
        in the certificate.
        """

    @abc.abstractproperty
    def extensions(self):
        """
        Returns an Extensions object.
        """

    @abc.abstractproperty
    def signature(self):
        """
        Returns the signature bytes.
        """

    @abc.abstractproperty
    def tbs_certificate_bytes(self):
        """
        Returns the tbsCertificate payload bytes as defined in RFC 5280.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Checks equality.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Checks not equal.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Computes a hash.
        """

    @abc.abstractmethod
    def public_bytes(self, encoding):
        """
        Serializes the certificate to PEM or DER format.
        """
@six.add_metaclass(abc.ABCMeta)
class CertificateRevocationList(object):
    """Abstract interface implemented by backend-specific CRL objects."""

    @abc.abstractmethod
    def public_bytes(self, encoding):
        """
        Serializes the CRL to PEM or DER format.
        """

    @abc.abstractmethod
    def fingerprint(self, algorithm):
        """
        Returns bytes using digest passed.
        """

    @abc.abstractproperty
    def signature_hash_algorithm(self):
        """
        Returns a HashAlgorithm corresponding to the type of the digest signed
        in the CRL.
        """

    @abc.abstractproperty
    def issuer(self):
        """
        Returns the X509Name with the issuer of this CRL.
        """

    @abc.abstractproperty
    def next_update(self):
        """
        Returns the date of next update for this CRL.
        """

    @abc.abstractproperty
    def last_update(self):
        """
        Returns the date of last update for this CRL.
        """

    @abc.abstractproperty
    def extensions(self):
        """
        Returns an Extensions object containing a list of CRL extensions.
        """

    @abc.abstractproperty
    def signature(self):
        """
        Returns the signature bytes.
        """

    @abc.abstractproperty
    def tbs_certlist_bytes(self):
        """
        Returns the tbsCertList payload bytes as defined in RFC 5280.
        """

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Checks equality.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Checks not equal.
        """
@six.add_metaclass(abc.ABCMeta)
class CertificateSigningRequest(object):
    """Abstract interface implemented by backend-specific CSR objects."""

    @abc.abstractmethod
    def __eq__(self, other):
        """
        Checks equality.
        """

    @abc.abstractmethod
    def __ne__(self, other):
        """
        Checks not equal.
        """

    @abc.abstractmethod
    def __hash__(self):
        """
        Computes a hash.
        """

    @abc.abstractmethod
    def public_key(self):
        """
        Returns the public key
        """

    @abc.abstractproperty
    def subject(self):
        """
        Returns the subject name object.
        """

    @abc.abstractproperty
    def signature_hash_algorithm(self):
        """
        Returns a HashAlgorithm corresponding to the type of the digest signed
        in the request.
        """

    @abc.abstractproperty
    def extensions(self):
        """
        Returns the extensions in the signing request.
        """

    @abc.abstractmethod
    def public_bytes(self, encoding):
        """
        Encodes the request to PEM or DER format.
        """

    @abc.abstractproperty
    def signature(self):
        """
        Returns the signature bytes.
        """

    @abc.abstractproperty
    def tbs_certrequest_bytes(self):
        """
        Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC
        2986.
        """

    @abc.abstractproperty
    def is_signature_valid(self):
        """
        Verifies signature of signing request.
        """
@six.add_metaclass(abc.ABCMeta)
class RevokedCertificate(object):
    """Abstract interface for a single revoked-certificate CRL entry."""

    @abc.abstractproperty
    def serial_number(self):
        """
        Returns the serial number of the revoked certificate.
        """

    @abc.abstractproperty
    def revocation_date(self):
        """
        Returns the date of when this certificate was revoked.
        """

    @abc.abstractproperty
    def extensions(self):
        """
        Returns an Extensions object containing a list of Revoked extensions.
        """
class CertificateSigningRequestBuilder(object):
    """Immutable builder for X.509 certificate signing requests.

    Each setter returns a new builder instance rather than mutating
    ``self``.
    """

    def __init__(self, subject_name=None, extensions=None):
        """
        Creates an empty X.509 certificate request (v1).
        """
        self._subject_name = subject_name
        # Bug fix: the default was a shared mutable list ([]); use None
        # so every builder gets its own fresh list.
        self._extensions = [] if extensions is None else extensions

    def subject_name(self, name):
        """
        Sets the certificate requestor's distinguished name.
        """
        if not isinstance(name, Name):
            raise TypeError('Expecting x509.Name object.')
        if self._subject_name is not None:
            raise ValueError('The subject name may only be set once.')
        return CertificateSigningRequestBuilder(name, self._extensions)

    def add_extension(self, extension, critical):
        """
        Adds an X.509 extension to the certificate request.
        """
        if not isinstance(extension, ExtensionType):
            raise TypeError("extension must be an ExtensionType")

        extension = Extension(extension.oid, critical, extension)

        # TODO: This is quadratic in the number of extensions
        for e in self._extensions:
            if e.oid == extension.oid:
                raise ValueError('This extension has already been set.')
        return CertificateSigningRequestBuilder(
            self._subject_name, self._extensions + [extension]
        )

    def sign(self, private_key, algorithm, backend):
        """
        Signs the request using the requestor's private key.
        """
        if self._subject_name is None:
            raise ValueError("A CertificateSigningRequest must have a subject")
        return backend.create_x509_csr(self, private_key, algorithm)
class CertificateBuilder(object):
    """Immutable builder for X.509 (v3) certificates.

    Each setter returns a new builder instance rather than mutating
    ``self``; ``sign`` validates that all required fields were set.
    """

    def __init__(self, issuer_name=None, subject_name=None,
                 public_key=None, serial_number=None, not_valid_before=None,
                 not_valid_after=None, extensions=None):
        self._version = Version.v3
        self._issuer_name = issuer_name
        self._subject_name = subject_name
        self._public_key = public_key
        self._serial_number = serial_number
        self._not_valid_before = not_valid_before
        self._not_valid_after = not_valid_after
        # Bug fix: the default was a shared mutable list ([]); use None
        # so every builder gets its own fresh list.
        self._extensions = [] if extensions is None else extensions

    def issuer_name(self, name):
        """
        Sets the CA's distinguished name.
        """
        if not isinstance(name, Name):
            raise TypeError('Expecting x509.Name object.')
        if self._issuer_name is not None:
            raise ValueError('The issuer name may only be set once.')
        return CertificateBuilder(
            name, self._subject_name, self._public_key,
            self._serial_number, self._not_valid_before,
            self._not_valid_after, self._extensions
        )

    def subject_name(self, name):
        """
        Sets the requestor's distinguished name.
        """
        if not isinstance(name, Name):
            raise TypeError('Expecting x509.Name object.')
        if self._subject_name is not None:
            raise ValueError('The subject name may only be set once.')
        return CertificateBuilder(
            self._issuer_name, name, self._public_key,
            self._serial_number, self._not_valid_before,
            self._not_valid_after, self._extensions
        )

    def public_key(self, key):
        """
        Sets the requestor's public key (as found in the signing request).
        """
        if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey,
                                ec.EllipticCurvePublicKey)):
            raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey,'
                            ' or EllipticCurvePublicKey.')
        if self._public_key is not None:
            raise ValueError('The public key may only be set once.')
        return CertificateBuilder(
            self._issuer_name, self._subject_name, key,
            self._serial_number, self._not_valid_before,
            self._not_valid_after, self._extensions
        )

    def serial_number(self, number):
        """
        Sets the certificate serial number.
        """
        if not isinstance(number, six.integer_types):
            raise TypeError('Serial number must be of integral type.')
        if self._serial_number is not None:
            raise ValueError('The serial number may only be set once.')
        if number < 0:
            raise ValueError('The serial number should be non-negative.')
        if utils.bit_length(number) > 160:  # As defined in RFC 5280
            raise ValueError('The serial number should not be more than 160 '
                             'bits.')
        return CertificateBuilder(
            self._issuer_name, self._subject_name,
            self._public_key, number, self._not_valid_before,
            self._not_valid_after, self._extensions
        )

    def not_valid_before(self, time):
        """
        Sets the certificate activation time.
        """
        if not isinstance(time, datetime.datetime):
            raise TypeError('Expecting datetime object.')
        if self._not_valid_before is not None:
            raise ValueError('The not valid before may only be set once.')
        if time <= _UNIX_EPOCH:
            raise ValueError('The not valid before date must be after the unix'
                             ' epoch (1970 January 1).')
        if self._not_valid_after is not None and time > self._not_valid_after:
            raise ValueError(
                'The not valid before date must be before the not valid after '
                'date.'
            )
        return CertificateBuilder(
            self._issuer_name, self._subject_name,
            self._public_key, self._serial_number, time,
            self._not_valid_after, self._extensions
        )

    def not_valid_after(self, time):
        """
        Sets the certificate expiration time.
        """
        if not isinstance(time, datetime.datetime):
            raise TypeError('Expecting datetime object.')
        if self._not_valid_after is not None:
            raise ValueError('The not valid after may only be set once.')
        if time <= _UNIX_EPOCH:
            raise ValueError('The not valid after date must be after the unix'
                             ' epoch (1970 January 1).')
        if (self._not_valid_before is not None and
                time < self._not_valid_before):
            raise ValueError(
                'The not valid after date must be after the not valid before '
                'date.'
            )
        return CertificateBuilder(
            self._issuer_name, self._subject_name,
            self._public_key, self._serial_number, self._not_valid_before,
            time, self._extensions
        )

    def add_extension(self, extension, critical):
        """
        Adds an X.509 extension to the certificate.
        """
        if not isinstance(extension, ExtensionType):
            raise TypeError("extension must be an ExtensionType")

        extension = Extension(extension.oid, critical, extension)

        # TODO: This is quadratic in the number of extensions
        for e in self._extensions:
            if e.oid == extension.oid:
                raise ValueError('This extension has already been set.')

        return CertificateBuilder(
            self._issuer_name, self._subject_name,
            self._public_key, self._serial_number, self._not_valid_before,
            self._not_valid_after, self._extensions + [extension]
        )

    def sign(self, private_key, algorithm, backend):
        """
        Signs the certificate using the CA's private key.
        """
        if self._subject_name is None:
            raise ValueError("A certificate must have a subject name")

        if self._issuer_name is None:
            raise ValueError("A certificate must have an issuer name")

        if self._serial_number is None:
            raise ValueError("A certificate must have a serial number")

        if self._not_valid_before is None:
            raise ValueError("A certificate must have a not valid before time")

        if self._not_valid_after is None:
            raise ValueError("A certificate must have a not valid after time")

        if self._public_key is None:
            raise ValueError("A certificate must have a public key")

        return backend.create_x509_certificate(self, private_key, algorithm)
class CertificateRevocationListBuilder(object):
    """Immutable builder for certificate revocation lists.

    Each setter returns a new builder instance rather than mutating
    ``self``.
    """

    def __init__(self, issuer_name=None, last_update=None, next_update=None,
                 extensions=None, revoked_certificates=None):
        self._issuer_name = issuer_name
        self._last_update = last_update
        self._next_update = next_update
        # Bug fix: the defaults were shared mutable lists ([]); use None
        # so every builder gets its own fresh lists.
        self._extensions = [] if extensions is None else extensions
        self._revoked_certificates = (
            [] if revoked_certificates is None else revoked_certificates)

    def issuer_name(self, issuer_name):
        """
        Sets the issuer's distinguished name.
        """
        if not isinstance(issuer_name, Name):
            raise TypeError('Expecting x509.Name object.')
        if self._issuer_name is not None:
            raise ValueError('The issuer name may only be set once.')
        return CertificateRevocationListBuilder(
            issuer_name, self._last_update, self._next_update,
            self._extensions, self._revoked_certificates
        )

    def last_update(self, last_update):
        """
        Sets the CRL's thisUpdate time.
        """
        if not isinstance(last_update, datetime.datetime):
            raise TypeError('Expecting datetime object.')
        if self._last_update is not None:
            raise ValueError('Last update may only be set once.')
        if last_update <= _UNIX_EPOCH:
            raise ValueError('The last update date must be after the unix'
                             ' epoch (1970 January 1).')
        if self._next_update is not None and last_update > self._next_update:
            raise ValueError(
                'The last update date must be before the next update date.'
            )
        return CertificateRevocationListBuilder(
            self._issuer_name, last_update, self._next_update,
            self._extensions, self._revoked_certificates
        )

    def next_update(self, next_update):
        """
        Sets the CRL's nextUpdate time.
        """
        if not isinstance(next_update, datetime.datetime):
            raise TypeError('Expecting datetime object.')
        if self._next_update is not None:
            # Bug fix: message previously said "Last update".
            raise ValueError('Next update may only be set once.')
        if next_update <= _UNIX_EPOCH:
            # Bug fix: message previously said "last update date".
            raise ValueError('The next update date must be after the unix'
                             ' epoch (1970 January 1).')
        if self._last_update is not None and next_update < self._last_update:
            raise ValueError(
                'The next update date must be after the last update date.'
            )
        return CertificateRevocationListBuilder(
            self._issuer_name, self._last_update, next_update,
            self._extensions, self._revoked_certificates
        )

    def add_extension(self, extension, critical):
        """
        Adds an X.509 extension to the certificate revocation list.
        """
        if not isinstance(extension, ExtensionType):
            raise TypeError("extension must be an ExtensionType")

        extension = Extension(extension.oid, critical, extension)

        # TODO: This is quadratic in the number of extensions
        for e in self._extensions:
            if e.oid == extension.oid:
                raise ValueError('This extension has already been set.')
        return CertificateRevocationListBuilder(
            self._issuer_name, self._last_update, self._next_update,
            self._extensions + [extension], self._revoked_certificates
        )

    def add_revoked_certificate(self, revoked_certificate):
        """
        Adds a revoked certificate to the CRL.
        """
        if not isinstance(revoked_certificate, RevokedCertificate):
            raise TypeError("Must be an instance of RevokedCertificate")
        return CertificateRevocationListBuilder(
            self._issuer_name, self._last_update,
            self._next_update, self._extensions,
            self._revoked_certificates + [revoked_certificate]
        )

    def sign(self, private_key, algorithm, backend):
        """
        Signs the CRL, validating that required fields were set.
        """
        if self._issuer_name is None:
            raise ValueError("A CRL must have an issuer name")

        if self._last_update is None:
            raise ValueError("A CRL must have a last update time")

        if self._next_update is None:
            raise ValueError("A CRL must have a next update time")

        return backend.create_x509_crl(self, private_key, algorithm)
class RevokedCertificateBuilder(object):
    """Immutable builder for a revoked-certificate CRL entry.

    Each setter returns a new builder instance rather than mutating
    ``self``.
    """

    def __init__(self, serial_number=None, revocation_date=None,
                 extensions=None):
        self._serial_number = serial_number
        self._revocation_date = revocation_date
        # Bug fix: the default was a shared mutable list ([]); use None
        # so every builder gets its own fresh list.
        self._extensions = [] if extensions is None else extensions

    def serial_number(self, number):
        """
        Sets the serial number of the certificate being revoked.
        """
        if not isinstance(number, six.integer_types):
            raise TypeError('Serial number must be of integral type.')
        if self._serial_number is not None:
            raise ValueError('The serial number may only be set once.')
        if number < 0:
            raise ValueError('The serial number should be non-negative.')
        if utils.bit_length(number) > 160:  # As defined in RFC 5280
            raise ValueError('The serial number should not be more than 160 '
                             'bits.')
        return RevokedCertificateBuilder(
            number, self._revocation_date, self._extensions
        )

    def revocation_date(self, time):
        """
        Sets the revocation time.
        """
        if not isinstance(time, datetime.datetime):
            raise TypeError('Expecting datetime object.')
        if self._revocation_date is not None:
            raise ValueError('The revocation date may only be set once.')
        if time <= _UNIX_EPOCH:
            raise ValueError('The revocation date must be after the unix'
                             ' epoch (1970 January 1).')
        return RevokedCertificateBuilder(
            self._serial_number, time, self._extensions
        )

    def add_extension(self, extension, critical):
        """
        Adds an X.509 extension to this entry.
        """
        if not isinstance(extension, ExtensionType):
            raise TypeError("extension must be an ExtensionType")

        extension = Extension(extension.oid, critical, extension)

        # TODO: This is quadratic in the number of extensions
        for e in self._extensions:
            if e.oid == extension.oid:
                raise ValueError('This extension has already been set.')
        return RevokedCertificateBuilder(
            self._serial_number, self._revocation_date,
            self._extensions + [extension]
        )

    def build(self, backend):
        """
        Builds the RevokedCertificate, validating required fields.
        """
        if self._serial_number is None:
            raise ValueError("A revoked certificate must have a serial number")
        if self._revocation_date is None:
            raise ValueError(
                "A revoked certificate must have a revocation date"
            )

        return backend.create_x509_revoked_certificate(self)
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
# Situation 1: shape is a list(without tensor)
class TestExpandV2OpRank1(OpTest):
    """Base expand_v2 test: target 'shape' passed as a plain attribute list."""

    def setUp(self):
        self.op_type = "expand_v2"
        self.init_data()

        self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
        self.attrs = {'shape': self.shape}
        # Expected result computed with numpy.tile and the equivalent
        # per-dimension repeat counts.
        output = np.tile(self.inputs['X'], self.expand_times)
        self.outputs = {'Out': output}

    def init_data(self):
        # Overridden by subclasses to vary input/target shapes.
        self.ori_shape = [100]
        self.shape = [100]
        self.expand_times = [1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
    """Expanding adds a new leading dimension (rank 1 -> rank 2)."""

    def init_data(self):
        self.ori_shape = [120]
        self.shape = [2, 120]
        self.expand_times = [2, 1]
class TestExpandV2OpRank2(TestExpandV2OpRank1):
    """Broadcasting a size-1 leading dimension to size 12."""

    def init_data(self):
        self.ori_shape = [1, 140]
        self.shape = [12, 140]
        self.expand_times = [12, 1]
class TestExpandV2OpRank3_Corner(TestExpandV2OpRank1):
    """Corner case: target shape equals the input shape (no-op expand)."""

    def init_data(self):
        self.ori_shape = (2, 10, 5)
        self.shape = (2, 10, 5)
        self.expand_times = (1, 1, 1)
class TestExpandV2OpRank4(TestExpandV2OpRank1):
    """All-(-1) target shape: each dimension keeps its input size."""

    def init_data(self):
        self.ori_shape = (2, 4, 5, 7)
        self.shape = (-1, -1, -1, -1)
        self.expand_times = (1, 1, 1, 1)
# Situation 2: shape is a list(with tensor)
class TestExpandV2OpRank1_tensor_attr(OpTest):
    """expand_v2 with the target shape given as a list of int32 tensors."""

    def setUp(self):
        self.op_type = "expand_v2"
        self.init_data()
        # One ("xN", 1-element int32 tensor) pair per target dimension.
        expand_shapes_tensor = []
        for index, ele in enumerate(self.expand_shape):
            expand_shapes_tensor.append(("x" + str(index), np.ones(
                (1)).astype('int32') * ele))

        self.inputs = {
            'X': np.random.random(self.ori_shape).astype("float64"),
            'expand_shapes_tensor': expand_shapes_tensor,
        }
        # -1 entries in the attribute mean the size comes from the tensors.
        self.attrs = {"shape": self.infer_expand_shape}
        output = np.tile(self.inputs['X'], self.expand_times)
        self.outputs = {'Out': output}

    def init_data(self):
        self.ori_shape = [100]
        self.expand_times = [1]
        self.expand_shape = [100]
        self.infer_expand_shape = [-1]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
    """Rank-2 no-op expand with a partially inferred (-1) target shape."""

    def init_data(self):
        self.ori_shape = [12, 14]
        self.expand_times = [1, 1]
        self.expand_shape = [12, 14]
        self.infer_expand_shape = [12, -1]
# Situation 3: shape is a tensor
class TestExpandV2OpRank1_tensor(OpTest):
    """expand_v2 with the target shape supplied as one 'Shape' tensor input."""

    def setUp(self):
        self.op_type = "expand_v2"
        self.init_data()

        self.inputs = {
            'X': np.random.random(self.ori_shape).astype("float64"),
            'Shape': np.array(self.expand_shape).astype("int32"),
        }
        # No 'shape' attribute: the op must read the 'Shape' input instead.
        self.attrs = {}
        output = np.tile(self.inputs['X'], self.expand_times)
        self.outputs = {'Out': output}

    def init_data(self):
        self.ori_shape = [100]
        self.expand_times = [2, 1]
        self.expand_shape = [2, 100]

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
# Situation 4: input x is Integer
class TestExpandV2OpInteger(OpTest):
    """expand_v2 on int32 input; output only (no gradient test for ints)."""

    def setUp(self):
        self.op_type = "expand_v2"
        self.inputs = {
            'X': np.random.randint(
                10, size=(2, 4, 5)).astype("int32")
        }
        self.attrs = {'shape': [2, 4, 5]}
        output = np.tile(self.inputs['X'], (1, 1, 1))
        self.outputs = {'Out': output}

    def test_check_output(self):
        self.check_output()
# Situation 5: input x is Bool
class TestExpandV2OpBoolean(OpTest):
    """expand_v2 on a boolean input (identity expansion)."""

    def setUp(self):
        self.op_type = "expand_v2"
        x = np.random.randint(2, size=(2, 4, 5)).astype("bool")
        self.inputs = {'X': x}
        self.attrs = {'shape': [2, 4, 5]}
        self.outputs = {'Out': np.tile(x, (1, 1, 1))}

    def test_check_output(self):
        # No gradient check: boolean inputs are not differentiable.
        self.check_output()
# Situation 6: input x is int64
class TestExpandV2OpInt64_t(OpTest):
    """expand_v2 on an int64 input (identity expansion)."""

    def setUp(self):
        self.op_type = "expand_v2"
        x = np.random.randint(10, size=(2, 4, 5)).astype("int64")
        self.inputs = {'X': x}
        self.attrs = {'shape': [2, 4, 5]}
        self.outputs = {'Out': np.tile(x, (1, 1, 1))}

    def test_check_output(self):
        # No gradient check: integer inputs are not differentiable.
        self.check_output()
class TestExpandV2Error(unittest.TestCase):
    """Input-validation error cases for ``paddle.tensor.expand``."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # A raw LoDTensor is not a valid Variable input.
            x1 = fluid.create_lod_tensor(
                np.array([[-1]]), [[1]], fluid.CPUPlace())
            shape = [2, 2]
            self.assertRaises(TypeError, paddle.tensor.expand, x1, shape)
            # uint8 is not among the dtypes expand accepts.
            x2 = fluid.layers.data(name='x2', shape=[4], dtype="uint8")
            self.assertRaises(TypeError, paddle.tensor.expand, x2, shape)
            # bool inputs must keep stop_gradient=True.
            x3 = fluid.layers.data(name='x3', shape=[4], dtype="bool")
            x3.stop_gradient = False
            self.assertRaises(ValueError, paddle.tensor.expand, x3, shape)
# Test python API
class TestExpandV2API(unittest.TestCase):
    """Python API test for ``paddle.expand`` with the three ways of
    passing the target shape: a literal list, a list containing a
    Variable, and a full shape tensor."""

    def test_api(self):
        input = np.random.random([12, 14]).astype("float32")
        x = fluid.layers.data(
            name='x', shape=[12, 14], append_batch_size=False, dtype="float32")
        # A scalar tensor holding the first target dimension (12).
        positive_2 = fluid.layers.fill_constant([1], "int32", 12)
        expand_shape = fluid.layers.data(
            name="expand_shape",
            shape=[2],
            append_batch_size=False,
            dtype="int32")
        out_1 = paddle.expand(x, shape=[12, 14])
        out_2 = paddle.expand(x, shape=[positive_2, 14])
        out_3 = paddle.expand(x, shape=expand_shape)
        # Also make sure the op is differentiable through the API.
        g0 = fluid.backward.calc_gradient(out_2, x)
        exe = fluid.Executor(place=fluid.CPUPlace())
        res_1, res_2, res_3 = exe.run(fluid.default_main_program(),
                                      feed={
                                          "x": input,
                                          "expand_shape":
                                          np.array([12, 14]).astype("int32")
                                      },
                                      fetch_list=[out_1, out_2, out_3])
        # Target shape equals the input shape, so all outputs are identity.
        assert np.array_equal(res_1, np.tile(input, (1, 1)))
        assert np.array_equal(res_2, np.tile(input, (1, 1)))
        assert np.array_equal(res_3, np.tile(input, (1, 1)))
if __name__ == "__main__":
    # Static-graph mode is required by the OpTest/program_guard cases above.
    paddle.enable_static()
    unittest.main()
| |
# sqlalchemy/exc.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Exceptions used with SQLAlchemy.
The base exception class is :class:`.SQLAlchemyError`. Exceptions which are raised as a
result of DBAPI exceptions are all subclasses of
:class:`.DBAPIError`.
"""
class SQLAlchemyError(Exception):
    """Generic error class; root of the SQLAlchemy exception hierarchy."""
class ArgumentError(SQLAlchemyError):
    """Raised when an invalid or conflicting function argument is supplied.

    This error generally corresponds to construction time state errors,
    as opposed to runtime state errors (:class:`.InvalidRequestError`).
    """
class CircularDependencyError(SQLAlchemyError):
    """Raised by topological sorts when a circular dependency is detected.

    The participating cycle nodes and the full edge set are kept on the
    exception for inspection.
    """
    def __init__(self, message, cycles, edges):
        detail = ": cycles: %r all edges: %r" % (cycles, edges)
        SQLAlchemyError.__init__(self, message + detail)
        self.cycles = cycles
        self.edges = edges
class CompileError(SQLAlchemyError):
    """Raised when an error occurs during SQL statement compilation."""
class IdentifierError(SQLAlchemyError):
    """Raised when a schema name is beyond the max character limit."""
# Moved to orm.exc; a compatibility alias is installed here by the orm
# import (deprecated, to be removed in 0.6).
ConcurrentModificationError = None
class DisconnectionError(SQLAlchemyError):
    """A disconnect is detected on a raw DB-API connection.

    This error is raised and consumed internally by a connection pool. It can
    be raised by a ``PoolListener`` so that the host pool forces a disconnect.
    """
# Moved to orm.exc; a compatibility alias is installed here by the orm
# import (deprecated, to be removed in 0.6).
FlushError = None
# NOTE(review): name shadows the Python 3.3+ builtin TimeoutError; harmless
# on the Python 2 this file targets, but worth confirming on a py3 port.
class TimeoutError(SQLAlchemyError):
    """Raised when a connection pool times out on getting a connection."""
class InvalidRequestError(SQLAlchemyError):
    """SQLAlchemy was asked to do something it can't do.

    This error generally corresponds to runtime state errors, as opposed
    to construction time errors (:class:`.ArgumentError`).
    """
class ResourceClosedError(InvalidRequestError):
    """An operation was requested from a connection, cursor, or other
    object that's in a closed state."""
# Subclasses KeyError so ``row['missing']`` callers can catch either type.
class NoSuchColumnError(KeyError, InvalidRequestError):
    """A nonexistent column is requested from a ``RowProxy``."""
class NoReferenceError(InvalidRequestError):
    """Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Table`` cannot be located.

    The unresolved table name is kept in :attr:`.table_name`.
    """
    def __init__(self, message, tname):
        NoReferenceError.__init__(self, message)
        self.table_name = tname
class NoReferencedColumnError(NoReferenceError):
    """Raised by ``ForeignKey`` when the referred ``Column`` cannot be located.

    The unresolved table/column names are kept in :attr:`.table_name`
    and :attr:`.column_name`.
    """
    def __init__(self, message, tname, cname):
        NoReferenceError.__init__(self, message)
        self.table_name = tname
        self.column_name = cname
class NoSuchTableError(InvalidRequestError):
    """Table does not exist or is not visible to a connection."""
class UnboundExecutionError(InvalidRequestError):
    """SQL was attempted without a database connection to execute it on."""
# Moved to orm.exc; a compatibility alias is installed here by the orm
# import (deprecated, to be removed in 0.6).
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
    """An error occurred during execution of a SQL statement.

    :class:`.StatementError` wraps the exception raised
    during execution, and features :attr:`.statement`
    and :attr:`.params` attributes which supply context regarding
    the specifics of the statement which had an issue.

    The wrapped exception object is available in
    the :attr:`.orig` attribute.
    """
    def __init__(self, message, statement, params, orig):
        SQLAlchemyError.__init__(self, message)
        self.statement = statement
        self.params = params
        self.orig = orig

    def __str__(self):
        # Abbreviate very large executemany() parameter lists so that
        # the exception message stays readable.
        params = self.params
        details = [SQLAlchemyError.__str__(self), repr(self.statement)]
        many_sets = (isinstance(params, (list, tuple))
                     and len(params) > 10
                     and isinstance(params[0], (list, dict, tuple)))
        if many_sets:
            details.append(repr(params[:2]))
            details.append('... and a total of %i bound parameter sets'
                           % len(params))
        else:
            details.append(repr(params))
        return ' '.join(details)
class DBAPIError(StatementError):
    """Raised when the execution of a database operation fails.

    ``DBAPIError`` wraps exceptions raised by the DB-API underlying the
    database operation. Driver-specific implementations of the standard
    DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
    ``DBAPIError`` when possible. DB-API's ``Error`` type maps to
    ``DBAPIError`` in SQLAlchemy, otherwise the names are identical. Note
    that there is no guarantee that different DB-API implementations will
    raise the same exception type for any given error condition.

    :class:`.DBAPIError` features :attr:`.statement`
    and :attr:`.params` attributes which supply context regarding
    the specifics of the statement which had an issue, for the
    typical case when the error was raised within the context of
    emitting a SQL statement.

    The wrapped exception object is available in the :attr:`.orig` attribute.
    Its type and properties are DB-API implementation specific.
    """
    @classmethod
    def instance(cls, statement, params,
                 orig,
                 dbapi_base_err,
                 connection_invalidated=False):
        """Build the most specific wrapper available for ``orig``.

        Looks up a sibling class in this module whose name matches the
        DB-API exception's class name; falls back to ``cls`` itself.
        """
        # Don't ever wrap these, just return them directly as if
        # DBAPIError didn't exist.
        if isinstance(orig, (KeyboardInterrupt, SystemExit)):
            return orig
        if orig is not None:
            # not a DBAPI error, statement is present.
            # raise a StatementError
            if not isinstance(orig, dbapi_base_err) and statement:
                return StatementError(str(orig), statement, params, orig)
            # Match e.g. psycopg2's "IntegrityError" to our IntegrityError.
            name, glob = orig.__class__.__name__, globals()
            if name in glob and issubclass(glob[name], DBAPIError):
                cls = glob[name]
        return cls(statement, params, orig, connection_invalidated)

    def __init__(self, statement, params, orig, connection_invalidated=False):
        try:
            text = str(orig)
        except (KeyboardInterrupt, SystemExit):
            raise
        # NOTE(review): Python-2-only "except ..., e" syntax; would need
        # "except Exception as e" for a Python 3 port.
        except Exception, e:
            text = 'Error in str() of DB-API-generated exception: ' + str(e)
        StatementError.__init__(
            self,
            '(%s) %s' % (orig.__class__.__name__, text),
            statement,
            params,
            orig
        )
        self.connection_invalidated = connection_invalidated
class InterfaceError(DBAPIError):
    """Wraps a DB-API ``InterfaceError``."""
class DatabaseError(DBAPIError):
    """Wraps a DB-API ``DatabaseError``."""
class DataError(DatabaseError):
    """Wraps a DB-API ``DataError``."""
class OperationalError(DatabaseError):
    """Wraps a DB-API ``OperationalError``."""
class IntegrityError(DatabaseError):
    """Wraps a DB-API ``IntegrityError``."""
class InternalError(DatabaseError):
    """Wraps a DB-API ``InternalError``."""
class ProgrammingError(DatabaseError):
    """Wraps a DB-API ``ProgrammingError``."""
class NotSupportedError(DatabaseError):
    """Wraps a DB-API ``NotSupportedError``."""
# Warnings
class SADeprecationWarning(DeprecationWarning):
    """Issued once per usage of a deprecated API."""
class SAPendingDeprecationWarning(PendingDeprecationWarning):
    """Issued once per usage of an API pending deprecation."""
class SAWarning(RuntimeWarning):
    """Issued at runtime for notable but non-fatal conditions."""
| |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
'''
Monitor the Windows Event Log
'''
# stdlib
import calendar
from datetime import datetime, timedelta
# project
from checks.wmi_check import WinWMICheck, to_time, from_time
from utils.containers import hash_mutable
from utils.timeout import TimeoutException
# Datadog event metadata attached to every forwarded log entry.
SOURCE_TYPE_NAME = 'event viewer'
EVENT_TYPE = 'win32_log_event'
class Win32EventLogWMI(WinWMICheck):
    """Agent check that polls ``Win32_NTLogEvent`` over WMI and forwards
    new Windows Event Log entries as Datadog events.

    Each instance keeps the timestamp of its last poll and only queries
    events generated after it.
    """
    # Properties requested from each Win32_NTLogEvent row.
    # "Logfile" is required: LogEvent._msg_title reads ev['Logfile'] to
    # build the event title, so it must be part of the WMI selection.
    EVENT_PROPERTIES = [
        "Message",
        "SourceName",
        "TimeGenerated",
        "Type",
        "User",
        "InsertionStrings",
        "EventCode",
        "Logfile"
    ]
    NAMESPACE = "root\\CIMV2"
    CLASS = "Win32_NTLogEvent"

    def __init__(self, name, init_config, agentConfig, instances=None):
        WinWMICheck.__init__(self, name, init_config, agentConfig,
                             instances=instances)
        # Last poll time, keyed by instance; used to fetch only new events.
        self.last_ts = {}

    def check(self, instance):
        # Connect to the WMI provider
        host = instance.get('host', "localhost")
        username = instance.get('username', "")
        password = instance.get('password', "")
        instance_tags = instance.get('tags', [])
        notify = instance.get('notify', [])
        user = instance.get('user')
        ltypes = instance.get('type', [])
        source_names = instance.get('source_name', [])
        log_files = instance.get('log_file', [])
        event_ids = instance.get('event_id', [])
        message_filters = instance.get('message_filters', [])
        instance_hash = hash_mutable(instance)
        instance_key = self._get_instance_key(host, self.NAMESPACE, self.CLASS, instance_hash)

        # First run for this instance: just record "now" and return, so we
        # never replay the whole historical log.
        if instance_key not in self.last_ts:
            self.last_ts[instance_key] = datetime.utcnow()
            return

        # Build the WQL filter: only events newer than the last poll, then
        # AND/OR the optional per-field restrictions from the config.
        query = {}
        filters = []
        last_ts = self.last_ts[instance_key]
        query['TimeGenerated'] = ('>=', self._dt_to_wmi(last_ts))
        if user:
            query['User'] = ('=', user)
        if ltypes:
            query['Type'] = []
            for ltype in ltypes:
                query['Type'].append(('=', ltype))
        if source_names:
            query['SourceName'] = []
            for source_name in source_names:
                query['SourceName'].append(('=', source_name))
        if log_files:
            query['LogFile'] = []
            for log_file in log_files:
                query['LogFile'].append(('=', log_file))
        if event_ids:
            query['EventCode'] = []
            for event_id in event_ids:
                query['EventCode'].append(('=', event_id))
        if message_filters:
            # A leading '-' on a filter means "exclude messages LIKE this".
            query['NOT Message'] = []
            query['Message'] = []
            for filt in message_filters:
                if filt[0] == '-':
                    query['NOT Message'].append(('LIKE', filt[1:]))
                else:
                    query['Message'].append(('LIKE', filt))
        filters.append(query)

        wmi_sampler = self._get_wmi_sampler(
            instance_key,
            self.CLASS, self.EVENT_PROPERTIES,
            filters=filters,
            host=host, namespace=self.NAMESPACE,
            username=username, password=password,
            and_props=['Message']
        )

        try:
            wmi_sampler.sample()
        except TimeoutException:
            self.log.warning(
                u"[Win32EventLog] WMI query timed out."
                u" class={wmi_class} - properties={wmi_properties} -"
                u" filters={filters} - tags={tags}".format(
                    wmi_class=self.CLASS, wmi_properties=self.EVENT_PROPERTIES,
                    filters=filters, tags=instance_tags
                )
            )
        else:
            for ev in wmi_sampler:
                # for local events we dont need to specify a hostname
                hostname = None if (host == "localhost" or host == ".") else host
                log_ev = LogEvent(ev, hostname, instance_tags, notify,
                                  self.init_config.get('tag_event_id', False))

                # Since WQL only compares on the date and NOT the time, we have to
                # do a secondary check to make sure events are after the last
                # timestamp
                if log_ev.is_after(last_ts):
                    self.event(log_ev.to_event_dict())
                else:
                    self.log.debug('Skipping event after %s. ts=%s' % (last_ts, log_ev.timestamp))

            # Update the last time checked
            self.last_ts[instance_key] = datetime.utcnow()

    def _dt_to_wmi(self, dt):
        ''' A wrapper around wmi.from_time to get a WMI-formatted time from a
            time struct.
        '''
        return from_time(year=dt.year, month=dt.month, day=dt.day,
                         hours=dt.hour, minutes=dt.minute,
                         seconds=dt.second, microseconds=0, timezone=0)
class LogEvent(object):
    """A single Win32_NTLogEvent row, adapted to a Datadog event dict."""

    def __init__(self, ev, hostname, tags, notify_list, tag_event_id):
        self.event = ev
        self.hostname = hostname
        # Optionally append an "event_id:<EventCode>" tag.
        self.tags = self._tags(tags, self.event['EventCode']) if tag_event_id else tags
        self.notify_list = notify_list
        self.timestamp = self._wmi_to_ts(self.event['TimeGenerated'])

    @property
    def _msg_title(self):
        # NOTE(review): reads ev['Logfile'] — confirm the WMI query
        # actually selects that property.
        return '{logfile}/{source}'.format(
            logfile=self.event['Logfile'],
            source=self.event['SourceName'])

    @property
    def _msg_text(self):
        # Prefer the formatted Message; fall back to the raw insertion
        # strings when the message template could not be resolved.
        msg_text = ""
        if 'Message' in self.event:
            msg_text = "{message}\n".format(message=self.event['Message'])
        elif 'InsertionStrings' in self.event:
            msg_text = "\n".join([i_str for i_str in self.event['InsertionStrings']
                                  if i_str.strip()])
        if self.notify_list:
            # "@handle" mentions trigger Datadog notifications.
            msg_text += "\n{notify_list}".format(
                notify_list=' '.join([" @" + n for n in self.notify_list]))
        return msg_text

    @property
    def _alert_type(self):
        event_type = self.event['Type']
        # Convert to a Datadog alert type
        if event_type == 'Warning':
            return 'warning'
        elif event_type == 'Error':
            return 'error'
        return 'info'

    @property
    def _aggregation_key(self):
        return self.event['SourceName']

    def to_event_dict(self):
        """Render this log entry as the dict expected by ``self.event()``."""
        event_dict = {
            'timestamp': self.timestamp,
            'event_type': EVENT_TYPE,
            'msg_title': self._msg_title,
            'msg_text': self._msg_text.strip(),
            'aggregation_key': self._aggregation_key,
            'alert_type': self._alert_type,
            'source_type_name': SOURCE_TYPE_NAME,
            'tags': self.tags
        }
        if self.hostname:
            event_dict['host'] = self.hostname
        return event_dict

    def is_after(self, ts):
        ''' Compare this event's timestamp to a give timestamp. '''
        if self.timestamp >= int(calendar.timegm(ts.timetuple())):
            return True
        return False

    def _wmi_to_ts(self, wmi_ts):
        ''' Convert a wmi formatted timestamp into an epoch.
        '''
        year, month, day, hour, minute, second, microsecond, tz = to_time(wmi_ts)
        # Normalize the local time + UTC offset into UTC: a positive offset
        # means local is ahead of UTC, so it must be subtracted.
        tz_delta = timedelta(minutes=int(tz))
        if '+' in wmi_ts:
            tz_delta = - tz_delta
        dt = datetime(year=year, month=month, day=day, hour=hour, minute=minute,
                      second=second, microsecond=microsecond) + tz_delta
        return int(calendar.timegm(dt.timetuple()))

    def _tags(self, tags, event_code):
        ''' Inject additional tags into the list already supplied to LogEvent.
        '''
        # Copy first so the caller's list is never mutated.
        tags_list = []
        if tags is not None:
            tags_list += list(tags)
        tags_list.append("event_id:{event_id}".format(event_id=event_code))
        return tags_list
| |
"""This module is the heart of PyEcore. It defines all the basic concepts that
are common to EMF-Java and PyEcore (EObject/EClass...).
It defines the basic classes and behavior for PyEcore implementation:
* EObject
* EPackage
* EClass
* EAttribute
* EReference
* EDataType
These concepts are enough if dynamic metamodel instances are handled (code
generation is not required).
In addition, ``@EMetaclass`` annotation and ``MetaEClass`` metaclass are
used for static metamodels definition.
"""
import sys
import keyword
import inspect
from decimal import Decimal
from datetime import datetime
from ordered_set import OrderedSet
from weakref import WeakSet
from RestrictedPython import compile_restricted, safe_builtins
from .notification import ENotifer, Kind
from .innerutils import ignored, javaTransMap, parse_date
# EPackage metadata for the built-in Ecore package (modules that define
# nsURI are treated as packages — see SpecialEPackage below).
name = 'ecore'
nsPrefix = 'ecore'
nsURI = 'http://www.eclipse.org/emf/2002/Ecore'
# This var will be automatically populated.
# In this case, it MUST be set to an empty dict,
# otherwise, the getEClassifier would be overridden
eClassifiers = {}  # Will be automatically populated
eSubpackages = []
def default_eURIFragment():
    """Return the default root URI fragment (``'/'``).

    :return: the root URI fragment
    :rtype: str
    """
    return '/'
def eURIFragment():
    """Return the URI fragment used for the Ecore module itself (``'#/'``).

    :return: the root URI fragment for Ecore
    :rtype: str
    """
    return '#/'
def getEClassifier(name, searchspace=None):
    """Look up a classifier by name, returning ``None`` when absent.

    :param name: classifier name to look up
    :param searchspace: mapping to search; defaults to this module's
        ``eClassifiers`` registry
    """
    space = searchspace or eClassifiers
    return space.get(name)
class Core(object):
    """Helpers that lift plain Python classes into the PyEcore metamodel.

    ``_promote`` reflects over a class to build its ``eClass`` meta-object;
    ``register_classifier`` records a classifier in its owning EPackage
    (the defining module doubles as the package).
    """
    @staticmethod
    def _promote(rcls, abstract=False):
        # Attach a fresh EClass meta-object backed by the Python class.
        rcls.eClass = EClass(rcls.__name__, metainstance=rcls)
        rcls.eClass.abstract = abstract
        rcls._staticEClass = True
        # init super types
        eSuperTypes_add = rcls.eClass.eSuperTypes.append
        for _cls in rcls.__bases__:
            if _cls is EObject:
                continue
            # Bases without an eClass (plain mixins) are silently skipped.
            with ignored(Exception):
                eSuperTypes_add(_cls.eClass)
        # init eclass by reflection
        eStructuralFeatures_add = rcls.eClass.eStructuralFeatures.append
        eTypeParameters_add = rcls.eClass.eTypeParameters.add
        for k, feature in rcls.__dict__.items():
            if isinstance(feature, EStructuralFeature):
                if not feature.name:
                    feature.name = k
                eStructuralFeatures_add(feature)
            elif isinstance(feature, ETypeParameter):
                if not feature.name:
                    feature.name = k
                eTypeParameters_add(feature)
            elif inspect.isfunction(feature):
                # Public instance methods become EOperations; dunders and
                # non-methods (no leading 'self') are ignored.
                if k.startswith('__'):
                    continue
                argspect = inspect.getfullargspec(feature)
                args = argspect.args
                if len(args) < 1 or args[0] != 'self':
                    continue
                operation = EOperation(feature.__name__)
                defaults = argspect.defaults
                len_defaults = len(defaults) if defaults else 0
                nb_required = len(args) - len_defaults
                for i, parameter_name in enumerate(args):
                    parameter = EParameter(parameter_name, eType=ENativeType)
                    # Parameters without a default value are required.
                    if i < nb_required:
                        parameter.required = True
                    operation.eParameters.append(parameter)
                rcls.eClass.eOperations.append(operation)

    @classmethod
    def register_classifier(cls, rcls, abstract=False, promote=False):
        """Register ``rcls`` in the EPackage of its defining module,
        optionally promoting it to a full EClass first."""
        if promote:
            cls._promote(rcls, abstract)
        epackage = sys.modules[rcls.__module__]
        # Lazily equip the module with the EPackage protocol.
        if not hasattr(epackage, 'eClassifiers'):
            eclassifs = {}
            epackage.eClassifiers = eclassifs
            epackage.getEClassifier = \
                lambda x: getEClassifier(x, searchspace=eclassifs)
        if not hasattr(epackage, 'eClass'):
            pack_name = (epackage.__name__ if epackage.__name__ != '__main__'
                         else 'default_package')
            epackage.eClass = EPackage(name=pack_name,
                                       nsPrefix=pack_name,
                                       nsURI='http://{}/'.format(pack_name))
        if not hasattr(epackage, 'eURIFragment'):
            epackage.eURIFragment = eURIFragment
        cname = rcls.name if isinstance(rcls, EClassifier) else rcls.__name__
        epackage.eClassifiers[cname] = rcls
        # EDataTypes are classifiers themselves; classes contribute eClass.
        if isinstance(rcls, EDataType):
            epackage.eClass.eClassifiers.append(rcls)
            rcls._container = epackage
        else:
            epackage.eClass.eClassifiers.append(rcls.eClass)
            rcls.eClass._container = epackage
class Metasubinstance(type):
    """Metaclass letting ``issubclass()`` accept EClass instances by
    comparing against their backing Python class."""
    def __subclasscheck__(cls, other):
        if isinstance(other, EClass):
            other = other.python_class
        return type.__subclasscheck__(cls, other)
# Meta methods for static EClass
class MetaEClass(Metasubinstance):
    """Metaclass for static metamodel classes: registers the class as a
    classifier on creation and blocks instantiation of abstract EClasses."""
    def __init__(cls, name, bases, nmspc):
        super().__init__(name, bases, nmspc)
        Core.register_classifier(cls, promote=True)
        cls._staticEClass = True

    def __call__(cls, *args, **kwargs):
        # Abstract EClasses cannot be instantiated, mirroring EMF semantics.
        if cls.eClass.abstract:
            raise TypeError("Can't instantiate abstract EClass {0}"
                            .format(cls.eClass.name))
        return super().__call__(*args, **kwargs)
class EObject(ENotifer, metaclass=Metasubinstance):
    """Root of every PyEcore model object.

    Tracks containment (container + containing feature), the owning
    resource, notification listeners and inverse references. All live
    instances are kept in a class-level WeakSet for ``allInstances``.
    """
    _staticEClass = True
    _instances = WeakSet()

    def __new__(cls, *args, **kwargs):
        # Initialize bookkeeping in __new__ so even subclasses that skip
        # __init__ (e.g. via __new__) get a consistent state.
        instance = super().__new__(cls)
        instance._internal_id = None
        instance._isset = set()
        instance._container = None
        instance._containment_feature = None
        instance._eresource = None
        instance.listeners = []
        instance._eternal_listener = []
        instance._inverse_rels = set()
        instance._staticEClass = False
        cls._instances.add(instance)
        return instance

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @classmethod
    def allInstances(cls, resources=None):
        """Yield all live instances of ``cls``, optionally restricted to
        the given resources."""
        if resources:
            yield from (x for x in cls._instances
                        if isinstance(x, cls) and x.eResource in resources)
        else:
            yield from (x for x in cls._instances if isinstance(x, cls))

    def eContainer(self):
        """Return the object that contains this one (or None)."""
        return self._container

    def eContainmentFeature(self):
        """Return the reference through which this object is contained."""
        return self._containment_feature

    def eIsSet(self, feature):
        """Return True if ``feature`` (name or feature) was explicitly set."""
        if isinstance(feature, str):
            feature = self.eClass.findEStructuralFeature(feature)
        return feature in self._isset

    @property
    def eResource(self):
        # Resolution walks up the containment chain; only roots keep a
        # direct _eresource.
        try:
            return self._container.eResource
        except AttributeError:
            return self._eresource

    def eGet(self, feature):
        """Reflective getter accepting a feature name or feature object."""
        if isinstance(feature, str):
            return self.__getattribute__(feature)
        elif isinstance(feature, EStructuralFeature):
            return self.__getattribute__(feature.name)
        raise TypeError('Feature must have str or EStructuralFeature type')

    def eSet(self, feature, value):
        """Reflective setter accepting a feature name or feature object."""
        if isinstance(feature, str):
            self.__setattr__(feature, value)
        elif isinstance(feature, EStructuralFeature):
            self.__setattr__(feature.name, value)
        else:
            raise TypeError('Feature must have str or '
                            'EStructuralFeature type')

    def delete(self, recursive=True):
        """Remove this object from the model, clearing every reference
        (direct, inverse, and through proxy wrappers) that points at it."""
        if recursive:
            for obj in self.eAllContents():
                obj.delete()
        seek = set(self._inverse_rels)
        # we also clean all the object references
        seek.update((self, ref) for ref in self.eClass.eAllReferences())
        for owner, feature in seek:
            fvalue = owner.eGet(feature)
            if feature.many:
                if self in fvalue:
                    fvalue.remove(self)
                    continue
                elif self is owner:
                    fvalue.clear()
                    continue
                # Proxy case: the collection may hold a wrapper whose
                # _wrapped target is this object.
                value = next((val for val in fvalue
                              if getattr(val, '_wrapped', None) is self),
                             None)
                if value:
                    fvalue.remove(value)
            else:
                if self is fvalue or self is owner:
                    owner.eSet(feature, None)
                    continue
                value = (fvalue if getattr(fvalue, '_wrapped', None) is self
                         else None)
                if value:
                    owner.eSet(feature, None)

    @property
    def eContents(self):
        """Direct children: values of non-derived containment references."""
        children = []
        for feature in self.eClass.eAllReferences():
            if not feature.containment or feature.derived:
                continue
            if feature.many:
                values = self.__getattribute__(feature.name)
            else:
                values = [self.__getattribute__(feature.name)]
            children.extend((x for x in values if x))
        return children

    def eAllContents(self):
        """Yield every (transitively) contained object, depth-first."""
        contents = self.eContents
        yield from contents
        for x in contents:
            yield from x.eAllContents()

    def eURIFragment(self):
        """Build the URI fragment path for this object within its resource."""
        if not self.eContainer():
            if not self.eResource or len(self.eResource.contents) == 1:
                return '/'
            else:
                return '/{}'.format(self.eResource.contents.index(self))
        feat = self.eContainmentFeature()
        parent = self.eContainer()
        name = feat.name
        if feat.many:
            index = parent.__getattribute__(name).index(self)
            return '{0}/@{1}.{2}' \
                   .format(parent.eURIFragment(), name, str(index))
        else:
            return '{0}/@{1}'.format(parent.eURIFragment(), name)

    def eRoot(self):
        """Return the top of the containment chain (object or resource)."""
        if not self.eContainer():
            return self
        if not isinstance(self.eContainer(), EObject):
            return self.eContainer()
        return self.eContainer().eRoot()

    def __dir__(self):
        # Expose structural features and public operations for completion.
        eclass = self.eClass
        relevant = [x.name for x in eclass.eAllStructuralFeatures()]
        relevant.extend([x.name for x in eclass.eAllOperations()
                         if not x.name.startswith('_')])
        return relevant
class EModelElement(EObject):
    """Base for metamodel elements; named elements get name-based URI
    fragments and may carry EAnnotations."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def eURIFragment(self):
        if not self.eContainer():
            if not self.eResource or len(self.eResource.contents) == 1:
                return '#/'
            else:
                return '#/{}'.format(self.eResource.contents.index(self))
        parent = self.eContainer()
        # Named elements use their name; others fall back to index paths.
        if hasattr(self, 'name'):
            return '{0}/{1}'.format(parent.eURIFragment(), self.name)
        else:
            return super().eURIFragment()

    def getEAnnotation(self, source):
        """Return the annotation with a matching source attribute."""
        return next((a for a in self.eAnnotations if a.source == source), None)
class EAnnotation(EModelElement):
    """An annotation attached to a model element: a ``source`` identifier
    plus a free-form ``details`` mapping."""
    def __init__(self, source=None, **kwargs):
        super().__init__(**kwargs)
        self.details = {}
        self.source = source
class ENamedElement(EModelElement):
    """A model element identified by a ``name``."""
    def __init__(self, name=None, **kwargs):
        super().__init__(**kwargs)
        self.name = name
class SpecialEPackage(Metasubinstance):
    """Metaclass making ``isinstance(x, EPackage)`` also accept any module
    that defines an ``nsURI`` attribute (modules act as packages)."""
    def __instancecheck__(cls, instance):
        looks_like_package = (inspect.ismodule(instance)
                              and hasattr(instance, 'nsURI'))
        return looks_like_package or type.__instancecheck__(cls, instance)
class EPackage(ENamedElement, metaclass=SpecialEPackage):
    """A container of classifiers, identified by a namespace URI/prefix."""
    def __init__(self, name=None, nsURI=None, nsPrefix=None, **kwargs):
        super().__init__(name, **kwargs)
        self.nsURI = nsURI
        self.nsPrefix = nsPrefix

    def getEClassifier(self, name):
        """Return the contained classifier with this name, or None."""
        return next((c for c in self.eClassifiers if c.name == name), None)

    # Superseded by SpecialEPackage.__instancecheck__ above.
    # @staticmethod
    # def __isinstance__(self, instance=None):
    #     return (instance is None
    #             and (isinstance(self, EPackage)
    #                  or inspect.ismodule(self) and hasattr(self, 'nsURI')))
class ETypedElement(ENamedElement):
    """A named element with a type and a [lower..upper] multiplicity.

    The ``many`` flag (upper bound unbounded or > 1) is cached and kept
    up to date by self-registration as an eternal listener.
    """
    def __init__(self, name=None, eType=None, ordered=True, unique=True,
                 lower=0, upper=1, required=False, eGenericType=None,
                 **kwargs):
        super().__init__(name, **kwargs)
        self.eType = eType
        self.lowerBound = int(lower)
        self.upperBound = int(upper)
        self.ordered = ordered
        self.unique = unique
        self.required = required
        if eGenericType:
            self.eGenericType = eGenericType
        # Cache 'many' and listen to our own changes to keep it fresh.
        self._many_cache = self._compute_many()
        self._eternal_listener.append(self)

    def _compute_many(self):
        # upperBound < 0 conventionally means "unbounded" (-1 in EMF).
        upper = self.upperBound
        return upper < 0 or upper > 1

    def notifyChanged(self, notif):
        # Recompute the cache whenever upperBound is modified.
        if notif.feature is ETypedElement.upperBound:
            self._many_cache = self._compute_many()

    @property
    def upper(self):
        return self.upperBound

    @property
    def lower(self):
        return self.lowerBound

    @property
    def many(self):
        return self._many_cache
class EOperation(ETypedElement):
    """A typed operation (method) of an EClass, with parameters and
    declared exceptions."""
    def __init__(self, name=None, eType=None, params=None, exceptions=None,
                 **kwargs):
        super().__init__(name, eType, **kwargs)
        if params:
            self.eParameters.extend(params)
        if exceptions:
            self.eExceptions.extend(exceptions)

    def normalized_name(self):
        """Return the operation name, suffixed with '_' if it is a
        Python keyword."""
        name = self.name
        if keyword.iskeyword(name):
            name = name + '_'
        return name

    def to_code(self):
        """Emit a Python method stub for code generation; the body raises
        NotImplementedError."""
        parameters = [x.to_code() for x in self.eParameters]
        if len(parameters) == 0 or parameters[0] != 'self':
            parameters.insert(0, 'self')
        return """def {0}({1}):
        raise NotImplementedError('Method {0}({1}) is not yet implemented')
""".format(self.normalized_name(), ', '.join(parameters))
class EParameter(ETypedElement):
    """A parameter of an EOperation."""
    def __init__(self, name=None, eType=None, **kwargs):
        super().__init__(name, eType, **kwargs)

    def to_code(self):
        """Render as a Python parameter; optional parameters get their
        type's default value as keyword default."""
        if self.required:
            return "{0}".format(self.name)
        default_value = getattr(self.eType, 'default_value', None)
        return "{0}={1}".format(self.name, default_value)
class ETypeParameter(ENamedElement):
    """A generic type parameter, optionally bounded by generic types;
    isinstance checks delegate to the bounds' raw types."""
    def __init__(self, name=None, eBounds=None, **kwargs):
        super().__init__(name, **kwargs)
        if eBounds:
            self.eBounds.extend(eBounds)

    def raw_types(self):
        # No bounds means "anything": fall back to object.
        raw_types = tuple(x.eRawType for x in self.eBounds)
        if not raw_types:
            raw_types = object
        return raw_types

    def __instancecheck__(self, instance):
        raw_types = self.raw_types()
        return isinstance(instance, raw_types)

    def __str__(self):
        raw_types = self.raw_types()
        return '<{}[{}] object at {}>'.format(self.__class__.__name__,
                                              raw_types,
                                              hex(id(self)))
class EGenericType(EObject):
    """A generic type reference: either a concrete classifier or a type
    parameter (the classifier wins when both are set)."""
    def __init__(self, eTypeParameter=None, eClassifier=None, **kwargs):
        super().__init__(**kwargs)
        self.eTypeParameter = eTypeParameter
        self.eClassifier = eClassifier

    @property
    def eRawType(self):
        return self.eClassifier or self.eTypeParameter
# class SpecialEClassifier(Metasubinstance):
# def __instancecheck__(cls, instance):
# if cls is not EClassifier:
# return type.__instancecheck__(cls, instance)
# return isinstance(instance, Metasubinstance) or \
# isinstance(instance, (EClass, EDataType, EPackage))
class EClassifier(ENamedElement):
    """Abstract base of EClass and EDataType."""
    def __init__(self, name=None, **kwargs):
        super().__init__(name, **kwargs)

    @staticmethod
    def __isinstance__(self, instance=None):
        # Custom hook: a value counts as an EClassifier when it is one,
        # is a static metaclass-built class, or is flagged _staticEClass.
        return (instance is None
                and (self is EClassifier
                     or isinstance(self, (EClassifier, MetaEClass))
                     or getattr(self, '_staticEClass', False)))
class EDataType(EClassifier):
    """A classifier wrapping a plain Python type, with string conversion
    hooks and a default value (possibly produced by calling the type)."""
    # Maps Java instance class names to (python type, as_factory, default).
    transmap = javaTransMap

    def __init__(self, name=None, eType=None, default_value=None,
                 from_string=None, to_string=None, instanceClassName=None,
                 type_as_factory=False, **kwargs):
        super().__init__(name, **kwargs)
        self.eType = eType
        self.type_as_factory = type_as_factory
        self._default_value = default_value
        # Setting instanceClassName also derives eType/default (see setter).
        if instanceClassName:
            self.instanceClassName = instanceClassName
        else:
            self.instanceClassName_ = None
        if from_string:
            self.from_string = from_string
        if to_string:
            self.to_string = to_string

    def from_string(self, value):
        # Default conversion is the identity; overridden per datatype.
        return value

    def to_string(self, value):
        return str(value)

    def __instancecheck__(self, instance):
        return isinstance(instance, self.eType)

    @property
    def default_value(self):
        # type_as_factory datatypes build a fresh default on each access.
        if self.type_as_factory:
            return self.eType()
        else:
            return self._default_value

    @default_value.setter
    def default_value(self, value):
        self._default_value = value

    @property
    def instanceClassName(self):
        return self.instanceClassName_

    @instanceClassName.setter
    def instanceClassName(self, name):
        # Derive the Python type and default from the Java class name;
        # unknown names fall back to plain 'object' as a factory.
        self.instanceClassName_ = name
        default_type = (object, True, None)
        type_, type_as_factory, default = self.transmap.get(name, default_type)
        self.eType = type_
        self.type_as_factory = type_as_factory
        self.default_value = default

    def __repr__(self):
        etype = self.eType.__name__ if self.eType else None
        return '{0}({1})'.format(self.name, etype)
class EEnum(EDataType):
    """An enumeration datatype; literals are exposed as attributes and
    the first literal is the default value."""
    def __init__(self, name=None, default_value=None, literals=None, **kwargs):
        super().__init__(name, eType=self, **kwargs)
        # Listen to our own eLiterals changes to mirror them as attributes.
        self._eternal_listener.append(self)
        if literals:
            for i, lit_name in enumerate(literals):
                # Prefix a leading digit so the literal is a valid attribute.
                lit_name = '_' + lit_name if lit_name[:1].isnumeric() \
                    else lit_name
                literal = EEnumLiteral(value=i, name=lit_name)
                self.eLiterals.append(literal)
        if default_value:
            self.default_value = default_value

    def notifyChanged(self, notif):
        # Keep attribute access (enum.LITERAL) in sync with eLiterals.
        if notif.feature is EEnum.eLiterals:
            if notif.kind is Kind.ADD:
                literal = notif.new
                self.__setattr__(literal.name, literal)
            elif notif.kind is Kind.REMOVE:
                literal = notif.old
                del self.__dict__[literal.name]

    @property
    def default_value(self):
        return self.eLiterals[0] if self.eLiterals else None

    @default_value.setter
    def default_value(self, value):
        # Make 'value' the default by moving its literal to the front.
        if value in self:
            literal = (value if isinstance(value, EEnumLiteral)
                       else self.getEEnumLiteral(value))
            literals = self.eLiterals
            i = literals.index(literal)
            literals.insert(0, literals.pop(i))
        else:
            raise AttributeError('Enumeration literal {} does not exist '
                                 'in {}'.format(value, self))

    def __contains__(self, key):
        if isinstance(key, EEnumLiteral):
            return key in self.eLiterals
        return any(lit for lit in self.eLiterals if lit.name == key)

    def __instancecheck__(self, instance):
        return instance in self

    def getEEnumLiteral(self, name=None, value=0):
        """Find a literal by name (preferred) or by integer value."""
        try:
            if name:
                return next(lit for lit in self.eLiterals if lit.name == name)
            return next(lit for lit in self.eLiterals if lit.value == value)
        except StopIteration:
            return None

    def from_string(self, value):
        return self.getEEnumLiteral(name=value)

    def __repr__(self):
        name = self.name or ''
        return '{}[{}]'.format(name, str(self.eLiterals))
class EEnumLiteral(ENamedElement):
    """A single literal of an :class:`EEnum`, pairing a name with an
    integer value."""
    def __init__(self, name=None, value=0, **kwargs):
        super().__init__(name, **kwargs)
        self.value = value

    def __repr__(self):
        return f'{self.name}={self.value}'

    def __str__(self):
        return self.name
class EStructuralFeature(ETypedElement):
    """Base of EAttribute/EReference, implemented as a Python descriptor.

    Instances live on EClass-backed Python classes; ``__get__``/``__set__``
    lazily create a per-instance value holder (an ECollection for 'many'
    features, an EValue otherwise) stored in the instance ``__dict__``
    under the feature name. Name and type are shadowed in ``_name`` /
    ``_eType`` and kept in sync through notifications to avoid repeated
    meta-level lookups.
    """
    def __init__(self, name=None, eType=None, changeable=True, volatile=False,
                 transient=False, unsettable=False, derived=False,
                 derived_class=None, **kwargs):
        super().__init__(name, eType, **kwargs)
        self.changeable = changeable
        self.volatile = volatile
        self.transient = transient
        self.unsettable = unsettable
        self.derived = derived
        self.derived_class = derived_class or ECollection
        # Fast-path copies, refreshed by notifyChanged below.
        self._name = name
        self._eType = eType

    def notifyChanged(self, notif):
        super().notifyChanged(notif)
        # Keep the cached shadows in sync with the meta-level features.
        if notif.feature is ENamedElement.name:
            self._name = notif.new
        if notif.feature is ETypedElement.eType:
            self._eType = notif.new

    def __get__(self, instance, owner=None):
        if instance is None:
            return self
        name = self._name
        instance_dict = instance.__dict__
        if name in instance_dict:
            value = instance_dict[name]
            # EValue holders expose _get(); raw values are returned as-is.
            try:
                return value._get()
            except AttributeError:
                return value
        # First access: lazily create the value holder.
        if self.many:
            new_value = self.derived_class.create(instance, self)
        else:
            new_value = EValue(instance, self)
        instance_dict[name] = new_value
        return new_value._get()

    def __set__(self, instance, value):
        name = self._name
        instance_dict = instance.__dict__
        if name not in instance_dict:
            if self.many:
                new_value = self.derived_class.create(instance, self)
            else:
                new_value = EValue(instance, self)
            instance_dict[name] = new_value
            previous_value = new_value
        else:
            previous_value = instance_dict[name]
        # 'many' features cannot be reassigned wholesale, only mutated.
        if isinstance(previous_value, ECollection):
            if value is previous_value:
                return
            if value is not previous_value and isinstance(value, ECollection):
                raise AttributeError('Cannot reaffect an ECollection with '
                                     'another one, even if compatible')
            raise BadValueError(got=value, expected=previous_value.__class__)
        instance_dict[name]._set(value)

    def __delete__(self, instance):
        # Deleting resets to the default: clear collections, or reassign
        # the single value to the feature's default.
        name = self._name
        value = getattr(instance, name)
        if self.many:
            value.clear()
        else:
            setattr(instance, name, self.get_default_value())

    def __repr__(self):
        eType = getattr(self, 'eType', None)
        name = getattr(self, 'name', None)
        return '<{0} {1}: {2}>'.format(self.__class__.__name__, name, eType)
class EAttribute(EStructuralFeature):
    """Structural feature whose type is a datatype rather than a class."""

    def __init__(self, name=None, eType=None, default_value=None, iD=False,
                 defaultValueLiteral=None, **kwargs):
        super().__init__(name, eType, **kwargs)
        self.iD = iD
        self.default_value = default_value
        self.defaultValueLiteral = defaultValueLiteral
        # When no explicit default was given, inherit the datatype's one.
        if default_value is None and isinstance(eType, EDataType):
            self.default_value = eType.default_value

    def get_default_value(self):
        """Compute the value used while the attribute is unset."""
        etype = self._eType
        if etype is None:
            # Untyped attribute: fall back to the generic native type and
            # hand out a unique sentinel object.
            self.eType = ENativeType
            return object()
        if self.defaultValueLiteral is not None:
            return etype.from_string(self.defaultValueLiteral)
        if self.default_value is not None:
            return self.default_value
        return etype.default_value

    @property
    def is_reference(self):
        return False

    @property
    def is_attribute(self):
        return True
class EReference(EStructuralFeature):
    """Structural feature typed by an EClass; may own its values when
    ``containment`` is set."""

    def __init__(self, name=None, eType=None, containment=False,
                 eOpposite=None, **kwargs):
        super().__init__(name, eType, **kwargs)
        self.containment = containment
        self.eOpposite = eOpposite
        # Normalise a plain Python metainstance to its EClass.
        if not isinstance(eType, EClass) and hasattr(eType, 'eClass'):
            self.eType = eType.eClass

    @staticmethod
    def get_default_value():
        """An unset reference points at nothing."""
        return None

    @property
    def eOpposite(self):
        return self._eopposite

    @eOpposite.setter
    def eOpposite(self, value):
        self._eopposite = value
        # Keep the inverse link symmetric.
        if value:
            value._eopposite = self

    @property
    def container(self):
        """True when this end is contained by its opposite end."""
        opposite = self._eopposite
        return opposite and opposite.containment

    @property
    def is_reference(self):
        return True

    @property
    def is_attribute(self):
        return False
class EClass(EClassifier):
    """Meta-level class.

    Each EClass owns a dynamically built Python class (``python_class``)
    whose instances are the actual model objects.  Structural features and
    operations added to the EClass are mirrored onto ``python_class``
    through ``notifyChanged``.
    """

    def __new__(cls, name=None, superclass=None, metainstance=None, **kwargs):
        if not isinstance(name, str):
            raise BadValueError(got=name, expected=str)
        instance = super().__new__(cls)
        if isinstance(superclass, tuple):
            instance.eSuperTypes.extend(superclass)
        elif isinstance(superclass, EClass):
            instance.eSuperTypes.append(superclass)
        if metainstance:
            # Static metamodel: reuse the user-supplied Python class.
            instance.python_class = metainstance
            instance.__name__ = metainstance.__name__
        else:
            # Dynamic metamodel: synthesize the Python class on the fly.
            def new_init(self, *args, **kwargs):
                for name, value in kwargs.items():
                    setattr(self, name, value)
            attr_dict = {
                'eClass': instance,
                '_staticEClass': instance._staticEClass,
                '__init__': new_init
            }
            super_types = instance.__compute_supertypes()
            try:
                instance.python_class = type(name, super_types, attr_dict)
            except Exception:
                # MRO conflict: retry with bases ordered by the depth of
                # their supertype hierarchies.
                super_types = sorted(super_types,
                                     key=lambda x: len(x.eClass
                                                       .eAllSuperTypes()),
                                     reverse=True)
                instance.python_class = type(name,
                                             tuple(super_types),
                                             attr_dict)
        instance.__name__ = name
        return instance

    def __init__(self, name=None, superclass=None, abstract=False,
                 metainstance=None, **kwargs):
        super().__init__(name, **kwargs)
        self.abstract = abstract
        # Listen to our own meta-level changes (see notifyChanged below).
        self._eternal_listener.append(self)

    def __call__(self, *args, **kwargs):
        """Instantiate the underlying python_class."""
        if self.abstract:
            raise TypeError("Can't instantiate abstract EClass {0}"
                            .format(self.name))
        return self.python_class(*args, **kwargs)

    def allInstances(self=None, resources=None):
        # NOTE: deliberately callable both as EClass.allInstances() and on an
        # instance; yields every tracked instance, optionally filtered by
        # the resources it lives in.
        if self is None:
            self = EClass
        if resources:
            yield from (x for x in self._instances
                        if isinstance(x, self) and x.eResource in resources)
        else:
            yield from (x for x in self._instances if isinstance(x, self))

    def notifyChanged(self, notif):
        """Mirror meta-level edits onto the generated python_class."""
        # We do not update in case of static metamodel (could be changed)
        if getattr(self.python_class, '_staticEClass', False):
            return
        if notif.feature is EClass.eSuperTypes:
            new_supers = self.__compute_supertypes()
            try:
                self.python_class.__bases__ = new_supers
            except TypeError:
                # Same MRO-conflict fallback as in __new__.
                new_supers = sorted(new_supers,
                                    key=lambda x: len(x.eClass
                                                      .eAllSuperTypes()),
                                    reverse=True)
                self.python_class.__bases__ = tuple(new_supers)
        elif notif.feature is EClass.eOperations:
            if notif.kind is Kind.ADD:
                self.__create_fun(notif.new)
            elif notif.kind is Kind.REMOVE:
                delattr(self.python_class, notif.old.name)
        elif notif.feature is EClass.eStructuralFeatures:
            if notif.kind is Kind.ADD:
                setattr(self.python_class, notif.new.name, notif.new)
            elif notif.kind is Kind.ADD_MANY:
                for x in notif.new:
                    setattr(self.python_class, x.name, x)
            elif notif.kind is Kind.REMOVE:
                delattr(self.python_class, notif.old.name)
        elif notif.feature is EClass.name and notif.kind is Kind.SET:
            self.python_class.__name__ = notif.new
            self.__name__ = notif.new

    def __create_fun(self, eoperation):
        # Build a method from the EOperation's generated code and attach it;
        # the code is sandboxed through RestrictedPython.
        name = eoperation.normalized_name()
        namespace = {}
        # code = compile(eoperation.to_code(), "<str>", "exec")
        # exec(code, namespace)
        code = compile_restricted(eoperation.to_code(), '<inline>', 'exec')
        exec(code, safe_builtins, namespace)
        setattr(self.python_class, name, namespace[name])

    def __compute_supertypes(self):
        # EObject is the implicit root: drop it when explicit supertypes
        # exist alongside it to avoid duplicated bases.
        if not self.eSuperTypes:
            return (EObject,)
        else:
            eSuperTypes = list(self.eSuperTypes)
            if len(eSuperTypes) > 1 and EObject.eClass in eSuperTypes:
                eSuperTypes.remove(EObject.eClass)
            return tuple(x.python_class for x in eSuperTypes)

    def __repr__(self):
        return '<{0} name="{1}">'.format(self.__class__.__name__, self.name)

    @property
    def eAttributes(self):
        # Derived view: own features that are attributes.
        return [x for x in self.eStructuralFeatures
                if x.is_attribute]

    @property
    def eReferences(self):
        # Derived view: own features that are references.
        return [x for x in self.eStructuralFeatures
                if x.is_reference]

    def findEStructuralFeature(self, name):
        """First own-or-inherited feature named *name*, else None."""
        return next((f for f in self._eAllStructuralFeatures_gen()
                     if f.name == name),
                    None)

    def _eAllSuperTypes_gen(self):
        # Depth-first walk; duplicates are removed by eAllSuperTypes().
        super_types = self.eSuperTypes
        yield from self.eSuperTypes
        for x in super_types:
            yield from x._eAllSuperTypes_gen()

    def eAllSuperTypes(self):
        return OrderedSet(self._eAllSuperTypes_gen())

    def _eAllStructuralFeatures_gen(self):
        yield from self.eStructuralFeatures
        for parent in self.eSuperTypes:
            yield from parent._eAllStructuralFeatures_gen()

    def eAllStructuralFeatures(self):
        return OrderedSet(self._eAllStructuralFeatures_gen())

    def eAllReferences(self):
        return set((x for x in self._eAllStructuralFeatures_gen()
                    if x.is_reference))

    def eAllAttributes(self):
        return set((x for x in self._eAllStructuralFeatures_gen()
                    if x.is_attribute))

    def _eAllOperations_gen(self):
        yield from self.eOperations
        for parent in self.eSuperTypes:
            yield from parent._eAllOperations_gen()

    def eAllOperations(self):
        return OrderedSet(self._eAllOperations_gen())

    def findEOperation(self, name):
        """First own-or-inherited operation named *name*, else None."""
        return next((f for f in self._eAllOperations_gen() if f.name == name),
                    None)

    def __instancecheck__(self, instance):
        # isinstance(x, some_eclass) checks against the generated class.
        return isinstance(instance, self.python_class)

    def __subclasscheck__(self, cls):
        return issubclass(cls, self.python_class)
def EMetaclass(cls):
    """Class decorator for creating PyEcore metaclass.

    Ensures EObject is among the bases (replacing plain ``object`` when
    present), strips slot descriptors and the implicit ``__dict__``/
    ``__weakref__`` entries from the class dict, then rebuilds the class
    through MetaEClass so it becomes a full PyEcore metaclass.
    """
    superclass = cls.__bases__
    if not issubclass(cls, EObject):
        sclasslist = list(superclass)
        if object in superclass:
            # Substitute EObject for ``object`` at the same position.
            index = sclasslist.index(object)
            sclasslist.insert(index, EObject)
            sclasslist.remove(object)
        else:
            sclasslist.insert(0, EObject)
        superclass = tuple(sclasslist)
    orig_vars = cls.__dict__.copy()
    slots = orig_vars.get('__slots__')
    if slots is not None:
        if isinstance(slots, str):
            slots = [slots]
        # Drop slot descriptors; the rebuilt class re-creates them.
        # (Was a side-effect-only list comprehension.)
        for slot_name in slots:
            orig_vars.pop(slot_name)
    orig_vars.pop('__dict__', None)
    orig_vars.pop('__weakref__', None)
    return MetaEClass(cls.__name__, superclass, orig_vars)
class EProxy(EObject):
    """Lazy stand-in for an object referenced across resources.

    The proxy stores only a path and the resource able to resolve it; the
    first real attribute access resolves the target, after which every
    operation is delegated to the wrapped object.
    """

    def __new__(cls, *args, **kwargs):
        return object.__new__(cls)

    def __init__(self, path=None, resource=None, wrapped=None, **kwargs):
        super().__init__(**kwargs)
        # Store bookkeeping slots via the parent __setattr__ so the
        # overridden __setattr__ below never triggers an eager resolution.
        super().__setattr__('resolved', wrapped is not None)
        super().__setattr__('_wrapped', wrapped)
        super().__setattr__('_proxy_path', path)
        super().__setattr__('_proxy_resource', resource)
        super().__setattr__('_inverse_rels', set())

    def force_resolve(self):
        """Resolve the target now; no-op when already resolved."""
        if self.resolved:
            return
        resource = self._proxy_resource
        decoded = resource.resolve_object(self._proxy_path)
        if not hasattr(decoded, '_inverse_rels'):
            # Resolved a plain Python metainstance: wrap its EClass instead.
            self._wrapped = decoded.eClass
        else:
            self._wrapped = decoded
        # Hand over the inverse relations gathered while unresolved.
        # NOTE(review): the _inverse_rels assignment goes through the
        # overridden __setattr__ — confirm the forwarding is intended.
        self._wrapped._inverse_rels.update(self._inverse_rels)
        self._inverse_rels = self._wrapped._inverse_rels
        self.resolved = True

    def delete(self, recursive=True):
        """Delete the proxied object, unhooking every inverse reference."""
        if recursive and self.resolved:
            [obj.delete() for obj in self.eAllContents()]
        # for obj in self.eAllContents():
        #     obj.delete()
        seek = set(self._inverse_rels)
        if self.resolved:
            seek.update((self, ref) for ref in self.eClass.eAllReferences())
        for owner, feature in seek:
            fvalue = owner.eGet(feature)
            if feature.many:
                if self in fvalue:
                    fvalue.remove(self)
                    continue
                if owner is self:
                    fvalue.clear()
                    continue
                # The collection may hold the wrapped object, not the proxy.
                value = next((val for val in fvalue
                              if self._wrapped is val),
                             None)
                if value:
                    fvalue.remove(value)
            else:
                if self is fvalue or owner is self:
                    owner.eSet(feature, None)
                    continue
                value = fvalue if self._wrapped is fvalue else None
                if value:
                    owner.eSet(feature, None)

    def __getattribute__(self, name):
        # Bookkeeping attributes never trigger resolution.
        if name in ('_wrapped', '_proxy_path', '_proxy_resource', 'resolved',
                    'force_resolve', 'delete'):
            return super().__getattribute__(name)
        resolved = super().__getattribute__('resolved')
        if not resolved:
            if name in ('__class__', '_inverse_rels', '__name__'):
                return super().__getattribute__(name)
            # Inline resolution (same logic as force_resolve).
            resource = self._proxy_resource
            decoded = resource.resolve_object(self._proxy_path)
            if not hasattr(decoded, '_inverse_rels'):
                self._wrapped = decoded.eClass
            else:
                self._wrapped = decoded
            self._wrapped._inverse_rels.update(self._inverse_rels)
            self._inverse_rels = self._wrapped._inverse_rels
            self.resolved = True
        return self._wrapped.__getattribute__(name)

    def __setattr__(self, name, value):
        # Bookkeeping slots are written directly on the proxy itself.
        if name in ('_wrapped', '_proxy_path', 'resolved', '_proxy_resource'):
            super().__setattr__(name, value)
            return
        resolved = self.resolved
        if not resolved:
            resource = self._proxy_resource
            decoded = resource.resolve_object(self._proxy_path)
            if not hasattr(decoded, '_inverse_rels'):
                self._wrapped = decoded.eClass
            else:
                self._wrapped = decoded
            self.resolved = True
        # Everything else lands on the wrapped object.
        self._wrapped.__setattr__(name, value)

    def __instancecheck__(self, instance):
        self.force_resolve()
        return self._wrapped.__instancecheck__(instance)

    def __call__(self, *args, **kwargs):
        self.force_resolve()
        return self._wrapped(*args, **kwargs)

    def __hash__(self):
        # Identity hash on the proxy itself (never resolves).
        return object.__hash__(self)

    def __eq__(self, other):
        self.force_resolve()
        return self._wrapped == other

    def __ne__(self, other):
        self.force_resolve()
        return self._wrapped != other
def abstract(cls):
    """Class decorator: mark *cls*'s eClass as abstract and return *cls*."""
    cls.eClass.abstract = True
    return cls
from .valuecontainer import ECollection, EValue, \
EList, EOrderedSet, ESet, EBag, \
EDerivedCollection, \
EcoreUtils, \
BadValueError # noqa
# meta-meta level
# Bootstrap of the Ecore meta-metamodel: the primitive datatypes are created
# first; the meta-level features are wired onto the classes further below.
EString = EDataType('EString', str)
ENamedElement.name = EAttribute('name', EString)
ENamedElement.name._isset.add(ENamedElement.name)  # special case
EString._isset.add(ENamedElement.name)  # special case
EBoolean = EDataType('EBoolean', bool, False,
                     to_string=lambda x: str(x).lower(),
                     from_string=lambda x: x in ['True', 'true'])
EBooleanObject = EDataType('EBooleanObject', bool,
                           to_string=lambda x: str(x).lower(),
                           from_string=lambda x: x in ['True', 'true'])
EInteger = EDataType('EInteger', int, 0, from_string=int)
EInt = EDataType('EInt', int, 0, from_string=int)
ELong = EDataType('ELong', int, 0, from_string=int)
ELongObject = EDataType('ELongObject', int, from_string=int)
EIntegerObject = EDataType('EIntegerObject', int, from_string=int)
EBigInteger = EDataType('EBigInteger', int, from_string=int)
EDouble = EDataType('EDouble', float, 0.0, from_string=float)
EDoubleObject = EDataType('EDoubleObject', float, from_string=float)
EFloat = EDataType('EFloat', float, 0.0, from_string=float)
EFloatObject = EDataType('EFloatObject', float, from_string=float)
# Map-like and opaque datatypes.
EStringToStringMapEntry = EDataType('EStringToStringMapEntry', dict,
                                    type_as_factory=True)
EFeatureMapEntry = EDataType('EFeatureMapEntry', dict, type_as_factory=True)
EDiagnosticChain = EDataType('EDiagnosticChain', str)
ENativeType = EDataType('ENativeType', object)
EJavaObject = EDataType('EJavaObject', object)
EDate = EDataType('EDate', datetime,
                  from_string=parse_date,
                  to_string=lambda d: d.strftime('%Y-%m-%dT%H:%M:%S.%f%z'))
EBigDecimal = EDataType('EBigDecimal', Decimal, from_string=Decimal)
EByte = EDataType('EByte', bytes)
EByteObject = EDataType('EByteObject', bytes)
EByteArray = EDataType('EByteArray', bytearray)
EChar = EDataType('EChar', str)
ECharacterObject = EDataType('ECharacterObject', str)
EShort = EDataType('EShort', int, from_string=int)
EShortObject = EDataType('EShortObject', int, from_string=int)
EJavaClass = EDataType('EJavaClass', type)
# Wiring of the meta-level structural features onto the bootstrap classes;
# each assignment also triggers the descriptor installation on the
# corresponding python_class (see EClass.notifyChanged).
EModelElement.eAnnotations = EReference('eAnnotations', EAnnotation,
                                        upper=-1, containment=True)
EAnnotation.eModelElement = EReference('eModelElement', EModelElement,
                                       eOpposite=EModelElement.eAnnotations)
EAnnotation.source = EAttribute('source', EString)
EAnnotation.details = EAttribute('details', EStringToStringMapEntry)
EAnnotation.references = EReference('references', EObject, upper=-1)
EAnnotation.contents = EReference('contents', EObject, upper=-1,
                                  containment=True)
ETypedElement.ordered = EAttribute('ordered', EBoolean, default_value=True)
ETypedElement.unique = EAttribute('unique', EBoolean, default_value=True)
ETypedElement._lower = EAttribute('lower', EInteger, derived=True)
ETypedElement.lowerBound = EAttribute('lowerBound', EInteger)
ETypedElement._upper = EAttribute('upper', EInteger, derived=True)
ETypedElement.upperBound = EAttribute('upperBound', EInteger, default_value=1)
ETypedElement.required = EAttribute('required', EBoolean)
ETypedElement.eGenericType = EReference('eGenericType', EGenericType,
                                        containment=True)
ETypedElement.eType = EReference('eType', EClassifier)
ENamedElement.name._isset.add(ETypedElement.eType)  # special case
EStructuralFeature.changeable = EAttribute('changeable', EBoolean,
                                           default_value=True)
EStructuralFeature.volatile = EAttribute('volatile', EBoolean)
EStructuralFeature.transient = EAttribute('transient', EBoolean)
EStructuralFeature.unsettable = EAttribute('unsettable', EBoolean)
EStructuralFeature.derived = EAttribute('derived', EBoolean)
EStructuralFeature.defaultValueLiteral = EAttribute('defaultValueLiteral',
                                                    EString)
EAttribute.iD = EAttribute('iD', EBoolean)
EPackage.nsURI = EAttribute('nsURI', EString)
EPackage.nsPrefix = EAttribute('nsPrefix', EString)
EPackage.eClassifiers = EReference('eClassifiers', EClassifier,
                                   upper=-1, containment=True)
EPackage.eSubpackages = EReference('eSubpackages', EPackage,
                                   upper=-1, containment=True)
EPackage.eSuperPackage = EReference('eSuperPackage', EPackage,
                                    lower=1, eOpposite=EPackage.eSubpackages)
EClassifier.ePackage = EReference('ePackage', EPackage,
                                  eOpposite=EPackage.eClassifiers)
EClassifier.eTypeParameters = EReference('eTypeParameters', ETypeParameter,
                                         upper=-1, containment=True)
EClassifier.instanceTypeName = EAttribute('instanceTypeName', EString)
EClassifier.instanceClass = EAttribute('instanceClass', EJavaClass)
EClassifier.defaultValue = EAttribute('defaultValue', EJavaObject)
# NOTE(review): instanceTypeName is re-assigned here with different flags;
# the later definition wins — confirm the earlier assignment is intentional.
EClassifier.instanceTypeName = EAttribute('instanceTypeName', EString,
                                          volatile=True, unsettable=True)
EDataType.instanceClassName_ = EAttribute('instanceClassName', EString)
EDataType.serializable = EAttribute('serializable', EBoolean)
EClass.abstract = EAttribute('abstract', EBoolean)
EClass.eStructuralFeatures = EReference('eStructuralFeatures',
                                        EStructuralFeature,
                                        upper=-1, containment=True)
EClass.eGenericSuperTypes = EReference('eGenericSuperTypes', EGenericType,
                                       containment=True, upper=-1)
EClass.eAttributes_ = EReference('eAttributes', EAttribute,
                                 upper=-1, derived=True)
EClass.eReferences_ = EReference('eReferences', EReference,
                                 upper=-1, derived=True)
EClass.eSuperTypes = EReference('eSuperTypes', EClass, upper=-1)
EClass.eOperations = EReference('eOperations', EOperation,
                                upper=-1, containment=True)
EClass.instanceClassName = EAttribute('instanceClassName', EString)
EClass.interface = EAttribute('interface', EBoolean)
EStructuralFeature.eContainingClass = \
    EReference('eContainingClass', EClass,
               eOpposite=EClass.eStructuralFeatures)
EReference.containment = EAttribute('containment', EBoolean)
EReference.eOpposite_ = EReference('eOpposite', EReference)
EReference.resolveProxies = EAttribute('resolveProxies', EBoolean)
EEnum.eLiterals = EReference('eLiterals', EEnumLiteral, upper=-1,
                             containment=True)
EEnumLiteral.eEnum = EReference('eEnum', EEnum, eOpposite=EEnum.eLiterals)
EEnumLiteral.name = EAttribute('name', EString)
EEnumLiteral.value = EAttribute('value', EInteger)
EEnumLiteral.literal = EAttribute('literal', EString)
EOperation.eContainingClass = EReference('eContainingClass', EClass,
                                         eOpposite=EClass.eOperations)
EOperation.eParameters = EReference('eParameters', EParameter, upper=-1,
                                    containment=True)
EOperation.eExceptions = EReference('eExceptions', EClassifier, upper=-1)
EOperation.eTypeParameters = EReference('eTypeParameters', ETypeParameter,
                                        upper=-1, containment=True)
EOperation.eGenericExceptions = EReference('eGenericExceptions', EGenericType,
                                           upper=-1)
EParameter.eOperation = EReference('eOperation', EOperation,
                                   eOpposite=EOperation.eParameters)
ETypeParameter.eBounds = EReference('eBounds', EGenericType,
                                    upper=-1, containment=True)
ETypeParameter.eGenericType = EReference('eGenericType', EGenericType,
                                         upper=-1)
EGenericType.eClassifier = EReference('eClassifier', EClassifier)
EGenericType.eTypeArguments = EReference('eTypeArguments', EGenericType,
                                         containment=True, upper=-1)
EGenericType.eTypeParameter = EReference('eTypeParameter', ETypeParameter,
                                         eOpposite=ETypeParameter.eGenericType)
EGenericType.eUpperBound = EReference('eUpperBound', EGenericType)
EGenericType.eLowerBound = EReference('eLowerBound', EGenericType)
# Shorthand used by the registration helpers below.
register_classifier = Core.register_classifier
def register_metaclass(c, metaclass=MetaEClass, *args, **kwargs):
    """Register *c* as a classifier of this package, then promote it by
    swapping its metaclass (MetaEClass by default)."""
    register_classifier(c, *args, **kwargs)
    c.__class__ = metaclass
# Build the 'ecore' EPackage itself and register every bootstrap class and
# datatype as one of its classifiers.
# NOTE(review): name/nsURI/nsPrefix are module-level constants defined above
# this section — confirm in full-file context.
eClass = EPackage(name=name, nsURI=nsURI, nsPrefix=nsPrefix)
register_metaclass(EObject, promote=True, abstract=True,
                   metaclass=Metasubinstance)
register_metaclass(EModelElement, promote=True, abstract=True)
register_metaclass(ENamedElement, promote=True, abstract=True)
register_metaclass(EAnnotation, promote=True)
register_metaclass(EPackage, promote=True, metaclass=SpecialEPackage)
register_metaclass(EGenericType, promote=True)
register_metaclass(ETypeParameter, promote=True)
register_metaclass(ETypedElement, promote=True)
register_metaclass(EClassifier, promote=True, abstract=True)
register_metaclass(EDataType, promote=True)
register_metaclass(EEnum, promote=True)
register_metaclass(EEnumLiteral, promote=True)
register_metaclass(EParameter, promote=True)
register_metaclass(EOperation, promote=True)
register_metaclass(EStructuralFeature, promote=True, abstract=True)
register_metaclass(EAttribute, promote=True)
register_metaclass(EReference, promote=True)
register_metaclass(EClass, promote=True)
register_classifier(EString)
register_classifier(EBoolean)
register_classifier(EInteger)
register_classifier(EInt)
register_classifier(EBigInteger)
register_classifier(EIntegerObject)
register_classifier(EFloat)
register_classifier(EFloatObject)
register_classifier(EDouble)
register_classifier(EDoubleObject)
register_classifier(EStringToStringMapEntry)
register_classifier(EFeatureMapEntry)
register_classifier(EDiagnosticChain)
register_classifier(ENativeType)
register_classifier(EJavaObject)
register_classifier(EDate)
register_classifier(EBigDecimal)
register_classifier(EBooleanObject)
register_classifier(ELongObject)
register_classifier(ELong)
register_classifier(EByte)
register_classifier(EByteObject)
register_classifier(EByteArray)
register_classifier(EChar)
register_classifier(ECharacterObject)
register_classifier(EShort)
register_classifier(EShortObject)
register_classifier(EJavaClass)
eContents = eClass.eContents
# Public API of the module.
__all__ = ['EObject', 'EModelElement', 'ENamedElement', 'EAnnotation',
           'EPackage', 'EGenericType', 'ETypeParameter', 'ETypedElement',
           'EClassifier', 'EDataType', 'EEnum', 'EEnumLiteral', 'EParameter',
           'EOperation', 'EClass', 'EStructuralFeature', 'EAttribute',
           'EReference', 'EString', 'EBoolean', 'EInteger',
           'EStringToStringMapEntry', 'EDiagnosticChain', 'ENativeType',
           'EJavaObject', 'abstract', 'MetaEClass', 'EList', 'ECollection',
           'EOrderedSet', 'ESet', 'EcoreUtils', 'BadValueError', 'EDouble',
           'EDoubleObject', 'EBigInteger', 'EInt', 'EIntegerObject', 'EFloat',
           'EFloatObject', 'ELong', 'EProxy', 'EBag', 'EFeatureMapEntry',
           'EDate', 'EBigDecimal', 'EBooleanObject', 'ELongObject', 'EByte',
           'EByteObject', 'EByteArray', 'EChar', 'ECharacterObject',
           'EShort', 'EShortObject', 'EJavaClass', 'EMetaclass',
           'EDerivedCollection']
| |
# -*- coding: utf-8 -*-
"""
Tests of the neo.core.irregularlysampledsignal.IrregularlySampledSignal class
"""
import unittest
import os
import pickle
import warnings
from copy import deepcopy
import numpy as np
import quantities as pq
from numpy.testing import assert_array_equal
from neo.core.dataobject import ArrayDict
try:
from IPython.lib.pretty import pretty
except ImportError as err:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.irregularlysampledsignal import IrregularlySampledSignal
from neo.core import Segment, ChannelIndex
from neo.core.baseneo import MergeError
from neo.test.tools import (assert_arrays_almost_equal, assert_arrays_equal,
assert_neo_object_is_compliant, assert_same_sub_schema,
assert_same_attributes, assert_same_annotations,
assert_same_array_annotations)
from neo.test.generate_datasets import (get_fake_value, get_fake_values, fake_neo,
TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
    """Tests of the fake-data generators for IrregularlySampledSignal."""

    def setUp(self):
        np.random.seed(0)
        self.annotations = {
            str(x): TEST_ANNOTATIONS[x] for x in range(len(TEST_ANNOTATIONS))}

    def test__get_fake_values(self):
        self.annotations['seed'] = 0
        # Regenerate the expected attribute values with the same seeds the
        # get_fake_values() implementation uses internally.
        times = get_fake_value('times', pq.Quantity, seed=0, dim=1)
        signal = get_fake_value('signal', pq.Quantity, seed=1, dim=2)
        name = get_fake_value('name', str, seed=2, obj=IrregularlySampledSignal)
        description = get_fake_value('description', str, seed=3, obj='IrregularlySampledSignal')
        file_origin = get_fake_value('file_origin', str)
        arr_ann = get_fake_value('array_annotations', dict, seed=5,
                                 obj=IrregularlySampledSignal, n=1)
        attrs1 = {'name': name, 'description': description, 'file_origin': file_origin}
        attrs2 = attrs1.copy()
        attrs2.update(self.annotations)
        attrs2['array_annotations'] = arr_ann
        # Generate by class object and by class name, with and without
        # annotations.
        res11 = get_fake_values(IrregularlySampledSignal, annotate=False, seed=0)
        res12 = get_fake_values('IrregularlySampledSignal', annotate=False, seed=0)
        res21 = get_fake_values(IrregularlySampledSignal, annotate=True, seed=0)
        res22 = get_fake_values('IrregularlySampledSignal', annotate=True, seed=0)
        assert_array_equal(res11.pop('times'), times)
        assert_array_equal(res12.pop('times'), times)
        assert_array_equal(res21.pop('times'), times)
        assert_array_equal(res22.pop('times'), times)
        assert_array_equal(res11.pop('signal'), signal)
        assert_array_equal(res12.pop('signal'), signal)
        assert_array_equal(res21.pop('signal'), signal)
        assert_array_equal(res22.pop('signal'), signal)
        self.assertEqual(res11, attrs1)
        self.assertEqual(res12, attrs1)
        # Array annotations need to be compared separately
        # because numpy arrays define equality differently
        arr_ann_res21 = res21.pop('array_annotations')
        arr_ann_attrs2 = attrs2.pop('array_annotations')
        self.assertEqual(res21, attrs2)
        assert_arrays_equal(arr_ann_res21['valid'], arr_ann_attrs2['valid'])
        assert_arrays_equal(arr_ann_res21['number'], arr_ann_attrs2['number'])
        arr_ann_res22 = res22.pop('array_annotations')
        self.assertEqual(res22, attrs2)
        assert_arrays_equal(arr_ann_res22['valid'], arr_ann_attrs2['valid'])
        assert_arrays_equal(arr_ann_res22['number'], arr_ann_attrs2['number'])

    def test__fake_neo__cascade(self):
        self.annotations['seed'] = None
        obj_type = IrregularlySampledSignal
        cascade = True
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, IrregularlySampledSignal))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)

    def test__fake_neo__nocascade(self):
        self.annotations['seed'] = None
        obj_type = 'IrregularlySampledSignal'
        cascade = False
        res = fake_neo(obj_type=obj_type, cascade=cascade)
        self.assertTrue(isinstance(res, IrregularlySampledSignal))
        assert_neo_object_is_compliant(res)
        self.assertEqual(res.annotations, self.annotations)
class TestIrregularlySampledSignalConstruction(unittest.TestCase):
    """Constructor behaviour: unit handling, rescaling, input validation."""

    def test_IrregularlySampledSignal_creation_times_units_signal_units(self):
        # Units carried by the quantities themselves.
        params = {'test2': 'y1', 'test3': True}
        arr_ann = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.ms, signal=[20., 40., 60.] * pq.mV,
                                       name='test', description='tester', file_origin='test.file',
                                       test1=1, array_annotations=arr_ann, **params)
        # annotate() after construction overrides the constructor's test1.
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.ms)
        assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
        self.assertEqual(sig.units, pq.mV)
        self.assertEqual(sig.name, 'test')
        self.assertEqual(sig.description, 'tester')
        self.assertEqual(sig.file_origin, 'test.file')
        self.assertEqual(sig.annotations['test0'], [1, 2])
        self.assertEqual(sig.annotations['test1'], 1.1)
        self.assertEqual(sig.annotations['test2'], 'y1')
        self.assertTrue(sig.annotations['test3'])
        assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(sig.array_annotations, ArrayDict)

    def test_IrregularlySampledSignal_creation_units_arg(self):
        # Units supplied via the units/time_units keyword arguments.
        params = {'test2': 'y1', 'test3': True}
        arr_ann = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal([1.1, 1.5, 1.7], signal=[20., 40., 60.], units=pq.V,
                                       time_units=pq.s, name='test', description='tester',
                                       file_origin='test.file', test1=1,
                                       array_annotations=arr_ann, **params)
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1.1, 1.5, 1.7] * pq.s)
        assert_array_equal(np.asarray(sig).flatten(), np.array([20., 40., 60.]))
        self.assertEqual(sig.units, pq.V)
        self.assertEqual(sig.name, 'test')
        self.assertEqual(sig.description, 'tester')
        self.assertEqual(sig.file_origin, 'test.file')
        self.assertEqual(sig.annotations['test0'], [1, 2])
        self.assertEqual(sig.annotations['test1'], 1.1)
        self.assertEqual(sig.annotations['test2'], 'y1')
        self.assertTrue(sig.annotations['test3'])
        assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(sig.array_annotations, ArrayDict)

    def test_IrregularlySampledSignal_creation_units_rescale(self):
        # Keyword units differ from the quantities' units: data is rescaled
        # (s -> ms, V -> mV).
        params = {'test2': 'y1', 'test3': True}
        arr_ann = {'anno1': [23], 'anno2': ['A']}
        sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
                                       units=pq.mV, time_units=pq.ms, name='test',
                                       description='tester', file_origin='test.file', test1=1,
                                       array_annotations=arr_ann, **params)
        sig.annotate(test1=1.1, test0=[1, 2])
        assert_neo_object_is_compliant(sig)
        assert_array_equal(sig.times, [1100, 1500, 1700] * pq.ms)
        assert_array_equal(np.asarray(sig).flatten(), np.array([2000., 4000., 6000.]))
        self.assertEqual(sig.units, pq.mV)
        self.assertEqual(sig.name, 'test')
        self.assertEqual(sig.description, 'tester')
        self.assertEqual(sig.file_origin, 'test.file')
        self.assertEqual(sig.annotations['test0'], [1, 2])
        self.assertEqual(sig.annotations['test1'], 1.1)
        self.assertEqual(sig.annotations['test2'], 'y1')
        self.assertTrue(sig.annotations['test3'])
        assert_arrays_equal(sig.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(sig.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(sig.array_annotations, ArrayDict)

    def test_IrregularlySampledSignal_different_lens_ValueError(self):
        # times and signal must have the same length.
        times = [1.1, 1.5, 1.7] * pq.ms
        signal = [20., 40., 60., 70.] * pq.mV
        self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)

    def test_IrregularlySampledSignal_no_signal_units_ValueError(self):
        # A dimensionless signal without a units argument is rejected.
        times = [1.1, 1.5, 1.7] * pq.ms
        signal = [20., 40., 60.]
        self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)

    def test_IrregularlySampledSignal_no_time_units_ValueError(self):
        # Dimensionless times without a time_units argument are rejected.
        times = [1.1, 1.5, 1.7]
        signal = [20., 40., 60.] * pq.mV
        self.assertRaises(ValueError, IrregularlySampledSignal, times, signal)
class TestIrregularlySampledSignalProperties(unittest.TestCase):
    """Derived properties: t_start/t_stop, duration, sampling intervals, repr."""

    def setUp(self):
        # Three signals with different time bases and units.
        self.times = [np.arange(10.0) * pq.s, np.arange(-100.0, 100.0, 10.0) * pq.ms,
                      np.arange(100) * pq.ns]
        self.data = [np.arange(10.0) * pq.nA, np.arange(-100.0, 100.0, 10.0) * pq.mV,
                     np.random.uniform(size=100) * pq.uV]
        self.signals = [IrregularlySampledSignal(t, signal=D, testattr='test') for D, t in
                        zip(self.data, self.times)]

    def test__compliant(self):
        for signal in self.signals:
            assert_neo_object_is_compliant(signal)

    def test__t_start_getter(self):
        # t_start is the first sample time.
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.t_start, times[0], delta=1e-15)

    def test__t_stop_getter(self):
        # t_stop is the last sample time.
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.t_stop, times[-1], delta=1e-15)

    def test__duration_getter(self):
        for signal, times in zip(self.signals, self.times):
            self.assertAlmostEqual(signal.duration, times[-1] - times[0], delta=1e-15)

    def test__sampling_intervals_getter(self):
        for signal, times in zip(self.signals, self.times):
            assert_arrays_almost_equal(signal.sampling_intervals, np.diff(times), threshold=1e-15)

    def test_IrregularlySampledSignal_repr(self):
        sig = IrregularlySampledSignal([1.1, 1.5, 1.7] * pq.s, signal=[2., 4., 6.] * pq.V,
                                       name='test', description='tester', file_origin='test.file',
                                       test1=1)
        assert_neo_object_is_compliant(sig)
        # NOTE(review): lexicographic comparison of version-string parts is
        # fragile for single-digit components; it holds for the numpy
        # versions this test targets — confirm before reuse.
        if np.__version__.split(".")[:2] > ['1', '13']:
            # see https://github.com/numpy/numpy/blob/master/doc/release/1.14.0-notes.rst#many
            # -changes-to-array-printing-disableable-with-the-new-legacy-printing-mode
            targ = (
                '<IrregularlySampledSignal(array([[2.],\n [4.],\n [6.]]) * V '
                '' + 'at times [1.1 1.5 1.7] s)>')
        else:
            targ = (
                '<IrregularlySampledSignal(array([[ 2.],\n [ 4.],\n [ 6.]]) '
                '* V ' + 'at times [ 1.1 1.5 1.7] s)>')
        res = repr(sig)
        self.assertEqual(targ, res)
class TestIrregularlySampledSignalArrayMethods(unittest.TestCase):
    def setUp(self):
        # Single-channel signal: 10 samples (0..9 mV) at log-spaced times;
        # scalar array annotations apply to the one channel.
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.mV
        self.time1 = np.logspace(1, 5, 10)
        self.time1quant = self.time1 * pq.ms
        self.arr_ann = {'anno1': [23], 'anno2': ['A']}
        self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
                                                name='spam', description='eggs',
                                                file_origin='testfile.txt', arg1='test',
                                                array_annotations=self.arr_ann)
        self.signal1.segment = Segment()
        self.signal1.channel_index = ChannelIndex([0])
    def test__compliant(self):
        # Sanity-check the fixture signal built in setUp().
        assert_neo_object_is_compliant(self.signal1)
        self.assertEqual(self.signal1.name, 'spam')
        self.assertEqual(self.signal1.description, 'eggs')
        self.assertEqual(self.signal1.file_origin, 'testfile.txt')
        self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
        assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(self.signal1.array_annotations, ArrayDict)
def test__slice_should_return_IrregularlySampledSignal(self):
result = self.signal1[3:8]
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.size, 5)
self.assertEqual(result.t_start, self.time1quant[3])
self.assertEqual(result.t_stop, self.time1quant[7])
assert_array_equal(self.time1quant[3:8], result.times)
assert_array_equal(self.data1[3:8].reshape(-1, 1), result.magnitude)
# Test other attributes were copied over (in this case, defaults)
self.assertEqual(result.file_origin, self.signal1.file_origin)
self.assertEqual(result.name, self.signal1.name)
self.assertEqual(result.description, self.signal1.description)
self.assertEqual(result.annotations, self.signal1.annotations)
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__getitem_should_return_single_quantity(self):
self.assertEqual(self.signal1[0], 0 * pq.mV)
self.assertEqual(self.signal1[9], 9 * pq.mV)
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test__getitem_out_of_bounds_IndexError(self):
self.assertRaises(IndexError, self.signal1.__getitem__, 10)
def test_comparison_operators(self):
assert_array_equal(self.signal1 >= 5 * pq.mV, np.array(
[[False, False, False, False, False, True, True, True, True, True]]).T)
assert_array_equal(self.signal1 == 5 * pq.mV, np.array(
[[False, False, False, False, False, True, False, False, False, False]]).T)
assert_array_equal(self.signal1 == self.signal1, np.array(
[[True, True, True, True, True, True, True, True, True, True]]).T)
def test__comparison_as_indexing_single_trace(self):
self.assertEqual(self.signal1[self.signal1 == 5], [5 * pq.mV])
def test__comparison_as_indexing_multi_trace(self):
signal = IrregularlySampledSignal(self.time1quant, np.arange(20).reshape((-1, 2)) * pq.V)
assert_array_equal(signal[signal < 10],
np.array([[0, 2, 4, 6, 8], [1, 3, 5, 7, 9]]).T * pq.V)
def test__indexing_keeps_order_across_channels(self):
# AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting one entry per trace
mask[[0, 1, 0, 3, 0, 2, 4, 3, 1, 4], range(10)] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 33, 4, 25, 46, 37, 18, 49]]) * pq.V)
def test__indexing_keeps_order_across_time(self):
# AnalogSignals with 10 traces each having 5 samples (eg. data[0] = [0,10,20,30,40])
data = np.array([range(10), range(10, 20), range(20, 30), range(30, 40), range(40, 50)])
mask = np.full((5, 10), fill_value=False, dtype=bool)
# selecting two entries per trace
temporal_ids = [0, 1, 0, 3, 1, 2, 4, 2, 1, 4] + [4, 3, 2, 1, 0, 1, 2, 3, 2, 1]
mask[temporal_ids, list(range(10)) + list(range(10))] = True
signal = IrregularlySampledSignal(np.arange(5) * pq.s, np.array(data) * pq.V)
assert_array_equal(signal[mask], np.array([[0, 11, 2, 13, 4, 15, 26, 27, 18, 19],
[40, 31, 22, 33, 14, 25, 46, 37, 28,
49]]) * pq.V)
def test__comparison_with_inconsistent_units_should_raise_Exception(self):
self.assertRaises(ValueError, self.signal1.__gt__, 5 * pq.nA)
def test_simple_statistics(self):
targmean = self.signal1[:-1] * np.diff(self.time1quant).reshape(-1, 1)
targmean = targmean.sum() / (self.time1quant[-1] - self.time1quant[0])
self.assertEqual(self.signal1.max(), 9 * pq.mV)
self.assertEqual(self.signal1.min(), 0 * pq.mV)
self.assertEqual(self.signal1.mean(), targmean)
def test_mean_interpolation_NotImplementedError(self):
self.assertRaises(NotImplementedError, self.signal1.mean, True)
def test_resample_NotImplementedError(self):
self.assertRaises(NotImplementedError, self.signal1.resample, True)
def test__rescale_same(self):
result = self.signal1.copy()
result = result.rescale(pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.mV)
assert_array_equal(result.magnitude, self.data1.reshape(-1, 1))
assert_array_equal(result.times, self.time1quant)
assert_same_sub_schema(result, self.signal1)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new(self):
result = self.signal1.copy()
result = result.rescale(pq.uV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
self.assertEqual(result.units, 1 * pq.uV)
assert_arrays_almost_equal(np.array(result), self.data1.reshape(-1, 1) * 1000., 1e-10)
assert_array_equal(result.times, self.time1quant)
self.assertIsInstance(result.channel_index, ChannelIndex)
self.assertIsInstance(result.segment, Segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
self.assertIs(result.segment, self.signal1.segment)
def test__rescale_new_incompatible_ValueError(self):
self.assertRaises(ValueError, self.signal1.rescale, pq.nA)
def test_time_slice(self):
targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_deepcopy_annotations(self):
params1 = {'test0': 'y1', 'test1': ['deeptest'], 'test2': True}
self.signal1.annotate(**params1)
result = self.signal1.time_slice(None, None)
# Change annotations of original
params2 = {'test0': 'y2', 'test2': False}
self.signal1.annotate(**params2)
self.signal1.annotations['test1'][0] = 'shallowtest'
self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
# Change annotations of result
params3 = {'test0': 'y3'}
result.annotate(**params3)
result.annotations['test1'][0] = 'shallowtest2'
self.assertNotEqual(self.signal1.annotations['test0'], result.annotations['test0'])
self.assertNotEqual(self.signal1.annotations['test1'], result.annotations['test1'])
self.assertNotEqual(self.signal1.annotations['test2'], result.annotations['test2'])
def test__time_slice_deepcopy_array_annotations(self):
length = self.signal1.shape[-1]
params1 = {'test0': ['y{}'.format(i) for i in range(length)],
'test1': ['deeptest' for i in range(length)],
'test2': [(-1)**i > 0 for i in range(length)]}
self.signal1.array_annotate(**params1)
result = self.signal1.time_slice(None, None)
# Change annotations of original
params2 = {'test0': ['x{}'.format(i) for i in range(length)],
'test2': [(-1) ** (i + 1) > 0 for i in range(length)]}
self.signal1.array_annotate(**params2)
self.signal1.array_annotations['test1'][0] = 'shallowtest'
self.assertFalse(all(self.signal1.array_annotations['test0']
== result.array_annotations['test0']))
self.assertFalse(all(self.signal1.array_annotations['test1']
== result.array_annotations['test1']))
self.assertFalse(all(self.signal1.array_annotations['test2']
== result.array_annotations['test2']))
# Change annotations of result
params3 = {'test0': ['z{}'.format(i) for i in range(1, result.shape[-1]+1)]}
result.array_annotate(**params3)
result.array_annotations['test1'][0] = 'shallow2'
self.assertFalse(all(self.signal1.array_annotations['test0']
== result.array_annotations['test0']))
self.assertFalse(all(self.signal1.array_annotations['test1']
== result.array_annotations['test1']))
self.assertFalse(all(self.signal1.array_annotations['test2']
== result.array_annotations['test2']))
def test__time_slice_deepcopy_data(self):
result = self.signal1.time_slice(None, None)
# Change values of original array
self.signal1[2] = 7.3*self.signal1.units
self.assertFalse(all(self.signal1 == result))
# Change values of sliced array
result[3] = 9.5*result.units
self.assertFalse(all(self.signal1 == result))
def test_time_slice_out_of_boundries(self):
targdataquant = self.data1quant
targtimequant = self.time1quant
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 0
t_stop = 2500000
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_empty(self):
targdataquant = [] * pq.mV
targtimequant = [] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
result = targ_signal.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
self.assertEqual(result.array_annotations, {})
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_stop(self):
targdataquant = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0], [9.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:10] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = None
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_start(self):
targdataquant = [[0.0], [1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[0:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = None
t_stop = 250
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_none_both(self):
targdataquant = [[0.0], [1.0], [2.0], [3.0], [4.0], [5.0], [6.0], [7.0], [8.0],
[9.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[0:10] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = None
t_stop = None
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test_time_slice_differnt_units(self):
targdataquant = [[1.0], [2.0], [3.0]] * pq.mV
targtime = np.logspace(1, 5, 10)
targtimequant = targtime[1:4] * pq.ms
targ_signal = IrregularlySampledSignal(targtimequant, signal=targdataquant, name='spam',
description='eggs', file_origin='testfile.txt',
arg1='test')
t_start = 15
t_stop = 250
t_start = 0.015 * pq.s
t_stop = .250 * pq.s
result = self.signal1.time_slice(t_start, t_stop)
assert_array_equal(result, targ_signal)
assert_array_equal(result.times, targtimequant)
self.assertEqual(result.units, 1 * pq.mV)
self.assertIsInstance(result, IrregularlySampledSignal)
assert_neo_object_is_compliant(result)
self.assertEqual(result.name, 'spam')
self.assertEqual(result.description, 'eggs')
self.assertEqual(result.file_origin, 'testfile.txt')
self.assertEqual(result.annotations, {'arg1': 'test'})
assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
self.assertIsInstance(result.array_annotations, ArrayDict)
def test__time_slice_should_set_parents_to_None(self):
# When timeslicing, a deep copy is made,
# thus the reference to parent objects should be destroyed
result = self.signal1.time_slice(1 * pq.ms, 3 * pq.ms)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__deepcopy_should_set_parents_objects_to_None(self):
# Deepcopy should destroy references to parents
result = deepcopy(self.signal1)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__time_shift_same_attributes(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_attributes(result, self.signal1, exclude=['times', 't_start', 't_stop'])
def test__time_shift_same_annotations(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_annotations(result, self.signal1)
def test__time_shift_same_array_annotations(self):
result = self.signal1.time_shift(1 * pq.ms)
assert_same_array_annotations(result, self.signal1)
def test__time_shift_should_set_parents_to_None(self):
# When time-shifting, a deep copy is made,
# thus the reference to parent objects should be destroyed
result = self.signal1.time_shift(1 * pq.ms)
self.assertEqual(result.segment, None)
self.assertEqual(result.channel_index, None)
def test__time_shift_by_zero(self):
shifted = self.signal1.time_shift(0 * pq.ms)
assert_arrays_equal(shifted.times, self.signal1.times)
def test__time_shift_same_units(self):
shifted = self.signal1.time_shift(10 * pq.ms)
assert_arrays_equal(shifted.times, self.signal1.times + 10 * pq.ms)
def test__time_shift_different_units(self):
shifted = self.signal1.time_shift(1 * pq.s)
assert_arrays_equal(shifted.times, self.signal1.times + 1000 * pq.ms)
def test_as_array(self):
sig_as_arr = self.signal1.as_array()
self.assertIsInstance(sig_as_arr, np.ndarray)
assert_array_equal(self.data1, sig_as_arr.flat)
def test_as_quantity(self):
sig_as_q = self.signal1.as_quantity()
self.assertIsInstance(sig_as_q, pq.Quantity)
assert_array_equal(self.data1, sig_as_q.magnitude.flat)
def test__copy_should_preserve_parent_objects(self):
result = self.signal1.copy()
self.assertIs(result.segment, self.signal1.segment)
self.assertIs(result.channel_index, self.signal1.channel_index)
class TestIrregularlySampledSignalCombination(unittest.TestCase):
    """Arithmetic combinations of IrregularlySampledSignal with scalars,
    arrays and other signals, plus pretty-printing and merging."""

    def setUp(self):
        # Same fixture as the array-method tests but without parent links.
        self.data1 = np.arange(10.0)
        self.data1quant = self.data1 * pq.mV
        self.time1 = np.logspace(1, 5, 10)
        self.time1quant = self.time1 * pq.ms
        self.arr_ann = {'anno1': [23], 'anno2': ['A']}
        self.signal1 = IrregularlySampledSignal(self.time1quant, signal=self.data1quant,
                                                name='spam', description='eggs',
                                                file_origin='testfile.txt', arg1='test',
                                                array_annotations=self.arr_ann)

    def test__compliant(self):
        assert_neo_object_is_compliant(self.signal1)
        self.assertEqual(self.signal1.name, 'spam')
        self.assertEqual(self.signal1.description, 'eggs')
        self.assertEqual(self.signal1.file_origin, 'testfile.txt')
        self.assertEqual(self.signal1.annotations, {'arg1': 'test'})
        assert_arrays_equal(self.signal1.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(self.signal1.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(self.signal1.array_annotations, ArrayDict)

    def test__add_const_quantity_should_preserve_data_complement(self):
        result = self.signal1 + 0.065 * pq.V
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        # 0.065 V is rescaled to the signal's mV units before adding.
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) + 65)
        assert_array_equal(result.times, self.time1quant)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 74 * pq.mV)

    def test__add_two_consistent_signals_should_preserve_data_complement(self):
        data2 = np.arange(10.0, 20.0)
        data2quant = data2 * pq.mV
        signal2 = IrregularlySampledSignal(self.time1quant, signal=data2quant)
        assert_neo_object_is_compliant(signal2)
        result = self.signal1 + signal2
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        targ = IrregularlySampledSignal(self.time1quant, signal=np.arange(10.0, 30.0, 2.0),
                                        units="mV", name='spam', description='eggs',
                                        file_origin='testfile.txt', arg1='test')
        assert_neo_object_is_compliant(targ)
        assert_array_equal(result, targ)
        assert_array_equal(self.time1quant, targ.times)
        assert_array_equal(result.times, targ.times)
        assert_same_sub_schema(result, targ)

    def test__add_signals_with_inconsistent_times_AssertionError(self):
        signal2 = IrregularlySampledSignal(self.time1quant * 2., signal=np.arange(10.0),
                                           units="mV")
        assert_neo_object_is_compliant(signal2)
        self.assertRaises(ValueError, self.signal1.__add__, signal2)

    def test__add_signals_with_inconsistent_dimension_ValueError(self):
        signal2 = np.arange(20).reshape(2, 10)
        self.assertRaises(ValueError, self.signal1.__add__, signal2)

    def test__subtract_const_should_preserve_data_complement(self):
        result = self.signal1 - 65 * pq.mV
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], -56 * pq.mV)
        assert_array_equal(result.magnitude, (self.data1 - 65).reshape(-1, 1))
        assert_array_equal(result.times, self.time1quant)

    def test__subtract_from_const_should_return_signal(self):
        result = 10 * pq.mV - self.signal1
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 1 * pq.mV)
        assert_array_equal(result.magnitude, (10 - self.data1).reshape(-1, 1))
        assert_array_equal(result.times, self.time1quant)

    def test__mult_signal_by_const_float_should_preserve_data_complement(self):
        result = self.signal1 * 2.
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
        assert_array_equal(result.times, self.time1quant)

    def test__mult_signal_by_const_array_should_preserve_data_complement(self):
        result = self.signal1 * np.array(2.)
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) * 2)
        assert_array_equal(result.times, self.time1quant)

    def test__divide_signal_by_const_should_preserve_data_complement(self):
        result = self.signal1 / 0.5
        self.assertIsInstance(result, IrregularlySampledSignal)
        assert_neo_object_is_compliant(result)
        self.assertEqual(result.name, 'spam')
        self.assertEqual(result.description, 'eggs')
        self.assertEqual(result.file_origin, 'testfile.txt')
        self.assertEqual(result.annotations, {'arg1': 'test'})
        assert_arrays_equal(result.array_annotations['anno1'], np.array([23]))
        assert_arrays_equal(result.array_annotations['anno2'], np.array(['A']))
        self.assertIsInstance(result.array_annotations, ArrayDict)
        self.assertEqual(self.signal1[9], 9 * pq.mV)
        self.assertEqual(result[9], 18 * pq.mV)
        assert_array_equal(result.magnitude, self.data1.reshape(-1, 1) / 0.5)
        assert_array_equal(result.times, self.time1quant)

    @unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
    def test__pretty(self):
        res = pretty(self.signal1)
        signal = self.signal1
        targ = (("IrregularlySampledSignal with %d channels of length %d; units %s; datatype %s \n"
                 "" % (signal.shape[1], signal.shape[0], signal.units.dimensionality.unicode,
                       signal.dtype))
                + ("name: '{}'\ndescription: '{}'\n".format(signal.name, signal.description))
                + ("annotations: %s\n" % str(signal.annotations))
                + ("sample times: {}".format(signal.times[:10])))
        self.assertEqual(res, targ)

    def test__merge(self):
        # Merge two signals sharing the same times; array annotations present
        # in only one of them must be dropped with a warning, and merging
        # signals with different times must raise MergeError.
        data1 = np.arange(1000.0, 1066.0).reshape((11, 6)) * pq.uV
        data2 = np.arange(2.0, 2.033, 0.001).reshape((11, 3)) * pq.mV
        times1 = np.arange(11.0) * pq.ms
        times2 = np.arange(1.0, 12.0) * pq.ms
        arr_ann1 = {'anno1': np.arange(6), 'anno2': ['a', 'b', 'c', 'd', 'e', 'f']}
        arr_ann2 = {'anno1': np.arange(100, 103), 'anno3': []}
        signal1 = IrregularlySampledSignal(times1, data1, name='signal1',
                                           description='test signal', file_origin='testfile.txt',
                                           array_annotations=arr_ann1)
        signal2 = IrregularlySampledSignal(times1, data2, name='signal2',
                                           description='test signal', file_origin='testfile.txt',
                                           array_annotations=arr_ann2)
        signal3 = IrregularlySampledSignal(times2, data2, name='signal3',
                                           description='test signal', file_origin='testfile.txt')
        with warnings.catch_warnings(record=True) as w:
            merged12 = signal1.merge(signal2)
            self.assertTrue(len(w) == 1)
            self.assertEqual(w[0].category, UserWarning)
            self.assertSequenceEqual(str(w[0].message), "The following array annotations were "
                                                        "omitted, because they were only present"
                                                        " in one of the merged objects: "
                                                        "['anno2'] from the one that was merged "
                                                        "into and ['anno3'] from the one that "
                                                        "was merged into the other")
        target_data12 = np.hstack([data1, data2.rescale(pq.uV)])
        assert_neo_object_is_compliant(signal1)
        assert_neo_object_is_compliant(signal2)
        assert_neo_object_is_compliant(merged12)
        self.assertAlmostEqual(merged12[5, 0], 1030.0 * pq.uV, 9)
        self.assertAlmostEqual(merged12[5, 6], 2015.0 * pq.uV, 9)
        self.assertEqual(merged12.name, 'merge(signal1, signal2)')
        self.assertEqual(merged12.file_origin, 'testfile.txt')
        assert_arrays_equal(merged12.array_annotations['anno1'],
                            np.array([0, 1, 2, 3, 4, 5, 100, 101, 102]))
        self.assertIsInstance(merged12.array_annotations, ArrayDict)
        assert_arrays_equal(merged12.magnitude, target_data12)
        self.assertRaises(MergeError, signal1.merge, signal3)
class TestAnalogSignalFunctions(unittest.TestCase):
    """Round-trip an IrregularlySampledSignal through pickle."""

    def test__pickle(self):
        signal1 = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s, np.arange(10.0),
                                           units="mV")
        # Use context managers plus a finally clause so the file handle is
        # always closed and './pickle' is removed even when dump/load raises
        # (the original leaked the handle and left the file behind on error).
        try:
            with open('./pickle', 'wb') as fobj:
                pickle.dump(signal1, fobj)
            with open('./pickle', 'rb') as fobj:
                try:
                    signal2 = pickle.load(fobj)
                except ValueError:
                    # kept from the original: treat an unreadable pickle as None
                    signal2 = None
            assert_array_equal(signal1, signal2)
        finally:
            if os.path.exists('./pickle'):
                os.remove('./pickle')
class TestIrregularlySampledSignalEquality(unittest.TestCase):
    """Equality semantics of IrregularlySampledSignal."""

    def test__signals_with_different_times_should_be_not_equal(self):
        # Identical values, but one time base is in seconds and the other in
        # milliseconds -- the signals must not compare equal.
        values = np.arange(10.0)
        sig_seconds = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.s,
                                               values, units="mV")
        sig_millis = IrregularlySampledSignal(np.arange(10.0) / 100 * pq.ms,
                                              values, units="mV")
        self.assertNotEqual(sig_seconds, sig_millis)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AppDynamics Extension
Downloads, installs and configures the AppDynamics agent for PHP
"""
import os
import os.path
import logging
from extension_helpers import PHPExtensionHelper
from subprocess import call
import re
_log = logging.getLogger('appdynamics')
class AppDynamicsInstaller(PHPExtensionHelper):
    """Buildpack extension that detects a bound AppDynamics service and
    installs/configures the AppDynamics PHP agent.

    Detection results and controller credentials are cached on the class.
    """
    _detected = None  # Boolean to check if AppDynamics service is detected
    # Regex used against VCAP_SERVICES to match service names like
    # "appdynamics" or "app-dynamics".
    _FILTER = "app[-]?dynamics"
    _appdynamics_credentials = None  # JSON which contains all appdynamics credentials
    _account_access_key = None  # AppDynamics Controller Account Access Key
    _account_name = None  # AppDynamics Controller Account Name
    _host_name = None  # AppDynamics Controller Host Address
    _port = None  # AppDynamics Controller Port
    _ssl_enabled = None  # AppDynamics Controller SSL Enabled
    # Specify the Application details
    _app_name = None  # AppDynamics App name
    _tier_name = None  # AppDynamics Tier name
    _node_name = None  # AppDynamics Node name
def __init__(self, ctx):
    """Create the installer with the given build-pack context."""
    super(AppDynamicsInstaller, self).__init__(ctx)
def _defaults(self):
    """Return the default environment variables for this extension.

    The returned dictionary is merged with the build pack context when
    the extension object is created.
    """
    defaults = {}
    defaults['APPDYNAMICS_HOST'] = 'packages.appdynamics.com'
    defaults['APPDYNAMICS_VERSION'] = '4.3.5.9'
    defaults['APPDYNAMICS_PACKAGE'] = \
        'appdynamics-php-agent-x64-linux-{APPDYNAMICS_VERSION}.tar.bz2'
    defaults['APPDYNAMICS_DOWNLOAD_URL'] = \
        'https://{APPDYNAMICS_HOST}/php/{APPDYNAMICS_VERSION}/{APPDYNAMICS_PACKAGE}'
    return defaults
def _should_compile(self):
    """Determine whether the extension should install its payload.

    Called during the `compile` method of the extension. Returns True when
    a bound service name in VCAP_SERVICES matches the app[-]?dynamics
    filter (i.e. the `install` method should be called). The detection
    result is cached on the class so the search runs at most once.
    """
    if AppDynamicsInstaller._detected is None:
        vcap_services_string = str(self._services)
        # re.search returns None on no match, so it can be used directly
        # as the condition (the bool() wrapper was redundant).
        if re.search(AppDynamicsInstaller._FILTER, vcap_services_string):
            print("AppDynamics service detected, beginning compilation")
            _log.info("AppDynamics service detected")
            AppDynamicsInstaller._detected = True
        else:
            AppDynamicsInstaller._detected = False
    return AppDynamicsInstaller._detected
def _configure(self):
    """
    Configures the extension.
    Called when `should_configure` returns true; loads the AppDynamics
    controller credentials and application details from VCAP_SERVICES.
    """
    print("Running AppDynamics extension method _configure")
    self._load_service_info()
def _load_service_info(self):
"""
Get Controller binding credentials and application details for AppDynamics service
"""
print("Setting AppDynamics credentials info...")
services = self._ctx.get('VCAP_SERVICES', {})
service_defs = services.get("appdynamics")
if service_defs is None:
# Search in user-provided service
print("No Marketplace AppDynamics services found")
print("Searching for AppDynamics service in user-provided services")
user_services = services.get("user-provided")
for user_service in user_services:
if bool(re.search(AppDynamicsInstaller._FILTER, user_service.get("name"))):
print("Using the first AppDynamics service present in user-provided services")
AppDynamicsInstaller._appdynamics_credentials = user_service.get("credentials")
self._load_service_credentials()
try:
# load the app details from user-provided service
print("Setting AppDynamics App, Tier and Node names from user-provided service")
AppDynamicsInstaller._app_name = AppDynamicsInstaller._appdynamics_credentials.get("application-name")
print("User-provided service application-name = " + AppDynamicsInstaller._app_name)
AppDynamicsInstaller._tier_name = AppDynamicsInstaller._appdynamics_credentials.get("tier-name")
print("User-provided service tier-name = " + AppDynamicsInstaller._tier_name)
AppDynamicsInstaller._node_name = AppDynamicsInstaller._appdynamics_credentials.get("node-name")
print("User-provided service node-name = " + AppDynamicsInstaller._node_name)
except Exception:
print("Exception occurred while setting AppDynamics App, Tier and Node names from user-provided service, using default naming")
self._load_app_details()
elif len(service_defs) > 1:
print("Multiple AppDynamics services found in VCAP_SERVICES, using credentials from first one.")
AppDynamicsInstaller._appdynamics_credentials = service_defs[0].get("credentials")
self._load_service_credentials()
self._load_app_details()
elif len(service_defs) == 1:
print("AppDynamics service found in VCAP_SERVICES")
AppDynamicsInstaller._appdynamics_credentials = service_defs[0].get("credentials")
self._load_service_credentials()
self._load_app_details()
def _load_service_credentials(self):
"""
Configure AppDynamics Controller Binding credentials
Called when Appdynamics Service is detected
"""
if (AppDynamicsInstaller._appdynamics_credentials is not None):
print("Setting AppDynamics Controller Binding Credentials")
try:
AppDynamicsInstaller._host_name = AppDynamicsInstaller._appdynamics_credentials.get("host-name")
AppDynamicsInstaller._port = AppDynamicsInstaller._appdynamics_credentials.get("port")
AppDynamicsInstaller._account_name = AppDynamicsInstaller._appdynamics_credentials.get("account-name")
AppDynamicsInstaller._account_access_key = AppDynamicsInstaller._appdynamics_credentials.get("account-access-key")
AppDynamicsInstaller._ssl_enabled = AppDynamicsInstaller._appdynamics_credentials.get("ssl-enabled")
except Exception:
print("Error populating AppDynamics controller binding credentials")
else:
print("AppDynamics credentials empty")
def _load_app_details(self):
"""
Configure AppDynamics application details
Called when AppDynamics Service is detected
"""
print("Setting default AppDynamics App, Tier and Node names")
try:
AppDynamicsInstaller._app_name = self._application.get("space_name") + ":" + self._application.get("application_name")
print("AppDymamics default application-name = " + AppDynamicsInstaller._app_name)
AppDynamicsInstaller._tier_name = self._application.get("application_name")
print("AppDynamics default tier-name = " + AppDynamicsInstaller._tier_name)
AppDynamicsInstaller._node_name = AppDynamicsInstaller._tier_name
print("AppDynamics default node-name = " + AppDynamicsInstaller._node_name)
except Exception:
print("Error populating default App, Tier and Node names")
    def _compile(self, install):
        """
        Install the payload of this extension.
        Called when `_should_compile` returns true. This is responsible
        for installing the payload of the extension.
        The argument is the installer object that is passed into the
        `compile` method.
        """
        # 'APPDYNAMICS' selects the APPDYNAMICS_* keys from _defaults
        # (download URL, package name, version).
        print("Downloading AppDynamics package...")
        install.package('APPDYNAMICS')
        print("Downloaded AppDynamics package")
def _service_environment(self):
"""
Sets environment variables for application container
Returns dict of environment variables x[var]=val
"""
print("Setting AppDynamics service environment variables")
env = {
'PHP_VERSION': "$(/home/vcap/app/php/bin/php-config --version | cut -d '.' -f 1,2)",
'PHP_EXT_DIR': "$(/home/vcap/app/php/bin/php-config --extension-dir | sed 's|/tmp/staged|/home/vcap|')",
'APPD_CONF_CONTROLLER_HOST': AppDynamicsInstaller._host_name,
'APPD_CONF_CONTROLLER_PORT': AppDynamicsInstaller._port,
'APPD_CONF_ACCOUNT_NAME': AppDynamicsInstaller._account_name,
'APPD_CONF_ACCESS_KEY': AppDynamicsInstaller._account_access_key,
'APPD_CONF_SSL_ENABLED': AppDynamicsInstaller._ssl_enabled,
'APPD_CONF_APP': AppDynamicsInstaller._app_name,
'APPD_CONF_TIER': AppDynamicsInstaller._tier_name,
'APPD_CONF_NODE': AppDynamicsInstaller._node_name
}
return env
    def _preprocess_commands(self):
        """
        Commands that the build pack needs to run in the runtime container prior to the app starting.
        Use these sparingly as they run before the app starts and count against the time that an application has
        to start up successfully (i.e. if it takes too long app will fail to start).
        Returns list of commands
        """
        print("Running AppDynamics preprocess commands")
        commands = [
            [ 'echo "Installing AppDynamics package..."'],
            # Locate the compiled PHP extension directory inside the droplet.
            [ 'PHP_EXT_DIR=$(find /home/vcap/app -name "no-debug-non-zts*" -type d)'],
            # install.sh needs to write agent config and logs under these paths.
            [ 'chmod -R 755 /home/vcap'],
            [ 'chmod -R 777 /home/vcap/app/appdynamics/appdynamics-php-agent/logs'],
            # Pass -s to install.sh only when SSL is enabled on the controller.
            [ 'if [ $APPD_CONF_SSL_ENABLED == \"true\" ] ; then export sslflag=-s ; '
              'echo sslflag set to $sslflag ; fi; '],
            # Run the agent installer with the controller/app identity exported
            # by _service_environment; node name gets the instance index suffix.
            [ '/home/vcap/app/appdynamics/appdynamics-php-agent/install.sh '
              '$sslflag '
              '-a "$APPD_CONF_ACCOUNT_NAME@$APPD_CONF_ACCESS_KEY" '
              '-e "$PHP_EXT_DIR" '
              '-p "/home/vcap/app/php/bin" '
              '-i "/home/vcap/app/appdynamics/phpini" '
              '-v "$PHP_VERSION" '
              '--ignore-permissions '
              '"$APPD_CONF_CONTROLLER_HOST" '
              '"$APPD_CONF_CONTROLLER_PORT" '
              '"$APPD_CONF_APP" '
              '"$APPD_CONF_TIER" '
              '"$APPD_CONF_NODE:$CF_INSTANCE_INDEX" '],
            # Activate the agent by appending its ini to the main php.ini.
            [ 'cat /home/vcap/app/appdynamics/phpini/appdynamics_agent.ini >> /home/vcap/app/php/etc/php.ini'],
            [ 'echo "AppDynamics installation complete"']
        ]
        return commands
# Register this extension with the build pack framework so its
# configure/compile hooks are discovered.
AppDynamicsInstaller.register(__name__)
| |
import mock
import unittest
from bin.commands import snapshot
class TestSnapshotSnapshot(unittest.TestCase):
    """Unit tests for ``snapshot.snapshot()``.

    Each test stubs out the git plumbing (``execute.check_output``,
    ``execute.call``, ``execute.swallow``) and asserts the exact git
    commands the snapshot operation issues.
    """

    def setUp(self):
        # store private methods so they can be restored after tests that mock them
        self._stash_buffer = snapshot._stash_buffer

    def tearDown(self):
        # restore the real implementation in case a test replaced it
        snapshot._stash_buffer = self._stash_buffer

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_noMessage(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """A plain snapshot stashes everything then re-applies the stash."""
        # when
        snapshot.snapshot()

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with('git stash push --include-untracked'.split())
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_quiet(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """quiet=True is forwarded to the stash push as --quiet."""
        # when
        snapshot.snapshot(quiet=True)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with('git stash push --include-untracked --quiet'.split())
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_withMessage(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """A message is passed through via --message."""
        # when
        message = 'the message'
        snapshot.snapshot(message)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with(['git', 'stash', 'push', '--include-untracked', '--message', message])
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_withFiles(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """Specific files are appended after the -- pathspec separator."""
        # when
        message = None
        files = ['file1', 'file2']
        snapshot.snapshot(message, files=files)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with(['git', 'stash', 'push', '--include-untracked', '--'] + files)
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_withFilesAndMessages(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """Message and pathspec can be combined in one push."""
        # when
        message = 'the message'
        files = ['file1', 'file2']
        snapshot.snapshot(message, files=files)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with(['git', 'stash', 'push', '--include-untracked', '--message', message, '--'] + files)
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='')
    @mock.patch('bin.commands.utils.messages.info')
    def test_snapshot_noChangesToSnapshot(self, mock_info, mock_checkoutput):
        """A clean working tree produces a message and no stash."""
        # when
        quiet = False
        snapshot.snapshot(quiet=quiet)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_info.assert_called_once_with('No local changes to save. No snapshot created.', quiet)

    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_replace(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """replace=True drops an existing stash with the same message first."""
        # given
        mock_checkoutput.side_effect = [
            'status\noutput\n',
            'stash@{0}: WIP on master: 8a3a15e edit readme\nstash@{1}: On master: edit readme\n'
        ]

        # when
        snapshot.snapshot('edit readme', replace=True)

        # then
        mock_checkoutput.assert_has_calls([
            mock.call('git status --porcelain'),
            mock.call('git stash list')
        ])
        mock_stashbuffer.assert_called_once()
        mock_call.assert_has_calls([
            mock.call('git stash drop --quiet stash@{1}'.split()),
            mock.call(['git', 'stash', 'push', '--include-untracked', '--message', 'edit readme'])
        ])
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_replace_nothingMatches(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """replace=True with no matching stash message drops nothing."""
        # given
        mock_checkoutput.side_effect = [
            'status\noutput\n',
            'stash@{0}: WIP on master: 8a3a15e edit readme\n'
        ]

        # when
        snapshot.snapshot('edit readme', replace=True)

        # then
        mock_checkoutput.assert_has_calls([
            mock.call('git status --porcelain'),
            mock.call('git stash list')
        ])
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with(['git', 'stash', 'push', '--include-untracked', '--message', 'edit readme'])
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())

    @mock.patch('bin.commands.utils.execute.check_output', return_value='status\noutput\n')
    @mock.patch('bin.commands.snapshot._stash_buffer')
    @mock.patch('bin.commands.utils.execute.call')
    @mock.patch('bin.commands.utils.execute.swallow')
    def test_snapshot_replace_noMessageIncluded(self, mock_swallow, mock_call, mock_stashbuffer, mock_checkoutput):
        """replace without a message behaves like a plain snapshot."""
        # when
        snapshot.snapshot(replace=True)

        # then
        mock_checkoutput.assert_called_once_with('git status --porcelain')
        mock_stashbuffer.assert_called_once()
        mock_call.assert_called_once_with('git stash push --include-untracked'.split())
        mock_swallow.assert_called_once_with('git stash apply --quiet --index'.split())
class TestSnapshotStashBuffer(unittest.TestCase):
    """Unit tests for ``snapshot._stash_buffer()``.

    _stash_buffer compares the timestamp of the most recent stash with the
    current time and warns when a new snapshot would be created too close
    to the previous stash.
    """

    @mock.patch('bin.commands.utils.execute.check_output', return_value='\n')
    def test_snapshot_stashBuffer_noPreviousStashes(self, mock_checkoutput):
        """With an empty stash list there is nothing to compare against."""
        # when
        snapshot._stash_buffer(False)

        # then
        mock_checkoutput.assert_called_once_with('git stash list')

    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('time.strftime')
    def test_snapshot_stashBuffer_previousStashesButNoConflict(self, mock_strftime, mock_checkoutput):
        """A stash exists but its timestamp differs from now: no warning."""
        # given
        mock_checkoutput.side_effect = ['stash0\n', 'time1']
        mock_strftime.return_value = 'time0'

        # when
        snapshot._stash_buffer(True)

        # then
        mock_checkoutput.assert_has_calls([
            mock.call('git stash list'),
            mock.call(['git', 'show', '-s', '--format=%ci', 'stash@{0}'])
        ])
        mock_strftime.assert_called_once_with('%Y-%m-%d %H:%M:%S %z')

    @mock.patch('bin.commands.utils.execute.check_output')
    @mock.patch('time.strftime')
    @mock.patch('bin.commands.utils.messages.warn', return_value=True)
    def test_snapshot_stashBuffer_conflictFound(self, mock_warn, mock_strftime, mock_checkoutput):
        """Matching timestamps trigger the warning until the clock advances."""
        # given
        mock_checkoutput.side_effect = ['stash0\n', 'time0']
        # first two strftime calls collide with the stash time, third differs
        mock_strftime.side_effect = ['time0', 'time0', 'time1']
        quiet = True

        # when
        snapshot._stash_buffer(quiet)

        # then
        mock_checkoutput.assert_has_calls([
            mock.call('git stash list'),
            mock.call(['git', 'show', '-s', '--format=%ci', 'stash@{0}'])
        ])
        mock_strftime.assert_called_with('%Y-%m-%d %H:%M:%S %z')
        self.assertEqual(mock_strftime.call_count, 3)
        mock_warn.assert_has_calls([
            mock.call('snapshot created too close to last stash', quiet=quiet, ignore=False),
            mock.call('snapshot created too close to last stash', quiet=quiet, ignore=True)
        ])
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collection of builtin functions
"""
import sys
if sys.version < "3":
from itertools import imap as map
from pyspark import SparkContext
from pyspark.rdd import _prepare_for_python_RDD, ignore_unicode_prefix
from pyspark.serializers import PickleSerializer, AutoBatchedSerializer
from pyspark.sql import since
from pyspark.sql.types import StringType
from pyspark.sql.column import Column, _to_java_column, _to_seq
# Public API of this module.  The dynamically generated wrappers
# (aggregators, math and window functions) are appended to __all__ below.
__all__ = [
    'array',
    'approxCountDistinct',
    'coalesce',
    'countDistinct',
    'explode',
    'monotonicallyIncreasingId',
    'rand',
    'randn',
    'sparkPartitionId',
    'struct',
    'udf',
    'when']

# window functions introduced in 1.4
__all__ += ['lag', 'lead', 'ntile']
def _create_function(name, doc=""):
    """Return a one-argument wrapper around the JVM SQL function *name*."""
    def _fn(col):
        sc = SparkContext._active_spark_context
        arg = col._jc if isinstance(col, Column) else col
        return Column(getattr(sc._jvm.functions, name)(arg))
    _fn.__name__ = name
    _fn.__doc__ = doc
    return _fn
def _create_binary_mathfunction(name, doc=""):
    """Return a two-argument wrapper around the JVM math function *name*."""
    def _fn(col1, col2):
        sc = SparkContext._active_spark_context
        # Plain numbers are coerced to float: an int would throw on the JVM side.
        left = col1._jc if isinstance(col1, Column) else float(col1)
        right = col2._jc if isinstance(col2, Column) else float(col2)
        return Column(getattr(sc._jvm.functions, name)(left, right))
    _fn.__name__ = name
    _fn.__doc__ = doc
    return _fn
def _create_window_function(name, doc=''):
    """Return a zero-argument wrapper around the JVM window function *name*."""
    def _fn():
        sc = SparkContext._active_spark_context
        return Column(getattr(sc._jvm.functions, name)())
    _fn.__name__ = name
    _fn.__doc__ = 'Window function: ' + doc
    return _fn
_functions = {
'lit': 'Creates a :class:`Column` of literal value.',
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to upper case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'first': 'Aggregate function: returns the first value in a group.',
'last': 'Aggregate function: returns the last value in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': 'Computes the cosine inverse of the given value; the returned angle is in the range' +
'0.0 through pi.',
'asin': 'Computes the sine inverse of the given value; the returned angle is in the range' +
'-pi/2 through pi/2.',
'atan': 'Computes the tangent inverse of the given value.',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': 'Computes the cosine of the given value.',
'cosh': 'Computes the hyperbolic cosine of the given value.',
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': 'Computes the sine of the given value.',
'sinh': 'Computes the hyperbolic sine of the given value.',
'tan': 'Computes the tangent of the given value.',
'tanh': 'Computes the hyperbolic tangent of the given value.',
'toDegrees': 'Converts an angle measured in radians to an approximately equivalent angle ' +
'measured in degrees.',
'toRadians': 'Converts an angle measured in degrees to an approximately equivalent angle ' +
'measured in radians.',
'bitwiseNOT': 'Computes bitwise not.',
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': 'Returns the angle theta from the conversion of rectangular coordinates (x, y) to' +
'polar coordinates (r, theta).',
'hypot': 'Computes `sqrt(a^2^ + b^2^)` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.'
}
_window_functions = {
'rowNumber':
"""returns a sequential number starting at 1 within a window partition.
This is equivalent to the ROW_NUMBER function in SQL.""",
'denseRank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and denseRank is that denseRank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using denseRank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and denseRank is that denseRank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using denseRank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third.
This is equivalent to the RANK function in SQL.""",
'cumeDist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.
This is equivalent to the CUME_DIST function in SQL.""",
'percentRank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.
This is equivalent to the PERCENT_RANK function in SQL.""",
}
# Materialize the documented wrappers into this module's namespace, tagging
# each with the Spark version it first appeared in.
for _name, _doc in _functions.items():
    globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
    globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
    globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
    globals()[_name] = since(1.4)(_create_window_function(_name, _doc))
del _name, _doc
# Export the generated names as part of the public API.
__all__ += _functions.keys()
__all__ += _functions_1_4.keys()
__all__ += _binary_mathfunctions.keys()
__all__ += _window_functions.keys()
__all__.sort()
@since(1.4)
def array(*cols):
    """Creates a new array column.
    :param cols: list of column names (string) or list of :class:`Column` expressions that have
        the same data type.
    >>> df.select(array('age', 'age').alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    >>> df.select(array([df.age, df.age]).alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    """
    sc = SparkContext._active_spark_context
    # A single list/set argument is shorthand for passing the columns varargs-style.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column)))
@since(1.3)
def approxCountDistinct(col, rsd=None):
    """Returns a new :class:`Column` for approximate distinct count of ``col``.
    >>> df.agg(approxCountDistinct(df.age).alias('c')).collect()
    [Row(c=2)]
    """
    sc = SparkContext._active_spark_context
    jcol = _to_java_column(col)
    # Only forward rsd (relative standard deviation) when the caller supplied one.
    if rsd is None:
        return Column(sc._jvm.functions.approxCountDistinct(jcol))
    return Column(sc._jvm.functions.approxCountDistinct(jcol, rsd))
@since(1.4)
def coalesce(*cols):
    """Returns the first column that is not null.

    :param cols: column names (string) or :class:`Column` expressions, evaluated in order.

    >>> cDf = sqlContext.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
    >>> cDf.show()
    +----+----+
    |   a|   b|
    +----+----+
    |null|null|
    |   1|null|
    |null|   2|
    +----+----+

    >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
    +-------------+
    |Coalesce(a,b)|
    +-------------+
    |         null|
    |            1|
    |            2|
    +-------------+

    >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
    +----+----+---------------+
    |   a|   b|Coalesce(a,0.0)|
    +----+----+---------------+
    |null|null|            0.0|
    |   1|null|            1.0|
    |null|   2|            0.0|
    +----+----+---------------+
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
    return Column(jc)
@since(1.3)
def countDistinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.

    :param col: first column or column name
    :param cols: additional columns included in the distinct tuple

    >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]

    >>> df.agg(countDistinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
    return Column(jc)
@since(1.4)
def explode(col):
    """Returns a new row for each element in the given array or map.

    :param col: column (or column name) holding an array or map value

    >>> from pyspark.sql import Row
    >>> eDF = sqlContext.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.explode(_to_java_column(col))
    return Column(jc)
@since(1.4)
def monotonicallyIncreasingId():
    """A column that generates monotonically increasing 64-bit integers.
    The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
    The current implementation puts the partition ID in the upper 31 bits, and the record number
    within each partition in the lower 33 bits. The assumption is that the data frame has
    less than 1 billion partitions, and each partition has less than 8 billion records.
    As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
    This expression would return the following IDs:
    0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
    >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
    >>> df0.select(monotonicallyIncreasingId().alias('id')).collect()
    [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
    """
    # No arguments: the ID is derived entirely from partition/record position.
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.monotonicallyIncreasingId())
@since(1.4)
def rand(seed=None):
    """Generates a random column with i.i.d. samples from U[0.0, 1.0].

    :param seed: optional seed for the random generator
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.rand() if seed is None else sc._jvm.functions.rand(seed)
    return Column(jc)
@since(1.4)
def randn(seed=None):
    """Generates a column with i.i.d. samples from the standard normal distribution.

    :param seed: optional seed for the random generator
    """
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.randn() if seed is None else sc._jvm.functions.randn(seed)
    return Column(jc)
@since(1.4)
def sparkPartitionId():
    """A column for partition ID of the Spark task.

    Note that this is indeterministic because it depends on data partitioning and task scheduling.

    >>> df.repartition(1).select(sparkPartitionId().alias("pid")).collect()
    [Row(pid=0), Row(pid=0)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.sparkPartitionId())
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
    """Creates a new struct column.
    :param cols: list of column names (string) or list of :class:`Column` expressions
        that are named or aliased.
    >>> df.select(struct('age', 'name').alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    >>> df.select(struct([df.age, df.name]).alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    """
    sc = SparkContext._active_spark_context
    # A single list/set argument is shorthand for passing the columns varargs-style.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
    """Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
    :param condition: a boolean :class:`Column` expression.
    :param value: a literal value, or a :class:`Column` expression.
    >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
    [Row(age=3), Row(age=4)]
    >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
    [Row(age=3), Row(age=None)]
    """
    # Validate eagerly so the caller gets a Python TypeError rather than a py4j error.
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    sc = SparkContext._active_spark_context
    jval = value._jc if isinstance(value, Column) else value
    return Column(sc._jvm.functions.when(condition._jc, jval))
@since(1.4)
def lag(col, count=1, default=None):
    """
    Window function: returns the value that is `count` rows before the current row, and
    `default` if there is less than `count` rows before the current row. For example,
    a `count` of one will return the previous row at any given point in the window partition.
    This is equivalent to the LAG function in SQL.
    :param col: name of column or expression
    :param count: number of rows to look back (the offset)
    :param default: value returned when the offset falls outside the partition
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.lag(_to_java_column(col), count, default))
@since(1.4)
def lead(col, count=1, default=None):
    """
    Window function: returns the value that is `count` rows after the current row, and
    `default` if there is less than `count` rows after the current row. For example,
    a `count` of one will return the next row at any given point in the window partition.
    This is equivalent to the LEAD function in SQL.
    :param col: name of column or expression
    :param count: number of rows to look ahead (the offset)
    :param default: value returned when the offset falls outside the partition
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.lead(_to_java_column(col), count, default))
@since(1.4)
def ntile(n):
    """
    Window function: returns the ntile group id (from 1 to `n` inclusive)
    in an ordered window partition. For example, if `n` is 4, the first
    quarter of the rows will get value 1, the second quarter will get 2,
    the third quarter will get 3, and the last quarter will get 4.
    This is equivalent to the NTILE function in SQL.
    :param n: an integer
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.ntile(int(n)))
class UserDefinedFunction(object):
    """
    User defined function in Python

    Wraps a Python callable plus a Spark return type into a JVM
    UserDefinedPythonFunction that can be applied to Columns.

    .. versionadded:: 1.3
    """
    def __init__(self, func, returnType):
        # func: per-row Python callable; returnType: Spark DataType of its result.
        self.func = func
        self.returnType = returnType
        self._broadcast = None
        self._judf = self._create_judf()

    def _create_judf(self):
        """Pickle self.func and register it with the JVM as a Python UDF."""
        f = self.func  # put it in closure `func`
        # Adapt the per-row callable to the (split, iterator) worker signature.
        func = lambda _, it: map(lambda x: f(*x), it)
        ser = AutoBatchedSerializer(PickleSerializer())
        command = (func, None, ser, ser)
        sc = SparkContext._active_spark_context
        pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command, self)
        # Parse the declared return type on the JVM side.
        ssql_ctx = sc._jvm.SQLContext(sc._jsc.sc())
        jdt = ssql_ctx.parseDataType(self.returnType.json())
        fname = f.__name__ if hasattr(f, '__name__') else f.__class__.__name__
        judf = sc._jvm.UserDefinedPythonFunction(fname, bytearray(pickled_command), env, includes,
                                                 sc.pythonExec, sc.pythonVer, broadcast_vars,
                                                 sc._javaAccumulator, jdt)
        return judf

    def __del__(self):
        # Release the broadcast of the pickled command, if one was created.
        if self._broadcast is not None:
            self._broadcast.unpersist()
            self._broadcast = None

    def __call__(self, *cols):
        """Apply the UDF to the given columns, returning a new :class:`Column`."""
        sc = SparkContext._active_spark_context
        jc = self._judf.apply(_to_seq(sc, cols, _to_java_column))
        return Column(jc)
@since(1.3)
def udf(f, returnType=StringType()):
    """Creates a :class:`Column` expression representing a user defined function (UDF).

    :param f: python function applied per row
    :param returnType: Spark DataType of the values ``f`` produces (default: StringType)

    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> df.select(slen(df.name).alias('slen')).collect()
    [Row(slen=5), Row(slen=3)]
    """
    return UserDefinedFunction(f, returnType)
def _test():
    """Run this module's doctests against a local SparkContext."""
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.functions
    # Expose the module namespace plus sc/sqlContext/df fixtures to the doctests.
    globs = pyspark.sql.functions.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)


if __name__ == "__main__":
    _test()
| |
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)

# Fallback root device used when an instance has none recorded.
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
# EC2-style label -> device mapping used when no root device name is known.
_DEFAULT_MAPPINGS = {'ami': 'sda1',
                     'ephemeral0': 'sda2',
                     'root': DEFAULT_ROOT_DEV_NAME,
                     'swap': 'sda3'}

# Fields of the legacy block device mapping format.
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
                         'virtual_name', 'snapshot_id',
                         'volume_id', 'volume_size', 'no_device',
                         'connection_info'])

# Fields of the new-style block device mapping format.
bdm_new_fields = set(['source_type', 'destination_type',
                      'guest_format', 'device_type', 'disk_bus', 'boot_index',
                      'device_name', 'delete_on_termination', 'snapshot_id',
                      'volume_id', 'volume_size', 'image_id', 'no_device',
                      'connection_info'])

# Fields that exist only at the database layer.
bdm_db_only_fields = set(['id', 'instance_uuid'])

# Timestamp/soft-delete columns inherited from the DB models.
bdm_db_inherited_fields = set(['created_at', 'updated_at',
                               'deleted_at', 'deleted'])

# New-style fields that are not accepted directly from the API.
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
                              'image_id', 'connection_info'])
# Fields accepted only from the API ('uuid' is resolved to a *_id field).
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
                      bdm_new_api_only_fields)
class BlockDeviceDict(dict):
    """Represents a Block Device Mapping in Nova."""

    # All keys a new-style BDM may carry.
    _fields = bdm_new_fields
    # Keys that only ever live at the database layer.
    _db_only_fields = (bdm_db_only_fields |
                       bdm_db_inherited_fields)
    # Keys that must be present unless 'no_device' is set.
    _required_fields = set(['source_type'])

    def __init__(self, bdm_dict=None, do_not_default=None, **kwargs):
        """Validate and normalize ``bdm_dict`` merged with ``kwargs``.

        :param bdm_dict: mapping of BDM fields to values.
        :param do_not_default: set of field names to leave unset instead of
            defaulting to None (used to avoid clobbering db values).
        :raises exception.InvalidBDMFormat: if validation fails.
        """
        super(BlockDeviceDict, self).__init__()
        bdm_dict = bdm_dict or {}
        bdm_dict.update(kwargs)
        do_not_default = do_not_default or set()
        self._validate(bdm_dict)
        if bdm_dict.get('device_name'):
            bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
        # NOTE (ndipanov): Never default db fields
        self.update({field: None for field in self._fields - do_not_default})
        self.update(list(six.iteritems(bdm_dict)))

    def _validate(self, bdm_dict):
        """Basic data format validations.

        Mutates ``bdm_dict`` in place (bool/int coercions).
        :raises exception.InvalidBDMFormat: on any malformed field.
        """
        dict_fields = set(key for key, _ in six.iteritems(bdm_dict))
        # Check that there are no bogus fields
        if not (dict_fields <=
                (self._fields | self._db_only_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some fields are invalid."))
        # A 'no_device' BDM carries no other meaningful data - skip the
        # remaining checks.
        if bdm_dict.get('no_device'):
            return
        # Check that all required fields are there
        if (self._required_fields and
                not ((dict_fields & self._required_fields) ==
                     self._required_fields)):
            raise exception.InvalidBDMFormat(
                details=_("Some required fields are missing"))
        if 'delete_on_termination' in bdm_dict:
            bdm_dict['delete_on_termination'] = strutils.bool_from_string(
                bdm_dict['delete_on_termination'])
        if bdm_dict.get('device_name') is not None:
            validate_device_name(bdm_dict['device_name'])
        validate_and_default_volume_size(bdm_dict)
        # NOTE: a falsy boot_index (0, '' or None) skips the coercion; 0 is
        # already a valid int and '' would be rejected elsewhere.
        if bdm_dict.get('boot_index'):
            try:
                bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
            except ValueError:
                raise exception.InvalidBDMFormat(
                    details=_("Boot index is invalid."))

    @classmethod
    def from_legacy(cls, legacy_bdm):
        """Build a BlockDeviceDict from a legacy-format mapping.

        :raises exception.InvalidBDMFormat: if the legacy dict matches no
            known source (swap/ephemeral, snapshot, volume or no_device).
        """
        copy_over_fields = bdm_legacy_fields & bdm_new_fields
        copy_over_fields |= (bdm_db_only_fields |
                             bdm_db_inherited_fields)
        # NOTE (ndipanov): These fields cannot be computed
        #                  from legacy bdm, so do not default them
        #                  to avoid overwriting meaningful values in the db
        non_computable_fields = set(['boot_index', 'disk_bus',
                                     'guest_format', 'device_type'])
        new_bdm = {fld: val for fld, val in six.iteritems(legacy_bdm)
                   if fld in copy_over_fields}
        virt_name = legacy_bdm.get('virtual_name')
        if is_swap_or_ephemeral(virt_name):
            new_bdm['source_type'] = 'blank'
            new_bdm['delete_on_termination'] = True
            new_bdm['destination_type'] = 'local'
            if virt_name == 'swap':
                new_bdm['guest_format'] = 'swap'
            else:
                new_bdm['guest_format'] = CONF.default_ephemeral_format
        elif legacy_bdm.get('snapshot_id'):
            new_bdm['source_type'] = 'snapshot'
            new_bdm['destination_type'] = 'volume'
        elif legacy_bdm.get('volume_id'):
            new_bdm['source_type'] = 'volume'
            new_bdm['destination_type'] = 'volume'
        elif legacy_bdm.get('no_device'):
            # NOTE (ndipanov): Just keep the BDM for now,
            pass
        else:
            raise exception.InvalidBDMFormat(
                details=_("Unrecognized legacy format."))
        return cls(new_bdm, non_computable_fields)

    @classmethod
    def from_api(cls, api_dict, image_uuid_specified):
        """Transform the API format of data to the internally used one.

        Only validate if the source_type field makes sense.

        :param api_dict: BDM dict as received from the API; mutated in
            place ('uuid' is resolved into the matching *_id field).
        :param image_uuid_specified: True when the request also carried an
            image ref; required to allow image->local mappings.
        :raises exception.InvalidBDMFormat: on inconsistent source data.
        """
        if not api_dict.get('no_device'):
            source_type = api_dict.get('source_type')
            device_uuid = api_dict.get('uuid')
            destination_type = api_dict.get('destination_type')
            if source_type not in ('volume', 'image', 'snapshot', 'blank'):
                raise exception.InvalidBDMFormat(
                    details=_("Invalid source_type field."))
            elif source_type == 'blank' and device_uuid:
                raise exception.InvalidBDMFormat(
                    details=_("Invalid device UUID."))
            elif source_type != 'blank':
                if not device_uuid:
                    raise exception.InvalidBDMFormat(
                        details=_("Missing device UUID."))
                # Resolve the generic 'uuid' into e.g. 'volume_id'.
                api_dict[source_type + '_id'] = device_uuid
            if source_type == 'image' and destination_type == 'local':
                boot_index = api_dict.get('boot_index', -1)
                # if this bdm is generated from --image ,then
                # source_type = image and destination_type = local is allowed
                if not (image_uuid_specified and boot_index == 0):
                    raise exception.InvalidBDMFormat(
                        details=_("Mapping image to local is not supported."))
        api_dict.pop('uuid', None)
        return cls(api_dict)

    def legacy(self):
        """Return this BDM in the legacy dict format.

        :raises exception.InvalidBDMForLegacy: for image BDMs with a
            non-volume destination, which have no legacy representation.
        """
        copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
        copy_over_fields |= (bdm_db_only_fields |
                             bdm_db_inherited_fields)
        legacy_block_device = {field: self.get(field)
                               for field in copy_over_fields if field in self}
        source_type = self.get('source_type')
        destination_type = self.get('destination_type')
        no_device = self.get('no_device')
        if source_type == 'blank':
            if self['guest_format'] == 'swap':
                legacy_block_device['virtual_name'] = 'swap'
            else:
                # NOTE (ndipanov): Always label as 0, it is up to
                #                  the calling routine to re-enumerate them
                legacy_block_device['virtual_name'] = 'ephemeral0'
        elif source_type in ('volume', 'snapshot') or no_device:
            legacy_block_device['virtual_name'] = None
        elif source_type == 'image':
            if destination_type != 'volume':
                # NOTE(ndipanov): Image bdms with local destination
                #                 have no meaning in the legacy format - raise
                raise exception.InvalidBDMForLegacy()
            legacy_block_device['virtual_name'] = None
        return legacy_block_device

    def get_image_mapping(self):
        """Return a plain-dict copy without db-only/connection fields."""
        drop_fields = (set(['connection_info', 'device_name']) |
                       self._db_only_fields)
        mapping_dict = dict(self)
        for fld in drop_fields:
            mapping_dict.pop(fld, None)
        return mapping_dict
def is_safe_for_update(block_device_dict):
    """Determine if passed dict is a safe subset for update.

    Safe subset in this case means a safe subset of both legacy
    and new versions of data, that can be passed to an UPDATE query
    without any transformation.
    """
    allowed = (bdm_new_fields |
               bdm_db_inherited_fields |
               bdm_db_only_fields)
    return set(block_device_dict.keys()) <= allowed
def create_image_bdm(image_ref, boot_index=0):
    """Create a block device dict based on the image_ref.

    This is useful in the API layer to keep the compatibility
    with having an image_ref as a field in the instance requests
    """
    image_bdm = {
        'source_type': 'image',
        'image_id': image_ref,
        'delete_on_termination': True,
        'boot_index': boot_index,
        'device_type': 'disk',
        'destination_type': 'local',
    }
    return BlockDeviceDict(image_bdm)
def snapshot_from_bdm(snapshot_id, template):
    """Create a basic volume snapshot BDM from a given template bdm."""
    snapshot_dict = {
        'source_type': 'snapshot',
        'destination_type': 'volume',
        'snapshot_id': snapshot_id,
        # Carry over the device placement hints from the template.
        'disk_bus': template.get('disk_bus'),
        'device_type': template.get('device_type'),
        'boot_index': template.get('boot_index'),
    }
    return BlockDeviceDict(snapshot_dict)
def legacy_mapping(block_device_mapping):
    """Transform a list of block devices of an instance back to the
    legacy data format.

    BDMs with no legacy representation (image -> local) are dropped.
    """
    legacy_block_device_mapping = []
    for bdm in block_device_mapping:
        try:
            legacy_block_device = BlockDeviceDict(bdm).legacy()
        except exception.InvalidBDMForLegacy:
            continue
        legacy_block_device_mapping.append(legacy_block_device)
    # Re-enumerate the ephemeral devices
    # NOTE(review): the [:-1] slice assumes a single-digit suffix, as
    # produced by BlockDeviceDict.legacy() which always emits 'ephemeral0';
    # names like 'ephemeral10' would be truncated incorrectly — confirm
    # callers never pass such names.
    for i, dev in enumerate(dev for dev in legacy_block_device_mapping
                            if dev['virtual_name'] and
                            is_ephemeral(dev['virtual_name'])):
        dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
    return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
                        root_device_name=None, no_root=False):
    """Transform a legacy list of block devices to the new data format.

    :param legacy_block_device_mapping: list of legacy-format BDM dicts.
    :param image_uuid: image ref used to prepend an image BDM when the
        boot device is not volume backed.
    :param root_device_name: device name used to identify the root BDM.
    :param no_root: when True, mark every BDM as non-bootable and return.
    :returns: list of BlockDeviceDict, possibly with an image BDM first.
    """
    new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
                for legacy_bdm in legacy_block_device_mapping]
    # NOTE (ndipanov): We will not decide which device is root here - we assume
    # that it will be supplied later. This is useful for having the root device
    # as part of the image defined mappings that are already in the v2 format.
    if no_root:
        for bdm in new_bdms:
            bdm['boot_index'] = -1
        return new_bdms
    image_bdm = None
    volume_backed = False
    # Try to assign boot_device
    if not root_device_name and not image_uuid:
        # NOTE (ndipanov): If there is no root_device, pick the first non
        #                  blank one.
        non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
        if non_blank:
            non_blank[0]['boot_index'] = 0
    else:
        for bdm in new_bdms:
            # The BDM whose device name matches the root device name
            # becomes the boot device (boot_index 0).
            if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
                    root_device_name is not None and
                    (strip_dev(bdm.get('device_name')) ==
                     strip_dev(root_device_name))):
                bdm['boot_index'] = 0
                volume_backed = True
            elif not bdm['no_device']:
                bdm['boot_index'] = -1
            else:
                bdm['boot_index'] = None
    if not volume_backed and image_uuid:
        image_bdm = create_image_bdm(image_uuid, boot_index=0)
    return ([image_bdm] if image_bdm else []) + new_bdms
def properties_root_device_name(properties):
    """Get the root device name from image meta data.

    A 'root_device_name' property takes precedence over a 'root' entry in
    'mappings'. Returns None when neither is present.
    """
    # NOTE(yamahata): see image_service.s3.s3create()
    root_entries = [m['device'] for m in properties.get('mappings', [])
                    if m['virtual'] == 'root']
    root_device_name = root_entries[-1] if root_entries else None
    # NOTE(yamahata): register_image's command line can override
    # <machine>.manifest.xml
    return properties.get('root_device_name', root_device_name)
def validate_device_name(value):
    """Ensure ``value`` is a usable device name.

    :raises exception.InvalidBDMFormat: if the name is empty, longer than
        255 characters, or contains spaces.
    """
    try:
        # NOTE (ndipanov): Do not allow empty device names
        #                  until assigning default values
        #                  is supported by nova.compute
        utils.check_string_length(value, 'Device name',
                                  min_length=1, max_length=255)
    except exception.InvalidInput:
        raise exception.InvalidBDMFormat(
            details=_("Device name empty or too long."))
    if ' ' in value:
        raise exception.InvalidBDMFormat(
            details=_("Device name contains spaces."))
def validate_and_default_volume_size(bdm):
    """Coerce a truthy 'volume_size' entry to a non-negative integer.

    Mutates ``bdm`` in place; entries that are falsy (absent, None, 0)
    are left untouched.
    :raises exception.InvalidBDMFormat: if the size does not validate.
    """
    if bdm.get('volume_size'):
        try:
            bdm['volume_size'] = utils.validate_integer(
                bdm['volume_size'], 'volume_size', min_value=0)
        except exception.InvalidInput:
            # NOTE: We can remove this validation code after removing
            # Nova v2.0 API code because v2.1 API validates this case
            # already at its REST API layer.
            raise exception.InvalidBDMFormat(
                details=_("Invalid volume_size."))
# NOTE: raw string so '\d' is a regex digit class rather than an invalid
# (deprecated) string escape. Matches 'ephemeral0'..'ephemeral9' and any
# higher index without a leading zero.
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')


def is_ephemeral(device_name):
    """Return True if ``device_name`` names an ephemeral disk."""
    return _ephemeral.match(device_name) is not None


def ephemeral_num(ephemeral_name):
    """Return the integer index N of an 'ephemeralN' device name."""
    assert is_ephemeral(ephemeral_name)
    return int(_ephemeral.sub('\\1', ephemeral_name))


def is_swap_or_ephemeral(device_name):
    """Return truthy if ``device_name`` is 'swap' or an ephemeral name."""
    return (device_name and
            (device_name == 'swap' or is_ephemeral(device_name)))
def new_format_is_swap(bdm):
    """Report whether a new-style BDM describes a local swap disk."""
    blank_source = bdm.get('source_type') == 'blank'
    local_dest = bdm.get('destination_type') == 'local'
    swap_format = bdm.get('guest_format') == 'swap'
    return blank_source and local_dest and swap_format
def new_format_is_ephemeral(bdm):
    """Report whether a new-style BDM describes a local ephemeral disk."""
    if bdm.get('source_type') != 'blank':
        return False
    if bdm.get('destination_type') != 'local':
        return False
    # Any blank/local mapping that is not swap counts as ephemeral.
    return bdm.get('guest_format') != 'swap'
def get_root_bdm(bdms):
    """Return the first BDM with boot_index 0, or None when absent."""
    for bdm in bdms:
        if bdm.get('boot_index', -1) == 0:
            return bdm
    return None
def get_bdms_to_connect(bdms, exclude_root_mapping=False):
    """Will return non-root mappings, when exclude_root_mapping is true.

    Otherwise all mappings will be returned.
    """
    for bdm in bdms:
        if exclude_root_mapping and bdm.get('boot_index', -1) == 0:
            continue
        yield bdm
def mappings_prepend_dev(mappings):
    """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
    for mapping in mappings:
        device = mapping['device']
        if (is_swap_or_ephemeral(mapping['virtual']) and
                not device.startswith('/')):
            mapping['device'] = '/dev/' + device
    return mappings
_dev = re.compile('^/dev/')


def strip_dev(device_name):
    """Remove a leading '/dev/'; falsy values pass through unchanged."""
    return _dev.sub('', device_name) if device_name else device_name


def prepend_dev(device_name):
    """Make sure there is a leading '/dev/'; falsy values pass through."""
    return device_name and '/dev/' + strip_dev(device_name)


_pref = re.compile('^((x?v|s|h)d)')


def strip_prefix(device_name):
    """Remove both leading /dev/ and an xvd/vd/sd/hd prefix."""
    device_name = strip_dev(device_name)
    return _pref.sub('', device_name)


# NOTE: raw string so '\d' is a regex digit class, not a (deprecated)
# string escape.
_nums = re.compile(r'\d+')


def get_device_letter(device_name):
    """Return the drive letter(s) of a device name, e.g. '/dev/sda1' -> 'a'."""
    letter = strip_prefix(device_name)
    # NOTE(vish): delete numbers in case we have something like
    #             /dev/sda1
    return _nums.sub('', letter)
def instance_block_mapping(instance, bdms):
    """Return the EC2-style device mapping dict for ``instance``.

    Maps labels ('ami', 'root', 'ebsN', 'ephemeralN', 'swap') to device
    names. ``bdms`` are BDM objects accessed by attribute.
    """
    root_device_name = instance['root_device_name']
    # NOTE(clayg): remove this when xenapi is setting default_root_device
    if root_device_name is None:
        if driver.compute_driver_matches('xenapi.XenAPIDriver'):
            root_device_name = '/dev/xvda'
        else:
            return _DEFAULT_MAPPINGS
    mappings = {}
    mappings['ami'] = strip_dev(root_device_name)
    mappings['root'] = root_device_name
    default_ephemeral_device = instance.get('default_ephemeral_device')
    if default_ephemeral_device:
        mappings['ephemeral0'] = default_ephemeral_device
    default_swap_device = instance.get('default_swap_device')
    if default_swap_device:
        mappings['swap'] = default_swap_device
    ebs_devices = []
    blanks = []
    # 'ephemeralN', 'swap' and ebs
    for bdm in bdms:
        # ebs volume case
        if bdm.destination_type == 'volume':
            ebs_devices.append(bdm.device_name)
            continue
        if bdm.source_type == 'blank':
            blanks.append(bdm)
    # NOTE(yamahata): I'm not sure how ebs device should be numbered.
    #                 Right now sort by device name for deterministic
    #                 result.
    if ebs_devices:
        ebs_devices.sort()
        for nebs, ebs in enumerate(ebs_devices):
            mappings['ebs%d' % nebs] = ebs
    # Blank/local mappings split into swap vs ephemeral by guest_format.
    swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
    if swap:
        mappings['swap'] = swap.pop().device_name
    ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
    if ephemerals:
        for num, eph in enumerate(ephemerals):
            mappings['ephemeral%d' % num] = eph.device_name
    return mappings
# Pre-compiled pattern: optional xvd/vd/sd-style prefix followed by the
# drive letter(s) and an optional partition number.
_device_re = re.compile("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$")


def match_device(device):
    """Matches device name and returns prefix, suffix."""
    matched = _device_re.match(device)
    if matched is None:
        return None
    return matched.groups()
def volume_in_mapping(mount_device, block_device_info):
    """Return True if ``mount_device`` appears in ``block_device_info``.

    Checks the volume mappings, the swap device (when usable) and the
    ephemeral devices, comparing names with the leading '/dev/' stripped.
    """
    block_device_list = [strip_dev(vol['mount_device'])
                         for vol in
                         driver.block_device_info_get_mapping(
                             block_device_info)]
    swap = driver.block_device_info_get_swap(block_device_info)
    if driver.swap_is_usable(swap):
        block_device_list.append(strip_dev(swap['device_name']))
    block_device_list += [strip_dev(ephemeral['device_name'])
                          for ephemeral in
                          driver.block_device_info_get_ephemerals(
                              block_device_info)]
    LOG.debug("block_device_list %s", block_device_list)
    return strip_dev(mount_device) in block_device_list
def get_bdm_ephemeral_disk_size(block_device_mappings):
    """Total volume_size of all new-style ephemeral mappings."""
    total = 0
    for bdm in block_device_mappings:
        if new_format_is_ephemeral(bdm):
            total += bdm.get('volume_size', 0)
    return total
def get_bdm_swap_list(block_device_mappings):
    """Return the list of new-style swap mappings."""
    return list(filter(new_format_is_swap, block_device_mappings))
def get_bdm_local_disk_num(block_device_mappings):
    """Count mappings whose destination_type is 'local'."""
    return sum(1 for bdm in block_device_mappings
               if bdm.get('destination_type') == 'local')
| |
"""
@brief test log(time=2s)
@author Xavier Dupre
"""
import sys
import os
import unittest
from pyquickhelper.sphinxext.revealjs.directives import heading
from pyquickhelper.sphinxext.revealjs.directives import RevealjsDirective
from pyquickhelper.sphinxext.revealjs.directives import RvNoteDirective
from pyquickhelper.sphinxext.revealjs.directives import RvSmallDirective
from pyquickhelper.sphinxext.revealjs.directives import RvCodeDirective
from pyquickhelper.sphinxext.revealjs.directives import visit_revealjs
from pyquickhelper.sphinxext.revealjs import compat
from pyquickhelper.sphinxext.revealjs.directives import depart_revealjs
from pyquickhelper.sphinxext.revealjs.directives import visit_rv_code
from pyquickhelper.sphinxext.revealjs.directives import depart_rv_code
from pyquickhelper.sphinxext.revealjs.directives import visit_rv_small
from pyquickhelper.sphinxext.revealjs.directives import depart_rv_small
from pyquickhelper.sphinxext.revealjs.directives import visit_rv_note
from pyquickhelper.sphinxext.revealjs.directives import depart_rv_note
from pyquickhelper.sphinxext.revealjs.directives import setup
from pyquickhelper.sphinxext.revealjs import directives as d
class DummyConfig(object):
    """Stand-in for the Sphinx state/config object used by directive tests.

    Known keyword arguments resolve to their value; anything else resolves
    to the object itself so chained attribute access never fails.
    """

    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs

    def __getattr__(self, name):
        # Fall back to self for unknown attributes.
        return self.kwargs.get(name, self)

    def nested_parse(self, content, content_offset, node):
        pass

    def warning(self, msg, line):
        return dict(msg=msg, line=line)
class TestHeading(unittest.TestCase):
    """Tests for the ``heading`` helper."""

    def _get_target(self):
        # Indirection point so the target under test can be swapped.
        return heading

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def test_it(self):
        # All six HTML heading tags are accepted and echoed back.
        for tag in ("h1", "h2", "h3", "h4", "h5", "h6"):
            self.assertEqual(tag, self._call_fut(tag))

    def test_value_error(self):
        # Anything else must raise ValueError. (Replaces a hand-rolled
        # try/except with a misspelled failure message.)
        self.assertRaises(ValueError, self._call_fut, "unknown")
class TestRevealjsDirective(unittest.TestCase):
    """Tests for ``RevealjsDirective``."""

    def _get_target_class(self):
        return RevealjsDirective

    def _make_one(self, *args, **kwargs):
        # Build a directive instance under test.
        return self._get_target_class()(*args, **kwargs)

    def _get_dummy_config(self, **kwargs):
        config = dict()
        config.update(kwargs)
        return DummyConfig(**config)

    def _get_params(self, **kwargs):
        # Default docutils directive constructor arguments, overridable
        # per-test via kwargs.
        params = dict(
            name='dummyname',
            arguments=['tell-k', 'test'],
            options={},
            content="test",
            lineno=1,
            content_offset=1,
            block_text="",
            state="",
            state_machine="",
        )
        params.update(kwargs)
        return params

    def test_it(self):
        directive = self._make_one(**self._get_params())
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual(1, len(nodes))
        # Positional arguments are joined into the node title.
        self.assertEqual('tell-k test', nodes[0]['title'])
        self.assertEqual(False, nodes[0]['noheading'])
        self.assertEqual([], nodes[0]['classes'])

    def test_class_option(self):
        directive = self._make_one(**self._get_params(options={
            "class": "add-class",
        }))
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual('add-class', nodes[0]['classes'])

    def test_noheading_option(self):
        directive = self._make_one(**self._get_params(options={
            "noheading": None,
        }))
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual(True, nodes[0]['noheading'])

    def test_other_options(self):
        # Unknown options are copied onto the node verbatim.
        directive = self._make_one(**self._get_params(options={
            "title-heading": "title-heading",
        }))
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual("title-heading", nodes[0]['title-heading'])
class TestRvSmallDirective(unittest.TestCase):
    """Tests for ``RvSmallDirective``."""

    def _get_target_class(self):
        return RvSmallDirective

    def _make_one(self, *args, **kwargs):
        # Build a directive instance under test.
        return self._get_target_class()(*args, **kwargs)

    def _get_dummy_config(self, **kwargs):
        config = dict()
        config.update(kwargs)
        return DummyConfig(**config)

    def _get_params(self, **kwargs):
        # Default docutils directive constructor arguments.
        params = dict(
            name='dummyname',
            arguments='',
            options={},
            content="test",
            lineno=1,
            content_offset=1,
            block_text="",
            state="",
            state_machine="",
        )
        params.update(kwargs)
        return params

    def test_it(self):
        directive = self._make_one(**self._get_params())
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual(1, len(nodes))
        self.assertEqual([], nodes[0]['classes'])

    def test_class_option(self):
        directive = self._make_one(**self._get_params(options={
            "class": "add-class",
        }))
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual('add-class', nodes[0]['classes'])
class TestRvNoteDirective(unittest.TestCase):
    """Tests for ``RvNoteDirective``.

    Uses unittest assertions instead of bare ``assert`` statements, which
    are stripped under ``python -O`` and give poorer failure output.
    """

    def _get_target_class(self):
        return RvNoteDirective

    def _make_one(self, *args, **kwargs):
        # Build a directive instance under test.
        return self._get_target_class()(*args, **kwargs)

    def _get_dummy_config(self, **kwargs):
        config = dict()
        config.update(kwargs)
        return DummyConfig(**config)

    def _get_params(self, **kwargs):
        # Default docutils directive constructor arguments.
        params = dict(
            name='dummyname',
            arguments='',
            options={},
            content="test",
            lineno=1,
            content_offset=1,
            block_text="",
            state="",
            state_machine="",
        )
        params.update(kwargs)
        return params

    def test_it(self):
        directive = self._make_one(**self._get_params())
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual(1, len(nodes))
        self.assertEqual([], nodes[0]['classes'])

    def test_class_option(self):
        directive = self._make_one(**self._get_params(options={
            "class": "add-class",
        }))
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual('add-class', nodes[0]['classes'])
class TestRvCodeDirective(unittest.TestCase):
    """Tests for ``RvCodeDirective``.

    Uses unittest assertions instead of a bare ``assert`` statement, which
    is stripped under ``python -O``.
    """

    def _get_target_class(self):
        return RvCodeDirective

    def _make_one(self, *args, **kwargs):
        # Build a directive instance under test.
        return self._get_target_class()(*args, **kwargs)

    def _get_dummy_config(self, **kwargs):
        config = dict()
        config.update(kwargs)
        return DummyConfig(**config)

    def _get_params(self, **kwargs):
        # Default docutils directive constructor arguments.
        params = dict(
            name='dummyname',
            arguments='',
            options={},
            content="test",
            lineno=1,
            content_offset=1,
            block_text="",
            state="",
            state_machine="",
        )
        params.update(kwargs)
        return params

    def test_it(self):
        directive = self._make_one(**self._get_params())
        directive.state = self._get_dummy_config()
        nodes = directive.run()
        self.assertEqual(1, len(nodes))
class TestVisitRevealjs(unittest.TestCase):
    """Tests for the ``visit_revealjs`` HTML visitor."""

    def _get_target(self):
        return visit_revealjs

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_node(self, *args, **kwargs):
        class DummyNode(object):
            # Default node attributes; overridden per-test via kwargs.
            attrs = {
                'id': "id",
                'title': 'title',
                'noheading': False,
                'title-heading': 'h1',
                'subtitle': 'subtitle',
                'subtitle-heading': 'h2',
                'data-markdown': None,
                'data-transition': None,
                'data-background': None,
                'data-background-repeat': None,
                'data-background-size': None,
                'data-background-transition': None,
                'data-state': None,
                'data-separator': None,
                'data-vertical': None,
            }

            def __init__(self, **kwargs):
                self.attrs.update(kwargs)

            def get(self, attr, default=None):
                return self.attrs.get(attr, default)

            @property
            def rawsource(self):
                return "rawsource"
        return DummyNode(**kwargs)

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            # BUG FIX: this flag was misspelled 'fist_last', so the default
            # attribute never matched what set_first_last() assigns.
            first_last = False

            def __init__(self, body):
                self.body = body

            def starttag(self, node, tag, **kwargs):
                ids = kwargs.pop('ids')
                if ids:
                    kwargs.update({'id': " ".join(ids)})
                attrs = ["{0}='{1}'".format(k, v) for k, v in kwargs.items()]
                attrs.sort()
                return "<{0} {1}>".format(tag, " ".join(attrs))

            def set_first_last(self, node):
                self.first_last = True
        return DummySelf(DummyBody())

    def test_it(self):
        dummyself = self._get_dummy_self()
        dummynode = self._get_dummy_node()
        self._call_fut(dummyself, dummynode)
        self.assertEqual([
            compat.text("<section id='id'>"),
            compat.text('<h1>title</h1>\n'),
            compat.text('<h2>subtitle</h2>\n')
        ], dummyself.body.content)

    def test_markdown(self):
        # A non-empty data-markdown only sets the attribute on the tag.
        dummyself = self._get_dummy_self()
        dummynode = self._get_dummy_node(**{"data-markdown": "hoge"})
        self._call_fut(dummyself, dummynode)
        self.assertEqual(["<section data-markdown='hoge' id='id'>"],
                         dummyself.body.content)
        # An empty data-markdown emits an inline markdown template.
        dummyself = self._get_dummy_self()
        dummynode = self._get_dummy_node(**{"data-markdown": ""})
        self._call_fut(dummyself, dummynode)
        self.assertEqual([
            compat.text("<section data-markdown='' id='id'>"),
            compat.text("<script type='text/template'>\n"),
            compat.text('# title \n'),
            compat.text('## subtitle \n'),
            compat.text('rawsource'),
            compat.text('</script>\n')
        ], dummyself.body.content)
class TestDepartRevealjs(unittest.TestCase):
    """Tests for the ``depart_revealjs`` HTML visitor.

    Uses ``assertEqual`` instead of a bare ``assert``, which is stripped
    under ``python -O``.
    """

    def _get_target(self):
        return depart_revealjs

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            def __init__(self, body):
                self.body = body
        return DummySelf(DummyBody())

    def test_it(self):
        dummyself = self._get_dummy_self()
        self._call_fut(dummyself, None)
        self.assertEqual('</section>\n', dummyself.body.content[0])
class TestVisitRvCode(unittest.TestCase):
    """Tests for the ``visit_rv_code`` HTML visitor.

    Uses unittest assertions instead of bare ``assert`` statements, which
    are stripped under ``python -O``.
    """

    def _get_target(self):
        return visit_rv_code

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_node(self, *args, **kwargs):
        class DummyNode(object):
            @property
            def rawsource(self):
                return "rawsource"
        return DummyNode()

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            def __init__(self, body):
                self.body = body

            def starttag(self, node, tag):
                # NOTE: str.format cannot raise ValueError for this fixed
                # template, so the previous try/except fallback was dead
                # code and has been removed.
                return "<{0}>".format(tag)
        return DummySelf(DummyBody())

    def test_it(self):
        dummynode = self._get_dummy_node()
        dummyself = self._get_dummy_self()
        self._call_fut(dummyself, dummynode)
        self.assertEqual('<pre>', dummyself.body.content[0])
        self.assertEqual('<code data-trim contenteditable>',
                         dummyself.body.content[1])
        self.assertEqual('rawsource', dummyself.body.content[2])
class TestDepartRvCode(unittest.TestCase):
    """Tests for the ``depart_rv_code`` HTML visitor."""

    def _get_target(self):
        return depart_rv_code

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_self(self, *args, **kwargs):
        class RecordingBody(object):
            content = []

            def append(self, item):
                self.content.append(item)

        class Translator(object):
            def __init__(self, body):
                self.body = body

        return Translator(RecordingBody())

    def test_it(self):
        translator = self._get_dummy_self()
        self._call_fut(translator, None)
        self.assertEqual('</code>', translator.body.content[0])
        self.assertEqual('</pre>\n', translator.body.content[1])
class TestVisitRvSmall(unittest.TestCase):
    """Tests for the ``visit_rv_small`` HTML visitor.

    Uses unittest assertions instead of a bare ``assert``, which is
    stripped under ``python -O``.
    """

    def _get_target(self):
        return visit_rv_small

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_node(self, *args, **kwargs):
        class DummyNode(object):
            @property
            def rawsource(self):
                return "rawsource"
        return DummyNode()

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            def __init__(self, body):
                self.body = body
                self.first_last = False

            def starttag(self, node, tag):
                return "<{0}>".format(tag)

            def set_first_last(self, node):
                self.first_last = True
        return DummySelf(DummyBody())

    def test_it(self):
        dummynode = self._get_dummy_node()
        dummyself = self._get_dummy_self()
        self._call_fut(dummyself, dummynode)
        self.assertEqual('<small>', dummyself.body.content[0])
        self.assertTrue(dummyself.first_last)
class TestDepartRvSmall(unittest.TestCase):
    """Tests for the ``depart_rv_small`` HTML visitor.

    Uses ``assertEqual`` instead of a bare ``assert``, which is stripped
    under ``python -O``.
    """

    def _get_target(self):
        return depart_rv_small

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            def __init__(self, body):
                self.body = body
        return DummySelf(DummyBody())

    def test_it(self):
        dummyself = self._get_dummy_self()
        self._call_fut(dummyself, None)
        self.assertEqual('</small>\n', dummyself.body.content[0])
class TestVisitRvNote(unittest.TestCase):
    """Tests for the ``visit_rv_note`` HTML visitor.

    Uses unittest assertions instead of a bare ``assert``, which is
    stripped under ``python -O``.
    """

    def _get_target(self):
        return visit_rv_note

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_node(self, *args, **kwargs):
        class DummyNode(object):
            @property
            def rawsource(self):
                return "rawsource"
        return DummyNode()

    def _get_dummy_self(self, *args, **kwargs):
        class DummyBody(object):
            content = []

            def append(self, content):
                self.content.append(content)

        class DummySelf(object):
            def __init__(self, body):
                self.body = body
                self.first_last = False

            def starttag(self, node, tag, **kwargs):
                class_name = kwargs.pop('class')
                return '<{0} class="{1}">'.format(tag, class_name)

            def set_first_last(self, node):
                self.first_last = True
        return DummySelf(DummyBody())

    def test_it(self):
        dummynode = self._get_dummy_node()
        dummyself = self._get_dummy_self()
        self._call_fut(dummyself, dummynode)
        self.assertEqual('<aside class="notes">', dummyself.body.content[0])
        self.assertTrue(dummyself.first_last)
class TestDepartRvNote(unittest.TestCase):
    """Tests for the ``depart_rv_note`` HTML visitor."""

    def _get_target(self):
        return depart_rv_note

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def _get_dummy_self(self, *args, **kwargs):
        class RecordingBody(object):
            content = []

            def append(self, item):
                self.content.append(item)

        class Translator(object):
            def __init__(self, body):
                self.body = body

        return Translator(RecordingBody())

    def test_it(self):
        translator = self._get_dummy_self()
        self._call_fut(translator, None)
        self.assertEqual('</aside>\n', translator.body.content[0])
class TestSetup(unittest.TestCase):
    """Tests for the Sphinx extension ``setup`` entry point."""

    def _get_target(self):
        return setup

    def _call_fut(self, *args, **kwargs):
        # "call function under test"
        return self._get_target()(*args, **kwargs)

    def test_it(self):
        class DummyApp(object):
            # Records (node, (visit, depart)) registrations.
            nodes = []
            # Records directive-name -> directive-class registrations.
            directives = {}

            def info(self, info):
                self.info = info

            def add_node(self, node, html):
                self.nodes.append((node, html))

            def add_directive(self, name, directive):
                self.directives.update({name: directive})
        dummy_app = DummyApp()
        self._call_fut(dummy_app)
        # Nodes are registered in a fixed order with their HTML visitors.
        self.assertEqual(d.revealjs, dummy_app.nodes[0][0])
        self.assertEqual((d.visit_revealjs, d.depart_revealjs),
                         dummy_app.nodes[0][1])
        self.assertEqual(d.rv_code, dummy_app.nodes[1][0])
        self.assertEqual((d.visit_rv_code, d.depart_rv_code),
                         dummy_app.nodes[1][1])
        self.assertEqual(d.rv_note, dummy_app.nodes[2][0])
        self.assertEqual((d.visit_rv_note, d.depart_rv_note),
                         dummy_app.nodes[2][1])
        self.assertEqual(d.rv_small, dummy_app.nodes[3][0])
        self.assertEqual((d.visit_rv_small, d.depart_rv_small),
                         dummy_app.nodes[3][1])
        # All four directives are registered under their canonical names.
        self.assertEqual(d.RevealjsDirective, dummy_app.directives['revealjs'])
        self.assertEqual(d.RvCodeDirective, dummy_app.directives['rv_code'])
        self.assertEqual(d.RvNoteDirective, dummy_app.directives['rv_note'])
        self.assertEqual(d.RvSmallDirective, dummy_app.directives['rv_small'])
# Allow running this test module directly with the interpreter.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
"""
Copyright (c) 2015-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import gmii_ep
module = 'eth_mac_1g_gmii'
testbench = 'test_%s' % module

# Verilog sources needed to build the simulation binary.
srcs = [
    "../rtl/%s.v" % module,
    "../rtl/lfsr.v",
    "../rtl/axis_gmii_rx.v",
    "../rtl/axis_gmii_tx.v",
    "../rtl/eth_mac_1g.v",
    "../rtl/gmii_phy_if.v",
    "../rtl/oddr.v",
    "../rtl/ssio_sdr_in.v",
    "../rtl/ssio_sdr_out.v",
    "%s.v" % testbench,
]

src = ' '.join(srcs)

# Icarus Verilog compile command for the co-simulation.
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
    """Build the MyHDL test harness around the Icarus co-simulation.

    Wires clock generators, GMII/AXI-stream endpoint models, the compiled
    Verilog DUT and the stimulus/check process together, and returns all
    MyHDL generator instances for use with Simulation().
    """

    # Parameters (not referenced below; presumably mirrored inside the
    # Verilog testbench -- TODO confirm against test_eth_mac_1g_gmii.v).
    TARGET = "SIM"
    IODDR_STYLE = "IODDR2"
    CLOCK_INPUT_STYLE = "BUFIO2"
    ENABLE_PADDING = 1
    MIN_FRAME_LENGTH = 64

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    gtx_clk = Signal(bool(0))
    gtx_rst = Signal(bool(0))
    tx_axis_tdata = Signal(intbv(0)[8:])
    tx_axis_tvalid = Signal(bool(0))
    tx_axis_tlast = Signal(bool(0))
    tx_axis_tuser = Signal(bool(0))
    gmii_rx_clk = Signal(bool(0))
    gmii_rxd = Signal(intbv(0)[8:])
    gmii_rx_dv = Signal(bool(0))
    gmii_rx_er = Signal(bool(0))
    mii_tx_clk = Signal(bool(0))
    ifg_delay = Signal(intbv(0)[8:])

    # Outputs
    rx_clk = Signal(bool(0))
    rx_rst = Signal(bool(0))
    tx_clk = Signal(bool(0))
    tx_rst = Signal(bool(0))
    tx_axis_tready = Signal(bool(0))
    rx_axis_tdata = Signal(intbv(0)[8:])
    rx_axis_tvalid = Signal(bool(0))
    rx_axis_tlast = Signal(bool(0))
    rx_axis_tuser = Signal(bool(0))
    gmii_tx_clk = Signal(bool(0))
    gmii_txd = Signal(intbv(0)[8:])
    gmii_tx_en = Signal(bool(0))
    gmii_tx_er = Signal(bool(0))
    tx_error_underflow = Signal(bool(0))
    rx_error_bad_frame = Signal(bool(0))
    rx_error_bad_fcs = Signal(bool(0))
    speed = Signal(intbv(0)[2:])

    # sources and sinks
    axis_source_pause = Signal(bool(0))
    mii_select = Signal(bool(0))

    # Drives the DUT's GMII receive interface.
    gmii_source = gmii_ep.GMIISource()

    gmii_source_logic = gmii_source.create_logic(
        gmii_rx_clk,
        rst,
        txd=gmii_rxd,
        tx_en=gmii_rx_dv,
        tx_er=gmii_rx_er,
        mii_select=mii_select,
        name='gmii_source'
    )

    # Captures frames transmitted by the DUT on its GMII interface.
    gmii_sink = gmii_ep.GMIISink()

    gmii_sink_logic = gmii_sink.create_logic(
        gmii_tx_clk,
        rst,
        rxd=gmii_txd,
        rx_dv=gmii_tx_en,
        rx_er=gmii_tx_er,
        mii_select=mii_select,
        name='gmii_sink'
    )

    # AXI-stream source feeding the MAC's transmit path.
    axis_source = axis_ep.AXIStreamSource()

    axis_source_logic = axis_source.create_logic(
        gmii_rx_clk, #tx_clk,
        tx_rst,
        tdata=tx_axis_tdata,
        tvalid=tx_axis_tvalid,
        tready=tx_axis_tready,
        tlast=tx_axis_tlast,
        tuser=tx_axis_tuser,
        pause=axis_source_pause,
        name='axis_source'
    )

    # AXI-stream sink collecting frames received by the MAC.
    axis_sink = axis_ep.AXIStreamSink()

    axis_sink_logic = axis_sink.create_logic(
        gmii_rx_clk,
        rx_rst,
        tdata=rx_axis_tdata,
        tvalid=rx_axis_tvalid,
        tlast=rx_axis_tlast,
        tuser=rx_axis_tuser,
        name='axis_sink'
    )

    # DUT: compile the Verilog testbench, then attach the vvp process.
    if os.system(build_cmd):
        raise Exception("Error running build command")

    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        gtx_clk=gtx_clk,
        gtx_rst=gtx_rst,
        rx_clk=rx_clk,
        rx_rst=rx_rst,
        tx_clk=tx_clk,
        tx_rst=tx_rst,
        tx_axis_tdata=tx_axis_tdata,
        tx_axis_tvalid=tx_axis_tvalid,
        tx_axis_tready=tx_axis_tready,
        tx_axis_tlast=tx_axis_tlast,
        tx_axis_tuser=tx_axis_tuser,
        rx_axis_tdata=rx_axis_tdata,
        rx_axis_tvalid=rx_axis_tvalid,
        rx_axis_tlast=rx_axis_tlast,
        rx_axis_tuser=rx_axis_tuser,
        gmii_rx_clk=gmii_rx_clk,
        gmii_rxd=gmii_rxd,
        gmii_rx_dv=gmii_rx_dv,
        gmii_rx_er=gmii_rx_er,
        gmii_tx_clk=gmii_tx_clk,
        mii_tx_clk=mii_tx_clk,
        gmii_txd=gmii_txd,
        gmii_tx_en=gmii_tx_en,
        gmii_tx_er=gmii_tx_er,
        tx_error_underflow=tx_error_underflow,
        rx_error_bad_frame=rx_error_bad_frame,
        rx_error_bad_fcs=rx_error_bad_fcs,
        speed=speed,
        ifg_delay=ifg_delay
    )

    @always(delay(4))
    def clkgen():
        clk.next = not clk
        gtx_clk.next = not clk

    # Half-period of the GMII RX clock (simulation ticks); varied by
    # check() to emulate the different line rates / MII modes.
    rx_clk_hp = Signal(int(4))

    @instance
    def rx_clk_gen():
        while True:
            yield delay(int(rx_clk_hp))
            gmii_rx_clk.next = not gmii_rx_clk
            mii_tx_clk.next = not gmii_rx_clk

    # Latch the DUT error strobes so a later check could observe them.
    rx_error_bad_frame_asserted = Signal(bool(0))
    rx_error_bad_fcs_asserted = Signal(bool(0))

    @always(clk.posedge)
    def monitor():
        if (rx_error_bad_frame):
            rx_error_bad_frame_asserted.next = 1
        if (rx_error_bad_fcs):
            rx_error_bad_fcs_asserted.next = 1

    @instance
    def check():
        # Reset sequence.
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        gtx_rst.next = 1
        yield clk.posedge
        rst.next = 0
        gtx_rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge

        ifg_delay.next = 12

        # testbench stimulus

        # Run both tests at each RX clock rate / MII selection.
        for rate, mii in [(4, 0), (20, 1), (200, 1)]:
            rx_clk_hp.next = rate
            mii_select.next = mii
            yield delay(1000)

            yield clk.posedge
            print("test 1: test rx packet")
            current_test.next = 1

            test_frame = eth_ep.EthFrame()
            test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
            test_frame.eth_src_mac = 0x5A5152535455
            test_frame.eth_type = 0x8000
            test_frame.payload = bytearray(range(32))
            test_frame.update_fcs()

            axis_frame = test_frame.build_axis_fcs()

            # Preamble + SFD bytes, then the frame including FCS.
            gmii_source.send(b'\x55\x55\x55\x55\x55\x55\x55\xD5'+bytearray(axis_frame))

            yield axis_sink.wait()
            rx_frame = axis_sink.recv()

            eth_frame = eth_ep.EthFrame()
            eth_frame.parse_axis(rx_frame)
            eth_frame.update_fcs()

            assert eth_frame == test_frame

            yield delay(100)

            yield clk.posedge
            print("test 2: test tx packet")
            current_test.next = 2

            test_frame = eth_ep.EthFrame()
            test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
            test_frame.eth_src_mac = 0x5A5152535455
            test_frame.eth_type = 0x8000
            test_frame.payload = bytearray(range(32))
            test_frame.update_fcs()

            axis_frame = test_frame.build_axis()

            axis_source.send(axis_frame)

            yield gmii_sink.wait()
            rx_frame = gmii_sink.recv()

            # The transmitted frame must start with preamble + SFD.
            assert rx_frame.data[0:8] == bytearray(b'\x55\x55\x55\x55\x55\x55\x55\xD5')

            eth_frame = eth_ep.EthFrame()
            eth_frame.parse_axis_fcs(rx_frame.data[8:])

            print(hex(eth_frame.eth_fcs))
            print(hex(eth_frame.calc_fcs()))

            # 46-byte payload: the 32-byte payload was padded to the
            # minimum frame size, and the FCS must check out.
            assert len(eth_frame.payload.data) == 46
            assert eth_frame.eth_fcs == eth_frame.calc_fcs()
            assert eth_frame.eth_dest_mac == test_frame.eth_dest_mac
            assert eth_frame.eth_src_mac == test_frame.eth_src_mac
            assert eth_frame.eth_type == test_frame.eth_type
            assert eth_frame.payload.data.index(test_frame.payload.data) == 0

            yield delay(100)

        raise StopSimulation

    return instances()
def test_bench():
    """Run the eth_mac_1g_gmii testbench under the MyHDL simulator."""
    Simulation(bench()).run()
# Allow invoking the testbench directly from the command line.
if __name__ == '__main__':
    print("Running test...")
    test_bench()
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry points for YAPF.
The main APIs that YAPF exposes to drive the reformatting.
FormatFile(): reformat a file.
FormatCode(): reformat a string of code.
These APIs have some common arguments:
style_config: (string) Either a style name or a path to a file that contains
formatting style settings. If None is specified, use the default style
as set in style.DEFAULT_STYLE_FACTORY
lines: (list of tuples of integers) A list of tuples of lines, [start, end],
that we want to format. The lines are 1-based indexed. It can be used by
third-party code (e.g., IDEs) when reformatting a snippet of code rather
than a whole file.
print_diff: (bool) Instead of returning the reformatted source, return a
diff that turns the formatted source into the reformatted source.
"""
import difflib
import io
import logging
import re
from yapf.yapflib import blank_line_calculator
from yapf.yapflib import comment_splicer
from yapf.yapflib import pytree_unwrapper
from yapf.yapflib import pytree_utils
from yapf.yapflib import reformatter
from yapf.yapflib import split_penalty
from yapf.yapflib import style
from yapf.yapflib import subtype_assigner
def FormatFile(filename, style_config=None, lines=None, print_diff=False):
  """Format a single Python file and return the formatted code.

  Arguments:
    filename: (unicode) The file to reformat.
    style_config, lines, print_diff: see comment at the top of this module.

  Returns:
    The reformatted code or None if the file doesn't exist.
  """
  source = ReadFile(filename, logging.warning)
  if source is None:
    return None
  return FormatCode(source,
                    style_config=style_config,
                    filename=filename,
                    lines=lines,
                    print_diff=print_diff)
def FormatCode(unformatted_source,
               filename='<unknown>',
               style_config=None,
               lines=None,
               print_diff=False):
  """Format a string of Python code.

  This provides an alternative entry point to YAPF.

  Arguments:
    unformatted_source: (unicode) The code to format.
    filename: (unicode) The name of the file being reformatted.
    style_config, lines, print_diff: see comment at the top of this module.

  Returns:
    The code reformatted to conform to the desired formatting style.
  """
  style.SetGlobalStyle(style.CreateStyleFromConfig(style_config))
  tree = pytree_utils.ParseCodeToTree(unformatted_source.rstrip() + '\n')

  # Annotation passes; each one mutates the tree in place and must run in
  # this order.
  for annotate in (comment_splicer.SpliceComments,
                   subtype_assigner.AssignSubtypes,
                   split_penalty.ComputeSplitPenalties,
                   blank_line_calculator.CalculateBlankLines):
    annotate(tree)

  uwlines = pytree_unwrapper.UnwrapPyTree(tree)
  if not uwlines:
    return ''
  for uwl in uwlines:
    uwl.CalculateFormattingInformation()

  if lines is not None:
    # Caller restricted formatting to explicit line ranges.
    reformatted_source = _FormatLineSnippets(unformatted_source, uwlines,
                                             lines)
  else:
    # Honor "# yapf: disable/enable" regions, if any.
    enabled_ranges = _LinesToFormat(uwlines)
    if enabled_ranges:
      reformatted_source = _FormatLineSnippets(unformatted_source, uwlines,
                                               enabled_ranges)
    else:
      reformatted_source = reformatter.Reformat(uwlines)

  if unformatted_source == reformatted_source:
    # No change: an empty diff, or the source itself.
    return '' if print_diff else reformatted_source

  if print_diff:
    return _GetUnifiedDiff(unformatted_source, reformatted_source,
                           filename=filename)
  return reformatted_source
def ReadFile(filename, logger=None):
  """Read the contents of the file.

  An optional logger can be specified to emit messages to your favorite
  logging stream. If specified, then no exception is raised.

  Arguments:
    filename: (unicode) The name of the file.
    logger: (function) A function or lambda that takes a string and emits it.

  Returns:
    The contents of filename, or None when reading failed and a logger
    was supplied.

  Raises:
    IOError: raised during an error if a logger is not specified.
  """
  try:
    # newline='' preserves the file's original line endings verbatim.
    with io.open(filename, mode='r', newline='') as fd:
      return fd.read()
  except IOError as err:
    if not logger:
      raise
    logger(err)
DISABLE_PATTERN = r'^#+ +yapf: *disable$'
ENABLE_PATTERN = r'^#+ +yapf: *enable$'
def _LinesToFormat(uwlines):
"""Skip sections of code that we shouldn't reformat."""
start = 1
lines = []
for uwline in uwlines:
if uwline.is_comment:
if re.search(DISABLE_PATTERN, uwline.first.value.strip(), re.IGNORECASE):
lines.append((start, uwline.lineno))
elif re.search(ENABLE_PATTERN, uwline.first.value.strip(), re.IGNORECASE):
start = uwline.lineno
elif re.search(DISABLE_PATTERN, uwline.last.value.strip(), re.IGNORECASE):
# Disable only one line.
if uwline.lineno != start:
lines.append((start, uwline.lineno - 1))
start = uwline.last.lineno + 1
if start != 1 and start <= uwlines[-1].last.lineno + 1:
lines.append((start, uwlines[-1].last.lineno))
return lines
def _FormatLineSnippets(unformatted_source, uwlines, lines):
  """Format a string of Python code.

  This provides an alternative entry point to YAPF.

  Arguments:
    unformatted_source: (unicode) The code to format.
    uwlines: (list of UnwrappedLine) The unwrapped lines.
    lines: (list of tuples of integers) A list of lines that we want to format.
      The lines are 1-indexed.

  Returns:
    The code reformatted to conform to the desired formatting style.
  """
  # First we reformat only those lines that we want to reformat.
  index = 0
  reformatted_sources = dict()
  for start, end in sorted(lines):
    snippet = []
    while index < len(uwlines):
      if start <= uwlines[index].lineno or start < uwlines[index].last.lineno:
        # Found the first unwrapped line overlapping [start, end]: collect
        # every following line up to (and including) 'end'.
        while index < len(uwlines):
          if end < uwlines[index].lineno:
            break
          snippet.append(uwlines[index])
          index += 1
        break
      index += 1
    # Make sure to re-add preceding blank lines to the code snippet.
    blank_lines = ''
    if snippet:
      blank_lines = '\n' * (snippet[0].lineno - start)
      if snippet[0].is_comment:
        # NOTE(review): a leading comment token appears to carry its own
        # newlines; skip re-adding them to avoid doubling -- confirm
        # against comment_splicer behavior.
        if snippet[0].first.value.count('\n') == len(blank_lines):
          blank_lines = ''
    reformatted_sources[(start, end)] = (
        blank_lines + reformatter.Reformat(snippet).rstrip()
    )
  # Next we reconstruct the finalized lines inserting the reformatted lines at
  # the appropriate places.
  prev_end = 0
  finalized_lines = []
  unformatted_lines = unformatted_source.splitlines()
  for key in sorted(reformatted_sources):
    start, end = key
    # Copy the untouched lines between the previous snippet and this one.
    finalized_lines.extend(unformatted_lines[prev_end:start - 1])
    finalized_lines.append(reformatted_sources[key])
    prev_end = end
  # If there are any remaining lines, place them at the end.
  if prev_end < len(unformatted_lines):
    finalized_lines.extend(unformatted_lines[prev_end:])
  # Construct the reformatted sources.
  return '\n'.join(finalized_lines).rstrip() + '\n'
def _GetUnifiedDiff(before, after, filename='code'):
"""Get a unified diff of the changes.
Arguments:
before: (unicode) The original source code.
after: (unicode) The reformatted source code.
filename: (unicode) The code's filename.
Returns:
The unified diff text.
"""
before = before.splitlines()
after = after.splitlines()
return '\n'.join(difflib.unified_diff(before, after, filename, filename,
'(original)', '(reformatted)',
lineterm='')) + '\n'
| |
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import urllib2
import utils
from cloudify_rest_client import CloudifyClient
from cloudify.exceptions import HttpException, NonRecoverableError
class NodeInstance(object):
    """In-memory view of a deployment node instance.

    Wraps the runtime properties retrieved from Cloudify's runtime storage
    together with the instance's state, version, host and relationships.
    """

    def __init__(self,
                 node_instance_id,
                 node_id,
                 runtime_properties=None,
                 state=None,
                 version=None,
                 host_id=None,
                 relationships=None):
        self.id = node_instance_id
        self._node_id = node_id
        # Copy so the caller's dict is never mutated; track modifications.
        initial = (runtime_properties or {}).copy()
        self._runtime_properties = DirtyTrackingDict(initial)
        self._state = state
        self._version = version
        self._host_id = host_id
        self._relationships = relationships

    def get(self, key):
        """Return the runtime property ``key`` (None when absent)."""
        return self._runtime_properties.get(key)

    def put(self, key, value):
        """Set the runtime property ``key`` to ``value``."""
        self._runtime_properties[key] = value

    def delete(self, key):
        """Remove the runtime property ``key``."""
        del self._runtime_properties[key]

    # Dict-style access is an alias for the explicit accessors above.
    __getitem__ = get
    __setitem__ = put
    __delitem__ = delete

    def __contains__(self, key):
        return key in self._runtime_properties

    @property
    def runtime_properties(self):
        """The node instance runtime properties.

        To update the properties, make changes on the returned dict and
        call ``update_node_instance`` with the modified instance.
        """
        return self._runtime_properties

    @property
    def state(self):
        """The node instance state.

        To update the node instance state, change this property value and
        call ``update_node_instance`` with the modified instance.
        """
        return self._state

    @state.setter
    def state(self, value):
        self._state = value

    @property
    def version(self):
        return self._version

    @property
    def dirty(self):
        # True once any runtime property has been modified.
        return self._runtime_properties.dirty

    @property
    def host_id(self):
        return self._host_id

    @property
    def node_id(self):
        return self._node_id

    @property
    def relationships(self):
        return self._relationships
def get_rest_client():
    """
    :returns: A REST client configured to connect to the manager in context
    :rtype: cloudify_rest_client.CloudifyClient
    """
    host = utils.get_manager_ip()
    port = utils.get_manager_rest_service_port()
    return CloudifyClient(host, port)
def _save_resource(logger, resource, resource_path, target_path):
if not target_path:
target_path = os.path.join(utils.create_temp_folder(),
os.path.basename(resource_path))
with open(target_path, 'w') as f:
f.write(resource)
logger.info("Downloaded %s to %s" % (resource_path, target_path))
return target_path
def download_resource(resource_path, logger, target_path=None):
    """
    Download resource from the manager file server.

    :param resource_path: path to resource on the file server
    :param logger: logger to use for info output
    :param target_path: optional target path for the resource
    :returns: path to the downloaded resource
    """
    content = get_resource(resource_path)
    return _save_resource(logger, content, resource_path, target_path)
def download_blueprint_resource(blueprint_id,
                                resource_path,
                                logger,
                                target_path=None):
    """
    Download a blueprint-relative resource from the manager file server.

    :param blueprint_id: the blueprint id of the blueprint to download the
                         resource from
    :param resource_path: path to resource relative to blueprint folder
    :param logger: logger to use for info output
    :param target_path: optional target path for the resource
    :returns: path to the downloaded resource
    """
    content = get_blueprint_resource(blueprint_id, resource_path)
    return _save_resource(logger, content, resource_path, target_path)
def get_resource(resource_path, base_url=None):
    """
    Get resource from the manager file server.

    :param resource_path: path to resource on the file server
    :param base_url: optional override for the file server base URL
    :returns: resource content
    :raises HttpException: when the server answers with an HTTP error
    """
    if base_url is None:
        base_url = utils.get_manager_file_server_url()
    url = '{0}/{1}'.format(base_url, resource_path)
    try:
        return urllib2.urlopen(url).read()
    except urllib2.HTTPError as e:
        # Re-raise as the Cloudify-level exception callers expect.
        raise HttpException(e.url, e.code, e.msg)
def get_blueprint_resource(blueprint_id, resource_path):
    """
    Get a resource from the manager file server, with path relative to
    the blueprint denoted by ``blueprint_id``.

    :param blueprint_id: the blueprint id of the blueprint to download
                         the resource from
    :param resource_path: path to resource relative to blueprint folder
    :returns: resource content
    """
    blueprints_root = utils.get_manager_file_server_blueprints_root_url()
    base_url = "{0}/{1}".format(blueprints_root, blueprint_id)
    return get_resource(resource_path, base_url=base_url)
def get_node_instance(node_instance_id):
    """
    Read node instance data from the storage.

    :param node_instance_id: the node instance id
    :rtype: NodeInstance
    """
    raw = get_rest_client().node_instances.get(node_instance_id)
    return NodeInstance(node_instance_id,
                        raw.node_id,
                        runtime_properties=raw.runtime_properties,
                        state=raw.state,
                        version=raw.version,
                        host_id=raw.host_id,
                        relationships=raw.relationships)
def update_node_instance(node_instance):
    """
    Persist node instance data changes back to the storage.

    :param node_instance: the node instance with the updated data
    """
    get_rest_client().node_instances.update(
        node_instance.id,
        state=node_instance.state,
        runtime_properties=node_instance.runtime_properties,
        version=node_instance.version)
def get_node_instance_ip(node_instance_id):
    """
    Get the IP address of the host the node instance denoted by
    ``node_instance_id`` is contained in.

    Follows ``host_id`` to the containing host instance when needed, then
    returns the host's ``ip`` runtime property, falling back to the host
    node's ``ip`` property.

    :param node_instance_id: the node instance id
    :raises NonRecoverableError: if the instance has no host_id or no ip
                                 can be resolved
    """
    client = get_rest_client()
    instance = client.node_instances.get(node_instance_id)
    if instance.host_id is None:
        # BUG FIX: the original concatenated string was missing a space and
        # produced "host_idproperty" in the error message.
        raise NonRecoverableError('node instance: {0} is missing host_id '
                                  'property'.format(instance.id))
    if node_instance_id != instance.host_id:
        # The instance lives inside another host instance; query that one.
        instance = client.node_instances.get(instance.host_id)
    if instance.runtime_properties.get('ip'):
        return instance.runtime_properties['ip']
    node = client.nodes.get(instance.deployment_id, instance.node_id)
    if node.properties.get('ip'):
        return node.properties['ip']
    raise NonRecoverableError('could not find ip for node instance: {0} with '
                              'host id: {1}'.format(node_instance_id,
                                                    instance.id))
# TODO: some nasty code duplication between these two methods
def update_execution_status(execution_id, status, error=None):
    """
    Update the execution status of the execution denoted by ``execution_id``.

    :returns: The updated status
    """
    return get_rest_client().executions.update(execution_id, status, error)
def get_bootstrap_context():
    """Return the 'cloudify' section of the manager bootstrap context."""
    context = get_rest_client().manager.get_context()['context']
    return context.get('cloudify', {})
def get_provider_context():
    """Return the manager provider context."""
    return get_rest_client().manager.get_context()['context']
class DirtyTrackingDict(dict):
    """A dict that records whether it has been mutated.

    ``dirty`` becomes True after any mutating operation; ``modifiable``
    can be flipped off to make every mutation raise (used to freeze
    runtime properties of relationship node instances).
    """

    def __init__(self, *args, **kwargs):
        super(DirtyTrackingDict, self).__init__(*args, **kwargs)
        self.modifiable = True
        self.dirty = False

    def __setitem__(self, key, value):
        r = super(DirtyTrackingDict, self).__setitem__(key, value)
        self._set_changed()
        return r

    def __delitem__(self, key):
        r = super(DirtyTrackingDict, self).__delitem__(key)
        self._set_changed()
        return r

    def update(self, E=None, **F):
        # BUG FIX: dict.update(None) raises TypeError, so only forward E
        # when it was actually supplied (supports update(**kwargs) and
        # no-arg update() calls).
        if E is None:
            r = super(DirtyTrackingDict, self).update(**F)
        else:
            r = super(DirtyTrackingDict, self).update(E, **F)
        self._set_changed()
        return r

    def clear(self):
        r = super(DirtyTrackingDict, self).clear()
        self._set_changed()
        return r

    def pop(self, k, d=None):
        r = super(DirtyTrackingDict, self).pop(k, d)
        self._set_changed()
        return r

    def popitem(self):
        r = super(DirtyTrackingDict, self).popitem()
        self._set_changed()
        return r

    def _set_changed(self):
        # python 2.6 doesn't have modifiable during copy.deepcopy
        if hasattr(self, 'modifiable') and not self.modifiable:
            raise NonRecoverableError('Cannot modify runtime properties of'
                                      ' relationship node instances')
        self.dirty = True
| |
from __future__ import unicode_literals
import json
import logging
import requests
import six
from six.moves.urllib.parse import quote
from globus_sdk import config, exc
from globus_sdk.version import __version__
from globus_sdk.response import GlobusHTTPResponse
class ClientLogAdapter(logging.LoggerAdapter):
    """
    Stuff in the memory location of the client to make log records unambiguous.
    """
    def process(self, msg, kwargs):
        prefixed = '[instance:{}] {}'.format(id(self.extra['client']), msg)
        return prefixed, kwargs

    def warn(self, *args, **kwargs):
        # Alias kept for callers using the older spelling.
        return self.warning(*args, **kwargs)
class BaseClient(object):
    r"""
    Simple client with error handling for Globus REST APIs. Implemented
    as a wrapper around a ``requests.Session`` object, with a simplified
    interface that does not directly expose anything from requests.

    You should *never* try to directly instantiate a ``BaseClient``.

    **Parameters**

        ``authorizer`` (:class:`GlobusAuthorizer\
        <globus_sdk.authorizers.base.GlobusAuthorizer>`)
          A ``GlobusAuthorizer`` which will generate Authorization headers

        ``app_name`` (*string*)
          Optional "nice name" for the application. Has no bearing on the
          semantics of client actions. It is just passed as part of the
          User-Agent string, and may be useful when debugging issues with the
          Globus Team

    All other parameters are for internal use and should be ignored.
    """
    # Can be overridden by subclasses, but must be a subclass of GlobusError
    error_class = exc.GlobusAPIError
    default_response_class = GlobusHTTPResponse
    # a collection of authorizer types, or None to indicate "any"
    allowed_authorizer_types = None

    BASE_USER_AGENT = 'globus-sdk-py-{0}'.format(__version__)

    def __init__(self, service, environment=None,
                 base_path=None, authorizer=None, app_name=None):
        # get the fully qualified name of the client class, so that it's a
        # child of globus_sdk
        self.logger = ClientLogAdapter(
            logging.getLogger(self.__module__ + '.' + self.__class__.__name__),
            {'client': self})
        self.logger.info('Creating client of type {} for service "{}"'
                         .format(type(self), service))

        # if restrictions have been placed by a child class on the allowed
        # authorizer types, make sure we are not in violation of those
        # constraints
        if self.allowed_authorizer_types is not None and (
                authorizer is not None and
                type(authorizer) not in self.allowed_authorizer_types):
            self.logger.error("{} doesn't support authorizer={}"
                              .format(type(self), type(authorizer)))
            raise ValueError(
                ("{0} can only take authorizers from {1}, "
                 "but you have provided {2}").format(
                    type(self), self.allowed_authorizer_types,
                    type(authorizer)))

        # defer this default until instantiation time so that logging can
        # capture the execution of the config load
        if environment is None:
            environment = config.get_default_environ()

        self.environment = environment
        self.authorizer = authorizer

        # resolve the service base URL from config; an optional base_path
        # is appended for clients rooted below the service root
        self.base_url = config.get_service_url(environment, service)
        if base_path is not None:
            self.base_url = slash_join(self.base_url, base_path)

        # setup the basics for wrapping a Requests Session
        # including basics for internal header dict
        self._session = requests.Session()
        self._headers = {
            'Accept': 'application/json',
            'User-Agent': self.BASE_USER_AGENT
        }

        # verify SSL? Usually true
        self._verify = config.get_ssl_verify(environment)

        # set application name if given
        self.app_name = None
        if app_name is not None:
            self.set_app_name(app_name)

    def set_app_name(self, app_name):
        """
        Set an application name to send to Globus services as part of the User
        Agent.

        Application developers are encouraged to set an app name as a courtesy
        to the Globus Team, and to potentially speed resolution of issues when
        interacting with Globus Support.
        """
        self.app_name = app_name
        self._headers['User-Agent'] = '{0}/{1}'.format(self.BASE_USER_AGENT,
                                                       app_name)

    def qjoin_path(self, *parts):
        """Join path components into a URL path, percent-quoting each part."""
        return "/" + "/".join(quote(part) for part in parts)

    def get(self, path, params=None, headers=None,
            response_class=None, retry_401=True):
        """
        Make a GET request to the specified path.

        **Parameters**

            ``path`` (*string*)
              Path for the request, with or without leading slash

            ``params`` (*dict*)
              Parameters to be encoded as a query string

            ``headers`` (*dict*)
              HTTP headers to add to the request

            ``response_class`` (*class*)
              Class for response object, overrides the client's
              ``default_response_class``

            ``retry_401`` (*bool*)
              Retry on 401 responses with fresh Authorization if
              ``self.authorizer`` supports it

        :return: :class:`GlobusHTTPResponse \
        <globus_sdk.response.GlobusHTTPResponse>` object
        """
        self.logger.debug('GET to {} with params {}'.format(path, params))
        return self._request("GET", path, params=params, headers=headers,
                             response_class=response_class,
                             retry_401=retry_401)

    def post(self, path, json_body=None, params=None, headers=None,
             text_body=None, response_class=None, retry_401=True):
        """
        Make a POST request to the specified path.

        **Parameters**

            ``path`` (*string*)
              Path for the request, with or without leading slash

            ``params`` (*dict*)
              Parameters to be encoded as a query string

            ``headers`` (*dict*)
              HTTP headers to add to the request

            ``json_body`` (*dict*)
              Data which will be JSON encoded as the body of the request

            ``text_body`` (*string or dict*)
              Either a raw string that will serve as the request body, or a
              dict which will be HTTP Form encoded

            ``response_class`` (*class*)
              Class for response object, overrides the client's
              ``default_response_class``

            ``retry_401`` (*bool*)
              Retry on 401 responses with fresh Authorization if
              ``self.authorizer`` supports it

        :return: :class:`GlobusHTTPResponse \
        <globus_sdk.response.GlobusHTTPResponse>` object
        """
        self.logger.debug('POST to {} with params {}'.format(path, params))
        return self._request("POST", path, json_body=json_body, params=params,
                             headers=headers, text_body=text_body,
                             response_class=response_class,
                             retry_401=retry_401)

    def delete(self, path, params=None, headers=None,
               response_class=None, retry_401=True):
        """
        Make a DELETE request to the specified path.

        **Parameters**

            ``path`` (*string*)
              Path for the request, with or without leading slash

            ``params`` (*dict*)
              Parameters to be encoded as a query string

            ``headers`` (*dict*)
              HTTP headers to add to the request

            ``response_class`` (*class*)
              Class for response object, overrides the client's
              ``default_response_class``

            ``retry_401`` (*bool*)
              Retry on 401 responses with fresh Authorization if
              ``self.authorizer`` supports it

        :return: :class:`GlobusHTTPResponse \
        <globus_sdk.response.GlobusHTTPResponse>` object
        """
        self.logger.debug('DELETE to {} with params {}'.format(path, params))
        return self._request("DELETE", path, params=params,
                             headers=headers,
                             response_class=response_class,
                             retry_401=retry_401)

    def put(self, path, json_body=None, params=None, headers=None,
            text_body=None, response_class=None, retry_401=True):
        """
        Make a PUT request to the specified path.

        **Parameters**

            ``path`` (*string*)
              Path for the request, with or without leading slash

            ``params`` (*dict*)
              Parameters to be encoded as a query string

            ``headers`` (*dict*)
              HTTP headers to add to the request

            ``json_body`` (*dict*)
              Data which will be JSON encoded as the body of the request

            ``text_body`` (*string or dict*)
              Either a raw string that will serve as the request body, or a
              dict which will be HTTP Form encoded

            ``response_class`` (*class*)
              Class for response object, overrides the client's
              ``default_response_class``

            ``retry_401`` (*bool*)
              Retry on 401 responses with fresh Authorization if
              ``self.authorizer`` supports it

        :return: :class:`GlobusHTTPResponse \
        <globus_sdk.response.GlobusHTTPResponse>` object
        """
        self.logger.debug('PUT to {} with params {}'.format(path, params))
        return self._request("PUT", path, json_body=json_body, params=params,
                             headers=headers, text_body=text_body,
                             response_class=response_class,
                             retry_401=retry_401)

    def _request(self, method, path, params=None, headers=None,
                 json_body=None, text_body=None,
                 response_class=None, retry_401=True):
        """
        Shared implementation behind get/post/delete/put.

        **Parameters**

            ``method`` (*string*)
              HTTP request method, as an all caps string

            ``path`` (*string*)
              Path for the request, with or without leading slash

            ``params`` (*dict*)
              Parameters to be encoded as a query string

            ``headers`` (*dict*)
              HTTP headers to add to the request

            ``json_body`` (*dict*)
              Data which will be JSON encoded as the body of the request

            ``text_body`` (*string or dict*)
              Either a raw string that will serve as the request body, or a
              dict which will be HTTP Form encoded

            ``response_class`` (*class*)
              Class for response object, overrides the client's
              ``default_response_class``

            ``retry_401`` (*bool*)
              Retry on 401 responses with fresh Authorization if
              ``self.authorizer`` supports it

        :return: :class:`GlobusHTTPResponse \
        <globus_sdk.response.GlobusHTTPResponse>` object
        """
        # json_body takes precedence: serialize it into the text body
        if json_body is not None:
            assert text_body is None
            text_body = json.dumps(json_body)

        # copy
        rheaders = dict(self._headers)
        # expand
        if headers is not None:
            rheaders.update(headers)

        # add Authorization header, or (if it's a NullAuthorizer) possibly
        # explicitly remove the Authorization header
        if self.authorizer is not None:
            self.logger.debug('request will have authorization of type {}'
                              .format(type(self.authorizer)))
            self.authorizer.set_authorization_header(rheaders)

        url = slash_join(self.base_url, path)
        self.logger.debug('request will hit URL:{}'.format(url))

        # because a 401 can trigger retry, we need to wrap the retry-able thing
        # in a method
        def send_request():
            try:
                return self._session.request(
                    method=method, url=url, headers=rheaders, params=params,
                    data=text_body, verify=self._verify)
            except requests.RequestException as e:
                self.logger.error("NetworkError on request")
                raise exc.convert_request_exception(e)

        # initial request
        r = send_request()
        self.logger.debug('Request made to URL: {}'.format(r.url))

        # potential 401 retry handling
        if r.status_code == 401 and retry_401 and self.authorizer is not None:
            self.logger.debug('request got 401, checking retry-capability')
            # note that although handle_missing_authorization returns a T/F
            # value, it may actually mutate the state of the authorizer and
            # therefore change the value set by the `set_authorization_header`
            # method
            if self.authorizer.handle_missing_authorization():
                self.logger.debug('request can be retried')
                self.authorizer.set_authorization_header(rheaders)
                r = send_request()

        # 2xx and 3xx responses are successes; wrap and return
        if 200 <= r.status_code < 400:
            self.logger.debug('request completed with response code: {}'
                              .format(r.status_code))
            if response_class is None:
                return self.default_response_class(r)
            else:
                return response_class(r)

        self.logger.debug('request completed with (error) response code: {}'
                          .format(r.status_code))
        raise self.error_class(r)
def slash_join(a, b):
    """
    Join a and b with a single slash, regardless of whether they already
    contain a trailing/leading slash or neither.
    """
    trailing = a.endswith("/")
    leading = b.startswith("/")
    if trailing and leading:
        # both sides provide a slash -- drop one of them
        return a[:-1] + b
    if trailing or leading:
        # exactly one side already provides the separator
        return a + b
    # neither side has a slash; insert one
    return a + "/" + b
def merge_params(base_params, **more_params):
    """
    Update ``base_params`` in place with every keyword argument whose
    value is not None.

    This lets a method accept a handful of named optional parameters plus
    a ``**params`` catch-all dict and fold the named ones into the
    catch-all sensibly, e.g.::

    >>> def ep_search(self, filter_scope=None, filter_fulltext=None, **params):
    >>>     merge_params(
    >>>         params, filter_scope=filter_scope,
    >>>         filter_fulltext=filter_fulltext)
    >>>     return self.get('endpoint_search', params=params)

    Note that a non-None falsy value (such as ``''``) is still inserted,
    since it may be a perfectly valid argument value -- only None marks
    "not supplied".  The dict is mutated in place; nothing is returned.
    """
    for key, value in more_params.items():
        if value is not None:
            base_params[key] = value
def safe_stringify(value):
    """
    Coerce ``value`` to a unicode string.

    Bytes are decoded as UTF-8; unicode text is returned unchanged; any
    other object is stringified, converted to bytes, and decoded so that
    python 2 and python 3 produce the same kind of result.
    """
    if isinstance(value, bytes):
        return value.decode('utf-8')
    if isinstance(value, six.text_type):
        return value
    return six.b(str(value)).decode('utf-8')
| |
import re
import math
import glob
import pickle
import sys
import numpy as np
from peptide import Peptide
from xlink import XLink
from match import Match
from mzxmlreader import MZXMLReader
from fastareader import FastaReader
from random import shuffle
def logit(coef, x):
    """
    Evaluate a logistic-regression model: coef[0] is the intercept and
    coef[1:] are feature weights, so the feature vector used is [1] + x.
    Returns the sigmoid of the dot product, a float in (0, 1).
    """
    features = [1]
    features.extend(x)
    total = 0.0
    for i in range(len(coef)):
        total += coef[i] * features[i]
    return 1.0 / (1 + math.exp(-total))
def adjust_prior_marginal(prior_tr, model_output, marginal, iterations, rel_change_perc):
    """
    Recalibrate posterior probabilities from the training-set class prior
    ``prior_tr`` to the prior implied by this data set.

    Iterates (EM-style): (1) convert each flattened posterior from the
    training prior to the current prior estimate via Bayes' rule;
    (2) re-estimate the prior as the ``marginal``-weighted mean of the
    adjusted posteriors.  Stops after ``iterations`` rounds or once the
    relative change in the prior is <= ``rel_change_perc``.

    ``model_output`` is a per-spectrum list of lists; the return value is
    the adjusted posteriors re-split into the same shape.
    """
    # flatten the per-spectrum posteriors into one list
    posterior_tr = []
    for per_spec in model_output:
        posterior_tr.extend(per_spec)
    n = len(posterior_tr)
    prior = prior_tr
    posterior = [0.0] * n
    for i in range(iterations):
        # rescale every posterior from prior_tr to the current prior
        for k in range(n):
            denominator = (float(prior) / prior_tr) * posterior_tr[k] + (float(1 - prior) / (1 - prior_tr)) * (1 - posterior_tr[k])
            if prior_tr * denominator == 0:
                print 'denominator equals 0!'
                print 'execution will terminate!'
                sys.exit(1)
            posterior[k] = float(prior * posterior_tr[k]) / (prior_tr * denominator)
        # re-estimate the prior as the marginal-weighted mean posterior
        prior_prev = prior
        prior = 0.0
        for j in range(n):
            prior = prior + posterior[j] * marginal[j]
        prior = prior / sum(marginal)
        # print 'iteration = %d, %.10f' % (i, prior)
        if i > 0 and abs(float(prior_prev - prior) / prior_prev) <= rel_change_perc:
            # print '%f' % abs(float(prior_prev - prior) / prior_prev)
            break
    # re-split the flat posterior list back into per-spectrum lists
    new_model_output = []
    begin = 0
    for i in range(len(model_output)):
        end = begin + len(model_output[i])
        new_model_output.append(posterior[begin : end])
        begin = end
    return new_model_output
def get_marginal(p21, p11, p12, p22):
    """
    Solve the coupled per-candidate model outputs for marginal
    probabilities.

    Inputs are per-spectrum lists of lists holding the two logistic
    models' outputs on the original and peptide-swapped feature vectors.
    For each candidate the 2x2 linear system is solved, yielding
    presumably the marginal probabilities that each peptide is a true
    match (alpha_T, beta_T) and their complements (alpha_F, beta_F) --
    TODO confirm the exact probabilistic interpretation against the
    model-training code.  Returns four flat (flattened over spectra)
    lists: [alpha_T, beta_T, alpha_F, beta_F].
    """
    alpha_T = []
    beta_T = []
    alpha_F = []
    beta_F = []
    for i in range(len(p21)):
        for j in range(len(p21[i])):
            denominator = 1.0 - (p11[i][j] - p12[i][j]) * (p21[i][j] - p22[i][j])
            if denominator == 0:
                print 'denominator equals 0!'
                print 'execution will terminate!'
                sys.exit(1)
            at = (p12[i][j] + p22[i][j] * (p11[i][j] - p12[i][j])) / denominator
            bt = (p22[i][j] + p12[i][j] * (p21[i][j] - p22[i][j])) / denominator
            # complements of the two marginals
            af = 1.0 - at
            bf = 1.0 - bt
            alpha_T.append(at)
            beta_T.append(bt)
            alpha_F.append(af)
            beta_F.append(bf)
    return [alpha_T, beta_T, alpha_F, beta_F]
def get_matches_per_spec(mass, param, index, title):
    """
    Build and score all candidate cross-link matches for the spectrum
    identified by ``title``.

    For each candidate peptide pair supplied by the index, every pair of
    cross-linkable positions (occurrences of ``param['x_residue']``
    excluding the last residue, plus position 0 of protein N-terminal
    peptides when ``param['ntermxlink']`` is set) produces one scored
    Match.  Returns a list of Match.get_match_info(...) records.
    """
    spec_dict = index.spec_dict
    unique_pep = index.unique_pep[0]
    # search_index = index.search_index
    x_residue = param['x_residue']
    index_list = index.get_candidates(title)
    spec = spec_dict[title]
    matches = []
    for i in range(len(index_list)):
        index1 = index_list[i][0]
        index2 = index_list[i][1]
        pep1 = unique_pep[index1]
        pep2 = unique_pep[index2]
        # canonical order: the peptide pair is sorted by sequence
        pep_sorted = sorted([pep1, pep2], key = lambda x : x.seq)
        pep1 = pep_sorted[0]
        pep2 = pep_sorted[1]
        # NOTE(review): mz and it are fetched here but never used below
        ch = spec_dict[title].ch
        mz = spec_dict[title].mz
        it = spec_dict[title].it
        k_pos1 = []
        k_pos2 = []
        # optionally allow cross-linking at the protein N-terminus
        if param['ntermxlink'] == True:
            if pep1.is_nterm == True:
                k_pos1.append(0)
            if pep2.is_nterm == True:
                k_pos2.append(0)
        # positions of the cross-linkable residue; [:-1] excludes the
        # C-terminal residue.  NOTE: zip(*filter(...))[0] relies on
        # python 2 zip/filter returning lists.
        pep_seq1 = pep1.seq
        k_pos1.extend(list(zip(*filter(lambda x : x[1] == x_residue, enumerate(pep_seq1[:-1])))[0]))
        pep_seq2 = pep2.seq
        k_pos2.extend(list(zip(*filter(lambda x : x[1] == x_residue, enumerate(pep_seq2[:-1])))[0]))
        # one candidate match per pair of link positions
        for p1 in k_pos1:
            for p2 in k_pos2:
                pos = [p1, p2]
                xl = XLink(pep1, pep2, pos, ch, mass, param)
                match = Match(spec, xl, mass, param)
                match.match(mass)
                matches.append(match.get_match_info(index))
    return matches
def get_pep_from_pro(header, pro_seq, pattern_string, mass, param):
    """
    In-silico digest of one protein into Peptide objects.

    Cleavage is trypsin-like: after K or R unless followed by P.  Up to
    ``param['missed_sites']`` missed cleavages are allowed (all remaining
    boundaries near the C-terminus); peptides are filtered by length
    limits and by ``pattern_string``.  Variable modifications
    (``mod_res``/``mod_mass``) are generated by lowercasing up to
    ``max_var_mod`` occurrences of the modified residue, one Peptide per
    combination.
    """
    missed_sites = param['missed_sites']
    min_len = param['min_length']
    max_len = param['max_length']
    mod_res = param['mod_res']
    pattern = re.compile(pattern_string)
    # cleavage boundaries: 0, after each K/R not followed by P, and the
    # protein C-terminus
    sites = [0]
    for i in range(len(pro_seq)):
        if i == len(pro_seq) - 1:
            sites.append(i + 1)
        elif (pro_seq[i] == 'K' or pro_seq[i] == 'R') and pro_seq[i + 1] != 'P':
            sites.append(i + 1)
    pep_seqs = []
    for i in range(len(sites)):
        if i < len(sites) - missed_sites - 1:
            # allow up to missed_sites skipped boundaries
            for j in range(missed_sites + 1):
                pep_seq = pro_seq[sites[i] : sites[i + j + 1]]
                if len(pep_seq) >= min_len and len(pep_seq) <= max_len and pattern.match(pep_seq):
                    pep_seqs.append(pep_seq)
        else:
            # near the C-terminus: try every remaining boundary
            for j in range(i + 1, len(sites)):
                pep_seq = pro_seq[sites[i] : sites[j]]
                if len(pep_seq) >= min_len and len(pep_seq) <= max_len and pattern.match(pep_seq):
                    pep_seqs.append(pep_seq)
    # deduplicate peptide sequences
    pep_seqs = list(set(pep_seqs))
    peps = []
    for pep_seq in pep_seqs:
        modif = dict(position=[], delta_mass=[])
        # a peptide is N-terminal iff it is a prefix of the protein
        is_nterm = True if pep_seq == pro_seq[:len(pep_seq)] else False
        peps.append(Peptide(pep_seq, header, modif, mass, is_nterm))
        if len(mod_res) != 0:
            mod_mass = param['mod_mass']
            index = [i for i, aa in enumerate(pep_seq) if aa == mod_res]
            if len(index) != 0:
                max_var_mod = min(param['max_var_mod'], len(index))
                # emit one modified peptide per combination of 1..max
                # modified positions
                for i in range(1, max_var_mod + 1):
                    mod_cfg = subset(index, i)
                    for j in range(len(mod_cfg)):
                        pep_seq_mod = list(pep_seq)
                        modif = dict(position=[], delta_mass=[])
                        for k in range(len(mod_cfg[j])):
                            # modified residues are marked by lowercasing
                            pep_seq_mod[mod_cfg[j][k]] = pep_seq_mod[mod_cfg[j][k]].lower()
                            modif['position'].append(mod_cfg[j][k])
                            modif['delta_mass'].append(mod_mass)
                        pep_seq_mod = ''.join(pep_seq_mod)
                        peps.append(Peptide(pep_seq_mod, header, modif, mass, is_nterm))
    return peps
def read_param(filename):
    """
    Read a tab-separated parameter file and return ``[param, mass]``.

    ``param`` holds the search configuration: the defaults below,
    overridden by any recognized ``name<TAB>value`` line from the file.
    ``mass`` maps residues and ion types to monoisotopic masses.  Lines
    that are empty, start with '#', or have fewer than two tab-separated
    columns are ignored.
    """
    param = dict(
        use_a_ion=True,
        verbose=False,
        ch_pre_xlink_ions=[1, 3],
        ch_post_xlink_ions=[2, 5],
        base_peak_int = 100.0,
        dynamic_range = 0.001,
        missed_sites = 2,
        min_length = 4,
        max_length = 51,
        mod_res = '',
        mod_mass = 0.0,
        # defaults for fixed modifications, so the lookup at the end of
        # this function cannot raise KeyError when the config file does
        # not set fix_mod_res / fix_mod_mass
        fix_mod_res = '',
        fix_mod_mass = 0.0,
        linker_mass = 136.10005,
        ms1_tol = dict(measure='ppm', val=5),
        ms2_tol = dict(measure='da', val=0.01),
        min_mz = 200,
        max_mz = 2000,
        mode = 'conservative',
        x_residue = 'K',
        aa = 'ACDEFGHIKLMNPQRSTVWY',
        rel_change_perc = 0.01,
        neutral_loss=dict(
            h2o_loss=dict(
                mass=-18.010565,
                aa=set('ACDEFGHIKLMNPQRSTVWY')),
            nh3_loss=dict(
                mass=-17.026549,
                aa=set('ACDEFGHIKLMNPQRSTVWY')),
            h2o_gain=dict(
                mass=18.010565,
                aa=set('ACDEFGHIKLMNPQRSTVWY'))),
        model_TT_TF = [0.0] * 17,
        model_TF_FF = [0.0] * 17,
        nTT = 169,
        nTF = 8568,
        nFF = 91242)
    # monoisotopic residue masses plus atom/ion constants
    mass = dict(
        A=71.037114,
        R=156.101111,
        N=114.042927,
        D=115.026943,
        C=103.009184,
        E=129.042593,
        Q=128.058578,
        G=57.021464,
        H=137.058912,
        I=113.084064,
        L=113.084064,
        K=128.094963,
        M=131.040485,
        F=147.068414,
        P=97.052764,
        S=87.032028,
        T=101.047678,
        W=186.079313,
        Y=163.063329,
        V=99.068414,
        Hatom=1.007825032,
        Oatom=15.99491462,
        neutron_mass = 1.008701,
        b_ion_res=1.0078246,
        a_ion_res=-26.9870904,
        y_ion_res=19.0183888,
        isotope_inc = [1.008701/4, 1.008701/3, 1.008701/2, 1.008701/1])
    f = open(filename)
    lines = f.readlines()
    for l in lines:
        # strip the trailing newline; skip blanks, comments and lines
        # without at least two tab-separated columns
        l = l[:-1]
        cols = l.split('\t')
        if len(l) == 0 or l[0] == '#' or len(cols) < 2:
            continue
        name = cols[0]
        val = cols[1]
        if name == 'database':
            param['database'] = val
        elif name == 'MS_data_directory':
            param['ms_data'] = val
            if val[-1] != '/':
                param['ms_data'] += '/'
        elif name == 'XLresidue':
            param['x_residue'] = val
        elif name == 'ms1tol_unit':
            param['ms1_tol']['measure'] = val
        elif name == 'ms1tol_val':
            # float, not int: ppm tolerances may be fractional
            param['ms1_tol']['val'] = float(val)
        elif name == 'ms2tol_unit':
            param['ms2_tol']['measure'] = val
        elif name == 'ms2tol_val':
            param['ms2_tol']['val'] = float(val)
        elif name == 'linker_mass':
            param['linker_mass'] = float(val)
        elif name == 'miss_cleave':
            param['missed_sites'] = int(val)
        elif name == 'include_a_ions':
            param['use_a_ion'] = True if val.lower() == 'true' else False
        elif name == 'min_peplen':
            param['min_length'] = int(val)
        elif name == 'max_peplen':
            param['max_length'] = int(val)
        elif name == 'fix_mod_res':
            param['fix_mod_res'] = val
        elif name == 'fix_mod_mass':
            param['fix_mod_mass'] = float(val)
        elif name == 'var_mod_res':
            param['mod_res'] = val
        elif name == 'var_mod_mass':
            param['mod_mass'] = float(val)
        elif name == 'min_preXL_ions_ch':
            param['ch_pre_xlink_ions'][0] = int(val)
        elif name == 'max_preXL_ions_ch':
            param['ch_pre_xlink_ions'][1] = int(val)
        elif name == 'min_postXL_ions_ch':
            param['ch_post_xlink_ions'][0] = int(val)
        elif name == 'max_postXL_ions_ch':
            param['ch_post_xlink_ions'][1] = int(val)
        elif name == 'target_database':
            param['target_database'] = val
        elif name == 'uniprot_database':
            param['uniprot_database'] = val
        elif name == 'max_iterations':
            param['max_iterations'] = int(val)
        elif name == 'annotate_spec':
            param['annotation'] = True if val.lower() == 'true' else False
        elif name == 'deisotope':
            param['deisotope'] = True if val.lower() == 'true' else False
        elif name == 'ndeisotope':
            param['ndeisotope'] = int(val)
        elif name == 'ntermxlink':
            param['ntermxlink'] = True if val.lower() == 'true' else False
        elif name == 'decoy_string':
            param['decoy_string'] = val
        elif name == 'cutoff':
            param['cutoff'] = float(val)
        elif name == 'is_unique':
            param['is_unique'] = val
        elif name == 'true_true_psm_file':
            param['true_true_psm_file'] = val
        elif name == 'max_var_mod':
            param['max_var_mod'] = int(val)
        elif len(name) >= 4 and name[:2] == 'CI':
            # model coefficients keyed by 'CI..' names
            # NOTE(review): for 5-character names the slice skips
            # name[2]; confirm the expected key format for these entries.
            if len(name) == 4:
                s = int(name[2:])
                param['model_TT_TF'][s] = float(val)
            elif len(name) == 5:
                s = int(name[3:])
                param['model_TF_FF'][s] = float(val)
        elif name == 'nTT':
            param['nTT'] = int(val)
        elif name == 'nTF':
            param['nTF'] = int(val)
        elif name == 'nFF':
            param['nFF'] = int(val)
    # derived values: digest pattern and training-set class priors
    param['pattern_string'] = '^[' + param['aa'] + ']*' + param['x_residue'] + '[' + param['aa'] + ']+$'
    param['prior_tr_TT_TF'] = float(param['nTT']) / (param['nTT'] + param['nTF'])
    param['prior_tr_TF_FF'] = float(param['nTF']) / (param['nTF'] + param['nFF'])
    f.close()
    # apply any fixed modification by shifting the residue's mass
    if len(param['fix_mod_res']) > 0:
        mass[param['fix_mod_res']] += param['fix_mod_mass']
    return [param, mass]
def read_spec(directory, param, mass):
    """
    Load all .mzXML files under ``directory`` and return a dict mapping
    spectrum title -> spectrum object.

    Spectra are kept only if they elute within the first 110 minutes,
    have charge 2..7, and contain at least 25 peaks that have a nearby
    partner peak (within 0.01 m/z) of comparable intensity (ratio within
    [0.3, 1/0.3]) -- presumably an isotope-pair quality filter; confirm
    intent before changing thresholds.
    """
    files = glob.glob(directory + '*.mzXML')
    spec_dict = dict()
    total = []
    for filename in files:
        reader = MZXMLReader(filename)
        spec = reader.get_spec_list(mass, param)
        total.append(spec)
    ss = []
    for i in range(len(total)):
        # NOTE(review): ss is populated but never read afterwards
        ss.append(set())
        tmp = []
        # retention-time and charge filter
        for j in range(len(total[i])):
            if total[i][j].ret_time >= 0 and total[i][j].ret_time <= 110*60 and total[i][j].ch >= 2 and total[i][j].ch <= 7:
                tmp.append(total[i][j])
        tmp = sorted(tmp, key = lambda s : s.mol_weight)
        tolerance = 0.01
        lower_ratio = 0.3
        upper_ratio = 1 / float(lower_ratio)
        # NOTE(review): range(len(tmp) - 1) never examines the last
        # (heaviest) spectrum -- confirm whether that is intentional.
        for j in range(len(tmp) - 1):
            MZ = []
            IT = []
            mz = tmp[j].mz
            it = tmp[j].it
            last_index = 0
            ik = 0
            jk = 0
            # for each peak ik, scan nearby peaks jk within +/- tolerance
            # m/z; keep ik whenever some partner has an intensity ratio
            # inside [lower_ratio, upper_ratio]
            for ik in range(len(mz)):
                if last_index == 0:
                    jk = 0
                else:
                    jk = last_index
                while not (last_index > len(mz) - 1 or jk > len(mz) - 1 or ik > len(mz) - 1 or mz[jk] > mz[ik] + tolerance):
                    if mz[jk] <= mz[ik] - tolerance:
                        last_index = jk
                    ratio = float(it[ik]) / float(it[jk])
                    if abs(mz[ik] - mz[jk]) <= tolerance and ratio >= lower_ratio and ratio <= upper_ratio:
                        MZ.append(mz[ik])
                        IT.append(it[ik])
                    jk = jk + 1
            # require at least 25 such paired peaks
            if len(MZ) >= 25:
                spec_dict[tmp[j].title] = tmp[j]
    return spec_dict
def get_tophits(index, result):
    """
    Score all candidate matches and return the best hit per spectrum.

    For every candidate the two pairwise logistic models (TT-vs-TF and
    TF-vs-FF) are evaluated on the feature vector and on the vector with
    the two peptides' features swapped; the models' priors are then
    recalibrated to this data set (adjust_prior_marginal) and the
    adjusted probabilities combined into marginals and a single score.
    Returns a list of [pep, pos, pro, ch, score, alpha, beta, scan]
    records, one per spectrum, sorted by descending score.
    """
    model_TT_TF = index.param['model_TT_TF']
    model_TF_FF = index.param['model_TF_FF']
    prior_tr_TT_TF = index.param['prior_tr_TT_TF']
    prior_tr_TF_FF = index.param['prior_tr_TF_FF']
    max_iterations = index.param['max_iterations']
    rel_change_perc = index.param['rel_change_perc']
    p21 = []
    p11 = []
    p12 = []
    p22 = []
    # evaluate both models on each candidate's features, original and
    # peptide-swapped order
    for i in range(len(result)):
        print i
        p21.append([])
        p11.append([])
        p12.append([])
        p22.append([])
        for j in range(len(result[i][1])):
            feature = result[i][1][j][2]
            x = list(feature[0])
            x.extend(feature[1])
            x_flip = list(feature[1])
            x_flip.extend(feature[0])
            b = model_TT_TF
            p21[-1].append(logit(b, x))
            p11[-1].append(logit(b, x_flip))
            b = model_TF_FF
            p12[-1].append(logit(b, x))
            p22[-1].append(logit(b, x_flip))
    [alpha_T, beta_T, alpha_F, beta_F] = get_marginal(p21, p11, p12, p22)
    # recalibrate each probability set from its training prior to the
    # prior implied by this data set
    p21 = adjust_prior_marginal(prior_tr_TT_TF, p21, alpha_T, max_iterations, rel_change_perc)
    p11 = adjust_prior_marginal(prior_tr_TT_TF, p11, beta_T, max_iterations, rel_change_perc)
    p12 = adjust_prior_marginal(prior_tr_TF_FF, p12, beta_F, max_iterations, rel_change_perc)
    p22 = adjust_prior_marginal(prior_tr_TF_FF, p22, alpha_F, max_iterations, rel_change_perc)
    # attach per-candidate marginals/score info records
    for i in range(len(result)):
        print i
        # result[i] = list(result[i])
        for j in range(len(result[i][1])):
            pep1 = index.unique_pep[0][result[i][1][j][0][0]]
            pep2 = index.unique_pep[0][result[i][1][j][0][1]]
            ap21 = p21[i][j]
            ap11 = p11[i][j]
            ap12 = p12[i][j]
            ap22 = p22[i][j]
            denominator = 1 - (ap11 - ap12) * (ap21 - ap22)
            if denominator == 0:
                print 'denominator equals 0!'
                print 'execution will terminate!'
                sys.exit(1)
            marginal_alaph_T = (ap12 + ap22 * (ap11 - ap12)) / denominator
            marginal_beta_T = (ap22 + ap12 * (ap21 - ap22)) / denominator
            prob1 = ap11 * marginal_beta_T
            prob2 = ap21 * marginal_alaph_T
            # final score: mean of the two joint probabilities
            score = (prob1 + prob2) / float(2)
            info = {'alpha' : marginal_alaph_T, 'beta' : marginal_beta_T, 'prob' : [prob1, prob2], 'score' : score}
            # result[i][1][j] = list(result[i][1][j])
            result[i][1][j].append(info)
    # rank candidates within each spectrum, then spectra by best hit
    for r in result:
        r[1] = sorted(r[1], key = lambda x : x[3]['score'], reverse = True)
    result = sorted(result, key = lambda x : x[1][0][3]['score'], reverse = True)
    tophits = []
    for r in result:
        scan = r[0]
        pep = [index.unique_pep[0][r[1][0][0][0]].seq, index.unique_pep[0][r[1][0][0][1]].seq]
        pos = [int(r[1][0][1][0]), int(r[1][0][1][1])]
        pro = [index.unique_pep[0][r[1][0][0][0]].pro_id, index.unique_pep[0][r[1][0][0][1]].pro_id]
        # the charge is encoded as the last dot-separated field of the scan title
        ch = int(scan.split('.')[-1])
        score = r[1][0][3]['score']
        alpha = r[1][0][3]['alpha']
        beta = r[1][0][3]['beta']
        tophits.append([pep, pos, pro, ch, score, alpha, beta, scan])
    return tophits
def write_results(output_file, tophits):
    """
    Write ranked top-hit cross-link identifications to ``output_file`` as
    a tab-separated table: one header row, then one row per hit with
    rank, peptides, link sites, proteins, charge, probabilities and scan.
    """
    out = open(output_file, 'w')
    out.write('Rank\tPep_alpha\tPep_beta\tSite_alpha\tSite_beta\t'
              'Pro_alpha\tPro_beta\tCharge\tpr(alpha=T,beta=T)\t'
              'pr(alpha=T)\tpr(beta=T)\tSpectrum\n')
    for rank, hit in enumerate(tophits, 1):
        pep, pos, pro, ch, score, alpha, beta, scan = hit[:8]
        row = [
            '%d' % rank,
            '%s' % pep[0], '%s' % pep[1],
            '%d' % pos[0], '%d' % pos[1],
            '%s' % ','.join(pro[0]), '%s' % ','.join(pro[1]),
            '%d' % ch,
            '%E' % score, '%E' % alpha, '%E' % beta,
            '%s' % scan,
        ]
        out.write('\t'.join(row) + '\n')
    out.close()
def subset(index, k):
    """
    Return every k-element combination of ``index`` as a list of lists,
    preserving the original element order (combinations containing
    index[0] come first).  ``k`` is clamped to len(index); an empty input
    or k == 0 yields [[]].
    """
    if k > len(index):
        k = len(index)
    if k == 0 or not index:
        return [[]]
    # combinations that include the first element...
    combos = [[index[0]] + tail for tail in subset(index[1:], k - 1)]
    # ...followed by those that skip it (only possible when enough
    # elements remain)
    if k <= len(index) - 1:
        combos += subset(index[1:], k)
    return combos
def get_true_true(result, index, param, mass):
    """
    Select high-confidence ("True-True") PSMs from search results for
    model training.

    A candidate is kept when both peptides pass matched-ion coverage and
    intensity thresholds and the proteins of the two peptides share the
    character following '|R' in their headers (presumably a replicate or
    run label -- confirm against the database header format).  Per
    spectrum, the surviving candidate with the largest summed matched
    intensity is retained.  Returns a list of Match objects.
    """
    true_true = []
    for i in range(len(result)):
        title = result[i][0]
        spec = index.spec_dict[title]
        ch = spec.ch
        candidate = []
        sum_int = []
        for j in range(len(result[i][1])):
            pep1 = index.unique_pep[0][result[i][1][j][0][0]]
            pep2 = index.unique_pep[0][result[i][1][j][0][1]]
            sl = [set(), set()]
            pos = result[i][1][j][1]
            # collect the character after '|R' from each peptide's
            # protein headers
            for pro in pep1.pro_id:
                cols = pro.split('|R')
                if len(cols) > 1 and len(cols[1]) > 0:
                    sl[0].add(cols[1][0])
            for pro in pep2.pro_id:
                cols = pro.split('|R')
                if len(cols) > 1 and len(cols[1]) > 0:
                    sl[1].add(cols[1][0])
            feature = list(result[i][1][j][2][0])
            feature.extend(result[i][1][j][2][1])
            # NOTE(review): the trailing '> 0' binds to the whole
            # (a or b or c) expression, not just the intersection length;
            # it still evaluates as intended because True > 0 is True and
            # the int case reduces to len(...) > 0.
            if feature[0] / float(feature[7]) >= 0.20 and feature[1] / float(feature[7]) >= 0.20 and feature[8] / float(feature[15]) >= 0.20 and feature[9] / float(feature[15]) >= 0.20 and feature[2] >= 0.1 and feature[10] >= 0.1 and (len(sl[0]) == 0 or len(sl[1]) == 0 or len(sl[0].intersection(sl[1]))) > 0:
                xl = XLink(pep1, pep2, pos, ch, mass, param)
                match = Match(spec, xl, mass, param)
                match.match(mass)
                candidate.append(match)
                sum_int.append(feature[2] + feature[10])
        if len(candidate) == 0:
            continue
        # keep the candidate with the largest summed matched intensity
        # (python 2: zip returns a list, so [0]/[1] indexing works)
        combo = zip(candidate, sum_int)
        candidate = list(zip(*sorted(combo, key = lambda x : x[1], reverse = True))[0])
        sum_int = list(zip(*sorted(combo, key = lambda x : x[1], reverse = True))[1])
        true_true.append(candidate[0])
    for i in range(len(true_true)):
        pep1 = true_true[i].xlink.pep[0]
        pep2 = true_true[i].xlink.pep[1]
        s = pep1.seq + '\t' + pep2.seq + '\t' + ','.join(pep1.pro_id) + '\t' + ','.join(pep2.pro_id)
        print s
    if len(true_true) < 150:
        print '\nWARNING: The number of True-True PSMs(' + str(len(true_true)) + ') is too small and maybe insufficient for training an reliable model!\n'
    return true_true
def read_true_true(filename, mass, param):
f = open(filename)
lines = f.readlines()
modif = dict(position = [], delta_mass = [])
true_true = []
mz = []
it = []
for l in lines:
l = l[:-1]
cols = l.split('\t')
if len(cols) == 5:
pep = [cols[0], cols[1]]
pos = [int(cols[2]) - 1, int(cols[3]) - 1]
ch = int(cols[4])
elif len(cols) == 2:
mz.append(float(cols[0]))
it.append(float(cols[1]))
elif len(cols) == 1 and len(cols[0]) > 0:
prec_mz = float(cols[0])
else:
pep1 = Peptide(pep[0], 'protein_' + pep[0], modif, mass, False)
pep2 = Peptide(pep[1], 'prptein_' + pep[1], modif, mass, False)
xl = XLink(pep1, pep2, pos, ch, mass, param)
spec = Spectrum('precursor_mz' + str(prec_mz), 0, prec_mz, ch, mz, it, 0, mass)
match = Match(spec, xl, mass, param)
true_true.append(match)
mz = []
it = []
f.close()
if len(true_true) < 150:
print '\nWARNING: The number of True-True PSMs(' + str(len(true_true)) + ') is too small and maybe insufficient for training an reliable model!\n'
def get_true_false(true_true, param, mass):
    """
    Generate "True-False" training matches: each peptide of every
    True-True match is paired with mass-compatible decoy peptides
    digested from 'MOUSE' entries of the uniprot database.

    A decoy pairing is kept when the combined precursor mass matches the
    True-True cross-link within 5 ppm and the decoy side shows minimal
    matched-ion coverage (>= 0.2).  Returns a flat list of Match objects.
    """
    true_true_seq = set()
    true_true_mass = []
    linker_mass = param['linker_mass']
    if 'uniprot_database' not in param:
        print 'No uniprot database specified!'
        print 'execution will terminate!'
        sys.exit(1)
    fasta = FastaReader(param['uniprot_database']).read_fasta()
    pattern_string = param['pattern_string']
    peps = dict()
    for match in true_true:
        true_true_seq.add(match.xlink.pep[0].seq)
        true_true_seq.add(match.xlink.pep[1].seq)
        true_true_mass.append(match.xlink.mol_weight - linker_mass)
    true_true_mass = max(true_true_mass)
    # decoy pool: unique MOUSE peptides not present in any True-True
    # match and lighter than the heaviest True-True peptide-pair mass
    for header, seq in fasta:
        if 'MOUSE' in header:
            pep = get_pep_from_pro(header, seq, pattern_string, mass, param)
            for p in pep:
                if p.seq not in peps and p.seq not in true_true_seq and p.prec_mass < true_true_mass:
                    peps[p.seq] = p
    peps = peps.values()
    alpha = []
    beta = []
    for i in range(len(true_true)):
        print i
        sys.stdout.flush()
        match = true_true[i]
        ch = match.spec.ch
        # 5 ppm tolerance on the cross-link molecular weight
        ms2tol = match.xlink.mol_weight * 5 * (10 ** (-6))
        alpha.append([])
        beta.append([])
        pep = match.xlink.pep
        # pair the alpha peptide with mass-matched decoys
        for j in range(len(peps)):
            if abs(pep[0].prec_mass + peps[j].prec_mass + linker_mass - match.xlink.mol_weight) <= ms2tol:
                pepseq1 = pep[0].seq
                pepseq2 = peps[j].seq
                # python 2 idioms: list-returning zip/filter, and integer
                # division to pick the middle 'K' as the link site
                k_pos1 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq1[:-1])))[0])
                k_pos2 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq2[:-1])))[0])
                pos = [k_pos1[len(k_pos1) / 2], k_pos2[len(k_pos2) / 2]]
                xl = XLink(pep[0], peps[j], pos, ch, mass, param)
                tf = Match(match.spec, xl, mass, param)
                tf.match(mass)
                feature = tf.feature
                if (feature[1][0] + feature[1][1]) / float(feature[1][7]) >= 0.2:
                    alpha[-1].append(tf)
        # pair the beta peptide with mass-matched decoys
        for j in range(len(peps)):
            if abs(pep[1].prec_mass + peps[j].prec_mass + linker_mass - match.xlink.mol_weight) <= ms2tol:
                pepseq1 = pep[1].seq
                pepseq2 = peps[j].seq
                k_pos1 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq1[:-1])))[0])
                k_pos2 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq2[:-1])))[0])
                pos = [k_pos1[len(k_pos1) / 2], k_pos2[len(k_pos2) / 2]]
                xl = XLink(pep[1], peps[j], pos, ch, mass, param)
                tf = Match(match.spec, xl, mass, param)
                tf.match(mass)
                feature = tf.feature
                if (feature[1][0] + feature[1][1]) / float(feature[1][7]) >= 0.2:
                    beta[-1].append(tf)
    true_false = []
    for i in range(len(alpha)):
        true_false.extend(alpha[i])
    for i in range(len(beta)):
        true_false.extend(beta[i])
    return true_false
def get_false_false(true_true, param, mass):
    """
    Generate "False-False" training matches: random pairs of decoy
    peptides (digested from 'YEAST' uniprot entries, binned by integer
    precursor mass) whose combined mass matches a True-True spectrum
    within 3 ppm and that show minimal matched-ion coverage (>= 0.15) on
    both peptides.  Returns a flat list of Match objects.
    """
    linker_mass = param['linker_mass']
    true_true_seq = set()
    true_true_mass = []
    for match in true_true:
        true_true_seq.add(match.xlink.pep[0].seq)
        true_true_seq.add(match.xlink.pep[1].seq)
        true_true_mass.append(match.xlink.mol_weight - linker_mass)
    min_mass = int(min(true_true_mass) - 0.2)
    max_mass = int(max(true_true_mass) + 0.2)
    if 'uniprot_database' not in param:
        print 'No uniprot database specified!'
        print 'execution will terminate!'
        sys.exit(1)
    fasta = FastaReader(param['uniprot_database']).read_fasta()
    pattern_string = param['pattern_string']
    peps = dict()
    # decoy pool: unique YEAST peptides not seen in any True-True match
    for header, seq in fasta:
        if 'YEAST' in header:
            pep = get_pep_from_pro(header, seq, pattern_string, mass, param)
            for p in pep:
                if p.seq not in peps and p.seq not in true_true_seq:
                    peps[p.seq] = p
    peps = peps.values()
    # bin decoys by integer precursor mass for fast partner lookup
    int_dict = dict()
    for pep in peps:
        num = int(pep.prec_mass)
        if num > max_mass:
            continue
        if num not in int_dict:
            int_dict[num] = [pep]
        else:
            int_dict[num].append(pep)
    false_false = []
    for k in range(len(true_true)):
        match = true_true[k]
        print k
        sys.stdout.flush()
        false_false.append([])
        prec_mass = match.xlink.mol_weight - linker_mass
        ch = match.spec.ch
        # 3 ppm tolerance on the cross-link molecular weight
        ms2tol = match.xlink.mol_weight * 3 * (10 ** (-6))
        # sample up to 25 random mass bins, up to 50 peptides per bin
        # (python 2: range() returns a list, which shuffle() requires)
        mass_list = range(500, max_mass - 500)
        shuffle(mass_list)
        mass_list = mass_list[:25]
        for m in mass_list:
            if m not in int_dict:
                continue
            shuffle(int_dict[m])
            int_dict[m] = int_dict[m][:50]
            for i in range(len(int_dict[m])):
                # partner bin: remainder of the target precursor mass
                num = int(prec_mass - int_dict[m][i].prec_mass)
                if num not in int_dict:
                    continue
                shuffle(int_dict[num])
                int_dict[num] = int_dict[num][:50]
                for j in range(len(int_dict[num])):
                    pepseq1 = int_dict[m][i].seq
                    pepseq2 = int_dict[num][j].seq
                    # python 2: list-returning zip/filter and integer
                    # division picking the middle 'K' as the link site
                    k_pos1 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq1[:-1])))[0])
                    k_pos2 = list(zip(*filter(lambda x : x[1] == 'K', enumerate(pepseq2[:-1])))[0])
                    pos = [k_pos1[len(k_pos1) / 2], k_pos2[len(k_pos2) / 2]]
                    xl = XLink(int_dict[m][i], int_dict[num][j], pos, ch, mass, param)
                    if abs(match.xlink.mol_weight - xl.mol_weight) <= ms2tol:
                        ff = Match(match.spec, xl, mass, param)
                        ff.match(mass)
                        feature = ff.feature
                        if (feature[0][0] + feature[0][1]) / float(feature[0][7]) >= 0.15 and (feature[1][0] + feature[1][1]) / float(feature[1][7]) >= 0.15:
                            false_false[-1].append(ff)
    # flatten the per-spectrum lists
    l = []
    for i in range(len(false_false)):
        l.extend(false_false[i])
    false_false = l
    return false_false
def get_feature_matrix(matches):
    """
    Build a 2-D feature matrix from Match objects: one row per match,
    formed by concatenating the two per-peptide feature vectors
    (match.feature[0] followed by match.feature[1]).
    """
    rows = [list(match.feature[0]) + list(match.feature[1]) for match in matches]
    return np.asarray(rows)
def filter_by_fdr(top_hits, decoy_string, cutoff, is_unique):
    """
    Apply target-decoy FDR filtering to score-ranked top hits.

    Each hit is classified as intra-protein, inter-protein, target-decoy
    or decoy-decoy (a protein whose id contains ``decoy_string`` is a
    decoy).  At each rank the FDR is estimated as (TD - DD) / targets,
    separately for intra and inter cross-links.  When ``is_unique`` is
    true the cumulative counts are over unique peptide pairs rather than
    PSMs.  Hits up to the deepest rank with FDR <= ``cutoff`` are kept,
    written to 'intra<cutoff>' / 'inter<cutoff>' TSV files, and returned
    as [intra, unique_intra, inter, unique_inter].
    """
    if cutoff < 0 or cutoff > 1:
        print 'fdr cutoff should be greater than 0.0 and less than 1.0!'
        sys.exit(1)
    # rank by descending score (index 4 of each hit record)
    top_hits = sorted(top_hits, key = lambda x : x[4], reverse = True)
    intra_cum_count = []
    inter_cum_count = []
    tardec_cum_count = []
    decdec_cum_count = []
    intra_count = 0
    inter_count = 0
    tardec_count = 0
    decdec_count = 0
    unique_intra_fdr = set()
    unique_inter_fdr = set()
    unique_tardec_fdr = set()
    unique_decdec_fdr = set()
    xl_type = []
    # classify every hit and accumulate cumulative counts per rank
    for i in range(len(top_hits)):
        pro1 = top_hits[i][2][0]
        pro2 = top_hits[i][2][1]
        is_tar = [[], []]
        is_dec = [[], []]
        pep_str = [top_hits[i][0][0], top_hits[i][0][1]]
        # pep_str = [top_hits[i][0][0], top_hits[i][0][1], str(top_hits[i][3])]
        pep_str = '_'.join(pep_str)
        # a peptide counts as target if ANY of its proteins is a target
        for part in pro1:
            if decoy_string in part:
                is_dec[0].append(True)
                is_tar[0].append(False)
            else:
                is_dec[0].append(False)
                is_tar[0].append(True)
        for part in pro2:
            if decoy_string in part:
                is_dec[1].append(True)
                is_tar[1].append(False)
            else:
                is_dec[1].append(False)
                is_tar[1].append(True)
        if any(is_tar[0]) and any(is_tar[1]):
            # both peptides hit targets: intra if they share a protein
            if len(set(pro1).intersection(set(pro2))) > 0:
                if is_unique == False:
                    intra_count += 1
                else:
                    unique_intra_fdr.add(pep_str)
                    intra_count = len(unique_intra_fdr)
                xl = 'intraxlink'
            else:
                if is_unique == False:
                    inter_count += 1
                else:
                    unique_inter_fdr.add(pep_str)
                    inter_count = len(unique_inter_fdr)
                xl = 'interxlink'
        elif (any(is_tar[0]) and all(is_dec[1])) or (all(is_dec[0]) and any(is_tar[1])):
            if is_unique == False:
                tardec_count += 1
            else:
                unique_tardec_fdr.add(pep_str)
                tardec_count = len(unique_tardec_fdr)
            xl = 'target-decoy'
        elif all(is_dec[0]) and all(is_dec[1]):
            if is_unique == False:
                decdec_count += 1
            else:
                unique_decdec_fdr.add(pep_str)
                decdec_count = len(unique_decdec_fdr)
            xl = 'decoy-decoy'
        else:
            print 'execution will terminate!'
            sys.exit(1)
        intra_cum_count.append(intra_count)
        inter_cum_count.append(inter_count)
        tardec_cum_count.append(tardec_count)
        decdec_cum_count.append(decdec_count)
        xl_type.append(xl)
    # tmp = enumerate(xl_type)
    # tmp = filter(lambda x : x[1] == 'target-decoy' or x[1] == 'decoy-decoy', tmp)
    # print tmp[0][0]
    # per-rank FDR estimates; sys.maxint (python 2 only) marks ranks
    # with no target hits yet
    fdr_intra = []
    for i in range(len(top_hits)):
        if intra_cum_count[i] != 0:
            fdr = float(tardec_cum_count[i] - decdec_cum_count[i]) / intra_cum_count[i]
            fdr_intra.append([fdr, i])
        else:
            fdr_intra.append([float(sys.maxint), i])
    fdr_inter = []
    for i in range(len(top_hits)):
        if inter_cum_count[i] != 0:
            fdr = float(tardec_cum_count[i] - decdec_cum_count[i]) / inter_cum_count[i]
            fdr_inter.append([fdr, i])
        else:
            fdr_inter.append([float(sys.maxint), i])
    # pickle.dump([fdr_intra, fdr_inter], file('save.pickle', 'w'))
    fdr_intra = filter(lambda x : x[0] <= cutoff, fdr_intra)
    fdr_inter = filter(lambda x : x[0] <= cutoff, fdr_inter)
    # NOTE(review): any(fdr_intra) < 0 compares a bool to 0 and can
    # never be True, so this warning never fires; probably meant
    # any(x[0] < 0 for x in fdr_intra) (likewise for fdr_inter).
    if any(fdr_intra) < 0 or any(fdr_inter) < 0:
        print 'warning: negative fdr value'
    # deepest rank that still satisfies the cutoff (-1 when none do)
    max_index_intra = fdr_intra[-1][1] if len(fdr_intra) > 0 else -1
    max_index_inter = fdr_inter[-1][1] if len(fdr_inter) > 0 else -1
    intra = []
    for i in range(len(top_hits)):
        if xl_type[i] == 'intraxlink' and i <= max_index_intra:
            intra.append(top_hits[i])
    inter = []
    for i in range(len(top_hits)):
        if xl_type[i] == 'interxlink' and i <= max_index_inter:
            inter.append(top_hits[i])
    print '#intra = %d, #TD = %d, #DD = %d' % (intra_cum_count[max_index_intra], tardec_cum_count[max_index_intra], decdec_cum_count[max_index_intra])
    print '#inter = %d, #TD = %d, #DD = %d' % (inter_cum_count[max_index_inter], tardec_cum_count[max_index_inter], decdec_cum_count[max_index_inter])
    # write the accepted intra-protein hits
    unique_intra = set()
    f = open('intra' + str(cutoff), 'w')
    for i in range(len(intra)):
        pep = [intra[i][0][0], intra[i][0][1]]
        pro = [','.join(intra[i][2][0]), ','.join(intra[i][2][1])]
        pos = [intra[i][1][0], intra[i][1][1]]
        score = intra[i][4]
        ch = intra[i][3]
        scan = intra[i][-1]
        f.write('%d\t%s\t%s\t%d\t%d\t%s\t%s\t%d\t%f\t%s\n' % (i + 1, pep[0], pep[1], pos[0] + 1, pos[1] + 1, pro[0], pro[1], ch, score, scan))
        unique_intra.add('_'.join(pep))
    f.close()
    # write the accepted inter-protein hits
    unique_inter = set()
    f = open('inter' + str(cutoff), 'w')
    for i in range(len(inter)):
        pep = [inter[i][0][0], inter[i][0][1]]
        pro = [','.join(inter[i][2][0]), ','.join(inter[i][2][1])]
        pos = [inter[i][1][0], inter[i][1][1]]
        score = inter[i][4]
        ch = inter[i][3]
        scan = inter[i][-1]
        f.write('%d\t%s\t%s\t%d\t%d\t%s\t%s\t%d\t%f\t%s\n' % (i + 1, pep[0], pep[1], pos[0] + 1, pos[1] + 1, pro[0], pro[1], ch, score, scan))
        unique_inter.add('_'.join(pep))
    f.close()
    return [intra, unique_intra, inter, unique_inter]
def get_matches_from_tophits(tophit, param, mass, spec_dict):
    """
    Rebuild a Match object (plus an annotation file name) from one
    top-hit record [pep, pos, pro, ch, score, alpha, beta, title].

    Lower-case residues in the peptide strings are re-interpreted as
    variable modifications of ``param['mod_mass']``.
    """
    pep1 = tophit[0][0]
    pep2 = tophit[0][1]
    pro1 = tophit[2][0]
    pro2 = tophit[2][1]
    pos1 = tophit[1][0]
    pos2 = tophit[1][1]
    ch = tophit[3]
    title = tophit[7]
    filename = pep1 + '_' + pep2 + '_' + str(pos1) + '_' + str(pos2) + '_' + str(ch) + '_' + title + '.annotation'
    # recover variable modifications from lowercased residues
    modif = dict(position=[], delta_mass=[])
    for j in range(len(pep1)):
        if pep1[j].islower():
            modif['position'].append(j)
            modif['delta_mass'].append(param['mod_mass'])
    # last arg: link site 0 on a residue other than x_residue --
    # presumably flags an N-terminal cross-link; confirm against the
    # Peptide constructor.
    pep1 = Peptide(pep1, ', '.join(pro1), modif, mass, pos1 == 0 and pep1[0] != param['x_residue'])
    modif = dict(position=[], delta_mass=[])
    for j in range(len(pep2)):
        if pep2[j].islower():
            modif['position'].append(j)
            modif['delta_mass'].append(param['mod_mass'])
    pep2 = Peptide(pep2, ', '.join(pro2), modif, mass, pos2 == 0 and pep2[0] != param['x_residue'])
    xl = XLink(pep1, pep2, [pos1, pos2], ch, mass, param)
    match = Match(spec_dict[title], xl, mass, param)
    return [match, filename]
| |
from datetime import datetime
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models import permalink, Count
from django.template.defaultfilters import truncatechars
from lxml import etree
import json
import logging
import os
import requests
import time
from urllib import urlencode, unquote
from UserDict import UserDict
from bs4 import BeautifulSoup
import rdflib
from rdflib import Graph
from rdflib.namespace import RDF, Namespace
from eulfedora.models import Relation, ReverseRelation, \
FileDatastream, XmlDatastream, DatastreamObject
from eulfedora.rdfns import relsext
from eulxml import xmlmap
from eulxml.xmlmap import teimap
from piffle import iiif
from readux import __version__
from readux.annotations.models import Annotation
from readux.books import abbyyocr, tei
from readux.fedora import DigitalObject
from readux.collection.models import Collection
from readux.utils import solr_interface, absolutize_url
logger = logging.getLogger(__name__)
BIBO = rdflib.Namespace('http://purl.org/ontology/bibo/')
DC = rdflib.Namespace('http://purl.org/dc/terms/')
REPOMGMT = Namespace(rdflib.URIRef('http://pid.emory.edu/ns/2011/repo-management/#'))
# local repo-management namespace also in use in the Keep
repomgmt_ns = {'eul-repomgmt': REPOMGMT}
class MinMarcxml(xmlmap.XmlObject):
    '''Minimal MARC :class:`~eulxml.xmlmap.XmlObject`; currently only includes
    fields needed for readux import and display.'''
    # MARC21 slim namespace; default namespace for the xpath fields below
    ROOT_NS = 'http://www.loc.gov/MARC21/slim'
    ROOT_NAMESPACES = {'marc21': ROOT_NS}
    #: ocm number in controlfield tag 0001
    ocm_number = xmlmap.StringField('marc21:record/marc21:controlfield[@tag="001"]')
    # NOTE: consider using pymarc for any marcxml handling instead
    # xml can be loaded via pymarc.parse_xml_to_array(filename)
class Book(DigitalObject):
    '''Fedora Book Object. Extends :class:`~eulfedora.models.DigitalObject`.
    .. Note::
        This is a bare-minimum model, only implemented enough to support
        indexing and access to volumes.
    '''
    #: content model for books
    BOOK_CONTENT_MODEL = 'info:fedora/emory-control:ScannedBook-1.0'
    CONTENT_MODELS = [BOOK_CONTENT_MODEL]
    #: marcxml :class:`~eulfedora.models.XMlDatastream` with the metadata
    #: record for all associated volumes; xml content will be instance of
    #: :class:`MinMarcxml`
    marcxml = XmlDatastream("MARCXML", "MARC21 metadata", MinMarcxml, defaults={
        'control_group': 'M',
        'versionable': True,
    })
    #: :class:`~readux.collection.models.Collection` this book belongs to,
    #: via fedora rels-ext isMemberOfcollection
    collection = Relation(relsext.isMemberOfCollection, type=Collection)
    #: default view for new object
    NEW_OBJECT_VIEW = 'books:volume'
    # NOTE: this is semi-bogus, since book-level records are currently
    # not displayed in readux
    @permalink
    def get_absolute_url(self):
        'Absolute url to view this object within the site'
        return (self.NEW_OBJECT_VIEW, [str(self.pid)])
    @property
    def best_description(self):
        '''Single best description to use when only one can be displayed (e.g.,
        for twitter or facebook integration). Currently selects the longest
        description from available dc:description values; returns None when
        the record has no descriptions.
        '''
        # eventually we should be able to update this to make use of the MARCXML
        descriptions = list(self.dc.content.description_list)
        if descriptions:
            # max(key=len) keeps the first longest entry (same result as
            # sorted(..., reverse=True)[0], but a single O(n) pass)
            return max(descriptions, key=len)
    @staticmethod
    def pids_by_label(label):
        '''Search Books by label and return a list of matching pids.'''
        solr = solr_interface()
        q = solr.query(content_model=Book.BOOK_CONTENT_MODEL,
                       label=label).field_limit('pid')
        return [result['pid'] for result in q]
# NOTE: Image and Page defined before Volume to allow referencing in
# Volume relation definitions
class IIIFImage(iiif.IIIFImageClient):
    '''Subclass of :class:`readux.books.iiif.IIIFImageClient`, for generating
    IIIF Image URIs for page images in Fedora.'''
    api_endpoint = settings.IIIF_API_ENDPOINT
    image_id_prefix = getattr(settings, 'IIIF_ID_PREFIX', '')
    image_id_suffix = getattr(settings, 'IIIF_ID_SUFFIX', '')
    #: fedora pid of the image object this client references
    pid = None
    #: name of the image's long edge ('height' or 'width')
    long_side = 'height'

    def __init__(self, *args, **kwargs):
        '''Extend base init to support an optional ``pid`` keyword argument;
        all other arguments are passed through to the base class.'''
        # pop pid from kwargs so the base class never sees an unknown kwarg
        pid = kwargs.pop('pid', None)
        # FIX: positional args were previously accepted but silently
        # discarded; forward them to the base class as well
        super(IIIFImage, self).__init__(*args, **kwargs)
        if pid is not None:
            self.pid = pid

    def get_copy(self):
        'Extend base copy method to preserve :attr:`pid` on the new copy.'
        copy = super(IIIFImage, self).get_copy()
        copy.pid = self.pid
        return copy

    def get_image_id(self):
        'image id, based on fedora pid, configured prefix, and optional suffix'
        return ''.join([self.image_id_prefix, self.pid, self.image_id_suffix])

    # NOTE: using long edge instead of specifying both with exact
    # results in cleaner urls/filenames (no !), and more reliable result
    # depending on IIIF implementation

    def thumbnail(self):
        'default thumbnail: 300px on the long edge'
        return self.size(**{self.long_side: 300}).format('png')

    def mini_thumbnail(self):
        'mini thumbnail: 100px on the long edge'
        return self.size(**{self.long_side: 100}).format('png')

    #: long edge size for single page display
    SINGLE_PAGE_SIZE = 1000

    def page_size(self):
        'page size for display: :attr:`SINGLE_PAGE_SIZE` on the long edge'
        return self.size(**{self.long_side: self.SINGLE_PAGE_SIZE})
class Image(DigitalObject):
    ''':class:`~eulfedora.models.DigitalObject` for image content,
    objects with an Image-1.0 content model and Fedora services for image
    preview and manipulation.'''
    #: image content model
    IMAGE_CONTENT_MODEL = 'info:fedora/emory-control:Image-1.0'
    CONTENT_MODELS = [IMAGE_CONTENT_MODEL]
    #: fedora service definition for image manipulation (djatoka)
    IMAGE_SERVICE = 'emory-control:DjatokaImageService'

    content_types = ('image/jpeg', 'image/jp2', 'image/gif', 'image/bmp',
                     'image/png', 'image/tiff')
    'supported content types (mimetypes) for image datastream'

    image = FileDatastream("source-image", "Master image", defaults={
        'mimetype': 'image/tiff',
        # FIXME: versioned? checksum?
    })
    ':class:`~eulfedora.models.FileDatastream` with image content'

    def __init__(self, *args, **kwargs):
        # no extra initialization; delegates straight to DigitalObject
        # NOTE(review): this override is a no-op and could be removed
        super(Image, self).__init__(*args, **kwargs)

    # cached :class:`IIIFImage` client, populated on first access to iiif
    _iiif = None

    @property
    def iiif(self):
        'Access to :class:`IIIFImage` for this pid'
        # since initializing iiif requires loris call for image metadata,
        # only initialize on demand
        if self._iiif is None:
            # NOTE: _iiif must be assigned *before* consulting width/height:
            # those properties call image_metadata, which re-enters this
            # property via self.iiif.info(); assigning first means the
            # re-entrant call returns immediately instead of recursing
            self._iiif = IIIFImage(pid=self.pid)
            if self.width > self.height:
                self._iiif.long_side = 'width'
        return self._iiif

    # cached IIIF info response (parsed json), fetched on first access
    _image_metadata = None

    @property
    def image_metadata(self):
        '''Image metadata as returned by IIIF service; None if the
        metadata request failed (error is logged, not raised).'''
        if self._image_metadata is None:
            response = requests.get(self.iiif.info())
            if response.status_code == requests.codes.ok:
                self._image_metadata = response.json()
            else:
                logger.warn('Error retrieving image metadata: %s', response)
        return self._image_metadata

    # expose width & height from image metadata as properties
    @property
    def width(self):
        '''Width of :attr:`image` datastream, according to
        :attr:`image_metadata`; None when metadata is unavailable.'''
        if self.image_metadata:
            return int(self.image_metadata['width'])

    @property
    def height(self):
        '''Height of :attr:`image` datastream, according to
        :attr:`image_metadata`; None when metadata is unavailable.'''
        if self.image_metadata:
            return int(self.image_metadata['height'])
class Page(Image):
    '''Page object with common functionality for all versions of
    ScannedPage content.'''
    NEW_OBJECT_VIEW = 'books:page'
    #: pattern for retrieving page variants 1.0 or 1.1 from solr
    PAGE_CMODEL_PATTERN = 'info:fedora/emory-control:ScannedPage-1.?'

    #: xml datastream for a tei facsimile version of this page;
    #: unversioned because generated from the mets or abbyy ocr
    # NOTE: this attribute shadows the ``tei`` module name within the class
    # body; method bodies still resolve ``tei`` to the module (global scope)
    tei = XmlDatastream('tei', 'TEI Facsimile for page content', tei.Facsimile, defaults={
        'control_group': 'M',
        'versionable': False,
    })
    #: page order property stored in rels-ext, for sorting pages in
    #: volume order
    page_order = Relation(REPOMGMT.pageOrder,
        ns_prefix=repomgmt_ns, rdf_type=rdflib.XSD.int)
    #: :class:`Volume` this page is a part of, via `isConstituentOf` relation
    # NOTE: can't set type as Volume here because it is not yet defined
    volume = Relation(relsext.isConstituentOf, type=DigitalObject)

    #: path to xsl for generating TEI facsimile from mets/alto ocr or
    #: abbyy ocr xml
    ocr_to_teifacsimile_xsl = os.path.join(settings.BASE_DIR, 'readux',
        'books', 'ocr_to_teifacsimile.xsl')
    #: path to xsl for generating ids for mets/alto or abbyy ocr xml
    ocr_add_ids_xsl = os.path.join(settings.BASE_DIR, 'readux',
        'books', 'ocr_add_ids.xsl')

    # NOTE: it *should* be more efficient to load the xslt once, but it
    # results in malloc errors when python exits, so skip it for now
    # ocr_to_teifacsimile = xmlmap.load_xslt(filename=ocr_to_teifacsimile_xsl)

    @permalink
    def get_absolute_url(self):
        'Absolute url to view this object within the site'
        return (self.NEW_OBJECT_VIEW, [self.volume.pid, str(self.pid)])

    @property
    def absolute_url(self):
        '''Generate an absolute url to the page view, for external services
        or for referencing in annotations.'''
        return absolutize_url(self.get_absolute_url())

    @property
    def display_label(self):
        '''Display label, for use in html titles, twitter/facebook metadata, etc.'''
        # NOTE: requires loading volume display_label and page_order (rels-ext)
        return '%s, p. %d' % (self.volume.display_label, self.page_order)

    def get_fulltext(self):
        '(to be implemented by version-specific subclass)'
        pass

    def has_fulltext(self):
        '(to be implemented by version-specific subclass)'
        pass

    def index_data(self):
        '''Extend the default :meth:`eulfedora.models.DigitalObject.index_data`
        method to include fields needed for Page objects.'''
        data = super(Page, self).index_data()
        if self.page_order is not None:
            data['page_order'] = self.page_order
        # if OCR text is available, index it as page fulltext, for searching & highlighting
        if self.has_fulltext():
            data['page_text'] = self.get_fulltext()
        return data

    @property
    def has_requisite_content_models(self):
        ''':type: bool
        True when the current object has the expected content models
        for one of the supported Page variants.'''
        # extending default implementation because page object should include
        # image cmodel and either page 1.0 or page 1.1
        # NOTE: bitwise &/| on the boolean results; equivalent to and/or here
        return (self.has_model(Image.IMAGE_CONTENT_MODEL) & \
            (self.has_model(PageV1_0.PAGE_CONTENT_MODEL) | \
            self.has_model(PageV1_1.PAGE_CONTENT_MODEL)))

    @property
    def image_url(self):
        'Preliminary image url, for use in tei facsimile'
        # TODO: we probably want to use some version of the ARK here
        # return unicode(self.iiif)
        # use the readux url, rather than exposing IIIF url directly
        return absolutize_url(reverse('books:page-image',
            kwargs={'vol_pid': self.volume.pid, 'pid': self.pid,
                'mode': 'fullsize'}))

    @property
    def tei_options(self):
        'Parameters for use in XSLT when generating page-level TEI facsimile'
        # construct brief bibliographic information for use in sourceDesc/bibl
        src_info = ''
        # creator is a list, if we have any author information
        if self.volume.creator:
            src_info = ', '.join([c.rstrip('.') for c in self.volume.creator]) + '. '
        src_info += '%s, %s.' % (self.volume.display_label, self.volume.date)
        return {
            # 'graphic_url': self.image_url,
            'graphic_url': reverse('books:page-image',
                kwargs={'vol_pid': self.volume.pid, 'pid': self.pid, 'mode': 'fs'}),
            'title': self.display_label,
            'distributor': settings.TEI_DISTRIBUTOR,
            'source_bibl': src_info,
            'page_number': str(self.page_order)
        }

    def annotations(self):
        '''Find annotations for this page.
        Returns a queryset that can be filtered further, e.g.
        annotations created by a particular user or visible
        to a user or group.
        '''
        # NOTE(review): filters on volume_uri using this *page's* absolute
        # url (cf. Volume.annotations) — confirm this matches how
        # annotations store their uris
        return Annotation.objects.filter(volume_uri=self.absolute_url)
class PageV1_0(Page):
    ''':class:`Page` subclass for emory-control:ScannedPage-1.0.
    ScannedPage-1.0 objects include a plain text OCR datastream and a
    word position file, in addition to common page image and metadata.
    '''
    # NOTE: eulfedora syncrepo only knows how to create content models for
    # DigitalObject classes with only one content model, so a fixture
    # cmodel object is provided in fixtures/initial_data
    #: Page 1.0 content model
    PAGE_CONTENT_MODEL = 'info:fedora/emory-control:ScannedPage-1.0'
    CONTENT_MODELS = [PAGE_CONTENT_MODEL, Image.IMAGE_CONTENT_MODEL]
    NEW_OBJECT_VIEW = 'books:page'

    #: ocr text
    text = FileDatastream('text', "page text", defaults={
        'mimetype': 'text/plain',
    })
    ''':class:`~eulfedora.models.FileDatastream` page text content
    generated by OCR'''

    #: word position information
    position = FileDatastream('position', "word positions", defaults={
        'mimetype': 'text/plain',
    })
    ''':class:`~eulfedora.models.FileDatastream` word position
    information generated by OCR'''

    def has_fulltext(self):
        'check if text datastream is available'
        return self.text.exists

    def get_fulltext(self):
        '''Sanitized OCR full-text, e.g., for indexing or text analysis'''
        if self.text.exists:
            # if content is a StreamIO, use getvalue to avoid utf-8 issues
            if hasattr(self.text.content, 'getvalue'):
                textval = self.text.content.getvalue().decode('utf-8', 'replace')
                # remove control characters, but replace whitespace control
                # characters (tab, line feed, carriage return) with a space
                # FIX: previously *all* control chars were deleted, which
                # ran words on adjacent lines together in the indexed text
                control_chars = dict.fromkeys(range(32))
                control_chars.update({
                    ord('\t'): u' ',
                    ord('\n'): u' ',
                    ord('\r'): u' ',
                })
                return textval.translate(control_chars)
            else:
                return self.text.content

    def generate_tei(self, ocrpage):
        '''Generate TEI facsimile for the current page; returns None when
        the OCR xml is invalid.'''
        try:
            result = ocrpage.xsl_transform(filename=self.ocr_to_teifacsimile_xsl,
                return_type=unicode, **self.tei_options)
            # returns _XSLTResultTree, which is not JSON serializable;
            return xmlmap.load_xmlobject_from_string(result, tei.Facsimile)
        except etree.XMLSyntaxError:
            logger.warn('OCR xml for %s is invalid', self.pid)

    def update_tei(self, ocrpage):
        '''Run :meth:`generate_tei`, check that the result is valid, and if so
        save the result as tei datastream content.

        :raises Exception: if TEI could not be generated or is not valid
            according to the configured schema
        '''
        pagetei = self.generate_tei(ocrpage)
        # generate_tei returns None when the ocr xml is invalid; fail with
        # a meaningful error instead of an AttributeError below
        if pagetei is None:
            raise Exception('Error generating TEI for %s; OCR xml is invalid' % self.pid)
        # check that TEI is valid
        if not pagetei.schema_valid():
            raise Exception('TEI is not valid according to configured schema')
        self.tei.content = pagetei
class PageV1_1(Page):
    '''Page subclass for emory-control:ScannedPage-1.1.
    ScannedPage-1.1 objects include a METS/ALTO OCR datastream, in
    addition to common page image and metadata.
    '''
    # NOTE: fixture cmodel provided in fixtures/initial_data
    #: Page 1.1 content model
    PAGE_CONTENT_MODEL = 'info:fedora/emory-control:ScannedPage-1.1'
    CONTENT_MODELS = [PAGE_CONTENT_MODEL, Image.IMAGE_CONTENT_MODEL]
    NEW_OBJECT_VIEW = 'books:page'

    #: xml ocr datastream for mets/alto content for this page;
    #: using text datastream id for consistency with ScannedPage-1.0
    ocr = XmlDatastream('text', "OCR XML for page content", xmlmap.XmlObject, defaults={
        'control_group': 'M',
        'versionable': True,
    })

    def has_fulltext(self):
        'check if ocr datastream is available'
        return self.ocr.exists

    def get_fulltext(self):
        '''Sanitized OCR full-text, e.g., for indexing or text analysis'''
        # for simplicity and speed, use beautifulsoup to pull text content from the
        # alto ocr xml.
        # explicitly request generic ds object to avoid attempting to parse as xml
        ds = self.getDatastreamObject(self.ocr.id, dsobj_type=DatastreamObject)
        xmlsoup = BeautifulSoup(ds.content)
        # text content is grouped by line (TextLine element), and then contained
        # in the "CONTENT" attribute of String elements.
        return '\n'.join(
            (' '.join(s['content'] for s in line.find_all('string')))
            for line in xmlsoup.find_all('textline'))

    def generate_tei(self):
        '''Generate TEI facsimile for the current page; returns None when
        the OCR xml is invalid.'''
        try:
            result = self.ocr.content.xsl_transform(filename=self.ocr_to_teifacsimile_xsl,
                return_type=unicode, **self.tei_options)
            # returns _XSLTResultTree, which is not JSON serializable;
            teidoc = xmlmap.load_xmlobject_from_string(result, tei.Facsimile)
            return teidoc
        except etree.XMLSyntaxError:
            logger.warn('OCR xml for %s is invalid', self.pid)

    def update_tei(self):
        '''Run :meth:`generate_tei`, check that the result is valid, and if so
        save the result as tei datastream content.

        :raises Exception: if TEI could not be generated or is not valid
            according to the configured schema
        '''
        pagetei = self.generate_tei()
        # generate_tei returns None when the ocr xml is invalid; fail with
        # a meaningful error instead of an AttributeError below
        # (consistent with PageV1_0.update_tei)
        if pagetei is None:
            raise Exception('Error generating TEI for %s; OCR xml is invalid' % self.pid)
        # check to make sure generated TEI is valid
        if not pagetei.schema_valid():
            raise Exception('TEI is not valid according to configured schema')
        # load as tei should maybe happen here instead of in generate
        self.tei.content = pagetei

    @property
    def ocr_has_ids(self):
        'Check if OCR currently includes xml:ids; None when no OCR exists'
        if self.ocr.exists:
            # xpath count() returns a float
            return self.ocr.content.node.xpath('count(//@xml:id)') > 0.0

    def add_ocr_ids(self, regenerate_ids=False):
        '''Update OCR xml with ids for pages, blocks, lines, etc.

        :param regenerate_ids: when True, existing ids are regenerated
        :returns: True on success, False if the OCR xml is invalid
        '''
        with open(self.ocr_add_ids_xsl) as xslfile:
            try:
                result = self.ocr.content.xsl_transform(filename=xslfile,
                    return_type=unicode, id_prefix='rdx_%s.' % self.noid,
                    regenerate_ids='true' if regenerate_ids else '')
                # set the result as ocr datastream content
                self.ocr.content = xmlmap.load_xmlobject_from_string(result)
                return True
            except etree.XMLSyntaxError:
                logger.warn('OCR xml for %s is invalid', self.pid)
                return False
class BaseVolume(object):
    '''Common functionality shared by :class:`Volume` and :class:`SolrVolume`.

    Expects the subclass to provide ``pid``, ``label``, ``language``,
    ``start_page`` and ``pdf_size`` attributes/properties.
    '''

    @property
    def control_key(self):
        'Control key for this Book title (e.g., ocm02872816)'
        # LSDI Volume object label is ocm#_vol, e.g. ocn460678076_V.0;
        # the control key is everything before the first underscore
        return self.label.partition('_')[0]

    @property
    def volume(self):
        'volume label for this Book (e.g., v.1)'
        # LSDI Volume object label is ocm#_vol, e.g. ocn460678076_V.0
        if not self.label:
            # no label: nothing to derive a volume from
            return None
        vol_part = self.label.partition('_')[2]
        # V.0 is the convention for a single-volume work; report no volume
        return '' if vol_part.lower() == 'v.0' else vol_part

    @property
    def noid(self):
        'short-form of pid (the portion after the pid-space prefix)'
        return self.pid.partition(':')[2]

    def fulltext_absolute_url(self):
        '''Generate an absolute url to the text view for this volume
        for use with external services such as voyant-tools.org'''
        return absolutize_url(reverse('books:text', kwargs={'pid': self.pid}))

    def voyant_url(self):
        '''Generate a url for sending the content of the current volume to Voyant
        for text analysis. Includes a parameter for the default English
        stopword list if the volume language is English.'''
        params = {
            'corpus': self.pid,
            'archive': self.fulltext_absolute_url(),
        }
        # if language is known to be english, set a default stopword list
        # NOTE: we could add this for other languages at some point
        if self.language and "eng" in self.language:
            params['stopList'] = 'stop.en.taporware.txt'
        return "http://voyant-tools.org/?%s" % urlencode(params)

    def pdf_url(self):
        '''Local PDF url, including starting page directive (#page=N) if start
        page is set.'''
        base_url = unquote(reverse('books:pdf', kwargs={'pid': self.pid}))
        if not self.start_page:
            return base_url
        return '%s#page=%d' % (base_url, self.start_page)

    @property
    def large_pdf(self):
        '''boolean indicating if this PDF should be considered large, based on a
        threshold configured in localsettings'''
        return self.pdf_size and self.pdf_size > settings.LARGE_PDF_THRESHOLD
class Volume(DigitalObject, BaseVolume):
    '''Fedora Volume object with common functionality for all Volume variants.
    Extends :class:`~eulfedora.models.DigitalObject` and :class:`BaseVolume`.
    See also :class:`VolumeV1_0` and :class:`VolumeV1_1`.
    '''
    #: content model pattern for finding supported variant volumes
    VOLUME_CMODEL_PATTERN = "info:fedora/emory-control:ScannedVolume-1.?"

    # inherits DC, RELS-EXT
    # related to parent Book object via isConstituentOf

    #: pdf :class:`~eulfedora.models.FileDatastream` with the content
    #: of the Volume (page images with OCR text behind)
    pdf = FileDatastream("PDF", "PDF datastream", defaults={
        'mimetype': 'application/pdf',
        'versionable': True,
    })

    #: :class:`Page` that is the primary image for this volume (e.g., cover image)
    primary_image = Relation(REPOMGMT.hasPrimaryImage, Page, repomgmt_ns)
    #: list of :class:`Page` for all the pages in this book, if available
    pages = ReverseRelation(relsext.isConstituentOf, Page, multiple=True,
        order_by=REPOMGMT.pageOrder)
    #: :class:`Book` this volume is associated with, via isConstituentOf
    book = Relation(relsext.isConstituentOf, type=Book)
    #: start page - 1-based index of the first non-blank page in the PDF
    start_page = Relation(REPOMGMT.startPage,
        ns_prefix=repomgmt_ns, rdf_type=rdflib.XSD.int)

    @property
    def is_a_volume(self):
        ''':type: bool
        True when the current object has the expected content models
        for one of the supported Volume variants.'''
        # NOTE: *not* extending has_requisite_content_models because
        # volume subclasses still need access to the default implementation
        return self.has_model(VolumeV1_0.VOLUME_CONTENT_MODEL) | \
            self.has_model(VolumeV1_1.VOLUME_CONTENT_MODEL)

    @permalink
    def get_absolute_url(self):
        'Absolute url to view this object within the site'
        return ('books:volume', [str(self.pid)])

    @property
    def absolute_url(self):
        '''Generate an absolute url to the page view, for external services
        or for referencing in annotations.'''
        return absolutize_url(self.get_absolute_url())

    # def get_pdf_url(self):
    #     return reverse('books:pdf', kwargs={'pid': self.pid})

    @property
    def page_count(self):
        'Number of pages associated with this volume, based on RELS-EXT isConstituentOf'
        if self.pages:
            return len(self.pages)
        # If no pages are ingested as self.pages is None, return 0
        return 0

    @property
    def has_pages(self):
        'boolean flag indicating if this volume has pages loaded'
        if self.pages:
            # pages exist and more than just the cover / primary image
            return len(self.pages) > 1
        else:
            return False

    @property
    def has_tei(self):
        'boolean flag indicating if TEI has been generated for volume pages'
        if self.pages:
            # NOTE: this is only checking tei in the first few pages;
            # If TEI is incompletely loaded, this could report incorrectly.
            # Checks multiple pages because blank pages might have no TEI.
            for p in self.pages[:10]:
                if p.tei.exists:
                    return True
        return False

    @property
    def title(self):
        'Volume title from dublin core, with trailing whitespace and "/" removed'
        return self.dc.content.title.rstrip().rstrip('/')

    @property
    def display_label(self):
        '''Display label, for use in html titles, twitter/facebook metadata, etc.
        Truncates the title to the first 150 characters, and includes volume information
        if any.
        '''
        vol = ' [%s]' % self.volume if self.volume else ''
        return '%s%s' % (truncatechars(self.title.rstrip(), 150), vol)

    @property
    def title_part1(self):
        'Volume title, up to the first 150 characters'
        return self.title[:150]

    @property
    def title_part2(self):
        'Volume title after the first 150 characters'
        return self.title[150:].strip()

    @property
    def creator(self):
        'list of creators, from dublin core metadata'
        # NOTE: creator info is taken from the parent book, not the volume
        return self.book.dc.content.creator_list

    @property
    def date(self):
        '''List of dates, from volume metadata or book metadata if no
        dates are present in the volume metadata. Becuase some volumes
        include the digitization date (as the date of the electronic edition),
        when there are multiple dates only the oldest is returned.'''
        # some books (at least) include the digitization date (date of the
        # electronic ediction). If there are multiple dates, only include the oldest.
        # if dates are present in current volume dc, use those
        if self.dc.content.date_list:
            dates = self.dc.content.date_list
        # otherwise, use dates from book dc
        else:
            dates = self.book.dc.content.date_list
        if len(dates) > 1:
            # strip surrounding square brackets before sorting, so that
            # bracketed dates sort with unbracketed ones
            date = sorted([d.strip('[]') for d in dates])[0]
            return [date]
        else:
            # convert eulxml list to normal list so it can be serialized via json
            return list(dates)

    @property
    def digital_ed_date(self):
        '''Date of the digital edition. Some volumes include the digitization
        date; if there are multiple dates, return the newest one if it is
        after 2000.'''
        # some books (at least) include the digitization date (date of the
        # electronic ediction). If there are multiple dates, return the newest
        # it is after 2000
        # if dates are present in current volume dc, use those
        if self.dc.content.date_list:
            dates = self.dc.content.date_list
        # otherwise, use dates from book dc
        else:
            dates = self.book.dc.content.date_list
        if dates:
            sorted_dates = sorted([d.strip('[]') for d in dates])
            # string comparison: keep only dates lexically after '2000'
            sorted_dates = [d for d in sorted_dates if d > '2000']
            if sorted_dates:
                return sorted_dates[-1]

    def index_data(self):
        '''Extend the default
        :meth:`eulfedora.models.DigitalObject.index_data`
        method to include additional fields specific to Volumes.'''
        data = super(Volume, self).index_data()
        if self.fulltext_available:
            data['fulltext'] = self.get_fulltext()
        # pulling text content from the PDF is significantly slower;
        # - only pdf if ocr xml is not available or errored
        # NOTE: pdf to text seems to be hanging; disabling for now
        # if 'fulltext' not in data:
        #     data['fulltext'] = pdf_to_text(self.pdf.content)
        # index primary image pid to construct urls for cover image, first page
        if self.primary_image:
            data['hasPrimaryImage'] = self.primary_image.pid
        # index pdf start page so we can link to correct page from search results
        if self.start_page:
            data['start_page'] = self.start_page
        # index collection info
        data['collection_id'] = self.book.collection.pid
        data['collection_label'] = self.book.collection.short_label
        # book this volume is part of, for access to book-level metadata
        data['book_id'] = self.book.pid
        # add book-level metadata to text for keyword searching purposes
        # (preliminary; may want broken out for facets/fielded searching;
        # would be better to index on book object and use joins for that if possible...)
        # book_dc = self.book.dc.content
        # convert xmlmap lists to straight lists for json output
        data['creator'] = list(self.book.dc.content.creator_list)
        # some books (at least) include the digitization date (date of the
        # electronic ediction). Use local date property that returns only the oldest
        data['date'] = self.date
        if self.book.dc.content.subject_list:
            data['subject'] = list(self.book.dc.content.subject_list)
        # number of pages loaded for this book, to allow determining if page view is available
        data['page_count'] = self.page_count
        # size of the pdf
        if self.pdf and self.pdf.size:
            data['pdf_size'] = self.pdf.size
        return data

    #: supported unAPI formats, for use with :meth:`readux.books.views.unapi`
    unapi_formats = {
        'rdf_dc': {'type': 'application/rdf+xml', 'method': 'rdf_dc'}
    }

    @property
    def ark_uri(self):
        'fully-resolvable form of ARK URI'
        # first dc identifier containing 'ark:'; None if there is none
        for identifier in self.dc.content.identifier_list:
            if 'ark:' in identifier:
                return identifier

    def rdf_dc_graph(self):
        '''Generate an :class:`rdflib.Graph` of RDF Dublin Core for use
        with unAPI and for harvest by Zotero. Content is based on
        Volume Dublin Core content as well as Dublin Core information
        from the parent :class:`Book` object'''
        g = Graph()
        g.bind('dc', DC)
        g.bind('bibo', BIBO)
        # use ARK URI as identifier
        u = rdflib.URIRef(self.ark_uri)
        g.add((u, RDF.type, BIBO.book))
        # add information from dublin core
        dc = self.dc.content
        g.add((u, DC.title, rdflib.Literal(dc.title)))
        if self.volume:
            g.add((u, BIBO.volume, rdflib.Literal(self.volume)))
        g.add((u, DC.identifier, u))
        g.add((u, BIBO.uri, u))
        # creator info seems to be at book level, rather than volume;
        # fall back to book-level creators when the volume has none
        for creator in dc.creator_list:
            g.add((u, DC.creator, rdflib.Literal(creator)))
        if not dc.creator_list:
            for creator in self.book.dc.content.creator_list:
                g.add((u, DC.creator, rdflib.Literal(creator)))
        # same for publisher
        if dc.publisher:
            g.add((u, DC.publisher, rdflib.Literal(dc.publisher)))
        elif self.book.dc.content.publisher:
            g.add((u, DC.publisher, rdflib.Literal(self.book.dc.content.publisher)))
        # seems to be also the case for date
        # NOTE: we have multiple dates; seems to be one for original edition
        # and one for the digitial edition. Zotero only picks up one (randomly?);
        # do we want to privilege the earlier date ?
        for d in self.date:
            g.add((u, DC.date, rdflib.Literal(d)))
        for description in dc.description_list:
            g.add((u, DC.description, rdflib.Literal(description)))
        if not dc.description_list:
            for description in self.book.dc.content.description_list:
                g.add((u, DC.description, rdflib.Literal(description)))
        if dc.format:
            # NOTE: can't use DC.format because namespaces have a format method
            g.add((u, DC['format'], rdflib.Literal(dc.format)))
        if dc.language:
            g.add((u, DC.language, rdflib.Literal(dc.language)))
        if dc.rights:
            g.add((u, DC.rights, rdflib.Literal(dc.rights)))
        for rel in dc.relation_list:
            # NOTE: tried adding PDF as RDF.value, but Zotero doesn't pick it up as an attachment
            g.add((u, DC.relation, rdflib.URIRef(rel)))
        return g

    def rdf_dc(self):
        'Serialized form of :meth:`rdf_dc_graph` for use with unAPI'
        return self.rdf_dc_graph().serialize()

    def find_solr_pages(self):
        '''Find pages for the current volume, sorted by page order; returns solr query
        for any further filtering or pagination.'''
        solr = solr_interface()
        # find all pages that belong to the same volume and sort by page order
        # - filtering separately should allow solr to cache filtered result sets more efficiently
        return solr.query(isConstituentOf=self.uri) \
            .filter(content_model=Page.PAGE_CMODEL_PATTERN) \
            .filter(state='A') \
            .sort_by('page_order') \
            .field_limit(['pid', 'page_order']) \
            .results_as(SolrPage)
        # only return fields we actually need (pid, page_order)
        # TODO: add volume id for generating urls ?
        # solrquery = solrquery.field_limit(['pid', 'page_order', 'isConstituentOf'])  # ??
        # return so it can be filtered, paginated as needed

    @staticmethod
    def volumes_with_pages():
        '''Search for Volumes with pages loaded and return a list of matching pids.'''
        solr = solr_interface()
        # searching on page count > 1 because volumes with cover only
        # have page count of 1
        q = solr.query(content_model=Volume.VOLUME_CMODEL_PATTERN,
            page_count__gt=1).field_limit('pid')
        return [result['pid'] for result in q]

    @property
    def pdf_size(self):
        'size of the pdf, in bytes'
        # exposing as a property here for consistency with SolrVolume result
        return self.pdf.size

    @property
    def language(self):
        'language of the content'
        # exposing as a property here for consistency with SolrVolume result
        return self.dc.content.language

    def annotations(self):
        '''Find annotations for any page in this volume.
        Returns a queryset that can be filtered further, e.g.
        annotations created by a particular user or visible
        to a particular user or group.
        '''
        # NOTE: should match on full url *with* domain name
        return Annotation.objects.filter(volume_uri=self.absolute_url)

    def page_annotation_count(self, user=None):
        '''Generate a dictionary with a count of annotations for each
        unique page uri within the current volume. Filtered by
        *visibility* to user, if specified.'''
        # aggregate anotations by unique uri and return a count
        # of the number of annotations for each uri
        notes = self.annotations()
        if user is not None:
            notes = notes.visible_to(user=user)
        notes = notes.values('uri').distinct() \
            .annotate(count=Count('uri')) \
            .values('uri', 'count')
        # queryset returns a list of dict; convert to a dict for easy lookup
        return dict([(n['uri'], n['count']) for n in notes])

    def annotation_count(self, user=None):
        '''Total number of annotations for this volume; filtered by annotations
        *visible* to a particular user, if specified.'''
        notes = self.annotations()
        if user is not None:
            notes = notes.visible_to(user)
        return notes.count()

    @classmethod
    def volume_annotation_count(cls, user=None):
        '''Generate a dictionary with a count of annotations for each
        unique volume uri. Filtered by *visibility* to user if specified.'''
        # aggregate anotations by unique uri and return a count
        # of the number of annotations for each uri
        notes = Annotation.objects.all()
        if user is not None:
            notes = notes.visible_to(user)
        notes = notes.values('volume_uri').distinct() \
            .annotate(count=Count('volume_uri')) \
            .values('volume_uri', 'count')
        # queryset returns a list of dict; convert to a dict for easy lookup
        return dict([(n['volume_uri'], n['count']) for n in notes])

    def generate_volume_tei(self):
        '''Generate TEI for a volume by combining the TEI for
        all pages.  Returns None when no page TEI is available;
        results are cached since generation is expensive.'''
        if not self.has_tei:
            return
        # store volume TEI in django cache, because generating TEI
        # for a large volume is expensive (fedora api calls for each page)
        cache_key = '%s-tei' % self.pid
        vol_tei_xml = cache.get(cache_key, None)
        if vol_tei_xml:
            logger.debug('Loading volume TEI for %s from cache' % self.pid)
            vol_tei = xmlmap.load_xmlobject_from_string(vol_tei_xml,
                tei.Facsimile)
        # if tei was not in the cache, generate it
        if vol_tei_xml is None:
            start = time.time()
            vol_tei = tei.Facsimile()
            # populate header information
            vol_tei.create_header()
            vol_tei.header.title = self.title
            # publication statement
            vol_tei.distributor = settings.TEI_DISTRIBUTOR
            vol_tei.pubstmt.distributor_readux = 'Readux'
            vol_tei.pubstmt.desc = 'TEI facsimile generated by Readux version %s' % __version__
            # source description - original publication
            vol_tei.create_original_source()
            vol_tei.original_source.title = self.title
            # original publication date
            if self.date:
                vol_tei.original_source.date = self.date[0]
            # if authors are set, it should be a list
            if self.creator:
                vol_tei.original_source.authors = self.creator
            # source description - digital edition
            vol_tei.create_digital_source()
            vol_tei.digital_source.title = '%s, digital edition' % self.title
            vol_tei.digital_source.date = self.digital_ed_date
            # FIXME: ideally, these would be ARKs, but ARKs for readux volume
            # content do not yet resolve to Readux urls
            vol_tei.digital_source.url = absolutize_url(self.get_absolute_url())
            vol_tei.digital_source.pdf_url = absolutize_url(self.pdf_url())
            # loop through pages and add tei content
            # for page in self.pages[:10]:  # FIXME: temporary, for testing/speed
            page_order = 1
            for page in self.pages:
                if page.tei.exists and page.tei.content.page:
                    # include facsimile page *only* from the tei for each page
                    # tei facsimile already includes a graphic url
                    teipage = page.tei.content.page
                    # add a reference from tei page to readux page
                    # pages should have ARKS; fall back to readux url if
                    # ark is not present (only expected to happen in dev)
                    teipage.href = page.ark_uri or absolutize_url(page.get_absolute_url())
                    # NOTE: generating ark_uri currently requires loading
                    # DC from fedora; could we generate reliably based on the pid?
                    # teipage.n = page.page_order
                    teipage.n = page_order
                    # NOTE: normally we would use page.page_order, but that
                    # requires an additional api call for each page
                    # to load the rels-ext, so use a local counter instead
                    # ensure graphic elements are present for image variants
                    # full size, page size, thumbnail, and deep zoom variants
                    # NOTE: graphic elements need to come immediately after
                    # surface and before zone; adding them before removing
                    # existing graphic element should place them correctly.
                    # mapping of types we want in the tei and
                    # corresponding mode to pass to the url
                    image_types = {
                        'full': 'fs',
                        'page': 'single-page',
                        'thumbnail': 'thumbnail',
                        'small-thumbnail': 'mini-thumbnail',
                        'json': 'info',
                    }
                    for image_type, mode in image_types.iteritems():
                        img_url = absolutize_url(
                            reverse('books:page-image', kwargs={
                                'vol_pid': self.pid, 'pid': page.pid,
                                'mode': mode}
                            ))
                        teipage.graphics.append(tei.Graphic(rend=image_type,
                            url=img_url))
                    # page tei should have an existing graphic reference
                    # remove it from our output
                    if teipage.graphics[0].rend is None:
                        del teipage.graphics[0]
                    vol_tei.page_list.append(teipage)
                # advance the counter for every page, so numbering tracks
                # physical page order even when a page has no TEI
                page_order += 1
            logger.info('Volume TEI for %s with %d pages generated in %.02fs' % \
                (self.pid, len(self.pages), time.time() - start))
        # update current date for either version (new or cached)
        # store current date (tei generation) in publication statement
        export_date = datetime.now()
        vol_tei.pubstmt.date = export_date
        vol_tei.pubstmt.date_normal = export_date
        # save current volume tei in django cache
        cache.set(cache_key, vol_tei.serialize(), 3000)
        return vol_tei
class VolumeV1_0(Volume):
    '''Fedora object for ScannedVolume-1.0. Extends :class:`Volume`.
    ScannedVolume-1.0 objects include an Abbyy FineReader OCR XML datastream
    with the OCR content for the entire volume.
    '''
    # NOTE(review): Python 2 code (uses the ``unicode`` builtin below).

    #: volume content model
    VOLUME_CONTENT_MODEL = 'info:fedora/emory-control:ScannedVolume-1.0'
    CONTENT_MODELS = [VOLUME_CONTENT_MODEL]
    # NEW_OBJECT_VIEW = 'books:book-pages'

    # inherits dc, rels-ext, pdf

    #: :class:`~eulfedora.models.XmlDatastream` for ABBYY
    #: FineReader OCR XML; content as :class:`AbbyyOCRXml`'''
    ocr = XmlDatastream("OCR", "ABBYY Finereader OCR XML", abbyyocr.Document, defaults={
        'control_group': 'M',
        'versionable': True,
    })

    #: path to xslt for transforming abbyoccr to plain text with some structure
    ocr_to_text_xsl = os.path.join(settings.BASE_DIR, 'readux', 'books', 'abbyocr-to-text.xsl')
    #: path to xsl for generating ids for mets/alto or abbyy ocr xml
    ocr_add_ids_xsl = os.path.join(settings.BASE_DIR, 'readux',
        'books', 'ocr_add_ids.xsl')

    # shortcuts for consistency with SolrVolume
    @property
    def fulltext_available(self):
        'check if ocr is available'
        return self.ocr.exists

    def get_fulltext(self):
        '''Return OCR full text (if available)'''
        # Transform the ABBYY OCR xml to plain text via XSLT.  If the
        # stored xml is invalid, fall back to a lenient parse so some
        # text is still returned rather than raising.
        if self.ocr.exists:
            with open(self.ocr_to_text_xsl) as xslfile:
                try:
                    transform = self.ocr.content.xsl_transform(filename=xslfile,
                        return_type=unicode)
                    # returns _XSLTResultTree, which is not JSON serializable;
                    # convert to unicode
                    return unicode(transform)
                except etree.XMLSyntaxError:
                    logger.warn('OCR xml for %s is invalid', self.pid)
                    # use beautifulsoup as fallback, since it can handle invalid xml
                    # explicitly request generic ds object to avoid attempting to parse as xml
                    ds = self.getDatastreamObject(self.ocr.id, dsobj_type=DatastreamObject)
                    xmlsoup = BeautifulSoup(ds.content)
                    # simple get text seems to generate reasonable text + whitespace
                    return xmlsoup.get_text()
        # NOTE(review): implicitly returns None when no OCR datastream exists.

    @property
    def ocr_has_ids(self):
        'Check if OCR currently includes xml:ids'
        # xpath count() returns a float; any id at all makes it > 0
        if self.ocr.exists:
            return self.ocr.content.node.xpath('count(//@xml:id)') > 0.0
        # NOTE(review): returns None (not False) when OCR is absent.

    def add_ocr_ids(self, regenerate_ids=False):
        'Update OCR xml with ids for pages, blocks, lines, etc'
        # Run the add-ids XSLT over the OCR content and replace the
        # datastream content with the result.  Returns True on success,
        # False when the stored OCR xml cannot be parsed.
        with open(self.ocr_add_ids_xsl) as xslfile:
            try:
                result = self.ocr.content.xsl_transform(filename=xslfile,
                    return_type=unicode, id_prefix='rdx_%s.' % self.noid,
                    regenerate_ids='true' if regenerate_ids else '')
                # set the result as ocr datastream content
                self.ocr.content = xmlmap.load_xmlobject_from_string(result,
                    abbyyocr.Document)
                return True
            except etree.XMLSyntaxError as err:
                logger.warn('OCR xml for %s is invalid: %s', self.pid, err)
                return False
class VolumeV1_1(Volume):
    '''ScannedVolume-1.1 variant of :class:`Volume`.

    Unlike ScannedVolume-1.0, this content model carries no volume-level
    OCR datastream: OCR lives on the individual ScannedPage-1.1 objects
    as page-level ALTO XML.
    '''
    #: volume content model
    VOLUME_CONTENT_MODEL = 'info:fedora/emory-control:ScannedVolume-1.1'
    CONTENT_MODELS = [VOLUME_CONTENT_MODEL]
    NEW_OBJECT_VIEW = 'books:volume'

    # inherits dc, rels-ext, pdf; no ocr datastream (ocr is per-page)

    @property
    def fulltext_available(self):
        # Text is only reachable through the pages, so full text is
        # available exactly when pages have been loaded for this volume.
        return self.has_pages

    def get_fulltext(self):
        '''Return OCR full text (if available), assembled from the
        page-level text indexed in Solr.'''
        solr_pages = self.find_solr_pages().field_limit(['page_text'])
        page_texts = [page['page_text'] for page in solr_pages
                      if 'page_text' in page]
        return '\n\n'.join(page_texts)
class SolrVolume(UserDict, BaseVolume):
    '''Dictionary-like wrapper (:class:`~UserDict.UserDict`) around a Solr
    result document for a volume.  Mixes in :class:`BaseVolume` for the
    common volume behavior derived from fields such as label.
    '''
    #: fields that should be returned via Solr to support list display needs
    necessary_fields = ['pid', 'title', 'label', 'language',
        'creator', 'date', 'hasPrimaryImage',
        'page_count', 'collection_id', 'collection_label',
        'pdf_size', 'start_page', 'created'
    ]

    def __init__(self, **kwargs):
        # sunburnt hands the Solr fields in as keyword arguments;
        # UserDict wants them as a single dict
        UserDict.__init__(self, kwargs)

    @property
    def label(self):
        'object label'
        return self.data.get('label')

    @property
    def pid(self):
        'object pid'
        return self.data.get('pid')

    @property
    def has_pages(self):
        'boolean indicator if a volume has pages loaded (not true for cover page only)'
        page_count = int(self.data.get('page_count', 0))
        return page_count > 1

    @property
    def language(self):
        'language of the content'
        # exposed as a property for generating the voyant url
        return self.get('language')

    _primary_image = None

    @property
    def primary_image(self):
        'Access to primary (cover) image analogous to :attr:`Volume.primary_image`'
        # Lazily build (and cache) a small dict so templates can use the
        # same vol.primary_image.pid access pattern as with Volume.
        if self._primary_image is None:
            try:
                pid = self.data['hasPrimaryImage']
            except KeyError:
                self._primary_image = {}
            else:
                self._primary_image = {'pid': pid, 'iiif': IIIFImage(pid=pid)}
        return self._primary_image

    @property
    def start_page(self):
        'start page within the pdf'
        return self.data.get('start_page')

    @property
    def pdf_size(self):
        'size of the pdf, as indexed in Solr'
        return self.data.get('pdf_size')
class SolrPage(UserDict):
    '''Dictionary-like wrapper (:class:`~UserDict.UserDict`) around a Solr
    result document for a single page.
    '''
    def __init__(self, **kwargs):
        # sunburnt hands the Solr fields in as keyword arguments;
        # store them as the underlying dict
        UserDict.__init__(self, kwargs)
        # IIIF image client for this page, keyed on the page pid
        self.iiif = IIIFImage(pid=self.pid)

    @property
    def pid(self):
        'object pid'
        return self.get('pid')

    def thumbnail_url(self):
        'IIIF thumbnail url'
        return self.iiif.thumbnail()
# hack: patch in volume as the related item type for pages
# (can't be done in page declaration due to order / volume primary image rel)
# NOTE(review): module-level side effect — runs at import time and mutates
# the relation descriptor so Page.volume resolves to the Volume class.
Page.volume.object_type = Volume
| |
"""
Dialogs that query users and verify the answer before accepting.
Query is the generic base class for a popup dialog.
The user must either enter a valid answer or close the dialog.
Entries are validated when <Return> is entered or [Ok] is clicked.
Entries are ignored when [Cancel] or [X] are clicked.
The 'return value' is .result set to either a valid answer or None.
Subclass SectionName gets a name for a new config file section.
Configdialog uses it for new highlight theme and keybinding set names.
Subclass ModuleName gets a name for File => Open Module.
Subclass HelpSource gets menu item and path for additions to Help menu.
"""
# Query and Section name result from splitting GetCfgSectionNameDialog
# of configSectionNameDialog.py (temporarily config_sec.py) into
# generic and specific parts. 3.6 only, July 2016.
# ModuleName.entry_ok came from editor.EditorWindow.load_module.
# HelpSource was extracted from configHelpSourceEdit.py (temporarily
# config_help.py), with darwin code moved from ok to path_ok.
import importlib
import os
import shlex
from sys import executable, platform # Platform is set for one test.
from tkinter import Toplevel, StringVar, BooleanVar, W, E, S
from tkinter.ttk import Frame, Button, Entry, Label, Checkbutton
from tkinter import filedialog
from tkinter.font import Font
class Query(Toplevel):
    """Base class for getting verified answer from a user.

    For this base class, accept any non-blank string.
    """
    def __init__(self, parent, title, message, *, text0='', used_names=None,
                 _htest=False, _utest=False):
        """Create modal popup, return when destroyed.

        Additional subclass init must be done before this unless
        _utest=True is passed to suppress wait_window().

        title - string, title of popup dialog
        message - string, informational message to display
        text0 - initial value for entry
        used_names - names already in use; None means none are in use
        _htest - bool, change box location when running htest
        _utest - bool, leave window hidden and not modal
        """
        self.parent = parent  # Needed for Font call.
        self.message = message
        self.text0 = text0
        # BUG FIX: 'used_names={}' was a mutable default argument; use a
        # None sentinel instead (behavior for all callers is unchanged).
        self.used_names = {} if used_names is None else used_names
        Toplevel.__init__(self, parent)
        self.withdraw()  # Hide while configuring, especially geometry.
        self.title(title)
        self.transient(parent)
        self.grab_set()  # Make the dialog modal.
        windowingsystem = self.tk.call('tk', 'windowingsystem')
        if windowingsystem == 'aqua':
            try:
                self.tk.call('::tk::unsupported::MacWindowStyle', 'style',
                             self._w, 'moveableModal', '')
            except Exception:
                # Cosmetic Mac-only styling; ignore if Tk refuses.
                # (Was a bare 'except:', which also swallowed
                # KeyboardInterrupt and SystemExit.)
                pass
            self.bind("<Command-.>", self.cancel)
        self.bind('<Key-Escape>', self.cancel)
        self.protocol("WM_DELETE_WINDOW", self.cancel)
        self.bind('<Key-Return>', self.ok)
        self.bind("<KP_Enter>", self.ok)
        self.create_widgets()
        self.update_idletasks()  # Need here for winfo_reqwidth below.
        self.geometry(  # Center dialog over parent (or below htest box).
            "+%d+%d" % (
                parent.winfo_rootx() +
                (parent.winfo_width()/2 - self.winfo_reqwidth()/2),
                parent.winfo_rooty() +
                ((parent.winfo_height()/2 - self.winfo_reqheight()/2)
                 if not _htest else 150)
            ))
        self.resizable(height=False, width=False)
        if not _utest:
            self.deiconify()  # Unhide now that geometry set.
            self.wait_window()

    def create_widgets(self, ok_text='OK'):  # Do not replace.
        """Create entry (rows 0-2), extra widgets, and buttons (row 99).

        Entry stuff on rows 0-2, spanning cols 0-2.
        Buttons on row 99, cols 1, 2.
        """
        # Bind to self the widgets needed for entry_ok or unittest.
        self.frame = frame = Frame(self, padding=10)
        frame.grid(column=0, row=0, sticky='news')
        frame.grid_columnconfigure(0, weight=1)

        entrylabel = Label(frame, anchor='w', justify='left',
                           text=self.message)
        self.entryvar = StringVar(self, self.text0)
        self.entry = Entry(frame, width=30, textvariable=self.entryvar)
        self.entry.focus_set()
        self.error_font = Font(name='TkCaptionFont',
                               exists=True, root=self.parent)
        self.entry_error = Label(frame, text=' ', foreground='red',
                                 font=self.error_font)
        entrylabel.grid(column=0, row=0, columnspan=3, padx=5, sticky=W)
        self.entry.grid(column=0, row=1, columnspan=3, padx=5, sticky=W+E,
                        pady=[10, 0])
        self.entry_error.grid(column=0, row=2, columnspan=3, padx=5,
                              sticky=W+E)
        # Subclasses insert their extra widgets between entry and buttons.
        self.create_extra()
        self.button_ok = Button(
            frame, text=ok_text, default='active', command=self.ok)
        self.button_cancel = Button(
            frame, text='Cancel', command=self.cancel)
        self.button_ok.grid(column=1, row=99, padx=5)
        self.button_cancel.grid(column=2, row=99, padx=5)

    def create_extra(self): pass  # Override to add widgets.

    def showerror(self, message, widget=None):
        """Show an error message on widget (default: the entry's label)."""
        #self.bell(displayof=self)
        (widget or self.entry_error)['text'] = 'ERROR: ' + message

    def entry_ok(self):  # Example: usually replace.
        "Return non-blank entry or None."
        self.entry_error['text'] = ''
        entry = self.entry.get().strip()
        if not entry:
            self.showerror('blank line.')
            return None
        return entry

    def ok(self, event=None):  # Do not replace.
        '''If entry is valid, bind it to 'result' and destroy tk widget.

        Otherwise leave dialog open for user to correct entry or cancel.
        '''
        entry = self.entry_ok()
        if entry is not None:
            self.result = entry
            self.destroy()
        else:
            # [Ok] moves focus.  (<Return> does not.)  Move it back.
            self.entry.focus_set()

    def cancel(self, event=None):  # Do not replace.
        "Set dialog result to None and destroy tk widget."
        self.result = None
        self.destroy()

    def destroy(self):
        """Release the modal grab before destroying the window."""
        self.grab_release()
        super().destroy()
class SectionName(Query):
    "Get a name for a config file section name."
    # Used in ConfigDialog.GetNewKeysName, .GetNewThemeName (837)

    def __init__(self, parent, title, message, used_names,
                 *, _htest=False, _utest=False):
        super().__init__(parent, title, message, used_names=used_names,
                         _htest=_htest, _utest=_utest)

    def entry_ok(self):
        "Return sensible ConfigParser section name or None."
        self.entry_error['text'] = ''
        name = self.entry.get().strip()
        # Reject, with a specific message, anything blank, overlong,
        # or already taken; otherwise accept the entered name.
        error = None
        if not name:
            error = 'no name specified.'
        elif len(name) > 30:
            error = 'name is longer than 30 characters.'
        elif name in self.used_names:
            error = 'name is already in use.'
        if error is not None:
            self.showerror(error)
            return None
        return name
class ModuleName(Query):
    "Get a module name for Open Module menu entry."
    # Used in open_module (editor.EditorWindow until move to iobinding).
    # NOTE(review): relies on importlib.util / importlib.abc having been
    # imported somewhere else; the file only does 'import importlib'.

    def __init__(self, parent, title, message, text0,
                 *, _htest=False, _utest=False):
        """text0 - initial module-name value for the entry."""
        super().__init__(parent, title, message, text0=text0,
                         _htest=_htest, _utest=_utest)

    def entry_ok(self):
        "Return entered module name as file path or None."
        self.entry_error['text'] = ''
        name = self.entry.get().strip()
        if not name:
            self.showerror('no name specified.')
            return None
        # XXX Ought to insert current file's directory in front of path.
        try:
            spec = importlib.util.find_spec(name)
        except (ValueError, ImportError) as msg:
            self.showerror(str(msg))
            return None
        if spec is None:
            self.showerror("module not found")
            return None
        if not isinstance(spec.loader, importlib.abc.SourceLoader):
            self.showerror("not a source-based module")
            return None
        try:
            file_path = spec.loader.get_filename(name)
        except AttributeError:
            # BUG FIX: Query.showerror(message, widget=None) has no
            # 'parent' parameter; passing parent=self raised TypeError
            # instead of showing the error.
            self.showerror("loader does not support get_filename")
            return None
        return file_path
class HelpSource(Query):
    "Get menu name and help source for Help menu."
    # Used in ConfigDialog.HelpListItemAdd/Edit, (941/9)

    def __init__(self, parent, title, *, menuitem='', filepath='',
                 used_names=None, _htest=False, _utest=False):
        """Get menu entry and url/local file for Additional Help.

        User enters a name for the Help resource and a web url or file
        name.  The user can browse for the file.

        menuitem - initial value for the menu-entry name
        filepath - initial value for the help file path or url
        used_names - menu names already in use; None means none
        """
        self.filepath = filepath
        message = 'Name for item on Help menu:'
        # BUG FIX: 'used_names={}' was a mutable default argument.
        super().__init__(
            parent, title, message, text0=menuitem,
            used_names={} if used_names is None else used_names,
            _htest=_htest, _utest=_utest)

    def create_extra(self):
        "Add path widgets to rows 10-12."  # (fixed 'widjets' typo)
        frame = self.frame
        pathlabel = Label(frame, anchor='w', justify='left',
                          text='Help File Path: Enter URL or browse for file')
        self.pathvar = StringVar(self, self.filepath)
        self.path = Entry(frame, textvariable=self.pathvar, width=40)
        browse = Button(frame, text='Browse', width=8,
                        command=self.browse_file)
        self.path_error = Label(frame, text=' ', foreground='red',
                                font=self.error_font)
        pathlabel.grid(column=0, row=10, columnspan=3, padx=5, pady=[10, 0],
                       sticky=W)
        self.path.grid(column=0, row=11, columnspan=2, padx=5, sticky=W+E,
                       pady=[10, 0])
        browse.grid(column=2, row=11, padx=5, sticky=W+S)
        self.path_error.grid(column=0, row=12, columnspan=3, padx=5,
                             sticky=W+E)

    def askfilename(self, filetypes, initdir, initfile):  # htest #
        # Extracted from browse_file so can mock for unittests.
        # Cannot unittest as cannot simulate button clicks.
        # Test by running htest, such as by running this file.
        return filedialog.Open(parent=self, filetypes=filetypes)\
               .show(initialdir=initdir, initialfile=initfile)

    def browse_file(self):
        "Open a file dialog and put any selection into the path entry."
        filetypes = [
            ("HTML Files", "*.htm *.html", "TEXT"),
            ("PDF Files", "*.pdf", "TEXT"),
            ("Windows Help Files", "*.chm"),
            ("Text Files", "*.txt", "TEXT"),
            ("All Files", "*")]
        path = self.pathvar.get()
        # Renamed locals so the builtins 'dir' and 'file' are not shadowed.
        if path:
            start_dir, base = os.path.split(path)
        else:
            base = None
            if platform[:3] == 'win':
                start_dir = os.path.join(os.path.dirname(executable), 'Doc')
                if not os.path.isdir(start_dir):
                    start_dir = os.getcwd()
            else:
                start_dir = os.getcwd()
        filename = self.askfilename(filetypes, start_dir, base)
        if filename:
            self.pathvar.set(filename)

    item_ok = SectionName.entry_ok  # localize for test override

    def path_ok(self):
        "Simple validity check for menu file path"
        path = self.path.get().strip()
        if not path:  # no path specified
            self.showerror('no help file path specified.', self.path_error)
            return None
        elif not path.startswith(('www.', 'http')):
            # Treat as a local file; strip any 'file:' scheme prefix.
            if path[:5] == 'file:':
                path = path[5:]
            if not os.path.exists(path):
                self.showerror('help file path does not exist.',
                               self.path_error)
                return None
            if platform == 'darwin':  # for Mac Safari
                path = "file://" + path
        return path

    def entry_ok(self):
        "Return apparently valid (name, path) or None"
        self.entry_error['text'] = ''
        self.path_error['text'] = ''
        name = self.item_ok()
        path = self.path_ok()
        return None if name is None or path is None else (name, path)
class CustomRun(Query):
    """Get settings for custom run of module.

    1. Command line arguments to extend sys.argv.
    2. Whether to restart Shell or not.
    """
    # Used in runscript.run_custom_event

    def __init__(self, parent, title, *, cli_args=None,
                 _htest=False, _utest=False):
        """cli_args is a list of strings.

        The list is assigned to the default Entry StringVar.
        The strings are displayed joined by ' ' for display.
        """
        # BUG FIX: 'cli_args=[]' was a mutable default argument.
        cli_args = [] if cli_args is None else cli_args
        # NOTE(review): text0 receives the list itself; Tk converts it to
        # a space-joined Tcl list for display — confirm quoting of args
        # containing spaces survives the round-trip through shlex.split.
        message = 'Command Line Arguments for sys.argv:'
        super().__init__(
            parent, title, message, text0=cli_args,
            _htest=_htest, _utest=_utest)

    def create_extra(self):
        "Add run mode on rows 10-12."
        frame = self.frame
        self.restartvar = BooleanVar(self, value=True)
        restart = Checkbutton(frame, variable=self.restartvar, onvalue=True,
                              offvalue=False, text='Restart shell')
        self.args_error = Label(frame, text=' ', foreground='red',
                                font=self.error_font)
        restart.grid(column=0, row=10, columnspan=3, padx=5, sticky='w')
        self.args_error.grid(column=0, row=12, columnspan=3, padx=5,
                             sticky='we')

    def cli_args_ok(self):
        "Validity check and parsing for command line arguments."
        cli_string = self.entry.get().strip()
        try:
            # Parse the string the same way a POSIX shell would.
            cli_args = shlex.split(cli_string, posix=True)
        except ValueError as err:
            self.showerror(str(err))
            return None
        return cli_args

    def entry_ok(self):
        "Return apparently valid (cli_args, restart) or None"
        self.entry_error['text'] = ''
        cli_args = self.cli_args_ok()
        restart = self.restartvar.get()
        return None if cli_args is None else (cli_args, restart)
if __name__ == '__main__':
    # Run this module's unit tests first (without exiting the process)...
    from unittest import main
    main('idlelib.idle_test.test_query', verbosity=2, exit=False)

    # ...then show the human-verification dialogs.
    from idlelib.idle_test.htest import run
    run(Query, HelpSource, CustomRun)
| |
#!/usr/bin/env python
import google
import numpy as np
import os.path
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gru_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops.gen_user_ops import s4_parse_utterance
import tensorflow.core.framework.token_model_pb2 as token_model_pb2
import time
# Command-line flags for the trainer; FLAGS is the parsed-value accessor.
FLAGS = tf.app.flags.FLAGS

tf.app.flags.DEFINE_integer('device', 0,
                            """The GPU device to use (set to negative to use CPU).""")

tf.app.flags.DEFINE_integer('batch_size', 32,
                            """Number of utterances to process in a batch.""")

tf.app.flags.DEFINE_integer('features_width', 123,
                            """Size of each feature.""")

tf.app.flags.DEFINE_integer('features_len_max', 2560,
                            """Maximum number of features in an utterance.""")

tf.app.flags.DEFINE_integer('tokens_len_max', 50,
                            """Maximum number of tokens in an utterance.""")

tf.app.flags.DEFINE_integer('vocab_size', 64,
                            """Token vocabulary size.""")

# BUG FIX: help text was a copy-paste of vocab_size's description;
# this flag is the embedding dimension, not the vocabulary size.
tf.app.flags.DEFINE_integer('embedding_size', 64,
                            """Token embedding size.""")

tf.app.flags.DEFINE_integer('encoder_cell_size', 256,
                            """Encoder cell size.""")

tf.app.flags.DEFINE_integer('decoder_cell_size', 512,
                            """Decoder cell size.""")

tf.app.flags.DEFINE_integer('attention_embedding_size', 256,
                            """Attention embedding size.""")

tf.app.flags.DEFINE_float('max_gradient_norm', 5.0,
                          """Maximum gradient norm.""")

tf.app.flags.DEFINE_float('learning_rate', 10.0,
                          """Learning rate.""")

tf.app.flags.DEFINE_string('logdir', '/tmp',
                           """Path to our outputs and logs.""")
class LASModel(object):
    """Listen-Attend-Spell style model (decoder-only token LM here).

    Constructing an instance builds the entire graph: input pipeline,
    embedding + GRU decoder, sequence loss, and clipped-SGD updates.
    """

    def __init__(self, dataset, batch_size, features_width, features_len_max,
                 vocab_size, embedding_size, tokens_len_max, encoder_cell_size,
                 decoder_cell_size, attention_embedding_size, max_gradient_norm,
                 learning_rate):
        self.dataset = dataset
        self.batch_size = batch_size
        self.features_width = features_width
        self.features_len_max = features_len_max
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.tokens_len_max = tokens_len_max
        self.encoder_cell_size = encoder_cell_size
        self.decoder_cell_size = decoder_cell_size
        self.attention_embedding_size = attention_embedding_size
        self.max_gradient_norm = max_gradient_norm
        self.learning_rate = learning_rate

        # Bookkeeping for per-step timing statistics.
        self.step_total = 0
        self.step_time_total = 0

        self.global_step = tf.Variable(0, trainable=False)

        # Create the inputs.
        self.create_graph_inputs()
        # Create the decoder.
        self.create_decoder()
        # Create the loss.
        self.create_loss()
        # Create the optimizer.
        self.create_optimizer()

        self.saver = tf.train.Saver(tf.all_variables())

    def create_graph_inputs(self):
        """Resolve the dataset name to a TFRecord path and build the
        shuffled input pipeline producing parsed utterance tensors."""
        # (Removed unused local 'dataset_map'.)
        # Unknown names pass through unchanged and are used as a path.
        if self.dataset == 'train_si284':
            self.dataset = 'speech4/data/train_si284.tfrecords'
            self.dataset_size = 37416
        elif self.dataset == 'test_dev93':
            self.dataset = 'speech4/data/test_dev93.tfrecords'
            self.dataset_size = 503
        elif self.dataset == 'test_eval92':
            self.dataset = 'speech4/data/test_eval92.tfrecords'
            self.dataset_size = 333
        elif self.dataset == 'ptb_train':
            self.dataset = 'speech4/data/ptb_train.tfrecords'
            self.dataset_size = 42068
        elif self.dataset == 'ptb_valid':
            self.dataset = 'speech4/data/ptb_valid.tfrecords'
            self.dataset_size = 3370
        elif self.dataset == 'ptb_test':
            self.dataset = 'speech4/data/ptb_test.tfrecords'
            self.dataset_size = 3761

        filename_queue = tf.train.string_input_producer([self.dataset])
        reader = tf.TFRecordReader()
        _, serialized = reader.read(filename_queue)
        serialized = tf.train.shuffle_batch(
            [serialized], batch_size=self.batch_size, num_threads=2,
            capacity=self.batch_size * 4 + 512, min_after_dequeue=512,
            seed=1000)

        # Parse the batch of serialized strings into the relevant utterance features.
        self.features, self.features_len, _, self.text, self.tokens, self.tokens_len, self.tokens_weights, self.uttid = s4_parse_utterance(
            serialized, features_len_max=self.features_len_max,
            tokens_len_max=self.tokens_len_max + 1)

        # Add the static shape to the features.
        for feature in self.features:
            feature.set_shape([self.batch_size, self.features_width])
        for token in self.tokens:
            token.set_shape([self.batch_size])

    def create_decoder(self):
        """Build the embedding lookup and GRU decoder over the tokens."""
        start_time = time.time()

        # BUG FIX: was vs.variable_scope("embedding" or scope) — 'scope'
        # is undefined; the or-expression always yielded "embedding".
        with vs.variable_scope("embedding"):
            tokens = self.tokens[:-1]  # inputs exclude the final token
            embeddings = []
            with tf.device("/cpu:0"):
                sqrt3 = np.sqrt(3)
                embedding = vs.get_variable(
                    "embedding", [self.vocab_size, self.embedding_size],
                    initializer=tf.random_uniform_initializer(-sqrt3, sqrt3))
                for token in tokens:
                    # Create the embedding layer.
                    emb = embedding_ops.embedding_lookup(embedding, token)
                    emb.set_shape([self.batch_size, self.embedding_size])
                    embeddings.append(emb)

        cell = rnn_cell.GRUCell(self.decoder_cell_size)
        cell = rnn_cell.OutputProjectionWrapper(cell, self.vocab_size)
        self.decoder_states = rnn.rnn(
            cell, embeddings, dtype=tf.float32, sequence_length=self.tokens_len)[0]
        self.logits = self.decoder_states

        print('create_decoder graph time %f' % (time.time() - start_time))

    def create_loss(self):
        """Build the sequence cross-entropy (log-perplexity) loss."""
        start_time = time.time()

        self.losses = []
        logits = self.decoder_states
        targets = self.tokens[1:]       # shifted by one w.r.t. inputs
        weights = self.tokens_weights[1:]
        log_perps = seq2seq.sequence_loss(logits, targets, weights, self.vocab_size)
        self.losses.append(log_perps)

        print('create_loss graph time %f' % (time.time() - start_time))

    def create_optimizer(self):
        """Build clipped gradient-descent updates for all trainables."""
        start_time = time.time()

        params = tf.trainable_variables()
        for param in params:
            print('param: ' + param.name + ': ' + str(param.get_shape()))

        self.gradient_norms = []
        self.updates = []
        #opt = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
        opt = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
        #opt = tf.train.AdagradOptimizer(learning_rate=self.learning_rate)
        gradients = tf.gradients(self.losses, params)
        if self.max_gradient_norm:
            gradients, norm = tf.clip_by_global_norm(
                gradients, self.max_gradient_norm)
            self.gradient_norms.append(norm)
        self.updates.append(opt.apply_gradients(
            zip(gradients, params), global_step=self.global_step))

        print('create_optimizer graph time %f' % (time.time() - start_time))

    def run_graph(self, sess, targets):
        """Run a dict of (possibly list-valued) fetch targets in one
        sess.run call and re-group the flat results under their names."""
        fetches = []
        for target in targets.values():
            if isinstance(target, (list, tuple)):
                fetches.extend(target)
            else:
                fetches.append(target)
        results = sess.run(fetches)

        # Re-slice the flat result list; iteration order over an
        # unmodified dict is consistent between the two loops.
        grouped = {}
        start = 0
        for name, target in targets.items():
            length = len(target) if isinstance(target, (list, tuple)) else 1
            end = start + length
            if isinstance(target, (list, tuple)):
                grouped[name] = results[start:end]
            else:
                grouped[name] = results[start]
            start = end
        return grouped

    def compute_accuracy(self, logits, targets, weights):
        """Weighted token accuracy: the weight-weighted fraction of
        per-timestep argmax predictions matching the targets."""
        assert len(logits) == len(targets)
        assert len(logits) == len(weights)

        correct = 0.0
        count = 0.0
        for logit, target, weight in zip(logits, targets, weights):
            correct = correct + (np.equal(logit.argmax(axis=1), np.array(target)).astype(float) * weight).sum()
            count = count + weight.sum()
        return correct / count

    def step(self, sess, forward_only, epoch=False):
        """Run one train/eval step, or a full epoch when epoch=True."""
        if epoch:
            steps_per_epoch = self.dataset_size // self.batch_size
            for s in range(steps_per_epoch):
                self.step(sess, forward_only, epoch=False)
            # BUG FIX: previously fell through and ran one extra step
            # after the epoch loop finished.
            return

        start_time = time.time()
        if forward_only:
            uttid, text, logperp = sess.run([self.uttid, self.text] + self.losses)
            # BUG FIX: 'accuracy' was unbound on this path, raising
            # NameError at the print below; logits are not fetched here,
            # so report NaN.
            accuracy = float('nan')
        else:
            targets = {}
            targets['uttid'] = self.uttid
            targets['text'] = self.text
            targets['tokens'] = self.tokens
            targets['tokens_weights'] = self.tokens_weights
            targets['logperp'] = self.losses
            targets['logits'] = self.logits
            targets['updates'] = self.updates
            targets['gradient_norms'] = self.gradient_norms
            fetches = self.run_graph(sess, targets)

            print(fetches['gradient_norms'])

            logperp = fetches['logperp'][0]
            accuracy = self.compute_accuracy(
                fetches['logits'], fetches['tokens'][1:], fetches['tokens_weights'][1:])
        perplexity = np.exp(logperp)
        # NOTE(review): this creates a *new* summary op on every step,
        # growing the graph; it should be built once at setup time.
        tf.scalar_summary('perplexity', perplexity)

        step_time = time.time() - start_time
        self.step_total += 1
        self.step_time_total += step_time

        print('step_total %d, step_time: %f, accuracy %f, perplexity %f' % (
            self.step_total, step_time, accuracy, perplexity))
def create_model(sess, dataset, forward_only):
    """Build a LASModel under the 'model' variable scope and initialize it.

    sess - TensorFlow session used to initialize variables / start queues
    dataset - dataset name understood by LASModel.create_graph_inputs
    forward_only - currently unused  # NOTE(review): confirm intent
    """
    start_time = time.time()

    #initializer = tf.random_normal_initializer(0.0, 0.1)
    initializer = tf.random_uniform_initializer(-0.1, 0.1)
    with tf.variable_scope("model", initializer=initializer):
        model = LASModel(
            dataset, FLAGS.batch_size, FLAGS.features_width, FLAGS.features_len_max,
            FLAGS.vocab_size, FLAGS.embedding_size, FLAGS.tokens_len_max,
            FLAGS.encoder_cell_size, FLAGS.decoder_cell_size,
            FLAGS.attention_embedding_size, FLAGS.max_gradient_norm,
            FLAGS.learning_rate)

    # NOTE(review): the op returned by add_check_numerics_ops is
    # discarded — confirm the numeric checks are actually executed.
    tf.add_check_numerics_ops()

    sess.run(tf.initialize_all_variables())
    tf.train.start_queue_runners(sess=sess)

    print('create_model graph time %f' % (time.time() - start_time))
    return model
def run_train():
    """Train on ptb_train for one epoch and write summaries to FLAGS.logdir."""
    tf.set_random_seed(1000)

    device = '/gpu:%d' % FLAGS.device if FLAGS.device >= 0 else '/cpu:0'
    with tf.device(device):
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            model = create_model(sess, 'ptb_train', False)

            summary_op = tf.merge_all_summaries()
            summary_writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph_def)
            summary_writer.flush()

            model.step(sess, forward_only=False, epoch=True)

            # BUG FIX: was 'for update in updates: print updates' —
            # 'updates' is undefined here (NameError) and it printed the
            # whole list on each iteration; print the model's update ops.
            for update in model.updates:
                print(update)

            summary_str = sess.run(summary_op)
            summary_writer.add_summary(summary_str, 0)
def main(_):
    # tf.app.run passes the parsed argv remainder; unused here.
    run_train()


if __name__ == '__main__':
    # tf.app.run parses the command-line flags and then invokes main().
    tf.app.run()
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handles database requests from other Sahara services."""
import copy
from sahara.db import base as db_base
from sahara.utils import configs
from sahara.utils import crypto
# Field values filled in for a new cluster when the request omits them.
CLUSTER_DEFAULTS = {
    "cluster_configs": {},
    "status": "undefined",
    "anti_affinity": [],
    "status_description": "",
    "info": {},
    "rollback_info": {},
    "sahara_info": {},
    "is_public": False,
    "is_protected": False
}

# Field values filled in for a new node group when the request omits them.
NODE_GROUP_DEFAULTS = {
    "node_processes": [],
    "node_configs": {},
    "volumes_per_node": 0,
    "volumes_size": 0,
    "volumes_availability_zone": None,
    "volume_mount_prefix": "/volumes/disk",
    "volume_type": None,
    "floating_ip_pool": None,
    "security_groups": None,
    "auto_security_group": False,
    "availability_zone": None,
    "is_proxy_gateway": False,
    "volume_local_to_instance": False,
}

# Node group templates take the node group defaults plus visibility flags.
NODE_GROUP_TEMPLATE_DEFAULTS = copy.deepcopy(NODE_GROUP_DEFAULTS)
NODE_GROUP_TEMPLATE_DEFAULTS.update({"is_public": False,
                                     "is_protected": False})

# Defaults for the remaining object kinds handled by the conductor.
INSTANCE_DEFAULTS = {
    "volumes": []
}

DATA_SOURCE_DEFAULTS = {
    "credentials": {},
    "is_public": False,
    "is_protected": False
}

JOB_DEFAULTS = {
    "is_public": False,
    "is_protected": False
}

JOB_BINARY_DEFAULTS = {
    "is_public": False,
    "is_protected": False
}

JOB_BINARY_INTERNAL_DEFAULTS = {
    "is_public": False,
    "is_protected": False
}

JOB_EXECUTION_DEFAULTS = {
    "is_public": False,
    "is_protected": False
}
def _apply_defaults(values, defaults):
new_values = copy.deepcopy(defaults)
new_values.update(values)
return new_values
class ConductorManager(db_base.Base):
"""This class aimed to conduct things.
The methods in the base API for sahara-conductor are various proxy
operations that allows other services to get specific work done without
locally accessing the database.
Additionally it performs some template-to-object copying magic.
"""
    def __init__(self):
        # No extra state; just run db_base.Base initialization.
        super(ConductorManager, self).__init__()
# Common helpers
def _populate_node_groups(self, context, cluster):
node_groups = cluster.get('node_groups')
if not node_groups:
return []
populated_node_groups = []
for node_group in node_groups:
populated_node_group = self._populate_node_group(context,
node_group)
self._cleanup_node_group(populated_node_group)
populated_node_group["tenant_id"] = context.tenant_id
populated_node_groups.append(
populated_node_group)
return populated_node_groups
def _cleanup_node_group(self, node_group):
node_group.pop('id', None)
node_group.pop('created_at', None)
node_group.pop('updated_at', None)
def _populate_node_group(self, context, node_group):
node_group_merged = copy.deepcopy(NODE_GROUP_DEFAULTS)
ng_tmpl_id = node_group.get('node_group_template_id')
ng_tmpl = None
if ng_tmpl_id:
ng_tmpl = self.node_group_template_get(context, ng_tmpl_id)
self._cleanup_node_group(ng_tmpl)
node_group_merged.update(ng_tmpl)
node_group_merged.update(node_group)
if ng_tmpl:
node_group_merged['node_configs'] = configs.merge_configs(
ng_tmpl.get('node_configs'),
node_group.get('node_configs'))
return node_group_merged
    # Cluster ops

    def cluster_get(self, context, cluster, show_progress=False):
        """Return the cluster or None if it does not exist."""
        # Pure delegation; show_progress is forwarded to the db layer.
        return self.db.cluster_get(context, cluster, show_progress)

    def cluster_get_all(self, context, **kwargs):
        """Get all clusters filtered by **kwargs.

        e.g. cluster_get_all(plugin_name='vanilla', hadoop_version='1.1')
        """
        # Filtering semantics are defined entirely by the db layer.
        return self.db.cluster_get_all(context, **kwargs)
    def cluster_create(self, context, values):
        """Create a cluster from the values dictionary.

        Field precedence, lowest to highest: CLUSTER_DEFAULTS, the
        referenced cluster template (if any), the request values.
        A fresh management SSH key pair is always generated.
        """
        # loading defaults
        merged_values = copy.deepcopy(CLUSTER_DEFAULTS)
        merged_values['tenant_id'] = context.tenant_id

        private_key, public_key = crypto.generate_key_pair()
        merged_values['management_private_key'] = private_key
        merged_values['management_public_key'] = public_key

        cluster_template_id = values.get('cluster_template_id')
        c_tmpl = None

        if cluster_template_id:
            c_tmpl = self.cluster_template_get(context, cluster_template_id)

            # Strip db bookkeeping and visibility fields so they are
            # not inherited by the new cluster.
            del c_tmpl['created_at']
            del c_tmpl['updated_at']
            del c_tmpl['id']
            del c_tmpl['is_public']
            del c_tmpl['is_protected']

            # updating with cluster_template values
            merged_values.update(c_tmpl)

        # updating with values provided in request
        merged_values.update(values)

        if c_tmpl:
            # cluster_configs are merged key-wise, not replaced wholesale.
            merged_values['cluster_configs'] = configs.merge_configs(
                c_tmpl.get('cluster_configs'),
                values.get('cluster_configs'))

        merged_values['node_groups'] = self._populate_node_groups(
            context, merged_values)

        return self.db.cluster_create(context, merged_values)
def cluster_update(self, context, cluster, values):
"""Set the given properties on cluster and update it."""
values = copy.deepcopy(values)
return self.db.cluster_update(context, cluster, values)
    def cluster_destroy(self, context, cluster):
        """Destroy the cluster or raise if it does not exist."""
        # Pure delegation to the db layer.
        self.db.cluster_destroy(context, cluster)
    # Node Group ops

    def node_group_add(self, context, cluster, values):
        """Create a Node Group from the values dictionary."""
        # Defensive copy, then expand template/defaults and stamp tenant.
        values = copy.deepcopy(values)
        values = self._populate_node_group(context, values)
        values['tenant_id'] = context.tenant_id
        return self.db.node_group_add(context, cluster, values)

    def node_group_update(self, context, node_group, values):
        """Set the given properties on node_group and update it."""
        # NOTE(review): unlike cluster_update, the db result is not
        # returned here — confirm whether any caller needs it.
        values = copy.deepcopy(values)
        self.db.node_group_update(context, node_group, values)

    def node_group_remove(self, context, node_group):
        """Destroy the node_group or raise if it does not exist."""
        self.db.node_group_remove(context, node_group)
# Instance ops

def instance_add(self, context, node_group, values):
    """Create an Instance from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, INSTANCE_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.instance_add(context, node_group, values)

def instance_update(self, context, instance, values):
    """Set the given properties on Instance and update it."""
    values = copy.deepcopy(values)
    self.db.instance_update(context, instance, values)

def instance_remove(self, context, instance):
    """Destroy the Instance or raise if it does not exist."""
    self.db.instance_remove(context, instance)
# Volumes ops

def append_volume(self, context, instance, volume_id):
    """Append volume_id to instance."""
    self.db.append_volume(context, instance, volume_id)

def remove_volume(self, context, instance, volume_id):
    """Remove volume_id in instance."""
    self.db.remove_volume(context, instance, volume_id)
# Cluster Template ops

def cluster_template_get(self, context, cluster_template):
    """Return the cluster_template or None if it does not exist."""
    return self.db.cluster_template_get(context, cluster_template)

def cluster_template_get_all(self, context, **kwargs):
    """Get all cluster templates filtered by **kwargs.

    e.g. cluster_template_get_all(plugin_name='vanilla',
                                  hadoop_version='1.1')
    """
    return self.db.cluster_template_get_all(context, **kwargs)

def cluster_template_create(self, context, values):
    """Create a cluster_template from the values dictionary."""
    values = copy.deepcopy(values)
    # Templates reuse the cluster defaults for any missing keys.
    values = _apply_defaults(values, CLUSTER_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    values['node_groups'] = self._populate_node_groups(context, values)
    return self.db.cluster_template_create(context, values)

def cluster_template_destroy(self, context, cluster_template,
                             ignore_default=False):
    """Destroy the cluster_template or raise if it does not exist."""
    self.db.cluster_template_destroy(context, cluster_template,
                                     ignore_default)

def cluster_template_update(self, context, id, values,
                            ignore_default=False):
    """Update a cluster_template from the values dictionary."""
    values = copy.deepcopy(values)
    values['tenant_id'] = context.tenant_id
    values['id'] = id
    # Only re-resolve node groups when the update actually touches them.
    if 'node_groups' in values:
        values['node_groups'] = self._populate_node_groups(context, values)
    return self.db.cluster_template_update(context, values, ignore_default)
# Node Group Template ops

def node_group_template_get(self, context, node_group_template):
    """Return the Node Group Template or None if it does not exist."""
    return self.db.node_group_template_get(context, node_group_template)

def node_group_template_get_all(self, context, **kwargs):
    """Get all NodeGroupTemplates filtered by **kwargs.

    e.g. node_group_template_get_all(plugin_name='vanilla',
                                     hadoop_version='1.1')
    """
    return self.db.node_group_template_get_all(context, **kwargs)

def node_group_template_create(self, context, values):
    """Create a Node Group Template from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, NODE_GROUP_TEMPLATE_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.node_group_template_create(context, values)

def node_group_template_destroy(self, context, node_group_template,
                                ignore_default=False):
    """Destroy the Node Group Template or raise if it does not exist."""
    self.db.node_group_template_destroy(context, node_group_template,
                                        ignore_default)

def node_group_template_update(self, context, id, values,
                               ignore_default=False):
    """Update a Node Group Template from the values dictionary."""
    values = copy.deepcopy(values)
    values['tenant_id'] = context.tenant_id
    values['id'] = id
    return self.db.node_group_template_update(context, values,
                                              ignore_default)
# Data Source ops

def data_source_get(self, context, data_source):
    """Return the Data Source or None if it does not exist."""
    return self.db.data_source_get(context, data_source)

def data_source_get_all(self, context, **kwargs):
    """Get all Data Sources filtered by **kwargs.

    e.g. data_source_get_all(name='myfile', type='swift')
    """
    return self.db.data_source_get_all(context, **kwargs)

def data_source_count(self, context, **kwargs):
    """Count Data Sources filtered by **kwargs.

    Uses sqlalchemy "in_" clause for any tuple values
    Uses sqlalchemy "like" clause for any string values containing %
    """
    return self.db.data_source_count(context, **kwargs)

def data_source_create(self, context, values):
    """Create a Data Source from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, DATA_SOURCE_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.data_source_create(context, values)

def data_source_destroy(self, context, data_source):
    """Destroy the Data Source or raise if it does not exist."""
    return self.db.data_source_destroy(context, data_source)

def data_source_update(self, context, id, values):
    """Update the Data Source or raise if it does not exist."""
    values = copy.deepcopy(values)
    values["id"] = id
    return self.db.data_source_update(context, values)
# JobExecution ops

def job_execution_get(self, context, job_execution):
    """Return the JobExecution or None if it does not exist."""
    return self.db.job_execution_get(context, job_execution)

def job_execution_get_all(self, context, **kwargs):
    """Get all JobExecutions filtered by **kwargs.

    kwargs key values may be the names of fields in a JobExecution
    plus the following special values with the indicated meaning:

    'cluster.name' -- name of the Cluster referenced by the JobExecution
    'job.name' -- name of the Job referenced by the JobExecution
    'status' -- JobExecution['info']['status']

    e.g. job_execution_get_all(cluster_id=12, input_id=123)
         job_execution_get_all(**{'cluster.name': 'test',
                                  'job.name': 'wordcount'})
    """
    return self.db.job_execution_get_all(context, **kwargs)

def job_execution_count(self, context, **kwargs):
    """Count number of JobExecutions filtered by **kwargs.

    e.g. job_execution_count(cluster_id=12, input_id=123)
    """
    return self.db.job_execution_count(context, **kwargs)

def job_execution_create(self, context, values):
    """Create a JobExecution from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, JOB_EXECUTION_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.job_execution_create(context, values)

def job_execution_update(self, context, job_execution, values):
    """Updates a JobExecution from the values dictionary."""
    values = copy.deepcopy(values)
    return self.db.job_execution_update(context, job_execution, values)

def job_execution_destroy(self, context, job_execution):
    """Destroy the JobExecution or raise if it does not exist."""
    return self.db.job_execution_destroy(context, job_execution)
# Job ops

def job_get(self, context, job):
    """Return the Job or None if it does not exist."""
    return self.db.job_get(context, job)

def job_get_all(self, context, **kwargs):
    """Get all Jobs filtered by **kwargs.

    e.g. job_get_all(name='myjob', type='MapReduce')
    """
    return self.db.job_get_all(context, **kwargs)

def job_create(self, context, values):
    """Create a Job from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, JOB_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.job_create(context, values)

def job_update(self, context, job, values):
    """Updates a Job from the values dictionary."""
    # NOTE(review): unlike the sibling update methods, values is not
    # deep-copied here — confirm whether the DB layer mutates it.
    return self.db.job_update(context, job, values)

def job_destroy(self, context, job):
    """Destroy the Job or raise if it does not exist."""
    self.db.job_destroy(context, job)
# JobBinary ops

def job_binary_get_all(self, context, **kwargs):
    """Get all JobBinarys filtered by **kwargs.

    e.g. job_binary_get_all(name='wordcount.jar')
    """
    return self.db.job_binary_get_all(context, **kwargs)

def job_binary_get(self, context, job_binary_id):
    """Return the JobBinary or None if it does not exist."""
    return self.db.job_binary_get(context, job_binary_id)

def job_binary_create(self, context, values):
    """Create a JobBinary from the values dictionary."""
    values = copy.deepcopy(values)
    values = _apply_defaults(values, JOB_BINARY_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.job_binary_create(context, values)

def job_binary_destroy(self, context, job_binary):
    """Destroy the JobBinary or raise if it does not exist."""
    self.db.job_binary_destroy(context, job_binary)

def job_binary_update(self, context, id, values):
    """Update a JobBinary from the values dictionary."""
    values = copy.deepcopy(values)
    values['id'] = id
    return self.db.job_binary_update(context, values)
# JobBinaryInternal ops

def job_binary_internal_get_all(self, context, **kwargs):
    """Get all JobBinaryInternals filtered by **kwargs.

    e.g. job_binary_internal_get_all(name='wordcount.jar')

    The JobBinaryInternals returned do not contain a data field.
    """
    return self.db.job_binary_internal_get_all(context, **kwargs)

def job_binary_internal_get(self, context, job_binary_internal_id):
    """Return the JobBinaryInternal or None if it does not exist.

    The JobBinaryInternal returned does not contain a data field.
    """
    return self.db.job_binary_internal_get(context, job_binary_internal_id)

def job_binary_internal_create(self, context, values):
    """Create a JobBinaryInternal from the values dictionary."""
    # Since values["data"] is (should be) encoded as a string
    # here the deepcopy of values only incs a reference count on data.
    # This is nice, since data could be big...
    values = copy.deepcopy(values)
    values = _apply_defaults(values, JOB_BINARY_INTERNAL_DEFAULTS)
    values['tenant_id'] = context.tenant_id
    return self.db.job_binary_internal_create(context, values)

def job_binary_internal_destroy(self, context, job_binary_internal):
    """Destroy the JobBinaryInternal or raise if it does not exist."""
    self.db.job_binary_internal_destroy(context, job_binary_internal)

def job_binary_internal_get_raw_data(self,
                                     context, job_binary_internal_id):
    """Return the binary data field from a JobBinaryInternal."""
    return self.db.job_binary_internal_get_raw_data(
        context,
        job_binary_internal_id)

def job_binary_internal_update(self, context, id, values):
    """Updates a JobBinaryInternal from the values dictionary."""
    # NOTE(review): no deepcopy/tenant stamping here, unlike siblings —
    # id and values are passed straight through; confirm intended.
    return self.db.job_binary_internal_update(context, id, values)
# Events ops

def cluster_provision_step_add(self, context, cluster_id, values):
    """Create a provisioning step assigned to cluster from values dict."""
    return self.db.cluster_provision_step_add(context, cluster_id, values)

def cluster_provision_step_update(self, context, provision_step):
    """Update the cluster provisioning step."""
    return self.db.cluster_provision_step_update(context, provision_step)

def cluster_provision_progress_update(self, context, cluster_id):
    """Return cluster with provision progress updated field."""
    return self.db.cluster_provision_progress_update(context, cluster_id)

def cluster_event_add(self, context, provision_step, values):
    """Assign new event to the specified provision step."""
    return self.db.cluster_event_add(context, provision_step, values)
| |
"""Color util methods."""
from __future__ import annotations
import colorsys
import math
from typing import NamedTuple
import attr
# mypy: disallow-any-generics
class RGBColor(NamedTuple):
    """RGB color as integer components, 0-255 per channel."""

    # Red, green and blue channel values.
    r: int
    g: int
    b: int
# Official CSS3 colors from w3.org:
# https://www.w3.org/TR/2010/PR-css3-color-20101028/#html4
# names do not have spaces in them so that we can compare against
# requests more easily (by removing spaces from the requests as well).
# This lets "dark seagreen" and "dark sea green" both match the same
# color "darkseagreen".
# Maps normalized (lower-case, space-free) color name -> RGBColor.
COLORS = {
    "aliceblue": RGBColor(240, 248, 255),
    "antiquewhite": RGBColor(250, 235, 215),
    "aqua": RGBColor(0, 255, 255),
    "aquamarine": RGBColor(127, 255, 212),
    "azure": RGBColor(240, 255, 255),
    "beige": RGBColor(245, 245, 220),
    "bisque": RGBColor(255, 228, 196),
    "black": RGBColor(0, 0, 0),
    "blanchedalmond": RGBColor(255, 235, 205),
    "blue": RGBColor(0, 0, 255),
    "blueviolet": RGBColor(138, 43, 226),
    "brown": RGBColor(165, 42, 42),
    "burlywood": RGBColor(222, 184, 135),
    "cadetblue": RGBColor(95, 158, 160),
    "chartreuse": RGBColor(127, 255, 0),
    "chocolate": RGBColor(210, 105, 30),
    "coral": RGBColor(255, 127, 80),
    "cornflowerblue": RGBColor(100, 149, 237),
    "cornsilk": RGBColor(255, 248, 220),
    "crimson": RGBColor(220, 20, 60),
    "cyan": RGBColor(0, 255, 255),
    "darkblue": RGBColor(0, 0, 139),
    "darkcyan": RGBColor(0, 139, 139),
    "darkgoldenrod": RGBColor(184, 134, 11),
    "darkgray": RGBColor(169, 169, 169),
    "darkgreen": RGBColor(0, 100, 0),
    "darkgrey": RGBColor(169, 169, 169),
    "darkkhaki": RGBColor(189, 183, 107),
    "darkmagenta": RGBColor(139, 0, 139),
    "darkolivegreen": RGBColor(85, 107, 47),
    "darkorange": RGBColor(255, 140, 0),
    "darkorchid": RGBColor(153, 50, 204),
    "darkred": RGBColor(139, 0, 0),
    "darksalmon": RGBColor(233, 150, 122),
    "darkseagreen": RGBColor(143, 188, 143),
    "darkslateblue": RGBColor(72, 61, 139),
    "darkslategray": RGBColor(47, 79, 79),
    "darkslategrey": RGBColor(47, 79, 79),
    "darkturquoise": RGBColor(0, 206, 209),
    "darkviolet": RGBColor(148, 0, 211),
    "deeppink": RGBColor(255, 20, 147),
    "deepskyblue": RGBColor(0, 191, 255),
    "dimgray": RGBColor(105, 105, 105),
    "dimgrey": RGBColor(105, 105, 105),
    "dodgerblue": RGBColor(30, 144, 255),
    "firebrick": RGBColor(178, 34, 34),
    "floralwhite": RGBColor(255, 250, 240),
    "forestgreen": RGBColor(34, 139, 34),
    "fuchsia": RGBColor(255, 0, 255),
    "gainsboro": RGBColor(220, 220, 220),
    "ghostwhite": RGBColor(248, 248, 255),
    "gold": RGBColor(255, 215, 0),
    "goldenrod": RGBColor(218, 165, 32),
    "gray": RGBColor(128, 128, 128),
    "green": RGBColor(0, 128, 0),
    "greenyellow": RGBColor(173, 255, 47),
    "grey": RGBColor(128, 128, 128),
    "honeydew": RGBColor(240, 255, 240),
    "hotpink": RGBColor(255, 105, 180),
    "indianred": RGBColor(205, 92, 92),
    "indigo": RGBColor(75, 0, 130),
    "ivory": RGBColor(255, 255, 240),
    "khaki": RGBColor(240, 230, 140),
    "lavender": RGBColor(230, 230, 250),
    "lavenderblush": RGBColor(255, 240, 245),
    "lawngreen": RGBColor(124, 252, 0),
    "lemonchiffon": RGBColor(255, 250, 205),
    "lightblue": RGBColor(173, 216, 230),
    "lightcoral": RGBColor(240, 128, 128),
    "lightcyan": RGBColor(224, 255, 255),
    "lightgoldenrodyellow": RGBColor(250, 250, 210),
    "lightgray": RGBColor(211, 211, 211),
    "lightgreen": RGBColor(144, 238, 144),
    "lightgrey": RGBColor(211, 211, 211),
    "lightpink": RGBColor(255, 182, 193),
    "lightsalmon": RGBColor(255, 160, 122),
    "lightseagreen": RGBColor(32, 178, 170),
    "lightskyblue": RGBColor(135, 206, 250),
    "lightslategray": RGBColor(119, 136, 153),
    "lightslategrey": RGBColor(119, 136, 153),
    "lightsteelblue": RGBColor(176, 196, 222),
    "lightyellow": RGBColor(255, 255, 224),
    "lime": RGBColor(0, 255, 0),
    "limegreen": RGBColor(50, 205, 50),
    "linen": RGBColor(250, 240, 230),
    "magenta": RGBColor(255, 0, 255),
    "maroon": RGBColor(128, 0, 0),
    "mediumaquamarine": RGBColor(102, 205, 170),
    "mediumblue": RGBColor(0, 0, 205),
    "mediumorchid": RGBColor(186, 85, 211),
    "mediumpurple": RGBColor(147, 112, 219),
    "mediumseagreen": RGBColor(60, 179, 113),
    "mediumslateblue": RGBColor(123, 104, 238),
    "mediumspringgreen": RGBColor(0, 250, 154),
    "mediumturquoise": RGBColor(72, 209, 204),
    "mediumvioletred": RGBColor(199, 21, 133),
    "midnightblue": RGBColor(25, 25, 112),
    "mintcream": RGBColor(245, 255, 250),
    "mistyrose": RGBColor(255, 228, 225),
    "moccasin": RGBColor(255, 228, 181),
    "navajowhite": RGBColor(255, 222, 173),
    "navy": RGBColor(0, 0, 128),
    "navyblue": RGBColor(0, 0, 128),
    "oldlace": RGBColor(253, 245, 230),
    "olive": RGBColor(128, 128, 0),
    "olivedrab": RGBColor(107, 142, 35),
    "orange": RGBColor(255, 165, 0),
    "orangered": RGBColor(255, 69, 0),
    "orchid": RGBColor(218, 112, 214),
    "palegoldenrod": RGBColor(238, 232, 170),
    "palegreen": RGBColor(152, 251, 152),
    "paleturquoise": RGBColor(175, 238, 238),
    "palevioletred": RGBColor(219, 112, 147),
    "papayawhip": RGBColor(255, 239, 213),
    "peachpuff": RGBColor(255, 218, 185),
    "peru": RGBColor(205, 133, 63),
    "pink": RGBColor(255, 192, 203),
    "plum": RGBColor(221, 160, 221),
    "powderblue": RGBColor(176, 224, 230),
    "purple": RGBColor(128, 0, 128),
    "red": RGBColor(255, 0, 0),
    "rosybrown": RGBColor(188, 143, 143),
    "royalblue": RGBColor(65, 105, 225),
    "saddlebrown": RGBColor(139, 69, 19),
    "salmon": RGBColor(250, 128, 114),
    "sandybrown": RGBColor(244, 164, 96),
    "seagreen": RGBColor(46, 139, 87),
    "seashell": RGBColor(255, 245, 238),
    "sienna": RGBColor(160, 82, 45),
    "silver": RGBColor(192, 192, 192),
    "skyblue": RGBColor(135, 206, 235),
    "slateblue": RGBColor(106, 90, 205),
    "slategray": RGBColor(112, 128, 144),
    "slategrey": RGBColor(112, 128, 144),
    "snow": RGBColor(255, 250, 250),
    "springgreen": RGBColor(0, 255, 127),
    "steelblue": RGBColor(70, 130, 180),
    "tan": RGBColor(210, 180, 140),
    "teal": RGBColor(0, 128, 128),
    "thistle": RGBColor(216, 191, 216),
    "tomato": RGBColor(255, 99, 71),
    "turquoise": RGBColor(64, 224, 208),
    "violet": RGBColor(238, 130, 238),
    "wheat": RGBColor(245, 222, 179),
    "white": RGBColor(255, 255, 255),
    "whitesmoke": RGBColor(245, 245, 245),
    "yellow": RGBColor(255, 255, 0),
    "yellowgreen": RGBColor(154, 205, 50),
    # And...
    "homeassistant": RGBColor(3, 169, 244),
}
@attr.s()
class XYPoint:
    """Represents a CIE 1931 XY coordinate pair."""

    x: float = attr.ib()  # pylint: disable=invalid-name
    y: float = attr.ib()  # pylint: disable=invalid-name
@attr.s()
class GamutType:
    """Represents the Gamut of a light.

    The gamut is the triangle of reachable chromaticities, given by the
    xy coordinates of the red, green and blue primaries.
    """

    # ColorGamut = gamut(xypoint(xR,yR),xypoint(xG,yG),xypoint(xB,yB))
    red: XYPoint = attr.ib()
    green: XYPoint = attr.ib()
    blue: XYPoint = attr.ib()
def color_name_to_rgb(color_name: str) -> RGBColor:
    """Look up a CSS3 color name and return its RGBColor."""
    # COLORS keys contain no spaces, so normalize the request the same way.
    normalized = color_name.replace(" ", "").lower()
    rgb = COLORS.get(normalized)
    if rgb is None:
        raise ValueError("Unknown color")
    return rgb
# pylint: disable=invalid-name
def color_RGB_to_xy(
    iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float]:
    """Convert from RGB color to XY color (brightness term discarded)."""
    x, y, _brightness = color_RGB_to_xy_brightness(iR, iG, iB, Gamut)
    return x, y
# Taken from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
# License: Code is given as is. Use at your own risk and discretion.
def color_RGB_to_xy_brightness(
    iR: int, iG: int, iB: int, Gamut: GamutType | None = None
) -> tuple[float, float, int]:
    """Convert from RGB color to XY color plus brightness."""
    # Pure black has no defined chromaticity; short-circuit it.
    if not (iR or iG or iB):
        return 0.0, 0.0, 0

    def _gamma(channel: float) -> float:
        # sRGB inverse companding (gamma correction).
        if channel > 0.04045:
            return pow((channel + 0.055) / (1.0 + 0.055), 2.4)
        return channel / 12.92

    red = _gamma(iR / 255)
    green = _gamma(iG / 255)
    blue = _gamma(iB / 255)

    # Wide RGB D65 conversion formula
    X = red * 0.664511 + green * 0.154324 + blue * 0.162028
    Y = red * 0.283881 + green * 0.668433 + blue * 0.047685
    Z = red * 0.000088 + green * 0.072310 + blue * 0.986039

    # Convert XYZ to xy chromaticity.
    total = X + Y + Z
    x = X / total
    y = Y / total

    # Brightness comes from luminance, capped at full scale.
    brightness = round(min(Y, 1) * 255)

    # Clamp the chromaticity into the lamp's gamut when one is given.
    if Gamut:
        if not check_point_in_lamps_reach((x, y), Gamut):
            x, y = get_closest_point_to_point((x, y), Gamut)

    return round(x, 3), round(y, 3), brightness
def color_xy_to_RGB(
    vX: float, vY: float, Gamut: GamutType | None = None
) -> tuple[int, int, int]:
    """Convert from XY to a normalized RGB at full brightness."""
    full_brightness = 255
    return color_xy_brightness_to_RGB(vX, vY, full_brightness, Gamut)
# Converted to Python from Obj-C, original source from:
# http://www.developers.meethue.com/documentation/color-conversions-rgb-xy
def color_xy_brightness_to_RGB(
    vX: float, vY: float, ibrightness: int, Gamut: GamutType | None = None
) -> tuple[int, int, int]:
    """Convert from XYZ to RGB."""
    # Pull the requested chromaticity into the lamp's gamut if needed.
    if Gamut and not check_point_in_lamps_reach((vX, vY), Gamut):
        vX, vY = get_closest_point_to_point((vX, vY), Gamut)

    brightness = ibrightness / 255.0
    if brightness == 0.0:
        return (0, 0, 0)

    Y = brightness
    # Nudge y away from zero to avoid dividing by it below.
    if vY == 0.0:
        vY += 0.00000000001
    X = (Y / vY) * vX
    Z = (Y / vY) * (1 - vX - vY)

    # Convert to RGB using Wide RGB D65 conversion.
    r = X * 1.656492 - Y * 0.354851 - Z * 0.255038
    g = -X * 0.707196 + Y * 1.655397 + Z * 0.036152
    b = X * 0.051713 - Y * 0.121364 + Z * 1.011530

    def _reverse_gamma(value: float) -> float:
        # Apply reverse gamma correction (sRGB companding).
        if value <= 0.0031308:
            return 12.92 * value
        return (1.0 + 0.055) * pow(value, (1.0 / 2.4)) - 0.055

    channels = [_reverse_gamma(c) for c in (r, g, b)]
    # Bring all negative components to zero.
    channels = [max(0, c) for c in channels]

    # If one component is greater than 1, weight components by that value.
    peak = max(channels)
    if peak > 1:
        channels = [c / peak for c in channels]

    ir, ig, ib = (int(c * 255) for c in channels)
    return (ir, ig, ib)
def color_hsb_to_RGB(fH: float, fS: float, fB: float) -> tuple[int, int, int]:
    """Convert a hsb into its rgb representation."""
    # Zero saturation is pure gray: all channels equal the brightness.
    if fS == 0.0:
        gray = int(fB * 255)
        return gray, gray, gray

    h = fH / 60
    f = h - float(math.floor(h))
    p = fB * (1 - fS)
    q = fB * (1 - fS * f)
    t = fB * (1 - (fS * (1 - f)))

    # Channel ordering depends on which 60-degree sector the hue is in.
    sector = int(h)
    if sector == 0:
        rgb = (fB, t, p)
    elif sector == 1:
        rgb = (q, fB, p)
    elif sector == 2:
        rgb = (p, fB, t)
    elif sector == 3:
        rgb = (p, q, fB)
    elif sector == 4:
        rgb = (t, p, fB)
    elif sector == 5:
        rgb = (fB, p, q)
    else:
        # Out-of-range hue falls through to black, like the original.
        rgb = (0, 0, 0)
    scaled = tuple(int(channel * 255) for channel in rgb)
    return scaled
def color_RGB_to_hsv(iR: float, iG: float, iB: float) -> tuple[float, float, float]:
    """Convert an rgb color to its hsv representation.

    Hue is scaled 0-360, Sat 0-100, Val 0-100.
    """
    hue, sat, val = colorsys.rgb_to_hsv(iR / 255.0, iG / 255.0, iB / 255.0)
    return round(hue * 360, 3), round(sat * 100, 3), round(val * 100, 3)
def color_RGB_to_hs(iR: float, iG: float, iB: float) -> tuple[float, float]:
    """Convert an rgb color to its hs representation."""
    hue, sat, _val = color_RGB_to_hsv(iR, iG, iB)
    return hue, sat
def color_hsv_to_RGB(iH: float, iS: float, iV: float) -> tuple[int, int, int]:
    """Convert an hsv color into its rgb representation.

    Hue is scaled 0-360, Sat 0-100, Val 0-100.
    """
    red, green, blue = colorsys.hsv_to_rgb(iH / 360, iS / 100, iV / 100)
    return (int(red * 255), int(green * 255), int(blue * 255))
def color_hs_to_RGB(iH: float, iS: float) -> tuple[int, int, int]:
    """Convert an hs color into its rgb representation at full value."""
    full_value = 100
    return color_hsv_to_RGB(iH, iS, full_value)
def color_xy_to_hs(
    vX: float, vY: float, Gamut: GamutType | None = None
) -> tuple[float, float]:
    """Convert an xy color to its hs representation."""
    rgb = color_xy_to_RGB(vX, vY, Gamut)
    hue, sat, _val = color_RGB_to_hsv(*rgb)
    return hue, sat
def color_hs_to_xy(
    iH: float, iS: float, Gamut: GamutType | None = None
) -> tuple[float, float]:
    """Convert an hs color to its xy representation."""
    red, green, blue = color_hs_to_RGB(iH, iS)
    return color_RGB_to_xy(red, green, blue, Gamut)
def _match_max_scale(
input_colors: tuple[int, ...], output_colors: tuple[int, ...]
) -> tuple[int, ...]:
"""Match the maximum value of the output to the input."""
max_in = max(input_colors)
max_out = max(output_colors)
if max_out == 0:
factor = 0.0
else:
factor = max_in / max_out
return tuple(int(round(i * factor)) for i in output_colors)
def color_rgb_to_rgbw(r: int, g: int, b: int) -> tuple[int, int, int, int]:
    """Convert an rgb color to an rgbw representation."""
    # The white channel takes over the common (gray) portion of r/g/b,
    # which is then subtracted from the color channels.
    white = min(r, g, b)
    rgbw = (r - white, g - white, b - white, white)
    # Rescale so the brightest output channel matches the brightest input,
    # keeping the full channel range in use.
    return _match_max_scale((r, g, b), rgbw)  # type: ignore
def color_rgbw_to_rgb(r: int, g: int, b: int, w: int) -> tuple[int, int, int]:
    """Convert an rgbw color to an rgb representation."""
    # Fold the white channel back into each color channel.
    combined = (r + w, g + w, b + w)
    # Rescale so the output cannot overflow the input's maximum.
    return _match_max_scale((r, g, b, w), combined)  # type: ignore
def color_rgb_to_rgbww(
    r: int, g: int, b: int, min_mireds: int, max_mireds: int
) -> tuple[int, int, int, int, int]:
    """Convert an rgb color to an rgbww representation.

    The two extra channels are cold-white and warm-white; both are driven
    at the same level here, which corresponds to the midpoint of the
    light's mired range.
    """
    # Find the color temperature when both white channels have equal brightness
    mired_range = max_mireds - min_mireds
    mired_midpoint = min_mireds + mired_range / 2
    color_temp_kelvin = color_temperature_mired_to_kelvin(mired_midpoint)
    w_r, w_g, w_b = color_temperature_to_rgb(color_temp_kelvin)

    # Find the ratio of the midpoint white in the input rgb channels
    white_level = min(r / w_r, g / w_g, b / w_b)

    # Subtract the white portion from the rgb channels.
    rgb = (r - w_r * white_level, g - w_g * white_level, b - w_b * white_level)
    rgbww = (*rgb, round(white_level * 255), round(white_level * 255))

    # Match the output maximum value to the input. This ensures the full
    # channel range is used.
    return _match_max_scale((r, g, b), rgbww)  # type: ignore
def color_rgbww_to_rgb(
    r: int, g: int, b: int, cw: int, ww: int, min_mireds: int, max_mireds: int
) -> tuple[int, int, int]:
    """Convert an rgbww color to an rgb representation.

    The cold-white/warm-white pair is converted to an equivalent rgb white
    tint, which is then folded back into the color channels.
    """
    # Calculate color temperature of the white channels
    mired_range = max_mireds - min_mireds
    try:
        ct_ratio = ww / (cw + ww)
    except ZeroDivisionError:
        # Both white channels off: the temperature choice is irrelevant,
        # pick the midpoint.
        ct_ratio = 0.5
    color_temp_mired = min_mireds + ct_ratio * mired_range
    color_temp_kelvin = color_temperature_mired_to_kelvin(color_temp_mired)
    w_r, w_g, w_b = color_temperature_to_rgb(color_temp_kelvin)
    white_level = max(cw, ww) / 255

    # Add the white channels to the rgb channels.
    rgb = (r + w_r * white_level, g + w_g * white_level, b + w_b * white_level)

    # Match the output maximum value to the input. This ensures the
    # output doesn't overflow.
    return _match_max_scale((r, g, b, cw, ww), rgb)  # type: ignore
def color_rgb_to_hex(r: int, g: int, b: int) -> str:
    """Return a 6-digit hex color string (without '#') from RGB components."""
    # NOTE: previous docstring had the direction reversed; this converts
    # RGB values TO a hex string.
    return f"{round(r):02x}{round(g):02x}{round(b):02x}"
def rgb_hex_to_rgb_list(hex_string: str) -> list[int]:
    """Return an RGB color value list from a hex color string."""
    # Split the string into three equal-width channel fields.
    width = len(hex_string) // 3
    return [
        int(hex_string[start : start + width], 16)
        for start in range(0, len(hex_string), width)
    ]
def color_temperature_to_hs(color_temperature_kelvin: float) -> tuple[float, float]:
    """Return an hs color from a color temperature in Kelvin."""
    rgb = color_temperature_to_rgb(color_temperature_kelvin)
    return color_RGB_to_hs(*rgb)
def color_temperature_to_rgb(
    color_temperature_kelvin: float,
) -> tuple[float, float, float]:
    """
    Return an RGB color from a color temperature in Kelvin.

    This is a rough approximation based on the formula provided by T. Helland
    http://www.tannerhelland.com/4435/convert-temperature-rgb-algorithm-code/
    """
    # Clamp into the algorithm's supported range before converting.
    kelvin = min(max(color_temperature_kelvin, 1000), 40000)
    # The formula works on temperature divided by 100.
    tmp_internal = kelvin / 100.0
    return (
        _get_red(tmp_internal),
        _get_green(tmp_internal),
        _get_blue(tmp_internal),
    )
def _clamp(color_component: float, minimum: float = 0, maximum: float = 255) -> float:
"""
Clamp the given color component value between the given min and max values.
The range defined by the minimum and maximum values is inclusive, i.e. given a
color_component of 0 and a minimum of 10, the returned value is 10.
"""
color_component_out = max(color_component, minimum)
return min(color_component_out, maximum)
def _get_red(temperature: float) -> float:
    """Get the red component of the temperature in RGB space."""
    # Below ~6600K (66 in the scaled units) red is fully saturated.
    if temperature <= 66:
        return 255
    return _clamp(329.698727446 * math.pow(temperature - 60, -0.1332047592))
def _get_green(temperature: float) -> float:
    """Get the green component of the given color temp in RGB space."""
    # Two fitted curves meet at 66 (scaled-kelvin units).
    green = (
        99.4708025861 * math.log(temperature) - 161.1195681661
        if temperature <= 66
        else 288.1221695283 * math.pow(temperature - 60, -0.0755148492)
    )
    return _clamp(green)
def _get_blue(temperature: float) -> float:
    """Get the blue component of the given color temperature in RGB space."""
    # Above ~6600K blue is fully saturated; below ~1900K it vanishes.
    if temperature >= 66:
        return 255
    if temperature <= 19:
        return 0
    return _clamp(138.5177312231 * math.log(temperature - 10) - 305.0447927307)
def color_temperature_mired_to_kelvin(mired_temperature: float) -> int:
    """Convert absolute mired shift to degrees kelvin."""
    # Mireds are reciprocal megakelvins; floor keeps the int contract.
    return math.floor(1_000_000 / mired_temperature)
def color_temperature_kelvin_to_mired(kelvin_temperature: float) -> int:
    """Convert degrees kelvin to mired shift."""
    # Same reciprocal relation as mired->kelvin, floored to an int.
    return math.floor(1_000_000 / kelvin_temperature)
# The following 5 functions are adapted from rgbxy provided by Benjamin Knight
# License: The MIT License (MIT), 2014.
# https://github.com/benknight/hue-python-rgb-converter
def cross_product(p1: XYPoint, p2: XYPoint) -> float:
    """Calculate the (scalar) cross product of two XYPoints."""
    return float(p1.x * p2.y - p2.x * p1.y)
def get_distance_between_two_points(one: XYPoint, two: XYPoint) -> float:
    """Calculate the Euclidean distance between two XYPoints."""
    delta_x = one.x - two.x
    delta_y = one.y - two.y
    return math.sqrt(delta_x * delta_x + delta_y * delta_y)
def get_closest_point_to_line(A: XYPoint, B: XYPoint, P: XYPoint) -> XYPoint:
    """
    Find the closest point from P to the segment defined by A and B.

    This point will be reproducible by the lamp
    as it is on the edge of the gamut.
    """
    AP = XYPoint(P.x - A.x, P.y - A.y)
    AB = XYPoint(B.x - A.x, B.y - A.y)
    ab2 = AB.x * AB.x + AB.y * AB.y
    ap_ab = AP.x * AB.x + AP.y * AB.y
    # Parametric position of the projection along AB, clamped to [0, 1]
    # so the result stays on the segment.
    t = ap_ab / ab2
    t = max(0.0, min(t, 1.0))
    return XYPoint(A.x + AB.x * t, A.y + AB.y * t)
def get_closest_point_to_point(
    xy_tuple: tuple[float, float], Gamut: GamutType
) -> tuple[float, float]:
    """
    Get the closest matching color within the gamut of the light.

    Should only be used if the supplied color is outside of the color gamut.
    """
    xy_point = XYPoint(xy_tuple[0], xy_tuple[1])

    # Closest point on each edge of the CIE 1931 gamut 'triangle'.
    # Edge order matches the original so ties resolve identically.
    candidates = (
        get_closest_point_to_line(Gamut.red, Gamut.green, xy_point),
        get_closest_point_to_line(Gamut.blue, Gamut.red, xy_point),
        get_closest_point_to_line(Gamut.green, Gamut.blue, xy_point),
    )

    # Keep the candidate nearest to the requested point.
    closest_point = min(
        candidates,
        key=lambda candidate: get_distance_between_two_points(xy_point, candidate),
    )
    return (closest_point.x, closest_point.y)
def check_point_in_lamps_reach(p: tuple[float, float], Gamut: GamutType) -> bool:
    """Check if the provided XYPoint can be recreated by a Hue lamp."""
    # Express p relative to the red primary in the (green-red, blue-red)
    # basis; s and t are the resulting barycentric-style weights.
    v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
    v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
    q = XYPoint(p[0] - Gamut.red.x, p[1] - Gamut.red.y)

    denominator = cross_product(v1, v2)
    s = cross_product(q, v2) / denominator
    t = cross_product(v1, q) / denominator

    # Inside the triangle iff both weights are non-negative and their
    # sum does not exceed one.
    return s >= 0.0 and t >= 0.0 and s + t <= 1.0
def check_valid_gamut(Gamut: GamutType) -> bool:
    """Check if the supplied gamut is valid."""
    # The three corners must not be collinear: the cross product of two
    # triangle edges must be meaningfully greater than zero.
    v1 = XYPoint(Gamut.green.x - Gamut.red.x, Gamut.green.y - Gamut.red.y)
    v2 = XYPoint(Gamut.blue.x - Gamut.red.x, Gamut.blue.y - Gamut.red.y)
    not_on_line = cross_product(v1, v2) > 0.0001
    # Every corner coordinate must lie inside the CIE unit square.
    coords_valid = all(
        0 <= coord <= 1
        for corner in (Gamut.red, Gamut.green, Gamut.blue)
        for coord in (corner.x, corner.y)
    )
    return not_on_line and coords_valid
| |
from collections import namedtuple
import numpy
from numpy.linalg import norm
from sklearn import svm
import sklearn
from constants import CORE_COMBO, ARYL, XGROUPS, RGROUPS
from structure import from_data
from data.models import Predictor
# Narrow the imported vocabularies for this module: keep only the
# single-character aryl symbols and drop the trailing entry of the X/R
# group lists (presumably a placeholder — confirm against constants.py).
ARYL = [x for x in ARYL if len(x) == 1]
XGROUPS = XGROUPS[:-1]
RGROUPS = RGROUPS[:-1]
class MultiStageRegression(object):
    """Two-stage multi-output regressor.

    The first layer fits one clone of ``model`` per output column of y.
    The second layer refits each output with the other outputs'
    first-layer predictions appended to the feature matrix.
    """

    def __init__(self, model=svm.SVR()):
        # NOTE: the default model instance is created once at class
        # definition time and shared, but it is always passed through
        # sklearn.clone() before fitting, so it is never mutated.
        self.model = model
        self._first_layer = None
        self._second_layer = None

    def _fit_inner(self, X, y, predictions=None):
        """Fit one model per output column; return (models, predictions).

        When ``predictions`` (a list of (n, 1) arrays) is given, the
        predictions for every *other* column are stacked onto X.
        """
        models = []
        res = []
        # FIX: xrange is Python 2 only; range iterates identically here
        # and also works on Python 3.
        for i in range(y.shape[1]):
            if predictions is not None:
                added = predictions[:i] + predictions[i + 1:]
                X_new = numpy.hstack([X] + added)
            else:
                X_new = X
            m = sklearn.clone(self.model)
            m.fit(X_new, y[:, i])
            res.append(m.predict(X_new).reshape(-1, 1))
            models.append(m)
        return models, res

    def fit(self, X, y, sample_weight=None):
        """Fit both layers. ``y`` may be 1-D or (n_samples, n_outputs)."""
        if len(y.shape) == 1:
            y = y.reshape(y.shape[0], 1)
        self._first_layer, predictions = self._fit_inner(X, y)
        self._second_layer, _ = self._fit_inner(X, y, predictions)
        return self

    def _predict_inner(self, X, models, predictions=None):
        """Predict with each model; return a list of (n, 1) columns."""
        res = []
        for i, m in enumerate(models):
            if predictions is not None:
                added = predictions[:i] + predictions[i + 1:]
                X_new = numpy.hstack([X] + added)
            else:
                X_new = X
            res.append(m.predict(X_new).reshape(-1, 1))
        return res

    def predict(self, X):
        """Predict all outputs as an (n_samples, n_outputs) array.

        Raises ValueError if called before :meth:`fit`.
        """
        if self._first_layer is None or self._second_layer is None:
            raise ValueError("Model has not been fit")
        predictions = self._predict_inner(X, self._first_layer)
        res = self._predict_inner(X, self._second_layer, predictions)
        return numpy.hstack(res)
def get_core_features(core):
    """One-hot encode a core name into a flat feature list.

    The first element flags a 'T' core; each following character is
    one-hot encoded against the matching option list in CORE_COMBO.
    """
    features = [1 if core[0] == "T" else 0]
    for options, value in zip(CORE_COMBO, core[1:]):
        one_hot = [0] * len(options)
        one_hot[options.index(value)] = 1
        features.extend(one_hot)
    return features
def get_extra_features(n, m, x, y, z):
    """Parse the numeric suffix of each group label (e.g. 'n2' -> 2)."""
    values = []
    for group in (n, m, x, y, z):
        values.append(int(group[1:]))
    return values
def get_end_binary(left, center, right, limit=4):
    """Binary one-hot encoding of the three end chains.

    At most ``limit`` aryl/X symbols are encoded per chain; each chain is
    zero-padded so all chains occupy the same number of feature slots.
    """
    aryl_or_x = ARYL + XGROUPS
    rgroup = ['*'] + RGROUPS
    unit_len = len(aryl_or_x) + 2 * len(rgroup)
    features = []
    for chain in (left, center, right):
        chain = chain.replace('-', '')  # no support for flipping yet
        encoded = []
        aryl_count = 0
        for symbol in chain:
            if symbol in aryl_or_x:
                # Stop once the aryl budget for this chain is exhausted.
                if aryl_count == limit:
                    break
                aryl_count += 1
                vocab = aryl_or_x
            else:
                vocab = rgroup
            one_hot = [0] * len(vocab)
            one_hot[vocab.index(symbol)] = 1
            encoded.extend(one_hot)
        # Pad out the unused aryl slots with zeros.
        encoded += [0] * unit_len * (limit - aryl_count)
        features.extend(encoded)
    return features
def get_end_decay(left, center, right, power=1, H=1, lacunarity=1):
    """Decay-weighted encoding of the three end chains.

    Each symbol contributes decay_function(count + 1, ...) to its slot,
    where ``count`` is the index of its (aryl, R1, R2) triplet in the
    chain, so later groups contribute less.
    """
    first = ARYL + XGROUPS
    second = ['*'] + RGROUPS
    # `second` appears twice: a symbol may occupy either of two R slots.
    both = first + 2 * second
    length = len(both)
    endfeatures = []
    for end in [left, center, right]:
        end = end.replace('-', '')  # no support for flipping yet
        partfeatures = [0] * length
        for i, char in enumerate(end):
            # FIX: use floor division so `count` stays an int on
            # Python 3 (`/` returns a float there); identical on Py2.
            count = i // 3
            part = i % 3  # position within the (aryl, R1, R2) triplet
            idx = both.index(char)
            if char in second and part == 2:
                # Second R-group slot: use the second copy in `both`.
                idx = both.index(char, idx + 1)
            partfeatures[idx] += decay_function(count + 1, power=power,
                                                H=H, lacunarity=lacunarity)
        endfeatures.extend(partfeatures)
    return endfeatures
def get_end_decay_corrected(left, center, right, power=1, H=1, lacunarity=1):
    """Decay-weighted end features corrected by relative aryl lengths.

    Like get_end_decay, but the decay distance for each symbol is scaled
    by the relative physical lengths of the aryl units seen so far in the
    chain, derived from each aryl structure's open-end coordinates.
    """
    # Measure each aryl unit between the two open ends of its reference
    # structure.
    lengths = []
    for name in ARYL:
        struct = from_data(name)
        atoms = [x.atoms[1] for x in struct.open_ends("~")]
        lengths.append(norm(atoms[0].xyz - atoms[1].xyz))
    # Shape (1, n) so the division below broadcasts to an (n, n) matrix.
    lengths = numpy.array([lengths])
    minlen = lengths.argmin()
    # ratio_matrix[i, j] == length_i / length_j (pairwise length ratios).
    ratio_matrix = lengths / lengths.T
    first = ARYL + XGROUPS
    second = ['*'] + RGROUPS
    # `second` appears twice: a symbol may occupy either of two R slots.
    both = first + 2 * second
    length = len(both)
    endfeatures = []
    for end in [left, center, right]:
        end = end.replace('-', '')  # no support for flipping yet
        partfeatures = [0] * length
        arylparts = []  # indices (into ARYL) of aryl units seen so far
        for i, char in enumerate(end):
            if char in ARYL:
                arylparts.append(ARYL.index(char))
            part = i % 3  # position within the (aryl, R1, R2) triplet
            idx = both.index(char)
            if char in second and part == 2:
                idx = both.index(char, idx + 1)  # go to the second rgroup
            if char in ARYL:
                # Chain distance in units relative to this aryl's length.
                distance = ratio_matrix[arylparts[-1], arylparts].sum()
            elif char in XGROUPS + second:
                if arylparts:
                    # NOTE(review): substituents are measured relative to
                    # the shortest aryl's row — presumably a normalization
                    # choice; confirm intent.
                    distance = ratio_matrix[minlen, arylparts].sum()
                else:
                    distance = 1
            partfeatures[
                idx] += decay_function(distance, power=power, H=H, lacunarity=lacunarity)
        endfeatures.extend(partfeatures)
    return endfeatures
def get_binary_feature_vector(exactname, limit=4):
    """Build the binary feature vector for an exact structure name."""
    left, core, center, right, n, m, x, y, z = exactname.split('_')
    return (
        get_core_features(core)
        + get_end_binary(left, center, right, limit=limit)
        + get_extra_features(n, m, x, y, z)
        + [1]  # constant bias term
    )
def get_decay_feature_vector(exactname, power=1, H=1, lacunarity=1):
    """Build the decay-weighted feature vector for an exact name."""
    left, core, center, right, n, m, x, y, z = exactname.split('_')
    return (
        get_core_features(core)
        + get_end_decay(left, center, right, power=power, H=H,
                        lacunarity=lacunarity)
        + get_extra_features(n, m, x, y, z)
        + [1]  # constant bias term
    )
def get_decay_distance_correction_feature_vector(exactname, power=1, H=1, lacunarity=1):
    """Build the length-corrected decay feature vector for an exact name."""
    left, core, center, right, n, m, x, y, z = exactname.split('_')
    return (
        get_core_features(core)
        + get_end_decay_corrected(left, center, right, power=power, H=H,
                                  lacunarity=lacunarity)
        + get_extra_features(n, m, x, y, z)
        + [1]  # constant bias term
    )
def decay_function(distance, power=1, H=1, lacunarity=1):
    """Power-law decay weight: (lacunarity * distance**-H) ** power."""
    scaled = lacunarity * (distance ** -H)
    return scaled ** power
def get_properties_from_decay_with_predictions(feature):
    """Predict HOMO/LUMO/gap for one feature vector using the latest model."""
    predictor = Predictor.objects.latest()
    model = predictor.get_predictors()
    homo, lumo, gap = model.predict(feature)[0]
    Property = namedtuple("Property", ("title", "short", "units", "value",
                                       "error"))
    specs = (
        ("HOMO", "homo", homo, predictor.homo_error),
        ("LUMO", "lumo", lumo, predictor.lumo_error),
        ("Excitation Energy", "gap", gap, predictor.gap_error),
    )
    return tuple(Property(title, short, "eV", value, error)
                 for title, short, value, error in specs)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
import six
import mox
from oslo.config import cfg
from testtools.matchers import MatchesRegex
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import nova
from heat.engine import function
from heat.engine.notification import stack as notification
from heat.engine import parser
from heat.engine.resources import instance
from heat.engine.resources import loadbalancer as lb
from heat.engine.resources import wait_condition as wc
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.tests.v1_1 import fakes as fakes11
asg_tmpl_without_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
"Parameters" : {},
"Resources" : {
"WebServerGroup" : {
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "F20-x86_64-cfntools",
"InstanceType" : "m1.medium",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
asg_tmpl_with_bad_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
"Parameters" : {},
"Resources" : {
"WebServerGroup" : {
"UpdatePolicy": {
"foo": {
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "10",
"MaxSize" : "20"
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "F20-x86_64-cfntools",
"InstanceType" : "m1.medium",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
asg_tmpl_with_default_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
"Parameters" : {},
"Resources" : {
"WebServerGroup" : {
"UpdatePolicy" : {
"AutoScalingRollingUpdate" : {
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "F20-x86_64-cfntools",
"InstanceType" : "m1.medium",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
asg_tmpl_with_updt_policy = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Template to create autoscaling group.",
"Parameters" : {},
"Resources" : {
"WebServerGroup" : {
"UpdatePolicy" : {
"AutoScalingRollingUpdate" : {
"MinInstancesInService" : "1",
"MaxBatchSize" : "2",
"PauseTime" : "PT1S"
}
},
"Type" : "AWS::AutoScaling::AutoScalingGroup",
"Properties" : {
"AvailabilityZones" : ["nova"],
"LaunchConfigurationName" : { "Ref" : "LaunchConfig" },
"MinSize" : "10",
"MaxSize" : "20",
"LoadBalancerNames" : [ { "Ref" : "ElasticLoadBalancer" } ]
}
},
"ElasticLoadBalancer" : {
"Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
"Properties" : {
"AvailabilityZones" : ["nova"],
"Listeners" : [ {
"LoadBalancerPort" : "80",
"InstancePort" : "80",
"Protocol" : "HTTP"
}]
}
},
"LaunchConfig" : {
"Type" : "AWS::AutoScaling::LaunchConfiguration",
"Properties": {
"ImageId" : "F20-x86_64-cfntools",
"InstanceType" : "m1.medium",
"KeyName" : "test",
"SecurityGroups" : [ "sg-1" ],
"UserData" : "jsconfig data"
}
}
}
}
'''
class AutoScalingGroupTest(HeatTestCase):
def setUp(self):
    """Set up a fake nova client and a local wait-condition endpoint."""
    super(AutoScalingGroupTest, self).setUp()
    self.fc = fakes11.FakeClient()
    self.stub_keystoneclient(username='test_stack.CfnLBUser')
    cfg.CONF.set_default('heat_waitcondition_server_url',
                         'http://127.0.0.1:8000/v1/waitcondition')
def _stub_validate(self):
    """Expect any number of stack validation calls."""
    self.m.StubOutWithMock(parser.Stack, 'validate')
    parser.Stack.validate().MultipleTimes()
def _stub_lb_create(self):
    """Expect the load balancer's wait condition to report SUCCESS."""
    self.m.StubOutWithMock(wc.WaitConditionHandle, 'get_status')
    wc.WaitConditionHandle.get_status().AndReturn(['SUCCESS'])
def _stub_lb_reload(self, num=1, setup=True):
    """Expect ``num`` load-balancer reconfigurations.

    Pass setup=False when handle_update is already stubbed out.
    """
    if setup:
        self.m.StubOutWithMock(lb.LoadBalancer, 'handle_update')
    for i in range(num):
        lb.LoadBalancer.handle_update(
            mox.IgnoreArg(), mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(None)
def _stub_grp_create(self, capacity=0, setup_lb=True):
    """
    Expect creation of instances to capacity. By default, expect creation
    of load balancer unless specified.
    """
    self._stub_validate()
    self.m.StubOutWithMock(instance.Instance, 'handle_create')
    self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
    self.m.StubOutWithMock(notification, 'send')
    notification.send(mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    # Opaque token threaded from handle_create to check_create_complete.
    cookie = object()
    self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
    nova.NovaClientPlugin._create().AndReturn(self.fc)
    # for load balancer setup
    if setup_lb:
        self._stub_lb_create()
        self._stub_lb_reload()
        # The LB's own nested instance is created first.
        instance.Instance.handle_create().AndReturn(cookie)
        instance.Instance.check_create_complete(cookie).AndReturn(True)
    # for each instance in group
    for i in range(capacity):
        instance.Instance.handle_create().AndReturn(cookie)
        instance.Instance.check_create_complete(cookie).AndReturn(True)
def _stub_grp_replace(self,
                      num_creates_expected_on_updt=0,
                      num_deletes_expected_on_updt=0,
                      num_reloads_expected_on_updt=0):
    """
    Expect replacement of the capacity by batch size: the given numbers
    of instance creates, destroys, and load-balancer reloads.
    """
    # for load balancer setup
    self._stub_lb_reload(num_reloads_expected_on_updt)
    self.m.StubOutWithMock(notification, 'send')
    notification.send(mox.IgnoreArg()).MultipleTimes().AndReturn(None)
    # for instances in the group
    self.m.StubOutWithMock(instance.Instance, 'handle_create')
    self.m.StubOutWithMock(instance.Instance, 'check_create_complete')
    self.m.StubOutWithMock(instance.Instance, 'destroy')
    # Opaque token threaded from handle_create to check_create_complete.
    cookie = object()
    for i in range(num_creates_expected_on_updt):
        instance.Instance.handle_create().AndReturn(cookie)
        instance.Instance.check_create_complete(cookie).AndReturn(True)
    for i in range(num_deletes_expected_on_updt):
        instance.Instance.destroy().AndReturn(None)
def _stub_grp_update(self,
                     num_creates_expected_on_updt=0,
                     num_deletes_expected_on_updt=0,
                     num_reloads_expected_on_updt=0):
    """
    Expect update of the instances: an in-place nova resize/confirm on
    each, plus the given create/delete/reload expectations.
    """
    def activate_status(server):
        # Make server.get() report a resize awaiting confirmation.
        server.status = 'VERIFY_RESIZE'
    return_server = self.fc.servers.list()[1]
    return_server.id = '1234'
    # Bind activate_status as the instance's get() method.
    return_server.get = activate_status.__get__(return_server)
    self.m.StubOutWithMock(self.fc.servers, 'get')
    self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
    self.fc.servers.get(mox.IgnoreArg()).\
        MultipleTimes().AndReturn(return_server)
    self.fc.client.post_servers_1234_action(
        body={'resize': {'flavorRef': 3}}).\
        MultipleTimes().AndReturn((202, None))
    self.fc.client.post_servers_1234_action(
        body={'confirmResize': None}).\
        MultipleTimes().AndReturn((202, None))
    self._stub_grp_replace(num_creates_expected_on_updt,
                           num_deletes_expected_on_updt,
                           num_reloads_expected_on_updt)
def get_launch_conf_name(self, stack, ig_name):
    """Return the LaunchConfigurationName property of a group resource."""
    group = stack[ig_name]
    return group.properties['LaunchConfigurationName']
def test_parse_without_update_policy(self):
    """A group with no UpdatePolicy parses with an empty rolling update."""
    tmpl = template_format.parse(asg_tmpl_without_updt_policy)
    stack = utils.parse_stack(tmpl)
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    grp = stack['WebServerGroup']
    self.assertFalse(grp.update_policy['AutoScalingRollingUpdate'])
    self.m.VerifyAll()
def test_parse_with_update_policy(self):
    """An explicit rolling-update policy is parsed with its values intact."""
    tmpl = template_format.parse(asg_tmpl_with_updt_policy)
    stack = utils.parse_stack(tmpl)
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    tmpl_grp = tmpl['Resources']['WebServerGroup']
    tmpl_policy = tmpl_grp['UpdatePolicy']['AutoScalingRollingUpdate']
    tmpl_batch_sz = int(tmpl_policy['MaxBatchSize'])
    grp = stack['WebServerGroup']
    self.assertTrue(grp.update_policy)
    self.assertEqual(1, len(grp.update_policy))
    self.assertIn('AutoScalingRollingUpdate', grp.update_policy)
    policy = grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(policy and len(policy) > 0)
    self.assertEqual(1, int(policy['MinInstancesInService']))
    self.assertEqual(tmpl_batch_sz, int(policy['MaxBatchSize']))
    self.assertEqual('PT1S', policy['PauseTime'])
    self.m.VerifyAll()
def test_parse_with_default_update_policy(self):
    """An empty rolling-update section is filled with default values."""
    tmpl = template_format.parse(asg_tmpl_with_default_updt_policy)
    stack = utils.parse_stack(tmpl)
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    grp = stack['WebServerGroup']
    self.assertTrue(grp.update_policy)
    self.assertEqual(1, len(grp.update_policy))
    self.assertIn('AutoScalingRollingUpdate', grp.update_policy)
    policy = grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(policy and len(policy) > 0)
    # Defaults: no minimum in service, batch of one, no pause.
    self.assertEqual(0, int(policy['MinInstancesInService']))
    self.assertEqual(1, int(policy['MaxBatchSize']))
    self.assertEqual('PT0S', policy['PauseTime'])
    self.m.VerifyAll()
def test_parse_with_bad_update_policy(self):
    """An unknown UpdatePolicy key fails stack validation."""
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    tmpl = template_format.parse(asg_tmpl_with_bad_updt_policy)
    stack = utils.parse_stack(tmpl)
    error = self.assertRaises(
        exception.StackValidationFailed, stack.validate)
    # The offending key is named in the error message.
    self.assertIn("foo", six.text_type(error))
def test_parse_with_bad_pausetime_in_update_policy(self):
    """A PauseTime that is not an ISO 8601 duration fails validation."""
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    tmpl = template_format.parse(asg_tmpl_with_default_updt_policy)
    group = tmpl['Resources']['WebServerGroup']
    policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
    # Year/hour durations are not accepted for PauseTime.
    policy['PauseTime'] = 'P1YT1H'
    stack = utils.parse_stack(tmpl)
    error = self.assertRaises(
        exception.StackValidationFailed, stack.validate)
    self.assertIn("Only ISO 8601 duration format", six.text_type(error))
def validate_update_policy_diff(self, current, updated):
    """Assert the template diff between two stacks is only UpdatePolicy."""
    # Parse the current stack and grab the group's JSON snippet.
    current_stack = utils.parse_stack(template_format.parse(current))
    current_grp = current_stack['WebServerGroup']
    snippets = {name: rsrc.parsed_template()
                for name, rsrc in current_stack.items()}
    current_grp_json = snippets[current_grp.name]
    # Resolve the updated group's snippet in the context of the
    # current stack.
    updated_stack = utils.parse_stack(template_format.parse(updated))
    updated_grp = updated_stack['WebServerGroup']
    updated_grp_json = function.resolve(updated_grp.t)
    # The only difference between the two should be the UpdatePolicy.
    tmpl_diff = updated_grp.update_template_diff(
        updated_grp_json, current_grp_json)
    if 'UpdatePolicy' in updated_grp.t:
        updated_policy = updated_grp.t['UpdatePolicy']
    else:
        updated_policy = None
    self.assertEqual({u'UpdatePolicy': updated_policy}, tmpl_diff)
def test_update_policy_added(self):
    """Adding an UpdatePolicy shows up in the template diff."""
    self.validate_update_policy_diff(asg_tmpl_without_updt_policy,
                                     asg_tmpl_with_updt_policy)
def test_update_policy_updated(self):
    """Changing the rolling-update values shows up in the template diff."""
    tmpl = json.loads(asg_tmpl_with_updt_policy)
    rolling = (tmpl['Resources']['WebServerGroup']
               ['UpdatePolicy']['AutoScalingRollingUpdate'])
    rolling['MinInstancesInService'] = '2'
    rolling['MaxBatchSize'] = '4'
    rolling['PauseTime'] = 'PT1M30S'
    self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
                                     json.dumps(tmpl))
def test_update_policy_removed(self):
    """Removing the UpdatePolicy shows up in the template diff."""
    self.validate_update_policy_diff(asg_tmpl_with_updt_policy,
                                     asg_tmpl_without_updt_policy)
def update_autoscaling_group(self, init_template, updt_template,
                             num_updates_expected_on_updt,
                             num_creates_expected_on_updt,
                             num_deletes_expected_on_updt,
                             num_reloads_expected_on_updt,
                             update_replace,
                             update_image_id=None):
    """Drive a full create-then-update cycle of an autoscaling group.

    Creates a stack from init_template, updates it to updt_template, and
    verifies the counts of instances updated in place, newly created and
    deleted, plus load-balancer reloads, against the rolling-update
    policy. update_replace selects replacement (new image) vs in-place
    update (new flavor) expectations.
    """
    # setup stack from the initial template
    tmpl = template_format.parse(init_template)
    stack = utils.parse_stack(tmpl)
    self.stub_KeypairConstraint_validate()
    self.stub_ImageConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()
    # test stack create
    size = int(stack['WebServerGroup'].properties['MinSize'])
    self._stub_grp_create(size)
    self.stub_ImageConstraint_validate()
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
    # test that update policy is loaded
    current_grp = stack['WebServerGroup']
    self.assertTrue('AutoScalingRollingUpdate'
                    in current_grp.update_policy)
    current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
    init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
    # test that physical resource name of launch configuration is used
    conf = stack['LaunchConfig']
    conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
    # get launch conf name here to compare result after update
    conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
    # test the number of instances created
    nested = stack['WebServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))
    # clean up for next test
    self.m.UnsetStubs()
    # saves info from initial list of instances for comparison later
    init_instances = current_grp.get_instances()
    init_names = current_grp.get_instance_names()
    init_images = [(i.name, i.t['Properties']['ImageId'])
                   for i in init_instances]
    init_flavors = [(i.name, i.t['Properties']['InstanceType'])
                    for i in init_instances]
    # test stack update
    updated_tmpl = template_format.parse(updt_template)
    updated_stack = utils.parse_stack(updated_tmpl)
    new_grp_tmpl = updated_tmpl['Resources']['WebServerGroup']
    new_updt_pol = new_grp_tmpl['UpdatePolicy']['AutoScalingRollingUpdate']
    new_batch_sz = int(new_updt_pol['MaxBatchSize'])
    # Callers must change the batch size so the policy diff is visible.
    self.assertNotEqual(new_batch_sz, init_batch_sz)
    if update_replace:
        self._stub_grp_replace(size, size, num_reloads_expected_on_updt)
    else:
        self._stub_grp_update(num_creates_expected_on_updt,
                              num_deletes_expected_on_updt,
                              num_reloads_expected_on_updt)
    self.stub_wallclock()
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)
    # test that the update policy is updated
    updated_grp = stack['WebServerGroup']
    updt_instances = updated_grp.get_instances()
    self.assertTrue('AutoScalingRollingUpdate'
                    in updated_grp.update_policy)
    updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(updated_policy)
    self.assertTrue(len(updated_policy) > 0)
    self.assertEqual(new_batch_sz, int(updated_policy['MaxBatchSize']))
    # test that the launch configuration is replaced
    updated_conf_name = self.get_launch_conf_name(stack, 'WebServerGroup')
    self.assertNotEqual(conf_name, updated_conf_name)
    # test that the group size are the same
    updt_instances = updated_grp.get_instances()
    updt_names = updated_grp.get_instance_names()
    self.assertEqual(len(init_names), len(updt_names))
    # test that appropriate number of instance names are the same
    matched_names = set(updt_names) & set(init_names)
    self.assertEqual(num_updates_expected_on_updt, len(matched_names))
    # test that the appropriate number of new instances are created
    self.assertEqual(num_creates_expected_on_updt,
                     len(set(updt_names) - set(init_names)))
    # test that the appropriate number of instances are deleted
    self.assertEqual(num_deletes_expected_on_updt,
                     len(set(init_names) - set(updt_names)))
    # test that the older instances are the ones being deleted
    if num_deletes_expected_on_updt > 0:
        deletes_expected = init_names[:num_deletes_expected_on_updt]
        self.assertNotIn(deletes_expected, updt_names)
    # test if instances are updated
    if update_replace:
        # test that the image id is changed for all instances
        updt_images = [(i.name, i.t['Properties']['ImageId'])
                       for i in updt_instances]
        self.assertEqual(0, len(set(updt_images) & set(init_images)))
    else:
        # test that instance type is changed for all instances
        updt_flavors = [(i.name, i.t['Properties']['InstanceType'])
                        for i in updt_instances]
        self.assertEqual(0, len(set(updt_flavors) & set(init_flavors)))
def test_autoscaling_group_update_replace(self):
    """
    Test simple update replace with no conflict in batch size and
    minimum instances in service.
    """
    tmpl = json.loads(asg_tmpl_with_updt_policy)
    rolling = (tmpl['Resources']['WebServerGroup']
               ['UpdatePolicy']['AutoScalingRollingUpdate'])
    rolling['MinInstancesInService'] = '1'
    rolling['MaxBatchSize'] = '3'
    # A new image forces replacement of every instance.
    update_image = 'F17-x86_64-cfntools'
    launch_conf = tmpl['Resources']['LaunchConfig']
    launch_conf['Properties']['ImageId'] = update_image
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(tmpl),
                                  num_updates_expected_on_updt=10,
                                  num_creates_expected_on_updt=0,
                                  num_deletes_expected_on_updt=0,
                                  num_reloads_expected_on_updt=9,
                                  update_replace=True,
                                  update_image_id=update_image)
def test_autoscaling_group_update_replace_with_adjusted_capacity(self):
    """
    Test update replace with capacity adjustment due to conflict in
    batch size and minimum instances in service.
    """
    tmpl = json.loads(asg_tmpl_with_updt_policy)
    rolling = (tmpl['Resources']['WebServerGroup']
               ['UpdatePolicy']['AutoScalingRollingUpdate'])
    # 8 in service + batches of 4 exceed the group size of 10, so the
    # engine must temporarily grow the group.
    rolling['MinInstancesInService'] = '8'
    rolling['MaxBatchSize'] = '4'
    update_image = 'F17-x86_64-cfntools'
    launch_conf = tmpl['Resources']['LaunchConfig']
    launch_conf['Properties']['ImageId'] = update_image
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(tmpl),
                                  num_updates_expected_on_updt=8,
                                  num_creates_expected_on_updt=2,
                                  num_deletes_expected_on_updt=2,
                                  num_reloads_expected_on_updt=7,
                                  update_replace=True,
                                  update_image_id=update_image)
def test_autoscaling_group_update_replace_huge_batch_size(self):
    """
    Test update replace with a huge batch size.
    """
    tmpl = json.loads(asg_tmpl_with_updt_policy)
    rolling = (tmpl['Resources']['WebServerGroup']
               ['UpdatePolicy']['AutoScalingRollingUpdate'])
    # A batch size larger than the group should be clamped to it.
    rolling['MinInstancesInService'] = '0'
    rolling['MaxBatchSize'] = '20'
    update_image = 'F17-x86_64-cfntools'
    launch_conf = tmpl['Resources']['LaunchConfig']
    launch_conf['Properties']['ImageId'] = update_image
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(tmpl),
                                  num_updates_expected_on_updt=10,
                                  num_creates_expected_on_updt=0,
                                  num_deletes_expected_on_updt=0,
                                  num_reloads_expected_on_updt=3,
                                  update_replace=True,
                                  update_image_id=update_image)
def test_autoscaling_group_update_replace_huge_min_in_service(self):
    """
    Test update replace with a huge number of minimum instances in service.
    """
    updt_template = json.loads(asg_tmpl_with_updt_policy)
    group = updt_template['Resources']['WebServerGroup']
    policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
    # More instances in service than the group holds: the engine must
    # clamp the requirement while still rolling one at a time.
    policy['MinInstancesInService'] = '20'
    policy['MaxBatchSize'] = '1'
    policy['PauseTime'] = 'PT0S'
    config = updt_template['Resources']['LaunchConfig']
    update_image = 'F17-x86_64-cfntools'
    # FIX: use the update_image variable instead of duplicating the
    # literal, so the ImageId always matches update_image_id below.
    config['Properties']['ImageId'] = update_image
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(updt_template),
                                  num_updates_expected_on_updt=9,
                                  num_creates_expected_on_updt=1,
                                  num_deletes_expected_on_updt=1,
                                  num_reloads_expected_on_updt=13,
                                  update_replace=True,
                                  update_image_id=update_image)
def test_autoscaling_group_update_no_replace(self):
    """
    Test simple update only and no replace (i.e. updated instance flavor
    in Launch Configuration) with no conflict in batch size and
    minimum instances in service.
    """
    # FIX: dropped copy.deepcopy() around the template string — deepcopy
    # of an immutable str is a no-op, and json.loads already returns a
    # fresh structure (matches the sibling tests above).
    updt_template = json.loads(asg_tmpl_with_updt_policy)
    group = updt_template['Resources']['WebServerGroup']
    policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
    policy['MinInstancesInService'] = '1'
    policy['MaxBatchSize'] = '3'
    policy['PauseTime'] = 'PT0S'
    config = updt_template['Resources']['LaunchConfig']
    # A new flavor triggers an in-place resize, not replacement.
    config['Properties']['InstanceType'] = 'm1.large'
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(updt_template),
                                  num_updates_expected_on_updt=10,
                                  num_creates_expected_on_updt=0,
                                  num_deletes_expected_on_updt=0,
                                  num_reloads_expected_on_updt=6,
                                  update_replace=False)
def test_instance_group_update_no_replace_with_adjusted_capacity(self):
    """
    Test update only and no replace (i.e. updated instance flavor in
    Launch Configuration) with capacity adjustment due to conflict in
    batch size and minimum instances in service.
    """
    # FIX: dropped copy.deepcopy() around the template string — deepcopy
    # of an immutable str is a no-op, and json.loads already returns a
    # fresh structure (matches the sibling tests above).
    updt_template = json.loads(asg_tmpl_with_updt_policy)
    group = updt_template['Resources']['WebServerGroup']
    policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
    policy['MinInstancesInService'] = '8'
    policy['MaxBatchSize'] = '4'
    policy['PauseTime'] = 'PT0S'
    config = updt_template['Resources']['LaunchConfig']
    # A new flavor triggers an in-place resize, not replacement.
    config['Properties']['InstanceType'] = 'm1.large'
    self.update_autoscaling_group(asg_tmpl_with_updt_policy,
                                  json.dumps(updt_template),
                                  num_updates_expected_on_updt=8,
                                  num_creates_expected_on_updt=2,
                                  num_deletes_expected_on_updt=2,
                                  num_reloads_expected_on_updt=5,
                                  update_replace=False)
def test_autoscaling_group_update_policy_removed(self):
    """Updating to a template without UpdatePolicy clears the policy."""
    # setup stack from the initial template
    tmpl = template_format.parse(asg_tmpl_with_updt_policy)
    stack = utils.parse_stack(tmpl)
    self.stub_ImageConstraint_validate()
    self.stub_KeypairConstraint_validate()
    self.m.ReplayAll()
    stack.validate()
    self.m.VerifyAll()
    self.m.UnsetStubs()
    # test stack create
    size = int(stack['WebServerGroup'].properties['MinSize'])
    self._stub_grp_create(size)
    self.stub_ImageConstraint_validate()
    self.m.ReplayAll()
    stack.create()
    self.m.VerifyAll()
    self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
    # test that update policy is loaded
    current_grp = stack['WebServerGroup']
    self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
    current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
    self.assertTrue(current_policy)
    self.assertTrue(len(current_policy) > 0)
    init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
    init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
    init_batch_sz = int(init_roll_updt['MaxBatchSize'])
    self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
    # test that physical resource name of launch configuration is used
    conf = stack['LaunchConfig']
    conf_name_pattern = '%s-LaunchConfig-[a-zA-Z0-9]+$' % stack.name
    self.assertThat(conf.FnGetRefId(), MatchesRegex(conf_name_pattern))
    # test the number of instances created
    nested = stack['WebServerGroup'].nested()
    self.assertEqual(size, len(nested.resources))
    # clean up for next test
    self.m.UnsetStubs()
    # test stack update
    updated_tmpl = template_format.parse(asg_tmpl_without_updt_policy)
    updated_stack = utils.parse_stack(updated_tmpl)
    # No instances change; only the LB is reloaded once.
    self._stub_grp_replace(num_creates_expected_on_updt=0,
                           num_deletes_expected_on_updt=0,
                           num_reloads_expected_on_updt=1)
    self.m.ReplayAll()
    stack.update(updated_stack)
    self.m.VerifyAll()
    self.assertEqual(('UPDATE', 'COMPLETE'), stack.state)
    # test that update policy is removed
    updated_grp = stack['WebServerGroup']
    self.assertFalse(updated_grp.update_policy['AutoScalingRollingUpdate'])
    def test_autoscaling_group_update_policy_check_timeout(self):
        """A PauseTime exceeding the stack timeout must fail the update.

        Raises the rolling-update PauseTime to 30 minutes, triggers an
        update, and verifies the stack goes to UPDATE/FAILED with the
        expected timeout error message while the new policy is retained.
        """
        # setup stack from the initial template
        tmpl = template_format.parse(asg_tmpl_with_updt_policy)
        stack = utils.parse_stack(tmpl)
        # test stack create
        size = int(stack['WebServerGroup'].properties['MinSize'])
        self._stub_grp_create(size)
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        stack.create()
        self.m.VerifyAll()
        self.assertEqual(('CREATE', 'COMPLETE'), stack.state)
        # test that update policy is loaded
        current_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', current_grp.update_policy)
        current_policy = current_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(current_policy)
        self.assertTrue(len(current_policy) > 0)
        init_updt_policy = tmpl['Resources']['WebServerGroup']['UpdatePolicy']
        init_roll_updt = init_updt_policy['AutoScalingRollingUpdate']
        init_batch_sz = int(init_roll_updt['MaxBatchSize'])
        self.assertEqual(init_batch_sz, int(current_policy['MaxBatchSize']))
        # test the number of instances created
        nested = stack['WebServerGroup'].nested()
        self.assertEqual(size, len(nested.resources))
        # clean up for next test
        self.m.UnsetStubs()
        # modify the pause time and test for error; the ImageId change forces
        # a rolling replacement so the PauseTime check is actually exercised.
        new_pause_time = 'PT30M'
        updt_template = json.loads(copy.deepcopy(asg_tmpl_with_updt_policy))
        group = updt_template['Resources']['WebServerGroup']
        policy = group['UpdatePolicy']['AutoScalingRollingUpdate']
        policy['PauseTime'] = new_pause_time
        config = updt_template['Resources']['LaunchConfig']
        config['Properties']['ImageId'] = 'F17-x86_64-cfntools'
        updated_tmpl = template_format.parse(json.dumps(updt_template))
        updated_stack = utils.parse_stack(updated_tmpl)
        self._stub_grp_replace(num_creates_expected_on_updt=0,
                               num_deletes_expected_on_updt=0,
                               num_reloads_expected_on_updt=1)
        self.stub_KeypairConstraint_validate()
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        stack.update(updated_stack)
        self.m.VerifyAll()
        self.assertEqual(('UPDATE', 'FAILED'), stack.state)
        # test that the update policy is updated even though the stack failed
        updated_grp = stack['WebServerGroup']
        self.assertIn('AutoScalingRollingUpdate', updated_grp.update_policy)
        updated_policy = updated_grp.update_policy['AutoScalingRollingUpdate']
        self.assertTrue(updated_policy)
        self.assertTrue(len(updated_policy) > 0)
        self.assertEqual(new_pause_time, updated_policy['PauseTime'])
        # test that error message match
        expected_error_message = ('The current UpdatePolicy will result '
                                  'in stack update timeout.')
        self.assertIn(expected_error_message, stack.status_reason)
| |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common import ring
from swift.common.ring.utils import (tiers_for_dev, build_tier_tree,
validate_and_normalize_ip,
validate_and_normalize_address,
is_valid_ip, is_valid_ipv4,
is_valid_ipv6, is_valid_hostname,
is_local_device, parse_search_value,
parse_search_values_from_opts,
parse_change_values_from_opts,
validate_args, parse_args,
parse_builder_ring_filename_args,
build_dev_from_opts, dispersion_report,
parse_address)
class TestUtils(unittest.TestCase):
    """Unit tests for swift.common.ring.utils helper functions.

    Fix applied: all uses of the deprecated ``assertEquals`` alias are
    replaced with ``assertEqual`` (the alias was removed in Python 3.12).
    """

    def setUp(self):
        """Build a single sample device and a 12-device fixture list."""
        self.test_dev = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
                         'port': '6000', 'id': 0}

        def get_test_devs():
            # Three devices per IP, two IPs per zone, two zones in region 1.
            dev0 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
                    'port': '6000', 'id': 0}
            dev1 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
                    'port': '6000', 'id': 1}
            dev2 = {'region': 1, 'zone': 1, 'ip': '192.168.1.1',
                    'port': '6000', 'id': 2}
            dev3 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
                    'port': '6000', 'id': 3}
            dev4 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
                    'port': '6000', 'id': 4}
            dev5 = {'region': 1, 'zone': 1, 'ip': '192.168.1.2',
                    'port': '6000', 'id': 5}
            dev6 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
                    'port': '6000', 'id': 6}
            dev7 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
                    'port': '6000', 'id': 7}
            dev8 = {'region': 1, 'zone': 2, 'ip': '192.168.2.1',
                    'port': '6000', 'id': 8}
            dev9 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
                    'port': '6000', 'id': 9}
            dev10 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
                     'port': '6000', 'id': 10}
            dev11 = {'region': 1, 'zone': 2, 'ip': '192.168.2.2',
                     'port': '6000', 'id': 11}
            return [dev0, dev1, dev2, dev3, dev4, dev5,
                    dev6, dev7, dev8, dev9, dev10, dev11]

        self.test_devs = get_test_devs()

    def test_tiers_for_dev(self):
        """A device expands to its (region, zone, ip, id) tier tuples."""
        self.assertEqual(
            tiers_for_dev(self.test_dev),
            ((1,),
             (1, 1),
             (1, 1, '192.168.1.1'),
             (1, 1, '192.168.1.1', 0)))

    def test_build_tier_tree(self):
        """The tier tree maps each tier to the set of its child tiers."""
        ret = build_tier_tree(self.test_devs)
        self.assertEqual(len(ret), 8)
        self.assertEqual(ret[()], set([(1,)]))
        self.assertEqual(ret[(1,)], set([(1, 1), (1, 2)]))
        self.assertEqual(ret[(1, 1)],
                         set([(1, 1, '192.168.1.2'),
                              (1, 1, '192.168.1.1')]))
        self.assertEqual(ret[(1, 2)],
                         set([(1, 2, '192.168.2.2'),
                              (1, 2, '192.168.2.1')]))
        self.assertEqual(ret[(1, 1, '192.168.1.1')],
                         set([(1, 1, '192.168.1.1', 0),
                              (1, 1, '192.168.1.1', 1),
                              (1, 1, '192.168.1.1', 2)]))
        self.assertEqual(ret[(1, 1, '192.168.1.2')],
                         set([(1, 1, '192.168.1.2', 3),
                              (1, 1, '192.168.1.2', 4),
                              (1, 1, '192.168.1.2', 5)]))
        self.assertEqual(ret[(1, 2, '192.168.2.1')],
                         set([(1, 2, '192.168.2.1', 6),
                              (1, 2, '192.168.2.1', 7),
                              (1, 2, '192.168.2.1', 8)]))
        self.assertEqual(ret[(1, 2, '192.168.2.2')],
                         set([(1, 2, '192.168.2.2', 9),
                              (1, 2, '192.168.2.2', 10),
                              (1, 2, '192.168.2.2', 11)]))

    def test_is_valid_ip(self):
        """Accepts IPv4 and all common IPv6 forms; rejects malformed IPv6."""
        self.assertTrue(is_valid_ip("127.0.0.1"))
        self.assertTrue(is_valid_ip("10.0.0.1"))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80::204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80::204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "fe80::"
        self.assertTrue(is_valid_ip(ipv6))
        ipv6 = "::1"
        self.assertTrue(is_valid_ip(ipv6))
        not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
        self.assertFalse(is_valid_ip(not_ipv6))
        not_ipv6 = "1:2:3:4:5:6::7:8"
        self.assertFalse(is_valid_ip(not_ipv6))

    def test_is_valid_ipv4(self):
        """Accepts dotted-quad addresses only; every IPv6 form is rejected."""
        self.assertTrue(is_valid_ipv4("127.0.0.1"))
        self.assertTrue(is_valid_ipv4("10.0.0.1"))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80::204:61ff:fe9d:f156"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80::204:61ff:254.157.241.86"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "fe80::"
        self.assertFalse(is_valid_ipv4(ipv6))
        ipv6 = "::1"
        self.assertFalse(is_valid_ipv4(ipv6))
        not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
        self.assertFalse(is_valid_ipv4(not_ipv6))
        not_ipv6 = "1:2:3:4:5:6::7:8"
        self.assertFalse(is_valid_ipv4(not_ipv6))

    def test_is_valid_ipv6(self):
        """Accepts all common IPv6 forms; IPv4 and malformed input rejected."""
        self.assertFalse(is_valid_ipv6("127.0.0.1"))
        self.assertFalse(is_valid_ipv6("10.0.0.1"))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80:0:0:0:204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80::204:61ff:fe9d:f156"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80:0000:0000:0000:0204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80:0:0:0:0204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80::204:61ff:254.157.241.86"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "fe80::"
        self.assertTrue(is_valid_ipv6(ipv6))
        ipv6 = "::1"
        self.assertTrue(is_valid_ipv6(ipv6))
        not_ipv6 = "3ffe:0b00:0000:0001:0000:0000:000a"
        self.assertFalse(is_valid_ipv6(not_ipv6))
        not_ipv6 = "1:2:3:4:5:6::7:8"
        self.assertFalse(is_valid_ipv6(not_ipv6))

    def test_is_valid_hostname(self):
        """Hostnames up to 255 chars are valid; longer or symbol-laden not."""
        self.assertTrue(is_valid_hostname("local"))
        self.assertTrue(is_valid_hostname("test.test.com"))
        hostname = "test." * 51
        self.assertTrue(is_valid_hostname(hostname))
        hostname = hostname.rstrip('.')
        self.assertTrue(is_valid_hostname(hostname))
        hostname = hostname + "00"
        self.assertFalse(is_valid_hostname(hostname))
        self.assertFalse(is_valid_hostname("$blah#"))

    def test_is_local_device(self):
        """Locality requires an IP match and, when given, a port match."""
        # localhost shows up in whataremyips() output as "::1" for IPv6
        my_ips = ["127.0.0.1", "::1"]
        my_port = 6000
        self.assertTrue(is_local_device(my_ips, my_port,
                                        "127.0.0.1", my_port))
        self.assertTrue(is_local_device(my_ips, my_port,
                                        "::1", my_port))
        self.assertTrue(is_local_device(
            my_ips, my_port,
            "0000:0000:0000:0000:0000:0000:0000:0001", my_port))
        self.assertTrue(is_local_device(my_ips, my_port,
                                        "localhost", my_port))
        self.assertFalse(is_local_device(my_ips, my_port,
                                         "localhost", my_port + 1))
        self.assertFalse(is_local_device(my_ips, my_port,
                                         "127.0.0.2", my_port))
        # for those that don't have a local port
        self.assertTrue(is_local_device(my_ips, None,
                                        my_ips[0], None))
        # When servers_per_port is active, the "my_port" passed in is None
        # which means "don't include port in the determination of locality
        # because it's not reliable in this deployment scenario"
        self.assertTrue(is_local_device(my_ips, None,
                                        "127.0.0.1", 6666))
        self.assertTrue(is_local_device(my_ips, None,
                                        "::1", 6666))
        self.assertTrue(is_local_device(
            my_ips, None,
            "0000:0000:0000:0000:0000:0000:0000:0001", 6666))
        self.assertTrue(is_local_device(my_ips, None,
                                        "localhost", 6666))
        self.assertFalse(is_local_device(my_ips, None,
                                         "127.0.0.2", my_port))

    def test_validate_and_normalize_ip(self):
        """IPs normalize to lowercase canonical form; hostnames raise."""
        ipv4 = "10.0.0.1"
        self.assertEqual(ipv4, validate_and_normalize_ip(ipv4))
        ipv6 = "fe80::204:61ff:fe9d:f156"
        self.assertEqual(ipv6, validate_and_normalize_ip(ipv6.upper()))
        hostname = "test.test.com"
        self.assertRaises(ValueError,
                          validate_and_normalize_ip, hostname)
        hostname = "$blah#"
        self.assertRaises(ValueError,
                          validate_and_normalize_ip, hostname)

    def test_validate_and_normalize_address(self):
        """Addresses accept IPs and hostnames; invalid strings raise."""
        ipv4 = "10.0.0.1"
        self.assertEqual(ipv4, validate_and_normalize_address(ipv4))
        ipv6 = "fe80::204:61ff:fe9d:f156"
        self.assertEqual(ipv6, validate_and_normalize_address(ipv6.upper()))
        hostname = "test.test.com"
        self.assertEqual(hostname,
                         validate_and_normalize_address(hostname.upper()))
        hostname = "$blah#"
        self.assertRaises(ValueError,
                          validate_and_normalize_address, hostname)

    def test_parse_search_value(self):
        """Each search-string prefix maps to its corresponding dict key."""
        res = parse_search_value('r0')
        self.assertEqual(res, {'region': 0})
        res = parse_search_value('r1')
        self.assertEqual(res, {'region': 1})
        res = parse_search_value('r1z2')
        self.assertEqual(res, {'region': 1, 'zone': 2})
        res = parse_search_value('d1')
        self.assertEqual(res, {'id': 1})
        res = parse_search_value('z1')
        self.assertEqual(res, {'zone': 1})
        res = parse_search_value('-127.0.0.1')
        self.assertEqual(res, {'ip': '127.0.0.1'})
        res = parse_search_value('127.0.0.1')
        self.assertEqual(res, {'ip': '127.0.0.1'})
        res = parse_search_value('-[127.0.0.1]:10001')
        self.assertEqual(res, {'ip': '127.0.0.1', 'port': 10001})
        res = parse_search_value(':10001')
        self.assertEqual(res, {'port': 10001})
        res = parse_search_value('R127.0.0.10')
        self.assertEqual(res, {'replication_ip': '127.0.0.10'})
        res = parse_search_value('R[127.0.0.10]:20000')
        self.assertEqual(res, {'replication_ip': '127.0.0.10',
                               'replication_port': 20000})
        res = parse_search_value('R:20000')
        self.assertEqual(res, {'replication_port': 20000})
        res = parse_search_value('/sdb1')
        self.assertEqual(res, {'device': 'sdb1'})
        res = parse_search_value('_meta1')
        self.assertEqual(res, {'meta': 'meta1'})
        self.assertRaises(ValueError, parse_search_value, 'OMGPONIES')

    def test_parse_search_values_from_opts(self):
        """Search values come from the non-change options; change-* ignored.

        Verified for hostname, bare-IP, and bracketed-IP argument forms.
        """
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "test.test.com",
             "--port", "6000",
             "--replication-ip", "r.test.com",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "change.test.test.com",
             "--change-port", "6001",
             "--change-replication-ip", "change.r.test.com",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        expected = {
            'id': 1,
            'region': 2,
            'zone': 3,
            'ip': "test.test.com",
            'port': 6000,
            'replication_ip': "r.test.com",
            'replication_port': 7000,
            'device': "sda3",
            'meta': "some meta data",
            'weight': 3.14159265359,
        }
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "127.0.0.1",
             "--port", "6000",
             "--replication-ip", "127.0.0.10",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "127.0.0.2",
             "--change-port", "6001",
             "--change-replication-ip", "127.0.0.20",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        expected = {
            'id': 1,
            'region': 2,
            'zone': 3,
            'ip': "127.0.0.1",
            'port': 6000,
            'replication_ip': "127.0.0.10",
            'replication_port': 7000,
            'device': "sda3",
            'meta': "some meta data",
            'weight': 3.14159265359,
        }
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)
        # Bracketed IPs must normalize to the same values as bare IPs.
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "[127.0.0.1]",
             "--port", "6000",
             "--replication-ip", "[127.0.0.10]",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "[127.0.0.2]",
             "--change-port", "6001",
             "--change-replication-ip", "[127.0.0.20]",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_search_values_from_opts(opts)
        self.assertEqual(search_values, expected)

    def test_parse_change_values_from_opts(self):
        """Change values come only from the --change-* options.

        Verified for hostname, bare-IP, and bracketed-IP argument forms.
        """
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "test.test.com",
             "--port", "6000",
             "--replication-ip", "r.test.com",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "change.test.test.com",
             "--change-port", "6001",
             "--change-replication-ip", "change.r.test.com",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        expected = {
            'ip': "change.test.test.com",
            'port': 6001,
            'replication_ip': "change.r.test.com",
            'replication_port': 7001,
            'device': "sdb3",
            'meta': "some meta data for change",
        }
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "127.0.0.1",
             "--port", "6000",
             "--replication-ip", "127.0.0.10",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "127.0.0.2",
             "--change-port", "6001",
             "--change-replication-ip", "127.0.0.20",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        expected = {
            'ip': "127.0.0.2",
            'port': 6001,
            'replication_ip': "127.0.0.20",
            'replication_port': 7001,
            'device': "sdb3",
            'meta': "some meta data for change",
        }
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)
        # Bracketed IPs must normalize to the same values as bare IPs.
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "[127.0.0.1]",
             "--port", "6000",
             "--replication-ip", "[127.0.0.10]",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "[127.0.0.2]",
             "--change-port", "6001",
             "--change-replication-ip", "[127.0.0.20]",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        new_cmd_format, opts, args = validate_args(argv)
        search_values = parse_change_values_from_opts(opts)
        self.assertEqual(search_values, expected)

    def test_validate_args(self):
        """All long options parse into typed attributes on opts."""
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "test.test.com",
             "--port", "6000",
             "--replication-ip", "r.test.com",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "change.test.test.com",
             "--change-port", "6001",
             "--change-replication-ip", "change.r.test.com",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)
        self.assertEqual(opts.id, 1)
        self.assertEqual(opts.region, 2)
        self.assertEqual(opts.zone, 3)
        self.assertEqual(opts.ip, "test.test.com")
        self.assertEqual(opts.port, 6000)
        self.assertEqual(opts.replication_ip, "r.test.com")
        self.assertEqual(opts.replication_port, 7000)
        self.assertEqual(opts.device, "sda3")
        self.assertEqual(opts.meta, "some meta data")
        self.assertEqual(opts.weight, 3.14159265359)
        self.assertEqual(opts.change_ip, "change.test.test.com")
        self.assertEqual(opts.change_port, 6001)
        self.assertEqual(opts.change_replication_ip, "change.r.test.com")
        self.assertEqual(opts.change_replication_port, 7001)
        self.assertEqual(opts.change_device, "sdb3")
        self.assertEqual(opts.change_meta, "some meta data for change")

    def test_validate_args_new_cmd_format(self):
        """new_cmd_format is True whenever any search option is supplied."""
        argv = \
            ["--id", "0", "--region", "0", "--zone", "0",
             "--ip", "",
             "--port", "0",
             "--replication-ip", "",
             "--replication-port", "0",
             "--device", "",
             "--meta", "",
             "--weight", "0",
             "--change-ip", "",
             "--change-port", "0",
             "--change-replication-ip", "",
             "--change-replication-port", "0",
             "--change-device", "",
             "--change-meta", ""]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)
        argv = \
            ["--id", None, "--region", None, "--zone", None,
             "--ip", "",
             "--port", "0",
             "--replication-ip", "",
             "--replication-port", "0",
             "--device", "",
             "--meta", "",
             "--weight", None,
             "--change-ip", "change.test.test.com",
             "--change-port", "6001",
             "--change-replication-ip", "change.r.test.com",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertFalse(new_cmd_format)
        argv = \
            ["--id", "0"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)
        argv = \
            ["--region", "0"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)
        argv = \
            ["--zone", "0"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)
        argv = \
            ["--weight", "0"]
        new_cmd_format, opts, args = validate_args(argv)
        self.assertTrue(new_cmd_format)

    def test_parse_args(self):
        """parse_args yields the same typed opts with no leftover args."""
        argv = \
            ["--id", "1", "--region", "2", "--zone", "3",
             "--ip", "test.test.com",
             "--port", "6000",
             "--replication-ip", "r.test.com",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359",
             "--change-ip", "change.test.test.com",
             "--change-port", "6001",
             "--change-replication-ip", "change.r.test.com",
             "--change-replication-port", "7001",
             "--change-device", "sdb3",
             "--change-meta", "some meta data for change"]
        opts, args = parse_args(argv)
        self.assertEqual(opts.id, 1)
        self.assertEqual(opts.region, 2)
        self.assertEqual(opts.zone, 3)
        self.assertEqual(opts.ip, "test.test.com")
        self.assertEqual(opts.port, 6000)
        self.assertEqual(opts.replication_ip, "r.test.com")
        self.assertEqual(opts.replication_port, 7000)
        self.assertEqual(opts.device, "sda3")
        self.assertEqual(opts.meta, "some meta data")
        self.assertEqual(opts.weight, 3.14159265359)
        self.assertEqual(opts.change_ip, "change.test.test.com")
        self.assertEqual(opts.change_port, 6001)
        self.assertEqual(opts.change_replication_ip, "change.r.test.com")
        self.assertEqual(opts.change_replication_port, 7001)
        self.assertEqual(opts.change_device, "sdb3")
        self.assertEqual(opts.change_meta, "some meta data for change")
        self.assertEqual(len(args), 0)

    def test_parse_builder_ring_filename_args(self):
        """Builder/ring filenames are derived from each other by suffix."""
        args = 'swift-ring-builder object.builder write_ring'
        self.assertEqual((
            'object.builder', 'object.ring.gz'
        ), parse_builder_ring_filename_args(args.split()))
        args = 'swift-ring-builder container.ring.gz write_builder'
        self.assertEqual((
            'container.builder', 'container.ring.gz'
        ), parse_builder_ring_filename_args(args.split()))
        # builder name arg should always fall through
        args = 'swift-ring-builder test create'
        self.assertEqual((
            'test', 'test.ring.gz'
        ), parse_builder_ring_filename_args(args.split()))
        args = 'swift-ring-builder my.file.name create'
        self.assertEqual((
            'my.file.name', 'my.file.name.ring.gz'
        ), parse_builder_ring_filename_args(args.split()))

    def test_build_dev_from_opts(self):
        """A device dict is built from opts; bad/missing fields raise."""
        argv = \
            ["--region", "2", "--zone", "3",
             "--ip", "test.test.com",
             "--port", "6000",
             "--replication-ip", "r.test.com",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359"]
        expected = {
            'region': 2,
            'zone': 3,
            'ip': "test.test.com",
            'port': 6000,
            'replication_ip': "r.test.com",
            'replication_port': 7000,
            'device': "sda3",
            'meta': "some meta data",
            'weight': 3.14159265359,
        }
        opts, args = parse_args(argv)
        device = build_dev_from_opts(opts)
        self.assertEqual(device, expected)
        # Bracketed hostnames are invalid addresses.
        argv = \
            ["--region", "2", "--zone", "3",
             "--ip", "[test.test.com]",
             "--port", "6000",
             "--replication-ip", "[r.test.com]",
             "--replication-port", "7000",
             "--device", "sda3",
             "--meta", "some meta data",
             "--weight", "3.14159265359"]
        opts, args = parse_args(argv)
        self.assertRaises(ValueError, build_dev_from_opts, opts)
        # Missing --device is also an error.
        argv = \
            ["--region", "2", "--zone", "3",
             "--ip", "[test.test.com]",
             "--port", "6000",
             "--replication-ip", "[r.test.com]",
             "--replication-port", "7000",
             "--meta", "some meta data",
             "--weight", "3.14159265359"]
        opts, args = parse_args(argv)
        self.assertRaises(ValueError, build_dev_from_opts, opts)

    def test_replication_defaults(self):
        """replication ip/port default to the primary ip/port."""
        args = '-r 1 -z 1 -i 127.0.0.1 -p 6010 -d d1 -w 100'.split()
        opts, _ = parse_args(args)
        device = build_dev_from_opts(opts)
        expected = {
            'device': 'd1',
            'ip': '127.0.0.1',
            'meta': '',
            'port': 6010,
            'region': 1,
            'replication_ip': '127.0.0.1',
            'replication_port': 6010,
            'weight': 100.0,
            'zone': 1,
        }
        self.assertEqual(device, expected)
        args = '-r 1 -z 1 -i test.com -p 6010 -d d1 -w 100'.split()
        opts, _ = parse_args(args)
        device = build_dev_from_opts(opts)
        expected = {
            'device': 'd1',
            'ip': 'test.com',
            'meta': '',
            'port': 6010,
            'region': 1,
            'replication_ip': 'test.com',
            'replication_port': 6010,
            'weight': 100.0,
            'zone': 1,
        }
        self.assertEqual(device, expected)

    def test_dispersion_report(self):
        """dispersion_report identifies the worst tier and per-tier stats."""
        rb = ring.RingBuilder(8, 3, 0)
        rb.add_dev({'id': 0, 'region': 1, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sda1'})
        rb.add_dev({'id': 3, 'region': 1, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdb1'})
        rb.add_dev({'id': 4, 'region': 1, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdc1'})
        rb.add_dev({'id': 5, 'region': 1, 'zone': 0, 'weight': 100,
                    'ip': '127.0.0.0', 'port': 10000, 'device': 'sdd1'})
        rb.add_dev({'id': 1, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sda1'})
        rb.add_dev({'id': 6, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb1'})
        rb.add_dev({'id': 7, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdc1'})
        rb.add_dev({'id': 8, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.1', 'port': 10001, 'device': 'sdd1'})
        rb.add_dev({'id': 2, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sda1'})
        rb.add_dev({'id': 9, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdb1'})
        rb.add_dev({'id': 10, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc1'})
        rb.add_dev({'id': 11, 'region': 1, 'zone': 1, 'weight': 200,
                    'ip': '127.0.0.2', 'port': 10002, 'device': 'sdd1'})
        # this ring is pretty volatile and the assertions are pretty brittle
        # so we use a specific seed
        rb.rebalance(seed=100)
        rb.validate()
        self.assertEqual(rb.dispersion, 39.0625)
        report = dispersion_report(rb)
        self.assertEqual(report['worst_tier'], 'r1z1')
        self.assertEqual(report['max_dispersion'], 39.0625)

        def build_tier_report(max_replicas, placed_parts, dispersion,
                              replicas):
            # Small helper to keep the expected-graph table readable.
            return {
                'max_replicas': max_replicas,
                'placed_parts': placed_parts,
                'dispersion': dispersion,
                'replicas': replicas,
            }

        # Each node should store 256 partitions to avoid multiple replicas
        # 2/5 of total weight * 768 ~= 307 -> 51 partitions on each node in
        # zone 1 are stored at least twice on the nodes
        expected = [
            ['r1z1', build_tier_report(
                2, 256, 39.0625, [0, 0, 156, 100])],
            ['r1z1-127.0.0.1', build_tier_report(
                1, 256, 19.53125, [0, 206, 50, 0])],
            ['r1z1-127.0.0.2', build_tier_report(
                1, 256, 19.53125, [0, 206, 50, 0])],
        ]
        report = dispersion_report(rb, 'r1z1[^/]*$', verbose=True)
        graph = report['graph']
        for i, (expected_key, expected_report) in enumerate(expected):
            key, report = graph[i]
            self.assertEqual(
                (key, report),
                (expected_key, expected_report)
            )
        # overcompensate in r1z0
        rb.add_dev({'id': 12, 'region': 1, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.3', 'port': 10003, 'device': 'sda1'})
        rb.add_dev({'id': 13, 'region': 1, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.3', 'port': 10003, 'device': 'sdb1'})
        rb.add_dev({'id': 14, 'region': 1, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.3', 'port': 10003, 'device': 'sdc1'})
        rb.add_dev({'id': 15, 'region': 1, 'zone': 0, 'weight': 500,
                    'ip': '127.0.0.3', 'port': 10003, 'device': 'sdd1'})
        rb.rebalance(seed=10)
        report = dispersion_report(rb)
        self.assertEqual(rb.dispersion, 44.53125)
        self.assertEqual(report['worst_tier'], 'r1z0-127.0.0.3')
        self.assertEqual(report['max_dispersion'], 32.520325203252035)

    def test_parse_address_old_format(self):
        """Old-style combined address strings split into ip, port, rest."""
        # Test old format
        argv = "127.0.0.1:6000R127.0.0.1:6000/sda1_some meta data"
        ip, port, rest = parse_address(argv)
        self.assertEqual(ip, '127.0.0.1')
        self.assertEqual(port, 6000)
        self.assertEqual(rest, 'R127.0.0.1:6000/sda1_some meta data')
# Allow running this test module directly (without a test runner).
if __name__ == '__main__':
    unittest.main()
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
import re
import os
from tensorflow.python.keras.preprocessing import text
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.layers import Embedding
from tensorflow.python.keras.layers import Conv1D
from tensorflow.python.keras.layers import MaxPooling1D
from tensorflow.python.keras.layers import GlobalAveragePooling1D
from google.cloud import storage
# Surface INFO-level logs so training progress is visible.
tf.logging.set_verbosity(tf.logging.INFO)

CLASSES = {'github': 0, 'nytimes': 1, 'techcrunch': 2}  # label-to-int mapping
TOP_K = 20000  # Limit on the number vocabulary size used for tokenization
MAX_SEQUENCE_LENGTH = 50  # Sentences will be truncated/padded to this length
VOCAB_FILE_PATH = None  # where vocabulary is saved, dynamically set in train_and_eval function
# Sentinel token used to densify the ragged word matrix; chosen so it is
# unlikely to collide with a real vocabulary word.
PADWORD = 'ZYXW'
"""
Helper function to download data from Google Cloud Storage
# Arguments:
source: string, the GCS URL to download from (e.g. 'gs://bucket/file.csv')
destination: string, the filename to save as on local disk. MUST be filename
ONLY, doesn't support folders. (e.g. 'file.csv', NOT 'folder/file.csv')
# Returns: nothing, downloads file to local disk
"""
def download_from_gcs(source, destination):
    """Download one object from Google Cloud Storage to a local file.

    Args:
        source: GCS URL of the object, e.g. 'gs://bucket/file.csv'.
        destination: local filename to write (filename only, no folders).
    """
    # Split the URL into its bucket and object-path components.
    match = re.search('gs://(.*?)/(.*)', source)
    bucket_name, blob_name = match.group(1), match.group(2)
    client = storage.Client()
    client.get_bucket(bucket_name).blob(blob_name).download_to_filename(
        destination)
"""
Parses raw tsv containing hacker news headlines and returns (sentence, integer label) pairs
# Arguments:
train_data_path: string, path to tsv containing training data.
can be a local path or a GCS url (gs://...)
eval_data_path: string, path to tsv containing eval data.
can be a local path or a GCS url (gs://...)
# Returns:
((train_sentences, train_labels), (test_sentences, test_labels)): sentences
are lists of strings, labels are numpy integer arrays
"""
def load_hacker_news_data(train_data_path, eval_data_path):
    """Load hacker-news headline TSVs into (sentences, labels) pairs.

    Args:
        train_data_path: local path or GCS URL (gs://...) of the training tsv.
        eval_data_path: local path or GCS URL (gs://...) of the eval tsv.

    Returns:
        ((train_sentences, train_labels), (eval_sentences, eval_labels))
        where sentences are lists of strings and labels are numpy int arrays
        mapped through CLASSES.
    """
    # Stage remote files locally; local paths pass through untouched.
    if train_data_path.startswith('gs://'):
        download_from_gcs(train_data_path, destination='train.csv')
        train_data_path = 'train.csv'
    if eval_data_path.startswith('gs://'):
        download_from_gcs(eval_data_path, destination='eval.csv')
        eval_data_path = 'eval.csv'

    # Tab-separated, no header row: first column label, second column text.
    names = ('label', 'text')
    df_train = pd.read_csv(train_data_path, names=names, sep='\t')
    df_eval = pd.read_csv(eval_data_path, names=names, sep='\t')

    train_pair = (list(df_train['text']),
                  np.array(df_train['label'].map(CLASSES)))
    eval_pair = (list(df_eval['text']),
                 np.array(df_eval['label'].map(CLASSES)))
    return (train_pair, eval_pair)
"""
Create tf.estimator compatible input function
# Arguments:
texts: [strings], list of sentences
labels: numpy int vector, integer labels for sentences
batch_size: int, number of records to use for each train batch
mode: tf.estimator.ModeKeys.TRAIN or tf.estimator.ModeKeys.EVAL
# Returns:
tf.data.Dataset, produces feature and label
tensors one batch at a time
"""
def input_fn(texts, labels, batch_size, mode):
    """Build a tf.data.Dataset of (padded word-id sequence, label) batches.

    Args:
        texts: list of sentence strings.
        labels: numpy int vector of labels aligned with texts.
        batch_size: records per batch.
        mode: tf.estimator.ModeKeys.TRAIN or .EVAL; TRAIN repeats forever
            and shuffles, EVAL makes a single pass.

    Returns:
        tf.data.Dataset yielding (features, label) batches.
    """
    # Convert texts from python strings to tensors
    x = tf.constant(texts)
    # Map text to sequence of word-integers and pad
    x = vectorize_sentences(x)
    # Create tf.data.Dataset from tensors
    dataset = tf.data.Dataset.from_tensor_slices((x, labels))
    # Pad to constant length
    dataset = dataset.map(pad)
    if mode == tf.estimator.ModeKeys.TRAIN:
        num_epochs = None  # None makes repeat() loop indefinitely
        # NOTE(review): input is already shuffled upstream, so this shuffle
        # is believed redundant — kept for safety.
        dataset = dataset.shuffle(buffer_size=50000)
    else:
        num_epochs = 1
    dataset = dataset.repeat(num_epochs).batch(batch_size)
    return dataset
"""
Given an int tensor, remove 0s then pad to a fixed length representation.
#Arguments:
feature: int tensor
label: int. not used in function, just passed through
#Returns:
(int tensor, int) tuple.
"""
def pad(feature, label):
    """Strip OOV zeros from an int sequence and left-pad to fixed length.

    Args:
        feature: 1-D int tensor of word ids (0 = out-of-vocabulary).
        label: passed through unchanged.

    Returns:
        (int tensor of length MAX_SEQUENCE_LENGTH, label) tuple.
    """
    # 1. Remove 0s which represent out of vocabulary words
    nonzero_indices = tf.where(tf.not_equal(feature, tf.zeros_like(feature)))
    without_zeros = tf.gather(feature,nonzero_indices)
    without_zeros = tf.squeeze(without_zeros, axis=1)
    # 2. Prepend 0s till MAX_SEQUENCE_LENGTH
    padded = tf.pad(without_zeros, [[MAX_SEQUENCE_LENGTH, 0]]) # pad out with zeros
    # Keep only the last MAX_SEQUENCE_LENGTH entries, i.e. truncate long
    # sequences from the front and keep the left-padding for short ones.
    padded = padded[-MAX_SEQUENCE_LENGTH:] # slice to constant length
    return (padded, label)
"""
Given sentences, return an integer representation
# Arguments:
sentences: string tensor of shape (?,), contains sentences to vectorize
# Returns:
Integer representation of the sentence. Word-integer mapping is determined
by VOCAB_FILE_PATH. Words out of vocabulary will map to 0
"""
def vectorize_sentences(sentences):
    """Map a batch of sentence strings to padded word-integer matrices.

    Args:
        sentences: string tensor of shape (?,).

    Returns:
        Int tensor of word ids. The word-to-integer mapping comes from the
        CSV vocabulary file at VOCAB_FILE_PATH; out-of-vocabulary words
        (including the PADWORD filler) map to 0.
    """
    # 1. Remove punctuation
    sentences = tf.regex_replace(sentences, '[[:punct:]]', ' ')
    # 2. Split string tensor into component words; densifying the sparse
    # result fills ragged rows with PADWORD, which later maps to OOV (0).
    words = tf.string_split(sentences)
    words = tf.sparse_tensor_to_dense(words, default_value=PADWORD)
    # 3. Map each word to respective integer
    table = tf.contrib.lookup.index_table_from_file(
        vocabulary_file=VOCAB_FILE_PATH,
        num_oov_buckets=0,
        vocab_size=None,
        default_value=0,  # for words not in vocabulary (OOV)
        key_column_index=0,
        value_column_index=1,
        delimiter=',')
    numbers = table.lookup(words)
    return numbers
def keras_estimator(model_dir,
                    config,
                    learning_rate,
                    filters=64,
                    dropout_rate=0.2,
                    embedding_dim=200,
                    kernel_size=3,
                    pool_size=3,
                    embedding_path=None,
                    word_index=None):
    """Builds a CNN model using keras and converts to tf.estimator.Estimator.

    # Arguments
        model_dir: string, file path where training files will be written
        config: tf.estimator.RunConfig, specifies properties of tf Estimator
        learning_rate: float, learning rate for the Adam optimizer
        filters: int, output dimension of the layers.
        kernel_size: int, length of the convolution window.
        embedding_dim: int, dimension of the embedding vectors.
        dropout_rate: float, percentage of input to drop at Dropout layers.
        pool_size: int, factor by which to downscale input at MaxPooling layer.
        embedding_path: string, file location of pre-trained embedding (if used)
            defaults to None which will cause the model to train embedding
            from scratch
        word_index: dictionary, mapping of vocabulary to integers; required
            when a pre-trained embedding is provided, otherwise optional
    # Returns
        A tf.estimator.Estimator
    """
    # Create model instance.
    model = models.Sequential()
    # BUGFIX: word_index defaults to None, which used to crash on len(None);
    # fall back to the global vocabulary cap when no mapping is given.
    if word_index is not None:
        num_features = min(len(word_index) + 1, TOP_K)
    else:
        num_features = TOP_K
    # Add embedding layer. If pre-trained embedding is used add weights to the
    # embeddings layer. (was: `embedding_path != None` -- compare to None
    # with `is not None`)
    if embedding_path is not None:
        embedding_matrix = get_embedding_matrix(
            word_index, embedding_path, embedding_dim)
        is_embedding_trainable = True  # set to False to freeze embedding weights
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=MAX_SEQUENCE_LENGTH,
                            weights=[embedding_matrix],
                            trainable=is_embedding_trainable))
    else:
        model.add(Embedding(input_dim=num_features,
                            output_dim=embedding_dim,
                            input_length=MAX_SEQUENCE_LENGTH))
    model.add(Dropout(rate=dropout_rate))
    model.add(Conv1D(filters=filters,
                     kernel_size=kernel_size,
                     activation='relu',
                     bias_initializer='random_uniform',
                     padding='same'))
    model.add(MaxPooling1D(pool_size=pool_size))
    model.add(Conv1D(filters=filters * 2,
                     kernel_size=kernel_size,
                     activation='relu',
                     bias_initializer='random_uniform',
                     padding='same'))
    model.add(GlobalAveragePooling1D())
    model.add(Dropout(rate=dropout_rate))
    model.add(Dense(len(CLASSES), activation='softmax'))
    # Compile model with learning parameters; labels are integer class ids,
    # hence the sparse loss.
    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
    return estimator
def serving_input_fn():
    """Defines the features to be passed to the model during inference.

    Raw sentence strings can be passed in directly; tokenization is done
    here via vectorize_sentences().
    # Arguments: none
    # Returns: tf.estimator.export.ServingInputReceiver
    """
    raw_sentences = tf.placeholder(tf.string, [None])
    vectorized = vectorize_sentences(raw_sentences)
    return tf.estimator.export.TensorServingInputReceiver(
        vectorized, raw_sentences)
def get_embedding_matrix(word_index, embedding_path, embedding_dim):
    """Extract embeddings for the current vocabulary from a generic embedding.

    The pre-trained embedding file is obtained from
    https://nlp.stanford.edu/projects/glove/
    # Arguments:
        word_index: dict, {key = word in vocabulary: value = integer mapped
            to that word}
        embedding_path: string, location of the pre-trained embedding file
            on disk (may be a gs:// URI)
        embedding_dim: int, dimension of the embedding space
    # Returns: numpy matrix of shape (vocabulary, embedding_dim) containing
        the embedded representation of each word in the vocabulary. Words
        without a pre-trained vector stay all-zeros.
    """
    # Pull the file down locally first if it lives in GCS.
    if embedding_path.startswith('gs://'):
        download_from_gcs(embedding_path, destination='embedding.csv')
        embedding_path = 'embedding.csv'
    # Read the pre-trained file: each line is a word followed by its vector.
    pretrained = {}
    with open(embedding_path) as source:
        for record in source:
            fields = record.split()
            pretrained[fields[0]] = np.asarray(fields[1:], dtype='float32')
    # Build the matrix restricted to words in our word_index dictionary.
    vocab_size = min(len(word_index) + 1, TOP_K)
    matrix = np.zeros((vocab_size, embedding_dim))
    for token, idx in word_index.items():
        if idx >= TOP_K:
            continue
        vector = pretrained.get(token)
        if vector is not None:
            matrix[idx] = vector
    return matrix
"""
Main orchestrator. Responsible for calling all other functions in model.py
# Arguments:
    output_dir: string, file path where training files will be written
    hparams: dict, command line parameters passed from task.py
# Returns: nothing, kicks off training and evaluation
"""
def train_and_evaluate(output_dir, hparams):
    tf.summary.FileWriterCache.clear()  # ensure filewriter cache is clear for TensorBoard events file
    # Load Data
    ((train_texts, train_labels), (test_texts, test_labels)) = load_hacker_news_data(
        hparams['train_data_path'], hparams['eval_data_path'])
    # Create vocabulary from training corpus.
    tokenizer = text.Tokenizer(num_words=TOP_K)
    tokenizer.fit_on_texts(train_texts)
    # Generate vocabulary file from tokenizer object to enable
    # creating a native tensorflow lookup table later (used in vectorize_sentences())
    tf.gfile.MkDir(output_dir)  # directory must exist before we can use tf.gfile.open
    # Module-level global: vectorize_sentences()/serving_input_fn() read this
    # path to build their lookup table for the current run.
    global VOCAB_FILE_PATH; VOCAB_FILE_PATH = os.path.join(output_dir, 'vocab.txt')
    # NOTE(review): file opened 'wb' but written str -- fine under
    # Python 2 / TF1 gfile; confirm before migrating to Python 3.
    with tf.gfile.Open(VOCAB_FILE_PATH, 'wb') as f:
        f.write("{},0\n".format(PADWORD))  # map padword to 0
        for word, index in tokenizer.word_index.items():
            if index < TOP_K:  # only save mappings for TOP_K words
                f.write("{},{}\n".format(word, index))
    # Create estimator
    run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
    estimator = keras_estimator(
        model_dir=output_dir,
        config=run_config,
        learning_rate=hparams['learning_rate'],
        embedding_path=hparams['embedding_path'],
        word_index=tokenizer.word_index
    )
    # Create TrainSpec
    # NOTE(review): this division may yield a float for max_steps --
    # TODO confirm whether an explicit int() cast is wanted here.
    train_steps = hparams['num_epochs'] * len(train_texts) / hparams['batch_size']
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda: input_fn(
            train_texts,
            train_labels,
            hparams['batch_size'],
            mode=tf.estimator.ModeKeys.TRAIN),
        max_steps=train_steps
    )
    # Create EvalSpec; the exporter writes a SavedModel for serving after
    # each evaluation.
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    eval_spec = tf.estimator.EvalSpec(
        input_fn=lambda: input_fn(
            test_texts,
            test_labels,
            hparams['batch_size'],
            mode=tf.estimator.ModeKeys.EVAL),
        steps=None,  # evaluate until the eval input_fn is exhausted
        exporters=exporter,
        start_delay_secs=10,
        throttle_secs=10
    )
    # Start training
    tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
| |
"""
Tasks viewed from the mturk website nested in an iframe
"""
import json
import random
import datetime
from ua_parser import user_agent_parser
from django.conf import settings
from django.core.context_processors import csrf
from django.views.decorators.http import require_POST
from django.views.decorators.csrf import ensure_csrf_cookie
from django.http import Http404
from django.shortcuts import render, get_object_or_404
from django.views.decorators.cache import never_cache
from common.utils import recursive_sum, recursive_dict_exclude, \
html_error_response
from mturk.models import MtHit, MtAssignment, Experiment, ExperimentWorker
from mturk.tasks import mturk_submit_task, \
increment_hit_counter_task, expire_hit_task
from mturk.utils import get_or_create_mturk_worker_from_request, \
get_content_model_prefetch, fetch_hit_contents, \
fetch_content_tuples
from common.utils import json_success_response, json_error_response
#
# View functions
#
#@staff_member_required
@never_cache
def admin_preview_task(request, experiment_id, override, hit_id=None):
    """Staff preview of an MTurk task.

    Picks the given HIT (hit_id), or else the most-completed HIT of the
    experiment at random, and renders it via external_task() with the given
    override ("inst", "task", or "tut").
    """
    if hit_id:
        hit = get_object_or_404(MtHit, id=hit_id)
    else:
        hits = MtHit.objects \
            .filter(hit_type__experiment_id=experiment_id) \
            .order_by('-num_assignments_completed', '?')[:1]
        try:
            hit = hits[0]
        except IndexError:
            # No HITs yet -- was a bare `except:`; narrowed so real errors
            # (DB failures etc.) are no longer swallowed.
            try:
                e = Experiment.objects.get(id=experiment_id)
                return html_error_response(
                    request, 'There are no HITs created for this experiment yet. '
                    'Experiment id: %s, slug: "%s", title: "%s".' % (
                        e.id, e.slug, e.new_hit_settings.title)
                )
            except Experiment.DoesNotExist:
                return html_error_response(
                    request, 'This experiment does not exist. Experiment id: %s.' %
                    (experiment_id)
                )
    return external_task(
        request, experiment_id=experiment_id, hit=hit, override=override)
@require_POST
def external_incompatible(request, id):
    """ Record that a worker's browser was incompatible with this HIT. """
    # counter update happens asynchronously via celery
    increment_hit_counter_task.delay(id, 'incompatible_count')
    return json_success_response()
@require_POST
def external_compatible(request, id):
    """ Record that a worker's browser was compatible with this HIT. """
    # counter update happens asynchronously via celery
    increment_hit_counter_task.delay(id, 'compatible_count')
    return json_success_response()
@ensure_csrf_cookie
# @transaction.atomic is unnecessary: ATOMIC_REQUESTS already wraps views.
def external_task(request, experiment_id, hit=None, override=None):
    """
    Renders a MTurk task, both preview and instructions.
    override: either None, "inst", "task", or "tut" (intended for staff only)
    """
    # reject unsupported browsers up front
    bad_browser = external_task_browser_check(request)
    if bad_browser:
        return bad_browser
    # gather HIT/experiment/worker info; the helper short-circuits with an
    # error response instead of a context dict on failure
    context = external_task_prepare_context(
        request, experiment_id, hit, override)
    if not isinstance(context, dict):
        return context
    # tutorials, both GET and POST; None means "not a tutorial interaction"
    tutorial_response = external_task_tutorial(request, context)
    if tutorial_response:
        return tutorial_response
    # task submission vs. task/instructions page
    if request.method == 'POST':
        return external_task_POST(request, context)
    return external_task_GET(request, context)
#
# Helper functions
#
def is_preview_request(request):
    """ Return True if this is an MTurk task-preview request (the worker has
    not accepted the HIT, so no real assignment id is present). """
    assignment_id = request.GET.get('assignmentId')
    return (assignment_id is None or
            assignment_id == 'ASSIGNMENT_ID_NOT_AVAILABLE')
def external_task_browser_check(request):
    """Reject mobile devices and browsers other than desktop Firefox/Chrome.

    Returns an HTML error response for unsupported browsers, or None when
    the browser is acceptable (only GETs are checked).
    """
    if request.method == "GET":
        valid_browser = False
        if 'HTTP_USER_AGENT' in request.META:
            ua = user_agent_parser.Parse(request.META['HTTP_USER_AGENT'])
            # desktop Firefox and Chrome are both accepted
            if ua['user_agent']['family'].lower() in ('firefox', 'chrome'):
                device = ua['device']
                if 'is_mobile' not in device or not device['is_mobile']:
                    valid_browser = True
        if not valid_browser:
            # BUGFIX: the old message claimed Chrome was *required*, even
            # though the check above accepts Firefox as well.
            return html_error_response(
                request, '''
                This task requires Google Chrome or Firefox. <br/><br/>
                <a class="btn" href="http://www.google.com/chrome/"
                target="_blank">Get Google Chrome</a>
                ''')
    return None
def external_task_prepare_context(request, experiment_id, hit, override):
    """ Fetch hit, experiment, assignment, worker, etc. Returns either a
    dictionary on success, or a response (or exception) if there is some error.

    request: Django HttpRequest from the MTurk iframe
    experiment_id: id from the URL; must match the HIT's experiment
    hit: MtHit or None (None => read 'hitId' from request.GET)
    override: None, "inst", "task", or "tut" (staff preview mode)
    """
    # obtain HIT
    if hit is None:
        if 'hitId' not in request.GET:
            if request.user.is_staff:
                return html_error_response(
                    request, 'HIT ID missing from GET parameters')
            else:
                raise Http404
        hit_id = request.GET['hitId']
        try:
            hit = MtHit.objects \
                .select_related(
                    'hit_type__experiment',
                    'hit_type__experiment_settings',
                    'hit_type__requirements') \
                .get(id=hit_id)
        except MtHit.DoesNotExist:
            # if this HIT cannot be found, tell Amazon about it -- but only
            # for genuine worker requests, never for staff previews
            if (override is None and
                    not request.user.is_staff and
                    'assignmentId' in request.GET and
                    'workerId' in request.GET and
                    'turkSubmitTo' in request.GET):
                expire_hit_task.delay(hit_id)
            raise Http404
    # obtain experiment
    experiment = hit.hit_type.experiment
    if experiment.id != int(experiment_id):
        if request.user.is_staff:
            return html_error_response(
                request, 'Experiment ID (%s) does not match HIT (%s)' % (
                    experiment_id, experiment.id)
            )
        else:
            raise Http404
    # obtain worker and assignment
    worker = get_or_create_mturk_worker_from_request(request)
    assignment_dirty = False
    if worker and 'assignmentId' in request.GET:
        assignment, _ = MtAssignment.objects.get_or_create(
            id=request.GET['assignmentId'],
            defaults={'hit': hit, 'worker': worker})
        # repair an assignment record created against a different hit/worker
        if assignment.hit != hit or assignment.worker != worker:
            assignment.hit = hit
            assignment.worker = worker
            assignment_dirty = True
    else:
        assignment = None
        worker = None
    # obtain worker info specific to the experiment and worker
    if experiment and worker:
        experiment_worker, _ = ExperimentWorker.objects.get_or_create(
            experiment=experiment, worker=worker)
    else:
        experiment_worker = None
    # don't let blocked workers perform our tasks
    if (worker and worker.blocked) or (experiment_worker and experiment_worker.blocked):
        message = "Your submissions are too low quality. Please stop doing our tasks."
        if experiment_worker and experiment_worker.blocked_reason:
            message += "<br/><br/>" + experiment_worker.blocked_reason
        elif worker and worker.blocked_reason:
            message += "<br/><br/>" + worker.blocked_reason
        return html_error_response(request, message)
    # fetch contents
    hit_contents = fetch_hit_contents(hit)
    if override and 'publishable' in request.GET:
        # staff preview restricted to publishable (CC-licensed) items
        # NOTE(review): Python 2 `filter` returns a list; would need
        # list(...) under Python 3 for the concatenation below.
        hit_contents = filter(lambda x: x and x.publishable(), hit_contents)
    if not hit.num_contents or not hit_contents:
        # (in the if statement, also test hit.num_contents since it is only set
        # after the last content is added)
        return html_error_response(
            request, "Somehow there are no items in this HIT.")
    # fetch test (sentinel) contents
    if experiment_worker:
        if assignment.num_test_contents is None:
            n = experiment.test_contents_per_assignment
            if n > 0:
                # select new test contents from the set of possible contents
                # (that the user has not already answered)
                test_content_wrappers = experiment.test_contents.all() \
                    .exclude(responses__experiment_worker=experiment_worker) \
                    .order_by('-priority')[:n]
                # register chosen items with assignment
                assignment.test_contents.add(*test_content_wrappers)
            else:
                test_content_wrappers = []
            assignment.num_test_contents = len(test_content_wrappers)
            assignment_dirty = True
        elif assignment.num_test_contents > 0:
            # re-fetch existing contents
            test_content_wrappers = assignment.test_contents.all()
        else:
            test_content_wrappers = []
        # fetch objects from inside the wrappers
        if test_content_wrappers:
            test_contents = fetch_content_tuples([
                (x.content_type_id, x.object_id)
                for x in test_content_wrappers
            ])
        else:
            test_contents = []
    else:
        test_contents = []
        test_content_wrappers = []
    # shuffle together (some tasks may sort contents again in javascript)
    contents = hit_contents + test_contents
    random.shuffle(contents)
    # prepare context data
    context = {
        'hit': hit,
        'assignment': assignment,
        'worker': worker,
        'experiment': experiment,
        'experiment_id': experiment_id,
        'experiment_worker': experiment_worker,
        'slug': experiment.slug,
        'hit_contents': hit_contents,
        'test_content_wrappers': test_content_wrappers,
        'test_contents': test_contents,
        'contents': contents,
        'num_contents': len(contents),
        'num_contents_predicted': (len(hit_contents) +
                                   experiment.test_contents_per_assignment),
        'override': override,
    }
    if len(contents) == 1:
        context['content'] = contents[0]
    if experiment.version >= 2:
        # old experiments (version 1) don't use this
        context['contents_json'] = json.dumps(
            [c.get_entry_dict() for c in contents])
        # list of ids as json
        context['content_id_json'] = json.dumps(
            [{'id': c.id} for c in contents])
    # requirements
    for req in hit.hit_type.requirements.values('name', 'value'):
        context[req['name']] = req['value']
    if assignment_dirty:
        assignment.save()
    return context
def external_task_tutorial(request, context):
    """ Handle tutorials. On a GET, decide whether to serve up a tutorial;
    on a POST, record that the tutorial was completed (the client then
    refreshes). Returns a response, or None when this request is not a
    tutorial interaction. """
    experiment = context['experiment']
    override = context['override']
    tutorial_get = (
        request.method == "GET" and experiment.has_tutorial and
        (override == "tut" or not is_preview_request(request)))
    if tutorial_get:
        # serve the tutorial page unless this worker already finished it
        # (or staff explicitly asked for it via override)
        if override == "tut" or not context['experiment_worker'].tutorial_completed:
            context.update(csrf(request))
            template_name = experiment.template_name()
            return render(request, '%s_tut.html' % template_name, context)
    elif (request.method == "POST" and override is None and
          request.POST.get('tutorial_complete') == 'true'):
        # mark the tutorial as done for this worker
        ExperimentWorker.objects \
            .filter(id=context['experiment_worker'].id) \
            .update(tutorial_completed=True)
        return json_success_response()
    return None
def external_task_POST(request, context):
    """ Handles POSTs for mturk tasks. Returns a response.

    Validates the submission, records the raw POST/META data on the
    assignment, grades any sentinel ("test") items synchronously, may
    auto-approve or block the worker, then queues the heavy result
    processing asynchronously (mturk_submit_task).

    NOTE(review): Python 2 module -- uses basestring/unicode/iteritems.
    """
    # unpack some variables
    experiment, hit, assignment, worker, override, experiment_worker = [
        context[k] for k in [
            'experiment', 'hit', 'assignment', 'worker', 'override',
            'experiment_worker'
        ]
    ]
    # error checks
    if override is not None:
        return json_error_response(
            "You cannot submit in admin preview mode.")
    if not worker or not assignment:
        return json_error_response(
            "There was an error obtaining your Assignment ID from Amazon.")
    # check that POST is allowed
    if hit.sandbox and not settings.MTURK_ACCEPT_SANDBOX_HITS:
        return json_error_response(
            "Not currently accepting sandbox HITs. POST data: " +
            json.dumps(request.POST))
    # extract submit data; the timing fields are optional
    results = json.loads(request.POST['results'])
    time_ms = json.loads(request.POST['time_ms']) \
        if 'time_ms' in request.POST else None
    time_active_ms = json.loads(request.POST['time_active_ms']) \
        if 'time_active_ms' in request.POST else None
    time_load_ms = json.loads(request.POST['time_load_ms']) \
        if 'time_load_ms' in request.POST else None
    # 'partial' == 'true' marks an incomplete (partial) submission
    complete = ('partial' not in request.POST or
                str(request.POST['partial']) != 'true')
    version = json.loads(request.POST['version'])
    action_log = request.POST.get('action_log', '')
    screen_width = request.POST.get('screen_width', None)
    screen_height = request.POST.get('screen_height', None)
    # fix any potential str/int issues
    if isinstance(time_ms, basestring) and time_ms.isdigit():
        time_ms = int(time_ms)
    if isinstance(time_active_ms, basestring) and time_active_ms.isdigit():
        time_active_ms = int(time_active_ms)
    if isinstance(time_load_ms, basestring) and time_load_ms.isdigit():
        time_load_ms = int(time_load_ms)
    # store assignment POST information
    post_dict = {}
    meta_dict = {}
    for k, v in request.META.iteritems():
        # some non-encodable things get put in here -- filter them out by
        # forcing the unicode encoding
        try:
            meta_dict[unicode(k)] = unicode(v)
        except:
            pass
    for k, v in request.POST.iteritems():
        # some non-encodable things get put in here -- filter them out by
        # forcing the unicode encoding
        try:
            post_dict[unicode(k)] = unicode(v)
        except:
            pass
    # store dictionaries, not nested dictionaries
    # (screenshots are large, so they are excluded from the stored copy)
    post_dict[u'results'] = recursive_dict_exclude(results, [
        u'screenshot'])
    post_dict[u'time_ms'] = time_ms
    post_dict[u'time_active_ms'] = time_active_ms
    post_dict[u'time_load_ms'] = time_load_ms
    assignment.post_data = json.dumps(post_dict)
    assignment.post_meta = json.dumps(meta_dict)
    if 'HTTP_USER_AGENT' in request.META:
        assignment.user_agent = request.META['HTTP_USER_AGENT']
    # dirty flags: save the models once at the end, not per field
    assignment_dirty = False
    experiment_worker_dirty = False
    # update assignment info
    if complete:
        # recursive_sum collapses per-content timing dicts into one total
        assignment.time_ms = recursive_sum(time_ms)
        assignment.time_active_ms = recursive_sum(time_active_ms)
        assignment.time_load_ms = recursive_sum(time_load_ms)
        assignment.status = MtAssignment.str_to_status['Submitted']
        assignment.submit_time = datetime.datetime.now()
        assignment.action_log = action_log
        assignment.screen_width = screen_width
        assignment.screen_height = screen_height
        if 'feedback' in request.POST:
            assignment.feedback = request.POST['feedback']
            # must fill in at least 2/3 fields to count
            if assignment.feedback and len(json.loads(assignment.feedback)) >= 2:
                assignment.has_feedback = True
        assignment_dirty = True
    # mark test contents data as seen. it can't be done async or else the next
    # task will re-serve the same test items.
    rejected_assignment = False
    if assignment.num_test_contents:
        experiment_worker = context['experiment_worker']
        test_content_wrappers = context['test_content_wrappers']
        test_contents = context['test_contents']
        # grade test contents
        responses, responses_correct = hit.hit_type.experiment_settings \
            .out_content_model().mturk_grade_test(
                test_content_wrappers, test_contents, results)
        # store in database
        for i, tcw in enumerate(test_content_wrappers):
            # If the user accepts multiple HITs at once, then they can be
            # served the same test objects. In that case, only store their
            # first answer, since the second time they see it, they will know
            # it is a test item.
            if not tcw.responses.filter(experiment_worker=experiment_worker).exists():
                tcw.responses.create(
                    experiment_worker=experiment_worker,
                    assignment=assignment,
                    response=unicode(responses[i]),
                    correct=responses_correct[i],
                )
        # update local correct counts
        assignment.num_test_correct = sum(responses_correct)
        assignment.num_test_incorrect = sum(not x for x in responses_correct)
        assignment_dirty = True
        # update global correct counts
        experiment_worker.num_test_correct = \
            experiment_worker.test_content_responses.filter(correct=True).count()
        experiment_worker.num_test_incorrect = \
            experiment_worker.test_content_responses.filter(correct=False).count()
        experiment_worker_dirty = True
        # always approve, but give a message if they do badly
        if assignment.num_test_incorrect >= 3 and assignment.num_test_correct == 0:
            perc = int(100 * assignment.num_test_correct / (
                assignment.num_test_correct + assignment.num_test_incorrect))
            message = make_reject_message(experiment, hit, perc)
            #from mturk.tasks import reject_assignment_task
            from mturk.tasks import approve_assignment_task
            approve_assignment_task.apply_async(
                kwargs={
                    'assignment_id': assignment.id,
                    'feedback': message,
                }, countdown=60, retry=True, retry_policy={'max_retries': 100})
            rejected_assignment = True
        # block if accuracy every creeps below 80% (with at least 5 errors)
        if experiment_worker.num_test_incorrect > 5:
            perc = int(100 * experiment_worker.num_test_correct / (
                experiment_worker.num_test_correct +
                experiment_worker.num_test_incorrect))
            if perc < 80:
                message = make_reject_message(experiment, hit, perc)
                experiment_worker.block(reason=message, method='T', save=False)
                experiment_worker_dirty = True
        # otherwise auto-approve
        elif (not rejected_assignment and
              (experiment_worker.auto_approve or settings.MTURK_AUTO_APPROVE)):
            from mturk.tasks import approve_assignment_task
            approve_assignment_task.apply_async(
                kwargs={
                    'assignment_id': assignment.id,
                    'feedback': experiment_worker.auto_approve_message,
                }, countdown=60, retry=True, retry_policy={'max_retries': 100})
    if assignment_dirty:
        assignment.save()
    if experiment_worker_dirty:
        experiment_worker.save()
    # submit (rest of) data asynchronously
    mturk_submit_task.apply_async(
        # note: 'contents' not serialized in this list -- the task re-fetches
        # this from the database.
        kwargs={
            'user_id': worker.user_id,
            'mturk_hit_id': hit.id,
            'mturk_assignment_id': assignment.id,
            'experiment_id': experiment.id,
            'results': results,  # dict with content id as key
            'time_ms': time_ms,  # number or dict with content id as key
            'time_active_ms': time_active_ms,  # same format as time_ms
            'time_load_ms': time_load_ms,
            'complete': complete,
            'version': version,
        },
        retry=True,
        retry_policy={
            'max_retries': None,  # (retry forever)
            'interval_start': 300,
            'interval_step': 60,
            'interval_max': 600,
        }
    )
    # success
    return json_success_response()
def make_reject_message(experiment, hit, perc):
    """
    Build the message shown to a worker whose sentinel accuracy is too low.
    perc: percentage correct, ranging from 0 to 100.
    """
    # prefer an experiment-specific message, if its module provides one
    message = None
    module = experiment.get_module()
    if module and hasattr(module, 'make_reject_message'):
        message = module.make_reject_message(experiment, hit, perc)
    if message:
        return message
    # generic fallback
    return (
        "We checked some of your answers against our correct answers "
        "and found that your accuracy was %s percent, which is too "
        "low. This is for the task: %s."
    ) % (perc, hit.hit_type.title)
def external_task_GET(request, context):
    """ Handles GETs for mturk tasks. Returns either the full task page or
    the instructions-only preview page. """
    experiment = context['experiment']
    override = context['override']
    # experiment variant (JSON-encoded parameters), if any
    context['variant'] = (
        json.loads(experiment.variant) if experiment.variant else None)
    # template names are derived from the experiment
    template_name = experiment.template_name()
    context['instructions'] = '%s_inst_content.html' % template_name
    context['content_thumb_template'] = '%s_thumb.html' % template_name
    # fetch good/bad examples from the database
    publishable = bool(override and 'publishable' in request.GET)
    external_task_prepare_examples(
        context, experiment, publishable=publishable)
    # task-specific extra context, then feedback prompts
    external_task_extra_context(experiment.slug, context)
    external_task_prepare_feedback(request, context)
    # real assignment (or forced via override): serve the task page;
    # otherwise serve the instructions page
    if override == "task" or not is_preview_request(request):
        context.update(csrf(request))
        return render(request, '%s.html' % template_name, context)
    return render(request, '%s_inst.html' % template_name, context)
def external_task_prepare_feedback(request, context):
    """ Decide whether to ask this worker for feedback, and at what bonus.
    Sets context['ask_for_feedback'] and context['feedback_bonus']. """
    experiment = context['experiment']
    hit = context['hit']
    worker = context['worker']
    # defaults: don't ask
    context['ask_for_feedback'] = 'false'
    context['feedback_bonus'] = 0
    if not worker or hit.hit_type.feedback_bonus is None:
        return
    completed = MtAssignment.objects.filter(
        worker=worker,
        hit__hit_type=hit.hit_type,
    ).count()
    # ask on the 3rd HIT and every 10th from the 10th on, but only if the
    # worker has never left feedback for this experiment before
    if completed == 3 or (completed >= 10 and completed % 10 == 0):
        prior_feedback = MtAssignment.objects.filter(
            worker=worker,
            has_feedback=True,
            hit__hit_type__experiment__completed_id=experiment.completed_id,
        ).count()
        if prior_feedback == 0:
            context['ask_for_feedback'] = 'true'
            context['feedback_bonus'] = hit.hit_type.feedback_bonus
def external_task_prepare_examples(
        context, experiment, num_examples=16, publishable=False):
    """ Prepare good/bad examples for display. publishable: if True, only show
    creative-commons generic photos -- only useful for generating screenshots
    of tasks for publications.

    Fills context['examples_good'] / context['examples_bad'] and their
    *_json counterparts (client-side entry dicts). Does nothing when the
    experiment has no examples.
    """
    if not experiment.examples.exists():
        return
    # get content model
    content_model = experiment.examples.all()[0].__class__
    prefetch = get_content_model_prefetch(content_model)
    # good examples
    examples_good = [obj.content for obj in experiment.examples
                     .filter(good=True).order_by('?')[:num_examples]
                     .prefetch_related(*prefetch)]
    # try and find matching bad examples
    group_attr = experiment.examples_group_attr
    if (examples_good and content_model and
            group_attr and hasattr(content_model, group_attr)):
        group_id = group_attr + '_id'
        # object ids, matched on group attribute (e.g. 'shape')
        ids = content_model.objects \
            .filter(**{
                group_id + '__in':
                [getattr(c, group_id) for c in examples_good]
            }) \
            .values_list('id', flat=True)
        # fetch matching bad examples
        examples_bad = [x.content for x in experiment.examples
                        .filter(object_id__in=ids, good=False)
                        .prefetch_related(*prefetch)]
        # re-order good examples to put matches first
        # (sort key: False < True, so matched items sort to the front)
        examples_bad_dict = {getattr(x, group_id): x for x in examples_bad}
        examples_good.sort(
            key=lambda x: getattr(x, group_id) not in examples_bad_dict)
        # re-order bad examples to match good examples ordering
        examples_bad = []
        for c in examples_good:
            if getattr(c, group_id) in examples_bad_dict:
                examples_bad.append(examples_bad_dict[getattr(c, group_id)])
        # fetch remaining examples
        num_extra = num_examples - len(examples_bad)
        if num_extra > 0:
            new_examples_bad_queryset = experiment.examples \
                .filter(good=False) \
                .exclude(object_id__in=ids) \
                .order_by('?')[:num_extra] \
                .prefetch_related(*prefetch)
            examples_bad += [
                obj.content for obj in new_examples_bad_queryset]
    else:
        examples_bad = [e.content for e in experiment.examples
                        .filter(good=False).order_by('?')[:num_examples]
                        .prefetch_related(*prefetch)]
    if examples_good:
        if publishable:
            # NOTE(review): Python 2 `filter` returns a list; this would
            # need list(filter(...)) under Python 3.
            examples_good = filter(lambda x: x.publishable(), examples_good)
        context['examples_good'] = examples_good
        context['examples_good_json'] = json.dumps(
            [c.get_entry_dict() for c in examples_good])
    if examples_bad:
        if publishable:
            examples_bad = filter(lambda x: x.publishable(), examples_bad)
        context['examples_bad'] = examples_bad
        context['examples_bad_json'] = json.dumps(
            [c.get_entry_dict() for c in examples_bad])
def external_task_extra_context(slug, context):
    """ Give the experiment's module a chance to inject task-specific
    template context; a no-op when no module or hook exists. """
    module = context['experiment'].get_module()
    has_hook = bool(module) and hasattr(module, 'external_task_extra_context')
    if has_hook:
        module.external_task_extra_context(slug, context)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`cache` --- Data caching facility
======================================
"""
__docformat__ = "restructuredtext en"
import sys
import sqlite3
import importlib
from time import time
try:
import cPickle as pickle
except ImportError:
import pickle
# FIXME: This code is not thread-safe.
# This decision isn't optimal to say at the very least. The solution works
# for this case as Skype4Py threads are queued but it will eventually be
# the cause of concurrent SQLite access troubles.
# Hard-coded SQL queries are only being used in simple default built-in SQLite
# caching system.
SQL_CREATE_TABLE = """
CREATE TABLE IF NOT EXISTS container
(
key TEXT PRIMARY KEY,
value BLOB,
expires FLOAT
)
"""
SQL_INSERT = "INSERT INTO container (key, value, expires) VALUES (?, ?, ?)"
SQL_DELETE = "DELETE FROM container WHERE key = ?"
SQL_SELECT = "SELECT value, expires FROM container WHERE key = ?"
SQL_REPLACE = "REPLACE INTO container (key, value, expires) VALUES (?, ?, ?)"
SQL_CLEAR = "DELETE FROM container"
SQL_CLEAR_EXPIRED = "DELETE FROM container WHERE expires <= ? AND expires != 0"
SQL_COUNT = "SELECT count(*) FROM container WHERE key = ?"
SQL_COUNT_ALL = "SELECT count(*) FROM container"
class BaseCache(object):
    """
    Base cache class.

    Subclasses must implement get/add/set/delete/clear and _prune.
    """
    def __init__(self, default_timeout=600):
        # default TTL (seconds) used when a call does not specify one
        self._default_timeout = default_timeout

    def get(self, key):
        """
        Get method should always return NoneType if a requested cache key
        is expired.
        """
        raise NotImplementedError

    def add(self, key, value, timeout=None):
        """Store value only if the key is not already cached."""
        raise NotImplementedError

    def set(self, key, value, timeout=None):
        """Store value, overwriting any existing entry."""
        raise NotImplementedError

    def delete(self, key):
        raise NotImplementedError

    def clear(self):
        raise NotImplementedError

    def _prune(self):
        """
        Clear expired cache entries. It's probably better not to call this
        method directly rather than incorporate the call into other methods
        which are modifying cache data (:meth:`BaseCache.add()`,
        :meth:`BaseCache.set()`, etc) if necessary.
        """
        raise NotImplementedError

    def get_cached(self, key, timeout=None):
        """
        Caching decorator.
        Stores return value of a cached function or method.

        BUGFIX: the timeout is now captured in the decorator's closure.
        The previous implementation stashed it in ``self._timeout``, so
        creating a second decorator on the same cache silently changed the
        TTL used by every previously created one.

        .. seealso::
            :class:`SimpleCache` for basic usage.
        """
        def decorated(cached_function):
            def wrapper(*args, **kwargs):
                cached = self.get(key)
                if cached is None:
                    cached = cached_function(*args, **kwargs)
                    # fall back to the instance-wide default TTL
                    expires = (self._default_timeout if timeout is None
                               else timeout)
                    if expires >= 0:
                        self.set(key, cached, expires)
                return cached
            return wrapper
        return decorated

    def __setitem__(self, key, value, timeout=None):
        self.set(key, value, timeout)

    def __delitem__(self, key):
        self.delete(key)

    def __getitem__(self, key):
        return self.get(key)

    def __contains__(self, key):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __unicode__(self):
        return "<{0} ({1} stored)>".format(self.__class__.__name__, len(self))

    def __str__(self):
        # NOTE: Python 2 idiom (`unicode` builtin); never called on Python 3.
        return unicode(self).encode("utf-8")
class SimpleCache(BaseCache):
    """
    Simple memory cache suited mostly for development purposes.
    Stores pickled items in a regular Python dictionary.
    Basic usage:
    >>> cache = SimpleCache()
    >>> cache.add("mykey", 42, timeout=3600)
    >>> assert cache.get("mykey") is 42
    >>> cache.set("mykey", "derp", timeout=3600)
    >>> assert cache.get("mykey") == "derp"
    >>> cache.delete("mykey")
    >>> assert cache.get("mykey") is None
    >>> cache.set("mykey", 42, timeout=3600)
    >>> cache.clear()
    >>> assert cache.get("mykey") is None
    >>> cache.set("mykey", 42, timeout=-1)
    >>> # Cache is expired.
    >>> assert cache.get("mykey") is None
    Decorator API usage:
    >>> cache = SimpleCache()
    >>> cache.set("mykey", 42, timeout=0)
    >>> @cache.get_cached("mykey")
    ... def do_something():
    ...     return "derp"
    ...
    >>> # Outputs 42 as its output is cached for unlimited time.
    >>> assert do_something() is 42
    >>> cache.set("mykey", 42, timeout=-1)
    >>> # Outputs "derp" as the cached value is expired.
    >>> assert do_something() == "derp"
    Dictionary-like behaviour:
    >>> cache = SimpleCache()
    >>> cache["mykey"] = 42
    >>> assert cache["mykey"] == cache.get("mykey")
    >>> assert "mykey" in cache
    >>> del cache["mykey"]
    >>> assert "mykey" not in cache
    >>> cache["a"] = "herp"
    >>> cache["b"] = "derp"
    >>> assert len(cache) is 2
    """
    # FIXME: Not thread-safe.
    def __init__(self, default_timeout=600):
        """
        :param default_timeout: default cache TTL in seconds. Timeout set to 0
            means cache never expires.
        :type default_timeout: `integer`
        """
        super(SimpleCache, self).__init__(default_timeout)
        # entry layout: key -> (expires, pickled_value);
        # expires == 0 means "never expires"
        self._cache = {}

    def get(self, key):
        """Return the cached value for key, or None if absent or expired."""
        expires, value = self._cache.get(key, (0, None))
        if value is None:
            # missing key (pickled payloads are never None)
            return None
        if expires == 0 or expires > time():
            return pickle.loads(value)
        # BUGFIX: expired entries used to fall through and return the raw
        # pickled blob instead of None (contradicting the doctests above).
        return None

    def set(self, key, value, timeout=None):
        self._prune()
        # BUGFIX: `timeout or default` treated an explicit timeout=0
        # ("never expires", per the docs) as "use the default".
        if timeout is None:
            timeout = self._default_timeout or 0
        expires = time() + timeout if timeout > 0 else timeout
        self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL))

    def add(self, key, value, timeout=None):
        self._prune()
        if timeout is None:
            timeout = self._default_timeout or 0
        expires = time() + timeout if timeout > 0 else timeout
        # setdefault: add() never overwrites an existing entry
        self._cache.setdefault(
            key, (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL)))

    def delete(self, key):
        self._cache.pop(key, None)

    def clear(self):
        self._cache.clear()

    def _prune(self):
        # iterate over a snapshot: popping while iterating the live view
        # raises RuntimeError on Python 3
        now = time()
        for key, (expires, _) in list(self._cache.items()):
            if expires != 0 and expires <= now:
                self._cache.pop(key, None)

    def __contains__(self, key):
        return key in self._cache

    def __len__(self):
        return len(self._cache)
class SQLiteCache(BaseCache):
    """
    Simple SQLite-driven caching backend. Uses hardcoded SQL queries.
    Basic usage:
    >>> cache = SQLiteCache()
    >>> cache.add("herp_ [mykey]--_ DERP", 42, timeout=3600)
    >>> assert cache.get("herp_ [mykey]--_ DERP") is 42
    >>> cache.set("mykey", "derp", timeout=3600)
    >>> assert cache.get("mykey") == "derp"
    >>> cache.delete("mykey")
    >>> assert cache.get("mykey") is None
    >>> cache.set("mykey", 42, timeout=3600)
    >>> cache.clear()
    >>> assert cache.get("mykey") is None
    >>> cache.set("mykey", 42, timeout=-1)
    >>> # Cache is expired.
    >>> assert cache.get("mykey") is None
    Decorator API usage:
    >>> cache = SQLiteCache()
    >>> cache.set("mykey", 42, timeout=0)
    >>> @cache.get_cached("mykey")
    ... def do_something():
    ...     return "derp"
    ...
    >>> # Outputs 42 as its output is cached for unlimited time.
    >>> assert do_something() == 42
    >>> cache.set("mykey", 42, timeout=-1)
    >>> # Outputs "derp" as the cached value is expired.
    >>> assert do_something() == "derp"
    >>> # Caching decorator applies to class methods.
    >>> cache = cache  # IDE code hinting fix.
    >>> cache["mykey"] = 42
    >>> class MyClass:
    ...     @cache.get_cached("mykey")
    ...     def do_something(self):
    ...         return "derp"
    ...
    >>> assert MyClass().do_something() is 42
    Dictionary-like behaviour:
    >>> cache = SQLiteCache()
    >>> cache["mykey"] = 42
    >>> assert cache["mykey"] == cache.get("mykey")
    >>> assert "mykey" in cache
    >>> del cache["mykey"]
    >>> assert "mykey" not in cache
    >>> cache["a"] = "herp"
    >>> cache["b"] = "derp"
    >>> assert len(cache) is 2
    """
    def __init__(self, location="", default_timeout=600, autocommit=True):
        """
        :param location: database file path for filesystem storage. Empty
            string or ":memory:" for in-memory storage
        :type location: `str`
        :param default_timeout: default cache TTL in seconds
        :type default_timeout: `float`
        :param autocommit: decides whether database changes should be committed
            automatically
        :type autocommit: `boolean`
        """
        super(SQLiteCache, self).__init__(default_timeout)
        # Python 2 only: *basestring* covers both str and unicode.
        assert isinstance(location, basestring)
        if not location:
            self._location = ":memory:"
        else:
            self._location = location
        # The connection is opened lazily on first use (_get_connection).
        self._connection = None
        self._autocommit = autocommit
    def _get_connection(self):
        # Lazily open and memoize the SQLite connection, creating the cache
        # table on first use.
        if self._connection is None:
            kwargs = dict(database=self._location, timeout=30)
            if self._autocommit:
                # isolation_level=None switches sqlite3 to autocommit mode.
                kwargs.update(dict(isolation_level=None))
            # FIXME: Potentially dangerous garbage.
            # Not thread-safe.
            # See this module's docstring for more information.
            kwargs.update(dict(check_same_thread=False))
            self._connection = sqlite3.Connection(**kwargs)
            self._connection.cursor().execute(SQL_CREATE_TABLE)
        return self._connection
    def commit(self):
        # Explicit commit for callers running with autocommit disabled.
        with self._get_connection() as connection:
            connection.commit()
    def get(self, key):
        # Return the unpickled value stored for *key*, or None when the row
        # is missing or expired (expires == 0 means "never expires").
        value = None
        with self._get_connection() as connection:
            try:
                result = connection.cursor().execute(SQL_SELECT,
                                                     (key,)).fetchone()
                expires = result[1]
                # NOTE(review): this uses ">=" while SimpleCache.get uses
                # ">" -- confirm which expiry boundary is intended.
                if expires >= time() or expires == 0:
                    # Python 2 only: str() turns the stored buffer back into
                    # a byte string for pickle.
                    value = pickle.loads(str(result[0]))
            except TypeError:
                # No row found: fetchone() returned None, indexing raised.
                pass
        return value
    def set(self, key, value, timeout=None):
        # Insert-or-replace the entry for *key* after evicting stale rows.
        self._prune()
        # NOTE(review): an explicit timeout=0 falls through to the default
        # TTL here; the commented-out block below records an earlier attempt
        # at giving 0 the documented "never expires" meaning -- confirm.
        timeout = timeout or self._default_timeout or 0
        expires = time() + timeout if timeout > 0 else timeout
        # if timeout is None:
        #     expires = 0
        # elif self._default_timeout == 0:
        #     expires = 0
        # elif timeout == 0:
        #     expires = 0
        # else:
        #     expires = time() + timeout
        with self._get_connection() as connection:
            # Python 2 only: buffer() stores the pickle as a SQLite BLOB.
            value = buffer(pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
            connection.cursor().execute(SQL_REPLACE, (key, value, expires,))
    def add(self, key, value, timeout=None):
        # Like set(), but presumably leaves an existing entry untouched
        # (depends on the SQL_INSERT statement -- confirm).
        self._prune()
        timeout = timeout or self._default_timeout or 0
        expires = time() + timeout if timeout > 0 else timeout
        # if timeout is None:
        #     expires = 0
        # elif self._default_timeout == 0:
        #     expires = 0
        # else:
        #     expires = time() + timeout
        with self._get_connection() as connection:
            value = buffer(pickle.dumps(value, pickle.HIGHEST_PROTOCOL))
            connection.cursor().execute(SQL_INSERT, (key, value, expires,))
    def delete(self, key):
        # Remove the row for *key*; deleting a missing key is a no-op.
        with self._get_connection() as connection:
            connection.cursor().execute(SQL_DELETE, (key,))
    def clear(self):
        # Drop every row from the cache table.
        with self._get_connection() as connection:
            connection.cursor().execute(SQL_CLEAR)
    def _prune(self):
        """
        >>> from time import sleep
        >>> cache = SQLiteCache()
        >>> cache.add("mykey", 42, timeout=0.01)
        >>> sleep(0.1)
        >>> cache._prune()
        >>> assert cache.get("mykey") is None
        """
        # Delete all rows whose expiry timestamp has already passed.
        with self._get_connection() as connection:
            connection.cursor().execute(SQL_CLEAR_EXPIRED, (time(),))
    def __contains__(self, key):
        # Dict-style membership test backed by a COUNT query.
        retval = False
        with self._get_connection() as connection:
            result = connection.cursor().execute(SQL_COUNT, (key,)).fetchone()
            count = result[0]
            if count == 1:
                retval = True
            elif count > 1:
                # The key column should be unique; more than one row means
                # the table is corrupt.
                raise Exception("Cache contains multiple values with same key")
        return retval
    def __len__(self):
        # Number of rows stored, including expired-but-unpruned ones.
        with self._get_connection() as connection:
            result = connection.cursor().execute(SQL_COUNT_ALL).fetchone()
            count = result[0]
        return count
    def __del__(self):
        # Flush and close the connection on garbage collection.
        # NOTE(review): when no connection was ever opened this creates a
        # brand-new one only to close it again -- confirm that is intended.
        with self._get_connection() as connection:
            connection.commit()
            connection.cursor().close()
        self._connection.close()
class CacheManager(object):
    """
    Registry which keeps track of every instantiated cache object.
    >>> cm = CacheManager()
    >>> cache = cm.get_cache("simple_cache")
    >>> same_cache = cm.get_cache("simple_cache")
    >>> assert cache is same_cache
    """
    # Backend class instantiated for caches requested before registration.
    _default_backend = SimpleCache
    def __init__(self):
        # Maps cache name -> cache instance.
        self._caches = {}
    def get_cache(self, key):
        """Return the cache registered under *key*, creating it on demand."""
        # BUGFIX: the previous setdefault() call instantiated a throwaway
        # default backend on *every* lookup, even when the key already
        # existed; only build one when it is actually needed.
        if key not in self._caches:
            self._caches[key] = self._default_backend()
        return self._caches[key]
    def add_cache(self, key, cacheobj):
        """Register *cacheobj* under *key* unless the key is already taken."""
        # NOTE: assert disappears under "python -O"; kept so callers keep
        # seeing AssertionError for invalid objects.
        assert isinstance(cacheobj, BaseCache)
        self._caches.setdefault(key, cacheobj)
    def delete_cache(self, key):
        """Forget the cache registered under *key*; no-op when absent."""
        self._caches.pop(key, None)
# Module-level singleton registry plus convenience aliases mirroring its API.
cache_manager = CacheManager()
get_cache = cache_manager.get_cache
add_cache = cache_manager.add_cache
delete_cache = cache_manager.delete_cache
class BaseCacheConfigurator(object):
    """
    Base cache configurator class which defines some useful defaults and
    methods.
    """
    # Fallback option values used when a cache's configuration omits them.
    _defaults = {
        "backend": "cache.SimpleCache",
        # Depending on cache backend, location could be "host:port" pair,
        # file path, etc. Defaults to empty string which is ":memory:" for
        # cache.SQLiteCache backend.
        "location": "",
        # Cache time-to-live in seconds, with "0" means no expiration.
        "timeout": 0,
    }
    def __init__(self, config, cache_manager=cache_manager):
        """Remember the raw *config* and the registry to populate.

        *cache_manager* defaults to the module-level singleton registry.
        """
        self._config = config
        self._cache_manager = cache_manager
        # self._importer = __import__
        self._importer = importlib.import_module
    def _resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute syntax.
        """
        chunks = s.split(".")
        used = chunks.pop(0)
        try:
            found = self._importer(used)
            for chunk in chunks:
                used += "." + chunk
                try:
                    found = getattr(found, chunk)
                except AttributeError:
                    # The attribute may be a not-yet-imported submodule;
                    # importing it binds it onto its parent package, after
                    # which the getattr retry succeeds (or raises for real).
                    self._importer(used)
                    found = getattr(found, chunk)
            return found
        except ImportError:
            # Re-raise as ValueError while preserving the original cause and
            # traceback for debugging.
            error, traceback = sys.exc_info()[1:]
            exception = ValueError('Cannot resolve %r: %s' % (s, error))
            exception.__cause__, exception.__traceback__ = error, traceback
            raise exception
class DictCacheConfigurator(BaseCacheConfigurator):
    """
    Configure caching using a dictionary-like object to describe the
    configuration.
    Example usage:
    >>> config = dict(mycache=dict(backend="cache.SQLiteCache"))
    >>> dict_config(config)
    >>> assert isinstance(get_cache("mycache"), SQLiteCache)
    >>> cache = get_cache("other_cache")
    >>> assert isinstance(cache, SimpleCache)
    >>> cache.set("a", 42)
    >>> assert cache.get("a") is 42
    """
    def configure(self):
        """Instantiate and register a cache for every entry in the config.

        Each entry may specify "backend" (dotted path), "timeout" and
        "location"; missing options fall back to ``_defaults``. Any other
        option is rejected.
        """
        for key, opts in self._config.iteritems():
            if not key:
                continue
            d = self._defaults
            backend = self._resolve(str(opts.pop("backend", d.get("backend"))))
            timeout = float(opts.pop("timeout", d.get("timeout")))
            location = str(opts.pop("location", d.get("location")))
            assert not opts, "Unknown options: %s" % ", ".join(opts.keys())
            try:
                cacheobj = backend(default_timeout=timeout, location=location)
            except TypeError:
                # BUGFIX: backends without storage (e.g. the documented
                # default cache.SimpleCache) accept no "location" keyword;
                # retry without it instead of crashing.
                cacheobj = backend(default_timeout=timeout)
            self._cache_manager.add_cache(key, cacheobj)
def dict_config(config):
    """Configure the module-level cache manager from a config dictionary."""
    configurator = DictCacheConfigurator(config)
    configurator.configure()
# Run the embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| |
"""Test the module nearmiss."""
# Authors: Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# License: MIT
import pytest
import numpy as np
from sklearn.utils._testing import assert_array_equal
from sklearn.neighbors import NearestNeighbors
from imblearn.under_sampling import NearMiss
# Fixed 2-D toy dataset shared by every test in this module.
X = np.array(
    [
        [1.17737838, -0.2002118],
        [0.4960075, 0.86130762],
        [-0.05903827, 0.10947647],
        [0.91464286, 1.61369212],
        [-0.54619583, 1.73009918],
        [-0.60413357, 0.24628718],
        [0.45713638, 1.31069295],
        [-0.04032409, 3.01186964],
        [0.03142011, 0.12323596],
        [0.50701028, -0.17636928],
        [-0.80809175, -1.09917302],
        [-0.20497017, -0.26630228],
        [0.99272351, -0.11631728],
        [-1.95581933, 0.69609604],
        [1.15157493, -1.2981518],
    ]
)
# Imbalanced labels for X: class 0 has 3 samples, class 1 has 5, class 2 has 7.
Y = np.array([1, 2, 1, 0, 2, 1, 2, 2, 1, 2, 0, 0, 2, 1, 2])
# All NearMiss algorithm variants exercised by the tests below.
VERSION_NEARMISS = (1, 2, 3)
@pytest.mark.parametrize(
    "nearmiss_params, err_msg",
    [
        # Unknown NearMiss version.
        ({"version": 1000}, "must be 1, 2 or 3"),
        # Invalid n_neighbors type. (The "interger" typo deliberately
        # mirrors the message raised by imblearn itself.)
        (
            {"version": 1, "n_neighbors": "rnd"},
            "n_neighbors must be an interger or an object compatible",
        ),
        # Invalid n_neighbors_ver3 type for version 3.
        (
            {
                "version": 3,
                "n_neighbors": NearestNeighbors(n_neighbors=3),
                "n_neighbors_ver3": "rnd",
            },
            "n_neighbors_ver3 must be an interger or an object compatible",
        ),
    ],
)
def test_nearmiss_error(nearmiss_params, err_msg):
    """Each invalid parameter set must raise a matching ValueError."""
    nm = NearMiss(**nearmiss_params)
    with pytest.raises(ValueError, match=err_msg):
        nm.fit_resample(X, Y)
def test_nm_fit_resample_auto():
    """Check "auto" resampling output for every NearMiss version."""
    sampling_strategy = "auto"
    # Expected resampled X, one array per NearMiss version (1, 2, 3);
    # versions 1 and 2 happen to select the same samples on this dataset.
    X_gt = [
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
            ]
        ),
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
            ]
        ),
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [1.17737838, -0.2002118],
                [-0.60413357, 0.24628718],
                [0.03142011, 0.12323596],
                [1.15157493, -1.2981518],
                [-0.54619583, 1.73009918],
                [0.99272351, -0.11631728],
            ]
        ),
    ]
    # "auto" balances down to the minority class: 3 samples per class.
    y_gt = [
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
    ]
    for version_idx, version in enumerate(VERSION_NEARMISS):
        nm = NearMiss(sampling_strategy=sampling_strategy, version=version)
        X_resampled, y_resampled = nm.fit_resample(X, Y)
        assert_array_equal(X_resampled, X_gt[version_idx])
        assert_array_equal(y_resampled, y_gt[version_idx])
def test_nm_fit_resample_float_sampling_strategy():
    """Check resampling with explicit per-class target counts."""
    # NOTE(review): despite the test name, sampling_strategy here is a dict
    # of per-class sample counts, not a float -- confirm the intended name.
    sampling_strategy = {0: 3, 1: 4, 2: 4}
    # Expected resampled X per NearMiss version (1, 2, 3).
    X_gt = [
        np.array(
            [
                [-0.20497017, -0.26630228],
                [-0.80809175, -1.09917302],
                [0.91464286, 1.61369212],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [1.17737838, -0.2002118],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
                [0.99272351, -0.11631728],
            ]
        ),
        np.array(
            [
                [-0.20497017, -0.26630228],
                [-0.80809175, -1.09917302],
                [0.91464286, 1.61369212],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [1.17737838, -0.2002118],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
                [0.99272351, -0.11631728],
            ]
        ),
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [1.17737838, -0.2002118],
                [-0.60413357, 0.24628718],
                [0.03142011, 0.12323596],
                [-0.05903827, 0.10947647],
                [1.15157493, -1.2981518],
                [-0.54619583, 1.73009918],
                [0.99272351, -0.11631728],
                [0.45713638, 1.31069295],
            ]
        ),
    ]
    # Expected labels: 3 samples of class 0 and 4 each of classes 1 and 2.
    y_gt = [
        np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]),
    ]
    for version_idx, version in enumerate(VERSION_NEARMISS):
        nm = NearMiss(sampling_strategy=sampling_strategy, version=version)
        X_resampled, y_resampled = nm.fit_resample(X, Y)
        assert_array_equal(X_resampled, X_gt[version_idx])
        assert_array_equal(y_resampled, y_gt[version_idx])
def test_nm_fit_resample_nn_obj():
    """Check that passing a NearestNeighbors object works like the default."""
    sampling_strategy = "auto"
    # Custom neighbors estimator forwarded through the n_neighbors parameter.
    nn = NearestNeighbors(n_neighbors=3)
    # Expected output is identical to test_nm_fit_resample_auto.
    X_gt = [
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
            ]
        ),
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [-0.05903827, 0.10947647],
                [0.03142011, 0.12323596],
                [-0.60413357, 0.24628718],
                [0.50701028, -0.17636928],
                [0.4960075, 0.86130762],
                [0.45713638, 1.31069295],
            ]
        ),
        np.array(
            [
                [0.91464286, 1.61369212],
                [-0.80809175, -1.09917302],
                [-0.20497017, -0.26630228],
                [1.17737838, -0.2002118],
                [-0.60413357, 0.24628718],
                [0.03142011, 0.12323596],
                [1.15157493, -1.2981518],
                [-0.54619583, 1.73009918],
                [0.99272351, -0.11631728],
            ]
        ),
    ]
    y_gt = [
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
        np.array([0, 0, 0, 1, 1, 1, 2, 2, 2]),
    ]
    for version_idx, version in enumerate(VERSION_NEARMISS):
        nm = NearMiss(
            sampling_strategy=sampling_strategy,
            version=version,
            n_neighbors=nn,
        )
        X_resampled, y_resampled = nm.fit_resample(X, Y)
        assert_array_equal(X_resampled, X_gt[version_idx])
        assert_array_equal(y_resampled, y_gt[version_idx])
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for miscellaneous utility functions."""
import functools
import httplib
import pickle
import re
from googlecloudsdk.core import log
from googlecloudsdk.third_party.apis.iam.v1 import iam_v1_messages as msgs
from googlecloudsdk.third_party.apitools.base.py import exceptions
# Short aliases for the deeply nested enum types in the generated IAM API
# messages: private-key formats for create/list requests, and who manages
# a service account key.
CREATE_KEY_TYPES = (msgs.CreateServiceAccountKeyRequest
                    .PrivateKeyTypeValueValuesEnum)
KEY_TYPES = (msgs.ServiceAccountKey.PrivateKeyTypeValueValuesEnum)
MANAGED_BY = (msgs.IamProjectsServiceAccountsKeysListRequest
              .KeyTypesValueValuesEnum)
def ManagedByFromString(managed_by):
  """Maps a management-type string onto MANAGED_BY filter values.

  MANAGED_BY describes who manages a service account key resource; IAM
  rotates SYSTEM_MANAGED keys by default.

  Args:
    managed_by: One of 'user', 'system' or 'any'.

  Returns:
    A list of KeyTypesValueValuesEnum values: an empty list for 'any',
    and [KEY_TYPE_UNSPECIFIED] for any unrecognized string.
  """
  known = {
      'user': [MANAGED_BY.USER_MANAGED],
      'system': [MANAGED_BY.SYSTEM_MANAGED],
      'any': [],
  }
  return known.get(managed_by, [MANAGED_BY.KEY_TYPE_UNSPECIFIED])
def KeyTypeFromString(key_str):
  """Maps a key-format string onto a KeyType enum value.

  Args:
    key_str: Either 'p12' or 'json'; anything else yields TYPE_UNSPECIFIED.

  Returns:
    A PrivateKeyTypeValueValuesEnum value.
  """
  if key_str == 'p12':
    return KEY_TYPES.TYPE_PKCS12_FILE
  if key_str == 'json':
    return KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  return KEY_TYPES.TYPE_UNSPECIFIED
def KeyTypeToString(key_type):
  """Maps a key-type enum value back onto its string form.

  Accepts members of either KEY_TYPES or CREATE_KEY_TYPES, so that
  parseKeyType(keyTypeToString(x)) is a no-op.

  Args:
    key_type: An enum of either KEY_TYPES or CREATE_KEY_TYPES.

  Returns:
    'p12', 'json' or 'unspecified'.
  """
  if key_type in (KEY_TYPES.TYPE_PKCS12_FILE,
                  CREATE_KEY_TYPES.TYPE_PKCS12_FILE):
    return 'p12'
  if key_type in (KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE,
                  CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE):
    return 'json'
  return 'unspecified'
def KeyTypeToCreateKeyType(key_type):
  """Converts a KEY_TYPES value into the equivalent CREATE_KEY_TYPES value.

  The generated API messages declare a distinct enum class per message, and
  members of the two classes do not compare equal, so the conversion has to
  be spelled out by hand.

  Args:
    key_type: A ServiceAccountKey.PrivateKeyTypeValueValuesEnum value.

  Returns:
    A IamProjectsServiceAccountKeysCreateRequest.PrivateKeyTypeValueValuesEnum
    value.
  """
  if key_type == KEY_TYPES.TYPE_PKCS12_FILE:
    return CREATE_KEY_TYPES.TYPE_PKCS12_FILE
  if key_type == KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
    return CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  return CREATE_KEY_TYPES.TYPE_UNSPECIFIED
def KeyTypeFromCreateKeyType(key_type):
  """The inverse of *toCreateKeyType*: CREATE_KEY_TYPES -> KEY_TYPES."""
  if key_type == CREATE_KEY_TYPES.TYPE_PKCS12_FILE:
    return KEY_TYPES.TYPE_PKCS12_FILE
  if key_type == CREATE_KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE:
    return KEY_TYPES.TYPE_GOOGLE_CREDENTIALS_FILE
  return KEY_TYPES.TYPE_UNSPECIFIED
def CatchServiceAccountErrors(func):
  """Decorator to automatically manage HTTP errors related to api calls."""
  @functools.wraps(func)
  # pylint:disable=invalid-name
  def wrapper(*args, **kwargs):
    # Expects to wrap a method: args[0] is the command instance, which
    # carries the address (and optionally key_id) being operated on.
    self = args[0]
    try:
      return func(*args, **kwargs)
    except exceptions.HttpError as error:
      error_msg = None
      if error.status_code == httplib.NOT_FOUND:
        error_msg = 'Not found'
      elif error.status_code == httplib.FORBIDDEN:
        error_msg = 'Permission denied'
      if error_msg:
        # Known, user-facing failure: log a friendly message and swallow
        # the exception (the command then returns None).
        if self.key_id:
          log.error('{0}: key [{1}] for service account [{2}]'.format(
              error_msg, self.key_id, self.address))
        else:
          log.error(
              '{0}: service account [{1}]'.format(error_msg, self.address))
        return
      # Any other HTTP error falls through to the generic handler below.
      raise
  # Chain with the catch-all handler so unexpected HTTP errors still log.
  return CatchHttpErrors(wrapper)
def CatchHttpErrors(func):
  """Decorator to gracefully quit when any unhandled HTTP error occurs."""
  @functools.wraps(func)
  # pylint:disable=invalid-name
  def Wrapped(*args, **kwargs):
    try:
      return func(*args, **kwargs)
    except exceptions.HttpError as error:
      # Log the raw response body, then let the error propagate.
      log.error('Error:\n' + error.content)
      raise
  return Wrapped
def ValidateEmail(email):
  """Loosely checks that *email* looks like an email address.

  Accepts any string of the shape <local>@<host>.<rest>. Returns the regex
  match object (truthy) on success, or None -- callers should treat the
  result as a boolean.
  """
  pattern = r'[^@]+@[^.]+\..+'
  return re.match(pattern, email)
def ValidateKeyId(key_id):
  """Loosely checks that *key_id* is well structured.

  Returns a truthy match object when *key_id* starts with one or more
  lowercase alphanumerics, else None.

  NOTE(review): key ids are described as hexadecimal, but the pattern
  accepts any [a-z0-9] run and is not anchored at the end -- confirm.
  """
  return re.match(r'[a-z0-9]+', key_id)
def ValidateAccountId(account_id):
  """Checks that *account_id* is well structured.

  Returns False when the id exceeds 63 characters; otherwise the regex
  match object (or None). Callers should treat the result as a boolean.
  """
  if len(account_id) <= 63:
    # The pattern comes from the service's protobuffer definition.
    return re.match(r'[a-z]([-a-z0-9]*[a-z0-9])', account_id)
  return False
def ProjectToProjectResourceName(project):
  """Builds the resource name for the project id *project*."""
  template = 'projects/{0}'
  return template.format(project)
def EmailToAccountResourceName(email):
  """Builds the service account resource name for *email*."""
  template = 'projects/-/serviceAccounts/{0}'
  return template.format(email)
def EmailAndKeyToResourceName(email, key):
  """Builds the key resource name for the (*email*, *key* id) pair."""
  template = 'projects/-/serviceAccounts/{0}/keys/{1}'
  return template.format(email, key)
def GetKeyIdFromResourceName(name):
  """Extracts the key id (sixth path segment) from *name*; no validation."""
  segments = name.split('/')
  return segments[5]
def DeepCopy(obj):
  """Returns a deep copy of *obj* produced by a pickle round-trip.

  Only works for picklable objects.
  """
  serialized = pickle.dumps(obj)
  return pickle.loads(serialized)
| |
"""Test the Tradfri config flow."""
from unittest.mock import patch
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tradfri import config_flow
from tests.common import MockConfigEntry, mock_coro
@pytest.fixture
def mock_auth():
    """Fixture patching the config flow's authenticate helper."""
    target = "homeassistant.components.tradfri.config_flow.authenticate"
    with patch(target) as patched_auth:
        yield patched_auth
@pytest.fixture
def mock_entry_setup():
    """Fixture patching the integration's entry setup to always succeed."""
    target = "homeassistant.components.tradfri.async_setup_entry"
    with patch(target) as patched_setup:
        patched_setup.return_value = mock_coro(True)
        yield patched_setup
async def test_user_connection_successful(hass, mock_auth, mock_entry_setup):
    """Test a successful connection."""

    def fake_authenticate(hass, host, code):
        return mock_coro({"host": host, "gateway_id": "bla"})

    mock_auth.side_effect = fake_authenticate
    init_result = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )
    result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {"host": "123.123.123.123", "security_code": "abcd"}
    )
    # The entry was created and set up exactly once.
    assert len(mock_entry_setup.mock_calls) == 1
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "import_groups": False,
    }
async def test_user_connection_timeout(hass, mock_auth, mock_entry_setup):
    """Test a connection timeout."""
    mock_auth.side_effect = config_flow.AuthError("timeout")
    init_result = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )
    result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
    )
    # No entry was set up; the form is shown again with a base error.
    assert len(mock_entry_setup.mock_calls) == 0
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"base": "timeout"}
async def test_user_connection_bad_key(hass, mock_auth, mock_entry_setup):
    """Test a connection with bad key."""
    mock_auth.side_effect = config_flow.AuthError("invalid_security_code")
    init_result = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "user"}
    )
    result = await hass.config_entries.flow.async_configure(
        init_result["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
    )
    # No entry was set up; the error is attached to the security_code field.
    assert len(mock_entry_setup.mock_calls) == 0
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {"security_code": "invalid_security_code"}
async def test_discovery_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via discovery."""
    # Echo the host back from authenticate() with a fixed gateway id.
    mock_auth.side_effect = lambda hass, host, code: mock_coro(
        {"host": host, "gateway_id": "bla"}
    )
    # Zeroconf discovery pre-fills the host; the form only asks for the code.
    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "zeroconf"}, data={"host": "123.123.123.123"}
    )
    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )
    # The entry was created and set up exactly once.
    assert len(mock_entry_setup.mock_calls) == 1
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "import_groups": False,
    }
async def test_import_connection(hass, mock_auth, mock_entry_setup):
    """Test a connection via import."""
    # authenticate() also returns identity/key credentials in this path.
    mock_auth.side_effect = lambda hass, host, code: mock_coro(
        {"host": host, "gateway_id": "bla", "identity": "mock-iden", "key": "mock-key"}
    )
    # YAML import supplies host and import_groups; the code is asked via form.
    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "import_groups": True},
    )
    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    # The import_groups flag from the import data is preserved in the entry.
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
        "import_groups": True,
    }
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_no_groups(hass, mock_auth, mock_entry_setup):
    """Test a connection via import and no groups allowed."""
    # Same as test_import_connection, but with import_groups disabled.
    mock_auth.side_effect = lambda hass, host, code: mock_coro(
        {"host": host, "gateway_id": "bla", "identity": "mock-iden", "key": "mock-key"}
    )
    flow = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "import_groups": False},
    )
    result = await hass.config_entries.flow.async_configure(
        flow["flow_id"], {"security_code": "abcd"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "bla",
        "identity": "mock-iden",
        "key": "mock-key",
        "import_groups": False,
    }
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy(hass, mock_gateway_info, mock_entry_setup):
    """Test a connection via import."""
    # mock_gateway_info is a fixture defined elsewhere in this test module.
    mock_gateway_info.side_effect = lambda hass, host, identity, key: mock_coro(
        {"host": host, "identity": identity, "key": key, "gateway_id": "mock-gateway"}
    )
    # Legacy import carries a pre-shared key, so no form step is needed.
    result = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "key": "mock-key", "import_groups": True},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    # The flow fills in the default identity for legacy configurations.
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "mock-gateway",
        "identity": "homeassistant",
        "key": "mock-key",
        "import_groups": True,
    }
    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy_no_groups(
    hass, mock_gateway_info, mock_entry_setup
):
    """Test a connection via legacy import and no groups allowed."""
    # Same as test_import_connection_legacy, but with import_groups disabled.
    mock_gateway_info.side_effect = lambda hass, host, identity, key: mock_coro(
        {"host": host, "identity": identity, "key": key, "gateway_id": "mock-gateway"}
    )
    result = await hass.config_entries.flow.async_init(
        "tradfri",
        context={"source": "import"},
        data={"host": "123.123.123.123", "key": "mock-key", "import_groups": False},
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["result"].data == {
        "host": "123.123.123.123",
        "gateway_id": "mock-gateway",
        "identity": "homeassistant",
        "key": "mock-key",
        "import_groups": False,
    }
    assert len(mock_gateway_info.mock_calls) == 1
    assert len(mock_entry_setup.mock_calls) == 1
async def test_discovery_duplicate_aborted(hass):
    """Test a duplicate discovery host is ignored."""
    existing_entry = MockConfigEntry(domain="tradfri", data={"host": "some-host"})
    existing_entry.add_to_hass(hass)
    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "zeroconf"}, data={"host": "some-host"}
    )
    # Discovering an already-configured host aborts immediately.
    assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow["reason"] == "already_configured"
async def test_import_duplicate_aborted(hass):
    """Test a duplicate import host is ignored."""
    existing_entry = MockConfigEntry(domain="tradfri", data={"host": "some-host"})
    existing_entry.add_to_hass(hass)
    flow = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "import"}, data={"host": "some-host"}
    )
    # Importing an already-configured host aborts immediately.
    assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert flow["reason"] == "already_configured"
async def test_duplicate_discovery(hass, mock_auth, mock_entry_setup):
    """Test a duplicate discovery in progress is ignored."""
    first = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "zeroconf"}, data={"host": "123.123.123.123"}
    )
    assert first["type"] == data_entry_flow.RESULT_TYPE_FORM
    # A second discovery of the same host aborts while the first is pending.
    second = await hass.config_entries.flow.async_init(
        "tradfri", context={"source": "zeroconf"}, data={"host": "123.123.123.123"}
    )
    assert second["type"] == data_entry_flow.RESULT_TYPE_ABORT
| |
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import os
import shutil
import tarfile
import tempfile
from pkg_resources import parse_version
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from bloom.config import BLOOM_CONFIG_BRANCH
from bloom.git import branch_exists
from bloom.git import create_branch
from bloom.git import create_tag
from bloom.git import delete_remote_tag
from bloom.git import delete_tag
from bloom.git import ensure_clean_working_env
from bloom.git import ensure_git_root
from bloom.git import get_last_tag_by_version
from bloom.git import GitClone
from bloom.git import has_changes
from bloom.git import inbranch
from bloom.git import ls_tree
from bloom.git import show
from bloom.git import tag_exists
from bloom.git import track_branches
from bloom.logging import debug
from bloom.logging import error
from bloom.logging import fmt
from bloom.logging import info
from bloom.logging import warning
from bloom.packages import get_package_data
from bloom.util import add_global_arguments
from bloom.util import execute_command
from bloom.util import get_git_clone_state
from bloom.util import handle_global_arguments
from bloom.util import load_url_to_file_handle
def version_check(version):
    """Warn when *version* is older than the newest upstream release tag.

    Looks up the most recent upstream tag in the release repository, prints
    it, and emits a warning when the given version compares lower. Does
    nothing when no upstream tag exists yet.
    """
    last_tag = get_last_tag_by_version()
    if not last_tag:
        return
    # Tags look like "<prefix>/<version>"; keep only the version part.
    last_tag_version = last_tag.split('/')[-1]
    info(fmt("The latest upstream tag in the release repository is '@!{0}@|'."
         .format(last_tag)))
    # Ensure the new version is greater than the last tag
    # NOTE(review): the strict "<" means an *equal* version passes silently,
    # although the warning text says "isn't newer" -- confirm re-importing
    # the same version is intended to go unwarned.
    if parse_version(version) < parse_version(last_tag_version):
        warning("""\
Version discrepancy:
The upstream version '{0}' isn't newer than upstream version '{1}'.
""".format(version, last_tag_version))
def import_tarball(tarball_path, target_branch, version, name):
    """Import the contents of a ``.tar.gz`` archive into a git branch.

    The target branch is cleared of everything except ``.git``, the archive
    is extracted into it, one level of directory nesting is flattened when
    the archive wraps everything in a single top-level folder (common with
    hg archives), and the result is committed when anything changed.

    :param tarball_path: path to the ``.tar.gz`` archive to import
    :param target_branch: git branch that receives the archive contents
    :param version: upstream version string, used in the commit message
    :param name: upstream project name for the commit message (may be None)
    """
    if tarball_path.endswith('.zip'):
        error("Zip archives are not yet supported.", exit=True)
    # FIX: use a context manager so the archive handle is closed even when a
    # git command or the extraction fails (previously the handle leaked).
    with tarfile.open(tarball_path, 'r:gz') as targz:
        with inbranch(target_branch):
            # Prepare list of members to extract, ignoring VCS metadata
            ignores = ('.git', '.gitignore', '.svn', '.hgignore', '.hg', 'CVS')
            members = targz.getmembers()
            members = [m for m in members if m.name.split('/')[-1] not in ignores]
            # Clear out the local branch (everything git currently tracks)
            items = []
            for item in os.listdir(os.getcwd()):
                if item in ['.git', '..', '.']:
                    continue
                items.append(item)
            if len(items) > 0:
                execute_command('git rm -rf ' + ' '.join(['"%s"' % i for i in items if i]))
            # Clear out any untracked files
            execute_command('git clean -fdx')
            # Extract the tarball into the clean branch
            targz.extractall(os.getcwd(), members)
            # Check for folder nesting (mostly hg).
            # FIX: the suffix constant was misspelled '.tag.gz'; behavior was
            # unaffected only because both spellings have the same length.
            items = []
            for item in os.listdir(os.getcwd()):
                if not item.startswith('.'):
                    items.append(item)
            tarball_prefix = os.path.basename(tarball_path)[:-len('.tar.gz')]
            if [tarball_prefix] == items:
                debug('Removing nested tarball folder: ' + str(tarball_prefix))
                tarball_prefix_path = os.path.join(os.getcwd(), tarball_prefix)
                for item in os.listdir(tarball_prefix_path):
                    if item in ['.', '..']:
                        continue
                    item_path = os.path.join(os.getcwd(), tarball_prefix, item)
                    debug(
                        'moving ' + str(item_path) + ' to ' +
                        str(os.path.join(os.getcwd(), item))
                    )
                    shutil.move(item_path, os.path.join(os.getcwd(), item))
                shutil.rmtree(tarball_prefix_path)
            else:
                debug('No nested tarball folder found.')
            # Stage everything that is now in the working tree
            items = []
            for item in os.listdir(os.getcwd()):
                if item in ['.git', '..', '.']:
                    continue
                items.append(item)
            if len(items) > 0:
                execute_command('git add ' + ' '.join(['"%s"' % i for i in items if i]))
            # Remove any straggling untracked files
            execute_command('git clean -dXf')
            # Only if we have local changes commit
            # (not true if the upstream didn't change any files)
            if has_changes():
                msg = "Imported upstream version '{0}' of '{1}'"
                msg = msg.format(version, name or 'upstream')
                cmd = 'git commit -m "{0}"'.format(msg)
                execute_command(cmd)
def handle_tree(tree, directory, root_path, version):
    """Recursively overlay a tree from the bloom config branch onto the CWD.

    ``tree`` is an ``ls_tree`` mapping of entry name -> ``'directory'`` or
    ``'file'``.  Directories are created and recursed into; files are read
    out of ``BLOOM_CONFIG_BRANCH`` and git-added.  ``package.xml`` files
    additionally have the ``:{version}`` placeholder substituted.

    :param tree: mapping of entry name to kind ('directory' or 'file')
    :param directory: current subtree path relative to the overlay root
    :param root_path: path of the overlay folder within BLOOM_CONFIG_BRANCH
    :param version: version string substituted into templated files
    """
    for path, kind in tree.items():
        if kind == 'directory':
            # Path relative to start path
            rel_path = os.path.join(directory, path)
            # A directory in the patches cannot replace an upstream file
            if os.path.isfile(rel_path):
                error("In patches path '{0}' is a directory".format(rel_path) +
                      ", but it exists in the upstream branch as a file.",
                      exit=True)
            # If it is not already a directory, create it
            # FIX: corrected "Createing" typo in the user-facing message.
            if not os.path.isdir(rel_path):
                info(" Creating directory... '{0}'".format(rel_path))
                os.mkdir(rel_path)
            # Recurse on the directory
            handle_tree(ls_tree(BLOOM_CONFIG_BRANCH, os.path.join(root_path, rel_path)),
                        rel_path, root_path, version)
        if kind == 'file':
            # Path relative to start path
            rel_path = os.path.join(directory, path)
            # A file in the patches cannot replace an upstream directory
            if os.path.isdir(rel_path):
                error("In patches path '{0}' is a file, ".format(rel_path) +
                      "but it exists in the upstream branch as a directory.",
                      exit=True)
            # If the file already exists, warn, then remove it so the overlay
            # version replaces it cleanly.
            if os.path.isfile(rel_path):
                warning(" File '{0}' already exists, overwriting..."
                        .format(rel_path))
                execute_command('git rm {0}'.format(rel_path), shell=True)
            # package.xml is templated on the version; everything else
            # (including stack.xml, which only gets a warning) is copied
            # verbatim from the config branch.
            if path in ['stack.xml']:
                warning(" Skipping '{0}' templating, fuerte not supported"
                        .format(rel_path))
            if path in ['package.xml']:
                info(" Templating '{0}' into upstream branch..."
                     .format(rel_path))
                file_data = show(BLOOM_CONFIG_BRANCH, os.path.join(root_path, rel_path))
                file_data = file_data.replace(':{version}', version)
            else:
                info(" Overlaying '{0}' into upstream branch..."
                     .format(rel_path))
                file_data = show(BLOOM_CONFIG_BRANCH, os.path.join(root_path, rel_path))
            # Write file
            # NOTE(review): opened in binary mode but ``show`` appears to
            # yield text here (``str.replace`` above) — confirm ``file_data``
            # is bytes on Python 3 before relying on this path.
            with open(rel_path, 'wb') as f:
                f.write(file_data)
            # Add it with git
            execute_command('git add {0}'.format(rel_path), shell=True)
def import_patches(patches_path, patches_path_dict, target_branch, version):
    """Overlay the configured patches folder onto *target_branch* and commit."""
    info("Overlaying files from patched folder '{0}' on the '{2}' branch into the '{1}' branch..."
         .format(patches_path, target_branch, BLOOM_CONFIG_BRANCH))
    with inbranch(target_branch):
        handle_tree(patches_path_dict, '', patches_path, version)
        # Record the overlay even when it produced no file changes.
        commit_cmd = ('git commit --allow-empty -m "Overlaid patches from \'{0}\'"'
                      .format(patches_path))
        execute_command(commit_cmd, shell=True)
def import_upstream(tarball_path, patches_path, version, name, replace):
    """Import an upstream archive into the 'upstream' branch and tag it.

    Downloads the archive first when *tarball_path* is a URL, guesses
    name/version from the archive filename when they are not given, imports
    the contents into the 'upstream' branch (overlaying *patches_path* if
    provided), verifies the package version, and creates the
    ``upstream/<version>`` (and ``<name>/<version>``) tags.

    :param tarball_path: local path or URL of the archive to import
    :param patches_path: folder in BLOOM_CONFIG_BRANCH to overlay (or '')
    :param version: version being imported; guessed from filename if falsy
    :param name: upstream repository name; guessed from filename if falsy
    :param replace: if True, existing tags for this version are replaced
    """
    # Check for a url and download it
    url = urlparse(tarball_path)
    if url.scheme:  # Some scheme like http, https, or file...
        tmp_dir = tempfile.mkdtemp()
        try:
            info("Fetching file from url: '{0}'".format(tarball_path))
            req = load_url_to_file_handle(tarball_path)
            tarball_path = os.path.join(tmp_dir, os.path.basename(url.path))
            with open(tarball_path, 'wb') as f:
                # Stream the download in 16 KiB chunks
                chunk_size = 16 * 1024
                while True:
                    chunk = req.read(chunk_size)
                    if not chunk:
                        break
                    f.write(chunk)
            # Recurse with the now-local archive path
            return import_upstream(tarball_path, patches_path, version, name, replace)
        finally:
            shutil.rmtree(tmp_dir)
    # If there is no tarball at the given path, fail
    # FIX: grammar in error message ("does not exists" -> "does not exist").
    if not os.path.exists(tarball_path):
        error("Specified archive does not exist: '{0}'".format(tarball_path),
              exit=True)
    # If either version or name are not provided, guess from archive name
    if not version or not name:
        # Parse tarball name
        tarball_file = os.path.basename(tarball_path)
        ending = None
        if tarball_file.endswith('.tar.gz'):
            ending = '.tar.gz'
        elif tarball_file.endswith('.zip'):
            ending = '.zip'
        else:
            error("Cannot detect type of archive: '{0}'"
                  .format(tarball_file), exit=True)
        tarball_file = tarball_file[:-len(ending)]
        split_tarball_file = tarball_file.split('-')
        # Parens added for clarity only; 'and' already binds tighter than 'or'.
        if (len(split_tarball_file) < 2 and not version) or len(split_tarball_file) < 1:
            error("Cannot detect name and/or version from archive: '{0}'"
                  .format(tarball_file), exit=True)
        if not name and len(split_tarball_file) == 1:
            name = split_tarball_file[0]
        elif not name and len(split_tarball_file) >= 2:
            # BUG FIX: this branch previously repeated the ``== 1`` test and
            # was unreachable, so names containing '-' (e.g. 'foo-bar-1.0.0')
            # were never guessed from the archive name.
            name = '-'.join(split_tarball_file[:-1])
        # FIX: "spcified" typo in the user-facing error message.
        if not version and len(split_tarball_file) < 2:
            error("Cannot detect version from archive: '{0}'"
                  .format(tarball_file) + " and the version was not specified.",
                  exit=True)
        version = version if version else split_tarball_file[-1]
    # Check if the patches_path (if given) exists
    patches_path_dict = None
    if patches_path:
        patches_path_dict = ls_tree(BLOOM_CONFIG_BRANCH, patches_path)
        if not patches_path_dict:
            error("Given patches path '{0}' does not exist in bloom branch."
                  .format(patches_path), exit=True)
    # Do version checking (warn when not newer than the latest tag)
    version_check(version)
    # Check for existing tags
    upstream_tag = 'upstream/{0}'.format(version)
    if tag_exists(upstream_tag):
        if not replace:
            error("Tag '{0}' already exists, use --replace to override it."
                  .format(upstream_tag), exit=True)
        warning("Removing tag: '{0}'".format(upstream_tag))
        delete_tag(upstream_tag)
        if not get_git_clone_state():
            delete_remote_tag(upstream_tag)
    name_tag = '{0}/{1}'.format(name or 'upstream', version)
    if name_tag != upstream_tag and tag_exists(name_tag):
        if not replace:
            error("Tag '{0}' already exists, use --replace to override it."
                  .format(name_tag), exit=True)
        warning("Removing tag: '{0}'".format(name_tag))
        delete_tag(name_tag)
        if not get_git_clone_state():
            delete_remote_tag(name_tag)
    # If there is no upstream branch, create one
    if not branch_exists('upstream'):
        info("Creating upstream branch.")
        create_branch('upstream', orphaned=True)
    else:
        track_branches(['upstream'])
    # Import the given tarball
    info("Importing archive into upstream branch...")
    import_tarball(tarball_path, 'upstream', version, name)
    # Handle patches_path
    if patches_path:
        import_patches(patches_path, patches_path_dict, 'upstream', version)
    # Create tags
    with inbranch('upstream'):
        # Assert packages in upstream are the correct version
        _, actual_version, _ = get_package_data('upstream')
        if actual_version != version:
            error("The package(s) in upstream are version '{0}', but the version to be released is '{1}', aborting."
                  .format(actual_version, version), exit=True)
        # Create the tag
        info("Creating tag: '{0}'".format(upstream_tag))
        create_tag(upstream_tag)
        if name_tag != upstream_tag:
            info("Creating tag: '{0}'".format(name_tag))
            create_tag(name_tag)
def get_argument_parser():
    """Build the argparse parser for the import-upstream command."""
    parser = argparse.ArgumentParser(description="""\
Imports a given archive into the release repository's upstream branch.
The upstream is cleared of all files, then the archive is extracted
into the upstream branch. If a patches_path is given then the contents
of that folder are overlaid onto the upstream branch, and any
package.xml files are templated on the version. The patches_path must
exist in the bloom branch of the local repository. Then the
'upstream-<version>' tag is created. If a repository name is given
(or guessed from the archive), a '<name>-<version>' tag is also created.
This command must be run in a clean git environment, i.e. no untracked
or uncommitted local changes.
""")
    # Positional arguments
    parser.add_argument(
        'archive_path', help="path or url to the archive to be imported")
    parser.add_argument(
        'patches_path', nargs='?', default='',
        help="relative path in the '{0}' branch to a folder to be"
             .format(BLOOM_CONFIG_BRANCH) +
             " overlaid after import of upstream sources (optional)")
    # Optional arguments
    parser.add_argument(
        '-v', '--release-version',
        help="version being imported (defaults to guessing from archive name)")
    parser.add_argument(
        '-n', '--name',
        help="name of the repository being imported "
             "(defaults to guessing from archive name)")
    parser.add_argument(
        '-r', '--replace', action="store_true",
        help="""\
allows replacement of an existing upstream import of the same version
""")
    return parser
def main(sysargs=None):
    """Entry point for the import-upstream command."""
    from bloom.config import upconvert_bloom_to_config_branch
    upconvert_bloom_to_config_branch()
    parser = add_global_arguments(get_argument_parser())
    args = parser.parse_args(sysargs)
    handle_global_arguments(args)
    # The command requires a clean, rooted git/bloom working tree; show the
    # usage line before propagating the exit.
    try:
        ensure_clean_working_env()
        ensure_git_root()
    except SystemExit:
        parser.print_usage()
        raise
    git_clone = GitClone()
    with git_clone:
        import_upstream(
            args.archive_path,
            args.patches_path,
            args.release_version,
            args.name,
            args.replace)
    git_clone.commit()
    info("I'm happy. You should be too.")
| |
from Child import Child
from Node import Node # noqa: I201
# Declarative table of the statement-related syntax nodes.  Each Node names
# a syntax kind and lists its Children in source order; the table is consumed
# by the SwiftSyntax code generators, so entry names, kinds, and ordering are
# part of the generated API.
STMT_NODES = [
    # continue-stmt -> 'continue' label? ';'?
    Node('ContinueStmt', kind='Stmt',
         children=[
             Child('ContinueKeyword', kind='ContinueToken'),
             Child('Label', kind='IdentifierToken',
                   is_optional=True),
         ]),
    # while-stmt -> label? ':'? 'while' condition-list code-block ';'?
    Node('WhileStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('WhileKeyword', kind='WhileToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('Body', kind='CodeBlock'),
         ]),
    # defer-stmt -> 'defer' code-block ';'?
    Node('DeferStmt', kind='Stmt',
         traits=['WithCodeBlock'],
         children=[
             Child('DeferKeyword', kind='DeferToken'),
             Child('Body', kind='CodeBlock'),
         ]),
    # expr-stmt -> expression ';'?
    Node('ExpressionStmt', kind='Stmt',
         children=[
             Child('Expression', kind='Expr'),
         ]),
    # switch-case-list -> switch-case switch-case-list?
    Node('SwitchCaseList', kind='SyntaxCollection',
         element='Syntax',
         element_choices=['SwitchCase', 'IfConfigDecl']),
    # repeat-while-stmt -> label? ':'? 'repeat' code-block 'while' expr ';'?
    Node('RepeatWhileStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('RepeatKeyword', kind='RepeatToken'),
             Child('Body', kind='CodeBlock'),
             Child('WhileKeyword', kind='WhileToken'),
             Child('Condition', kind='Expr'),
         ]),
    # guard-stmt -> 'guard' condition-list 'else' code-block ';'?
    Node('GuardStmt', kind='Stmt',
         traits=['WithCodeBlock'],
         children=[
             Child('GuardKeyword', kind='GuardToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('ElseKeyword', kind='ElseToken'),
             Child('Body', kind='CodeBlock'),
         ]),
    # where-clause -> 'where' expr
    # Shared by ForInStmt, CaseItem and CatchClause below.
    Node('WhereClause', kind='Syntax',
         children=[
             Child('WhereKeyword', kind='WhereToken'),
             Child('GuardResult', kind='Expr'),
         ]),
    # for-in-stmt -> label? ':'? 'for' 'case'? pattern 'in' expr 'where'?
    #   expr code-block ';'?
    Node('ForInStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('ForKeyword', kind='ForToken'),
             Child('CaseKeyword', kind='CaseToken',
                   is_optional=True),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('InKeyword', kind='InToken'),
             Child('SequenceExpr', kind='Expr'),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('Body', kind='CodeBlock'),
         ]),
    # switch-stmt -> identifier? ':'? 'switch' expr '{'
    #   switch-case-list '}' ';'?
    Node('SwitchStmt', kind='Stmt',
         traits=['Braced', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('SwitchKeyword', kind='SwitchToken'),
             Child('Expression', kind='Expr'),
             Child('LeftBrace', kind='LeftBraceToken'),
             Child('Cases', kind='SwitchCaseList'),
             Child('RightBrace', kind='RightBraceToken'),
         ]),
    # catch-clause-list -> catch-clause catch-clause-list?
    Node('CatchClauseList', kind='SyntaxCollection',
         element='CatchClause'),
    # do-stmt -> identifier? ':'? 'do' code-block catch-clause-list ';'?
    Node('DoStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('DoKeyword', kind='DoToken'),
             Child('Body', kind='CodeBlock'),
             Child('CatchClauses', kind='CatchClauseList',
                   is_optional=True),
         ]),
    # return-stmt -> 'return' expr? ';'?
    Node('ReturnStmt', kind='Stmt',
         children=[
             Child('ReturnKeyword', kind='ReturnToken'),
             Child('Expression', kind='Expr',
                   is_optional=True),
         ]),
    # yield-stmt -> 'yield' '('? expr-list? ')'?
    Node('YieldStmt', kind='Stmt',
         children=[
             Child('YieldKeyword', kind='YieldToken'),
             Child('Yields', kind='Syntax',
                   node_choices=[
                       Child('YieldList', kind='YieldList'),
                       Child('SimpleYield', kind='Expr'),
                   ]),
         ]),
    # yield-list -> '(' expr-list ','? ')'
    Node('YieldList', kind='Syntax',
         children=[
             Child('LeftParen', kind='LeftParenToken'),
             Child('ElementList', kind='ExprList'),
             Child('TrailingComma', kind='CommaToken', is_optional=True),
             Child('RightParen', kind='RightParenToken'),
         ]),
    # fallthrough-stmt -> 'fallthrough' ';'?
    Node('FallthroughStmt', kind='Stmt',
         children=[
             Child('FallthroughKeyword', kind='FallthroughToken'),
         ]),
    # break-stmt -> 'break' identifier? ';'?
    Node('BreakStmt', kind='Stmt',
         children=[
             Child('BreakKeyword', kind='BreakToken'),
             Child('Label', kind='IdentifierToken',
                   is_optional=True),
         ]),
    # case-item-list -> case-item case-item-list?
    Node('CaseItemList', kind='SyntaxCollection',
         element='CaseItem'),
    # condition -> expression
    #            | availability-condition
    #            | case-condition
    #            | optional-binding-condition
    Node('ConditionElement', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('Condition', kind='Syntax',
                   node_choices=[
                       Child('Expression', kind='Expr'),
                       # NOTE(review): 'Availablity' is misspelled, but the
                       # name feeds the generated API, so renaming it is a
                       # breaking change -- confirm before fixing.
                       Child('Availablity', kind='AvailabilityCondition'),
                       Child('MatchingPattern',
                             kind='MatchingPatternCondition'),
                       Child('OptionalBinding',
                             kind='OptionalBindingCondition'),
                   ]),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),
    # availability-condition -> '#available' '(' availability-spec ')'
    Node('AvailabilityCondition', kind='Syntax',
         children=[
             Child('PoundAvailableKeyword', kind='PoundAvailableToken'),
             Child('LeftParen', kind='LeftParenToken'),
             Child('AvailabilitySpec', kind='AvailabilitySpecList'),
             Child('RightParen', kind='RightParenToken'),
         ]),
    # case-condition -> 'case' pattern type-annotation? initializer
    Node('MatchingPatternCondition', kind='Syntax',
         children=[
             Child('CaseKeyword', kind='CaseToken'),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('Initializer', kind='InitializerClause'),
         ]),
    # optional-binding-condition -> ('let'|'var') pattern type-annotation?
    #   initializer
    Node('OptionalBindingCondition', kind='Syntax',
         children=[
             Child('LetOrVarKeyword', kind='Token',
                   token_choices=[
                       'LetToken', 'VarToken',
                   ]),
             Child('Pattern', kind='Pattern'),
             Child('TypeAnnotation', kind='TypeAnnotation',
                   is_optional=True),
             Child('Initializer', kind='InitializerClause'),
         ]),
    # condition-list -> condition
    #                 | condition ','? condition-list
    Node('ConditionElementList', kind='SyntaxCollection',
         element='ConditionElement'),
    # A declaration in statement position.
    # struct Foo {};
    Node('DeclarationStmt', kind='Stmt',
         children=[
             Child('Declaration', kind='Decl'),
         ]),
    # throw-stmt -> 'throw' expr ';'?
    Node('ThrowStmt', kind='Stmt',
         children=[
             Child('ThrowKeyword', kind='ThrowToken'),
             Child('Expression', kind='Expr'),
         ]),
    # if-stmt -> identifier? ':'? 'if' condition-list code-block
    #   else-clause ';'?
    Node('IfStmt', kind='Stmt',
         traits=['WithCodeBlock', 'Labeled'],
         children=[
             Child('LabelName', kind='IdentifierToken',
                   is_optional=True),
             Child('LabelColon', kind='ColonToken',
                   is_optional=True),
             Child('IfKeyword', kind='IfToken'),
             Child('Conditions', kind='ConditionElementList'),
             Child('Body', kind='CodeBlock'),
             Child('ElseKeyword', kind='ElseToken',
                   is_optional=True),
             Child('ElseBody', kind='Syntax',
                   node_choices=[
                       Child('IfStmt', kind='IfStmt'),
                       Child('CodeBlock', kind='CodeBlock'),
                   ],
                   is_optional=True),
         ]),
    # else-if-continuation -> if-stmt
    # (The original comment here repeated the while-stmt grammar; this node
    # simply wraps a nested IfStmt.)
    Node('ElseIfContinuation', kind='Syntax',
         children=[
             Child('IfStatement', kind='IfStmt'),
         ]),
    # else-clause -> 'else' code-block
    Node('ElseBlock', kind='Syntax',
         traits=['WithCodeBlock'],
         children=[
             Child('ElseKeyword', kind='ElseToken'),
             Child('Body', kind='CodeBlock'),
         ]),
    # switch-case -> unknown-attr? switch-case-label stmt-list
    #              | unknown-attr? switch-default-label stmt-list
    Node('SwitchCase', kind='Syntax',
         traits=['WithStatements'],
         children=[
             Child('UnknownAttr', kind='Attribute', is_optional=True),
             Child('Label', kind='Syntax',
                   node_choices=[
                       Child('Default', kind='SwitchDefaultLabel'),
                       Child('Case', kind='SwitchCaseLabel'),
                   ]),
             Child('Statements', kind='CodeBlockItemList'),
         ]),
    # switch-default-label -> 'default' ':'
    Node('SwitchDefaultLabel', kind='Syntax',
         children=[
             Child('DefaultKeyword', kind='DefaultToken'),
             Child('Colon', kind='ColonToken'),
         ]),
    # case-item -> pattern where-clause? ','?
    Node('CaseItem', kind='Syntax',
         traits=['WithTrailingComma'],
         children=[
             Child('Pattern', kind='Pattern'),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('TrailingComma', kind='CommaToken',
                   is_optional=True),
         ]),
    # switch-case-label -> 'case' case-item-list ':'
    Node('SwitchCaseLabel', kind='Syntax',
         children=[
             Child('CaseKeyword', kind='CaseToken'),
             Child('CaseItems', kind='CaseItemList'),
             Child('Colon', kind='ColonToken'),
         ]),
    # catch-clause 'catch' pattern? where-clause? code-block
    Node('CatchClause', kind='Syntax',
         children=[
             Child('CatchKeyword', kind='CatchToken'),
             Child('Pattern', kind='Pattern',
                   is_optional=True),
             Child('WhereClause', kind='WhereClause',
                   is_optional=True),
             Child('Body', kind='CodeBlock'),
         ]),
    # e.g. #assert(1 == 2)
    Node('PoundAssertStmt', kind='Stmt',
         children=[
             Child('PoundAssert', kind='PoundAssertToken'),
             Child('LeftParen', kind='LeftParenToken'),
             Child('Condition', kind='Expr',
                   description='The assertion condition.'),
             Child('Comma', kind='CommaToken', is_optional=True,
                   description='The comma after the assertion condition.'),
             Child('Message', kind='StringLiteralToken', is_optional=True,
                   description='The assertion message.'),
             Child('RightParen', kind='RightParenToken'),
         ]),
]
| |
"""
missing types & inference
"""
from functools import partial
import numpy as np
from pandas._config import get_option
from pandas._libs import lib
import pandas._libs.missing as libmissing
from pandas._libs.tslibs import NaT, iNaT
from pandas._typing import DtypeObj
from pandas.core.dtypes.common import (
DT64NS_DTYPE,
TD64NS_DTYPE,
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_complex_dtype,
is_datetimelike_v_numeric,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_integer_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
is_string_like_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCExtensionArray,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.inference import is_list_like
# Re-export the C-implemented scalar infinity checks so callers can use them
# without importing pandas._libs.missing directly.
isposinf_scalar = libmissing.isposinf_scalar
isneginf_scalar = libmissing.isneginf_scalar
def isna(obj):
    """
    Detect missing values for an array-like object.

    This function takes a scalar or array-like object and indicates whether
    values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN`` in
    object arrays, ``NaT`` in datetimelike).

    Parameters
    ----------
    obj : scalar or array-like
        Object to check for null or missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.  For array input,
        returns an array of boolean indicating whether each corresponding
        element is missing.

    See Also
    --------
    notna : Boolean inverse of pandas.isna.
    Series.isna : Detect missing values in a Series.
    DataFrame.isna : Detect missing values in a DataFrame.
    Index.isna : Detect missing values in an Index.
    """
    # Dispatch through the module-level ``_isna`` name rather than a fixed
    # function: ``_use_inf_as_na`` rebinds that name when the option changes.
    return _isna(obj)


# Alias kept for backwards compatibility.
isnull = isna
def _isna(obj, inf_as_na: bool = False):
    """
    Detect missing values, treating None, NaN or NA as null. Infinite
    values will also be treated as null if inf_as_na is True.
    Parameters
    ----------
    obj: ndarray or object value
        Input array or scalar value.
    inf_as_na: bool
        Whether to treat infinity as null.
    Returns
    -------
    boolean ndarray or boolean
    """
    # NOTE: ``_use_inf_as_na`` rebinds the module-level ``_isna`` name to a
    # partial with ``inf_as_na`` pre-bound, so ``isna``/``notna`` pick up the
    # option without passing it explicitly.  The branch ORDER below matters:
    # MultiIndex must be rejected before the ndarray check, and ``list``
    # handled before the generic ``__array__`` fallback.
    if is_scalar(obj):
        if inf_as_na:
            return libmissing.checknull_old(obj)
        else:
            return libmissing.checknull(obj)
    # hack (for now) because MI registers as ndarray
    elif isinstance(obj, ABCMultiIndex):
        raise NotImplementedError("isna is not defined for MultiIndex")
    elif isinstance(obj, type):
        # Classes themselves (e.g. ``isna(float)``) are never considered NA.
        return False
    elif isinstance(obj, (ABCSeries, np.ndarray, ABCIndexClass, ABCExtensionArray)):
        return _isna_ndarraylike(obj, inf_as_na=inf_as_na)
    elif isinstance(obj, ABCDataFrame):
        # DataFrame computes its own element-wise mask.
        return obj.isna()
    elif isinstance(obj, list):
        return _isna_ndarraylike(np.asarray(obj, dtype=object), inf_as_na=inf_as_na)
    elif hasattr(obj, "__array__"):
        return _isna_ndarraylike(np.asarray(obj), inf_as_na=inf_as_na)
    else:
        # Any other object that is_scalar did not recognize is treated as
        # a non-missing scalar.
        return False
def _use_inf_as_na(key):
    """
    Option change callback for na/inf behaviour.

    Choose which replacement for numpy.isnan / -numpy.isfinite is used.

    Parameters
    ----------
    flag: bool
        True means treat None, NaN, INF, -INF as null (old way),
        False means None and NaN are null, but INF, -INF are not null
        (new way).

    Notes
    -----
    This approach to setting global module values is discussed and
    approved here:
    * https://stackoverflow.com/questions/4859217/
      programmatically-creating-variables-in-python/4859312#4859312
    """
    flag = get_option(key)
    # Rebind the module-level ``_isna`` so isna()/notna() honor the option.
    globals()["_isna"] = partial(_isna, inf_as_na=flag)
def _isna_ndarraylike(obj, inf_as_na: bool = False):
    """
    Return an array indicating which values of the input array are NaN / NA.
    Parameters
    ----------
    obj: array-like
        The input array whose elements are to be checked.
    inf_as_na: bool
        Whether or not to treat infinite values as NA.
    Returns
    -------
    array-like
        Array of boolean values denoting the NA status of each element.
    """
    # Work on the underlying array when obj wraps one (Series/Index).
    values = getattr(obj, "_values", obj)
    dtype = values.dtype
    if is_extension_array_dtype(dtype):
        if inf_as_na and is_categorical_dtype(dtype):
            result = libmissing.isnaobj_old(values.to_numpy())
        else:
            # Extension arrays compute their own NA mask; note inf_as_na is
            # ignored here for non-categorical extension dtypes.
            result = values.isna()
    elif is_string_dtype(dtype):
        result = _isna_string_dtype(values, dtype, inf_as_na=inf_as_na)
    elif needs_i8_conversion(dtype):
        # this is the NaT pattern
        result = values.view("i8") == iNaT
    else:
        if inf_as_na:
            result = ~np.isfinite(values)
        else:
            result = np.isnan(values)
    # box
    if isinstance(obj, ABCSeries):
        result = obj._constructor(result, index=obj.index, name=obj.name, copy=False)
    return result
def _isna_string_dtype(
    values: np.ndarray, dtype: np.dtype, inf_as_na: bool
) -> np.ndarray:
    """Compute the NA mask for an array with a string-kind dtype."""
    # Working around NumPy ticket 1542
    shape = values.shape
    if is_string_like_dtype(dtype):
        # Fixed-width string arrays cannot hold NA values at all.
        return np.zeros(shape, dtype=bool)
    checker = libmissing.isnaobj_old if inf_as_na else libmissing.isnaobj
    result = np.empty(shape, dtype=bool)
    result[...] = checker(values.ravel()).reshape(shape)
    return result
def notna(obj):
    """
    Detect non-missing values for an array-like object.

    Boolean inverse of :func:`isna`: entries that are valid (not ``NaN`` in
    numeric arrays, not ``None``/``NaN`` in object arrays, not ``NaT`` in
    datetimelike) are True.

    Parameters
    ----------
    obj : array-like or object value
        Object to check for *not* null or *non*-missing values.

    Returns
    -------
    bool or array-like of bool
        For scalar input, returns a scalar boolean.  For array input,
        returns an array of boolean indicating whether each corresponding
        element is valid.

    See Also
    --------
    isna : Boolean inverse of pandas.notna.
    Series.notna : Detect valid values in a Series.
    DataFrame.notna : Detect valid values in a DataFrame.
    Index.notna : Detect valid values in an Index.
    """
    missing = isna(obj)
    # Scalars need Python's ``not``; arrays/Series support bitwise inversion.
    if is_scalar(missing):
        return not missing
    return ~missing


# Alias kept for backwards compatibility.
notnull = notna
def _isna_compat(arr, fill_value=np.nan) -> bool:
"""
Parameters
----------
arr: a numpy array
fill_value: fill value, default to np.nan
Returns
-------
True if we can fill using this fill_value
"""
dtype = arr.dtype
if isna(fill_value):
return not (is_bool_dtype(dtype) or is_integer_dtype(dtype))
return True
def array_equivalent(left, right, strict_nan: bool = False) -> bool:
    """
    True if two arrays, left and right, have equal non-NaN elements, and NaNs
    in corresponding locations. False otherwise. It is assumed that left and
    right are NumPy arrays of the same dtype. The behavior of this function
    (particularly with respect to NaNs) is not defined if the dtypes are
    different.
    Parameters
    ----------
    left, right : ndarrays
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    Returns
    -------
    b : bool
        Returns True if the arrays are equivalent.
    Examples
    --------
    >>> array_equivalent(
    ...     np.array([1, 2, np.nan]),
    ...     np.array([1, 2, np.nan]))
    True
    >>> array_equivalent(
    ...     np.array([1, np.nan, 2]),
    ...     np.array([1, 2, np.nan]))
    False
    """
    left, right = np.asarray(left), np.asarray(right)
    # shape compat
    if left.shape != right.shape:
        return False
    # Object arrays can contain None, NaN and NaT.
    # string dtypes must come to this path for NumPy 1.7.1 compat
    if is_string_dtype(left.dtype) or is_string_dtype(right.dtype):
        if not strict_nan:
            # isna considers NaN and None to be equivalent.
            return lib.array_equivalent_object(
                ensure_object(left.ravel()), ensure_object(right.ravel())
            )
        # strict_nan: element-wise walk that distinguishes NaT, NA and NaN
        # from one another and from None.
        for left_value, right_value in zip(left, right):
            if left_value is NaT and right_value is not NaT:
                return False
            elif left_value is libmissing.NA and right_value is not libmissing.NA:
                return False
            elif isinstance(left_value, float) and np.isnan(left_value):
                if not isinstance(right_value, float) or not np.isnan(right_value):
                    return False
            else:
                try:
                    if np.any(np.asarray(left_value != right_value)):
                        return False
                except TypeError as err:
                    # Comparisons between certain mismatched types raise
                    # instead of returning False; map the known cases.
                    if "Cannot compare tz-naive" in str(err):
                        # tzawareness compat failure, see GH#28507
                        return False
                    elif "boolean value of NA is ambiguous" in str(err):
                        return False
                    raise
        return True
    # NaNs can occur in float and complex arrays.
    if is_float_dtype(left.dtype) or is_complex_dtype(left.dtype):
        # empty
        if not (np.prod(left.shape) and np.prod(right.shape)):
            return True
        # Equal where both equal, or both NA in the same location.
        return ((left == right) | (isna(left) & isna(right))).all()
    elif is_datetimelike_v_numeric(left, right):
        # GH#29553 avoid numpy deprecation warning
        return False
    elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
        # datetime64, timedelta64, Period
        if not is_dtype_equal(left.dtype, right.dtype):
            return False
        # Compare as raw i8 so NaT (iNaT) positions compare equal.
        left = left.view("i8")
        right = right.view("i8")
    # if we have structured dtypes, compare first
    if left.dtype.type is np.void or right.dtype.type is np.void:
        if left.dtype != right.dtype:
            return False
    return np.array_equal(left, right)
def _infer_fill_value(val):
"""
infer the fill value for the nan/NaT from the provided
scalar/ndarray/list-like if we are a NaT, return the correct dtyped
element to provide proper block construction
"""
if not is_list_like(val):
val = [val]
val = np.array(val, copy=False)
if needs_i8_conversion(val.dtype):
return np.array("NaT", dtype=val.dtype)
elif is_object_dtype(val.dtype):
dtype = lib.infer_dtype(ensure_object(val), skipna=False)
if dtype in ["datetime", "datetime64"]:
return np.array("NaT", dtype=DT64NS_DTYPE)
elif dtype in ["timedelta", "timedelta64"]:
return np.array("NaT", dtype=TD64NS_DTYPE)
return np.nan
def _maybe_fill(arr, fill_value=np.nan):
    """
    if we have a compatible fill_value and arr dtype, then fill
    """
    compatible = _isna_compat(arr, fill_value)
    if compatible:
        # In-place fill; callers receive the same array object back.
        arr.fill(fill_value)
    return arr
def na_value_for_dtype(dtype, compat: bool = True):
    """
    Return a dtype compat na value

    Parameters
    ----------
    dtype : string / dtype
    compat : bool, default True
        When True, integer and boolean dtypes get a dtype-compatible
        sentinel (0 / False) instead of NaN.

    Returns
    -------
    np.dtype or a pandas dtype

    Examples
    --------
    >>> na_value_for_dtype(np.dtype('int64'))
    0
    >>> na_value_for_dtype(np.dtype('int64'), compat=False)
    nan
    >>> na_value_for_dtype(np.dtype('float64'))
    nan
    >>> na_value_for_dtype(np.dtype('bool'))
    False
    >>> na_value_for_dtype(np.dtype('datetime64[ns]'))
    NaT
    """
    dtype = pandas_dtype(dtype)
    # Extension dtypes advertise their own missing-value sentinel.
    if is_extension_array_dtype(dtype):
        return dtype.na_value
    if needs_i8_conversion(dtype):
        return NaT
    if is_float_dtype(dtype):
        return np.nan
    if is_integer_dtype(dtype):
        return 0 if compat else np.nan
    if is_bool_dtype(dtype):
        return False if compat else np.nan
    return np.nan
def remove_na_arraylike(arr):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    # Extension arrays compute their own mask; everything else goes through
    # an ndarray view.
    mask_source = arr if is_extension_array_dtype(arr) else np.asarray(arr)
    return arr[notna(mask_source)]
def is_valid_nat_for_dtype(obj, dtype: DtypeObj) -> bool:
    """
    isna check that excludes incompatible dtypes

    Parameters
    ----------
    obj : object
    dtype : np.datetime64, np.timedelta64, DatetimeTZDtype, or PeriodDtype

    Returns
    -------
    bool
    """
    # Only scalar NA values can possibly qualify
    if not (lib.is_scalar(obj) and isna(obj)):
        return False

    if dtype.kind == "M":
        # datetime64: any NA except a timedelta64 NaT
        return not isinstance(obj, np.timedelta64)
    if dtype.kind == "m":
        # timedelta64: any NA except a datetime64 NaT
        return not isinstance(obj, np.datetime64)

    # must be PeriodDType
    return not isinstance(obj, (np.datetime64, np.timedelta64))
| |
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
import netaddr
from oslo_log import log
from oslo_serialization import jsonutils as json
import six
from tempest_lib.common.utils import misc as misc_utils
from tempest_lib import exceptions as lib_exc
from tempest.common import fixed_network
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.services.network import resources as net_resources
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
class ScenarioTest(tempest.test.BaseTestCase):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
    @classmethod
    def setup_clients(cls):
        """Bind the tempest service clients used by scenario tests.

        All clients come from ``cls.manager`` (primary credentials) and
        are exposed as class attributes.
        """
        super(ScenarioTest, cls).setup_clients()
        # Clients (in alphabetical order)
        cls.flavors_client = cls.manager.flavors_client
        cls.floating_ips_client = cls.manager.floating_ips_client
        # Glance image client v1
        cls.image_client = cls.manager.image_client
        # Compute image client
        cls.images_client = cls.manager.images_client
        cls.keypairs_client = cls.manager.keypairs_client
        # Nova security groups client
        cls.security_groups_client = cls.manager.security_groups_client
        cls.security_group_rules_client = (
            cls.manager.security_group_rules_client)
        cls.servers_client = cls.manager.servers_client
        cls.interface_client = cls.manager.interfaces_client
        # Neutron network client
        cls.network_client = cls.manager.network_client
        # Heat client
        cls.orchestration_client = cls.manager.orchestration_client

        # Pick v1 or v2 Cinder clients based on the configured volume API
        if CONF.volume_feature_enabled.api_v1:
            cls.volumes_client = cls.manager.volumes_client
            cls.snapshots_client = cls.manager.snapshots_client
        else:
            cls.volumes_client = cls.manager.volumes_v2_client
            cls.snapshots_client = cls.manager.snapshots_v2_client
# ## Methods to handle sync and async deletes
    def setUp(self):
        """Per-test setup: initialize the async-delete wait queue."""
        super(ScenarioTest, self).setUp()
        # Pending waits for async resource deletion, consumed by
        # _wait_for_cleanups at the end of each test method.
        self.cleanup_waits = []
        # NOTE(mtreinish) This is safe to do in setUp instead of setUp class
        # because scenario tests in the same test class should not share
        # resources. If resources were shared between test cases then it
        # should be a single scenario test instead of multiples.

        # NOTE(yfried): this list is cleaned at the end of test_methods and
        # not at the end of the class
        self.addCleanup(self._wait_for_cleanups)
def delete_wrapper(self, delete_thing, *args, **kwargs):
"""Ignores NotFound exceptions for delete operations.
@param delete_thing: delete method of a resource. method will be
executed as delete_thing(*args, **kwargs)
"""
try:
# Tempest clients return dicts, so there is no common delete
# method available. Using a callable instead
delete_thing(*args, **kwargs)
except lib_exc.NotFound:
# If the resource is already missing, mission accomplished.
pass
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
cleanup_callable, cleanup_args=None,
cleanup_kwargs=None, waiter_client=None):
"""Adds wait for async resource deletion at the end of cleanups
@param waiter_callable: callable to wait for the resource to delete
with the following waiter_client if specified.
@param thing_id: the id of the resource to be cleaned-up
@param thing_id_param: the name of the id param in the waiter
@param cleanup_callable: method to load pass to self.addCleanup with
the following *cleanup_args, **cleanup_kwargs.
usually a delete method.
"""
if cleanup_args is None:
cleanup_args = []
if cleanup_kwargs is None:
cleanup_kwargs = {}
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
wait_dict = {
'waiter_callable': waiter_callable,
thing_id_param: thing_id
}
if waiter_client:
wait_dict['client'] = waiter_client
self.cleanup_waits.append(wait_dict)
def _wait_for_cleanups(self):
"""To handle async delete actions, a list of waits is added
which will be iterated over as the last step of clearing the
cleanup queue. That way all the delete calls are made up front
and the tests won't succeed unless the deletes are eventually
successful. This is the same basic approach used in the api tests to
limit cleanup execution time except here it is multi-resource,
because of the nature of the scenario tests.
"""
for wait in self.cleanup_waits:
waiter_callable = wait.pop('waiter_callable')
waiter_callable(**wait)
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
def create_keypair(self, client=None):
if not client:
client = self.keypairs_client
name = data_utils.rand_name(self.__class__.__name__)
# We don't need to create a keypair by pubkey in scenario
body = client.create_keypair(name=name)
self.addCleanup(client.delete_keypair, name)
return body['keypair']
    def create_server(self, name=None, image=None, flavor=None,
                      wait_on_boot=True, wait_on_delete=True,
                      create_kwargs=None):
        """Creates VM instance.

        @param image: image from which to create the instance
        @param wait_on_boot: wait for status ACTIVE before continue
        @param wait_on_delete: force synchronous delete on cleanup
        @param create_kwargs: additional details for instance creation
        @return: server dict
        """
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__)
        if image is None:
            image = CONF.compute.image_ref
        if flavor is None:
            flavor = CONF.compute.flavor_ref
        if create_kwargs is None:
            create_kwargs = {}
        # Attach the server to the tenant network (if one is configured)
        network = self.get_tenant_network()
        create_kwargs = fixed_network.set_networks_kwarg(network,
                                                         create_kwargs)

        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
                  name, image, flavor)
        server = self.servers_client.create_server(name, image, flavor,
                                                   **create_kwargs)['server']
        if wait_on_delete:
            # Synchronous delete: block on termination during cleanup
            self.addCleanup(waiters.wait_for_server_termination,
                            self.servers_client,
                            server['id'])
        # Always register the async delete + wait as well
        self.addCleanup_with_wait(
            waiter_callable=waiters.wait_for_server_termination,
            thing_id=server['id'], thing_id_param='server_id',
            cleanup_callable=self.delete_wrapper,
            cleanup_args=[self.servers_client.delete_server, server['id']],
            waiter_client=self.servers_client)
        if wait_on_boot:
            waiters.wait_for_server_status(self.servers_client,
                                           server_id=server['id'],
                                           status='ACTIVE')
        # The instance retrieved on creation is missing network
        # details, necessitating retrieval after it becomes active to
        # ensure correct details.
        server = self.servers_client.show_server(server['id'])['server']
        self.assertEqual(server['name'], name)
        return server
    def create_volume(self, size=None, name=None, snapshot_id=None,
                      imageRef=None, volume_type=None, wait_on_delete=True):
        """Create a Cinder volume and register its cleanup.

        @param wait_on_delete: block on deletion during cleanup rather
            than queueing an async wait
        @return: volume dict, refreshed after it reaches 'available'
        """
        if name is None:
            name = data_utils.rand_name(self.__class__.__name__)
        volume = self.volumes_client.create_volume(
            size=size, display_name=name, snapshot_id=snapshot_id,
            imageRef=imageRef, volume_type=volume_type)['volume']

        if wait_on_delete:
            self.addCleanup(self.volumes_client.wait_for_resource_deletion,
                            volume['id'])
            self.addCleanup(self.delete_wrapper,
                            self.volumes_client.delete_volume, volume['id'])
        else:
            self.addCleanup_with_wait(
                waiter_callable=self.volumes_client.wait_for_resource_deletion,
                thing_id=volume['id'], thing_id_param='id',
                cleanup_callable=self.delete_wrapper,
                cleanup_args=[self.volumes_client.delete_volume, volume['id']])

        # NOTE(e0ne): Cinder API v2 uses name instead of display_name
        if 'display_name' in volume:
            self.assertEqual(name, volume['display_name'])
        else:
            self.assertEqual(name, volume['name'])
        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
        # The volume retrieved on creation has a non-up-to-date status.
        # Retrieval after it becomes active ensures correct details.
        volume = self.volumes_client.show_volume(volume['id'])['volume']
        return volume
    def _create_loginable_secgroup_rule(self, secgroup_id=None):
        """Add ssh and ping ingress rules to a nova security group.

        @param secgroup_id: target group; defaults to the 'default'
            security group of the tenant
        @return: list of created rule dicts
        """
        _client = self.security_groups_client
        _client_rules = self.security_group_rules_client
        if secgroup_id is None:
            sgs = _client.list_security_groups()['security_groups']
            for sg in sgs:
                if sg['name'] == 'default':
                    secgroup_id = sg['id']

        # These rules are intended to permit inbound ssh and icmp
        # traffic from all sources, so no group_id is provided.
        # Setting a group_id would only permit traffic from ports
        # belonging to the same security group.
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        rules = list()
        for ruleset in rulesets:
            sg_rule = _client_rules.create_security_group_rule(
                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
            self.addCleanup(self.delete_wrapper,
                            _client_rules.delete_security_group_rule,
                            sg_rule['id'])
            rules.append(sg_rule)
        return rules
def _create_security_group(self):
# Create security group
sg_name = data_utils.rand_name(self.__class__.__name__)
sg_desc = sg_name + " description"
secgroup = self.security_groups_client.create_security_group(
name=sg_name, description=sg_desc)['security_group']
self.assertEqual(secgroup['name'], sg_name)
self.assertEqual(secgroup['description'], sg_desc)
self.addCleanup(self.delete_wrapper,
self.security_groups_client.delete_security_group,
secgroup['id'])
# Add rules to the security group
self._create_loginable_secgroup_rule(secgroup['id'])
return secgroup
def get_remote_client(self, server_or_ip, username=None, private_key=None,
log_console_of_servers=None):
"""Get a SSH client to a remote server
@param server_or_ip a server object as returned by Tempest compute
client or an IP address to connect to
@param username name of the Linux account on the remote server
@param private_key the SSH private key to use
@param log_console_of_servers a list of server objects. Each server
in the list will have its console printed in the logs in case the
SSH connection failed to be established
@return a RemoteClient object
"""
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
addrs = server_or_ip['addresses'][CONF.compute.network_for_ssh]
try:
ip = (addr['addr'] for addr in addrs if
netaddr.valid_ipv4(addr['addr'])).next()
except StopIteration:
raise lib_exc.NotFound("No IPv4 addresses to use for SSH to "
"remote server.")
if username is None:
username = CONF.scenario.ssh_user
# Set this with 'keypair' or others to log in with keypair or
# username/password.
if CONF.compute.ssh_auth_method == 'keypair':
password = None
if private_key is None:
private_key = self.keypair['private_key']
else:
password = CONF.compute.image_ssh_password
private_key = None
linux_client = remote_client.RemoteClient(ip, username,
pkey=private_key,
password=password)
try:
linux_client.validate_authentication()
except Exception as e:
message = ('Initializing SSH connection to %(ip)s failed. '
'Error: %(error)s' % {'ip': ip, 'error': e})
caller = misc_utils.find_test_caller()
if caller:
message = '(%s) %s' % (caller, message)
LOG.exception(message)
# If we don't explicitly set for which servers we want to
# log the console output then all the servers will be logged.
# See the definition of _log_console_output()
self._log_console_output(log_console_of_servers)
raise
return linux_client
    def _image_create(self, name, fmt, path,
                      disk_format=None, properties=None):
        """Upload a Glance image from a local file.

        @param fmt: container format (also used as disk_format by default)
        @return: id of the created image
        """
        if properties is None:
            properties = {}
        name = data_utils.rand_name('%s-' % name)
        # The handle must stay open until update_image has streamed the
        # bits; it is closed via addCleanup at the end of the test.
        image_file = open(path, 'rb')
        self.addCleanup(image_file.close)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': disk_format or fmt,
            'is_public': 'False',
        }
        params['properties'] = properties
        image = self.image_client.create_image(**params)['image']
        self.addCleanup(self.image_client.delete_image, image['id'])
        self.assertEqual("queued", image['status'])
        # Upload the actual image data
        self.image_client.update_image(image['id'], data=image_file)
        return image['id']
    def glance_image_create(self):
        """Register the configured scenario image in Glance.

        Tries the single-file image (e.g. qcow2) first; if that file is
        missing, falls back to uploading a UEC triple (aki/ari/ami).
        The resulting image id is stored in ``self.image``.
        """
        img_path = CONF.scenario.img_dir + "/" + CONF.scenario.img_file
        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
        img_container_format = CONF.scenario.img_container_format
        img_disk_format = CONF.scenario.img_disk_format
        img_properties = CONF.scenario.img_properties
        LOG.debug("paths: img: %s, container_fomat: %s, disk_format: %s, "
                  "properties: %s, ami: %s, ari: %s, aki: %s" %
                  (img_path, img_container_format, img_disk_format,
                   img_properties, ami_img_path, ari_img_path, aki_img_path))
        try:
            self.image = self._image_create('scenario-img',
                                            img_container_format,
                                            img_path,
                                            disk_format=img_disk_format,
                                            properties=img_properties)
        except IOError:
            # Image file missing on disk: fall back to the uec triple
            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
            properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
            self.image = self._image_create('scenario-ami', 'ami',
                                            path=ami_img_path,
                                            properties=properties)
        LOG.debug("image:%s" % self.image)
def _log_console_output(self, servers=None):
if not CONF.compute_feature_enabled.console_output:
LOG.debug('Console output not supported, cannot log')
return
if not servers:
servers = self.servers_client.list_servers()
servers = servers['servers']
for server in servers:
console_output = self.servers_client.get_console_output(
server['id'], length=None)['output']
LOG.debug('Console output for %s\nbody=\n%s',
server['id'], console_output)
    def _log_net_info(self, exc):
        """Log devstack network info unless the failure was an SSH timeout."""
        # network debug is called as part of ssh init
        if not isinstance(exc, lib_exc.SSHTimeout):
            LOG.debug('Network information on a devstack host')
    def create_server_snapshot(self, server, name=None):
        """Snapshot a server into a Glance image and register cleanups.

        Also registers cleanup for any volume snapshot referenced by
        the image's block device mapping (boot-from-volume case).
        @return: snapshot image metadata dict
        """
        # Glance client
        _image_client = self.image_client
        # Compute client
        _images_client = self.images_client
        if name is None:
            name = data_utils.rand_name('scenario-snapshot')
        LOG.debug("Creating a snapshot image for server: %s", server['name'])
        image = _images_client.create_image(server['id'], name=name)
        # The new image id is only exposed via the Location response header
        image_id = image.response['location'].split('images/')[1]
        _image_client.wait_for_image_status(image_id, 'active')
        self.addCleanup_with_wait(
            waiter_callable=_image_client.wait_for_resource_deletion,
            thing_id=image_id, thing_id_param='id',
            cleanup_callable=self.delete_wrapper,
            cleanup_args=[_image_client.delete_image, image_id])
        snapshot_image = _image_client.get_image_meta(image_id)

        bdm = snapshot_image.get('properties', {}).get('block_device_mapping')
        if bdm:
            bdm = json.loads(bdm)
            if bdm and 'snapshot_id' in bdm[0]:
                # Boot-from-volume snapshot: also clean up the volume
                # snapshot created alongside the image.
                snapshot_id = bdm[0]['snapshot_id']
                self.addCleanup(
                    self.snapshots_client.wait_for_resource_deletion,
                    snapshot_id)
                self.addCleanup(
                    self.delete_wrapper, self.snapshots_client.delete_snapshot,
                    snapshot_id)
                self.snapshots_client.wait_for_snapshot_status(snapshot_id,
                                                               'available')

        image_name = snapshot_image['name']
        self.assertEqual(name, image_name)
        LOG.debug("Created snapshot image %s for server %s",
                  image_name, server['name'])
        return snapshot_image
    def nova_volume_attach(self):
        """Attach self.volume to self.server and wait for 'in-use'."""
        volume = self.servers_client.attach_volume(
            self.server['id'], volumeId=self.volume['id'], device='/dev/%s'
            % CONF.compute.volume_device_name)['volumeAttachment']
        self.assertEqual(self.volume['id'], volume['id'])
        self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
        # Refresh the volume after the attachment
        self.volume = self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self):
self.servers_client.detach_volume(self.server['id'], self.volume['id'])
self.volumes_client.wait_for_volume_status(self.volume['id'],
'available')
volume = self.volumes_client.show_volume(self.volume['id'])['volume']
self.assertEqual('available', volume['status'])
def rebuild_server(self, server_id, image=None,
preserve_ephemeral=False, wait=True,
rebuild_kwargs=None):
if image is None:
image = CONF.compute.image_ref
rebuild_kwargs = rebuild_kwargs or {}
LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
server_id, image, preserve_ephemeral)
self.servers_client.rebuild_server(
server_id=server_id, image_ref=image,
preserve_ephemeral=preserve_ephemeral,
**rebuild_kwargs)
if wait:
waiters.wait_for_server_status(self.servers_client,
server_id, 'ACTIVE')
def ping_ip_address(self, ip_address, should_succeed=True,
ping_timeout=None):
timeout = ping_timeout or CONF.compute.ping_timeout
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.communicate()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(ping, timeout, 1)
    def check_vm_connectivity(self, ip_address,
                              username=None,
                              private_key=None,
                              should_connect=True):
        """
        :param ip_address: server to test against
        :param username: server's ssh username
        :param private_key: server's ssh private key to be used
        :param should_connect: True/False indicates positive/negative test
            positive - attempt ping and ssh
            negative - attempt ping and fail if succeed

        :raises: AssertError if the result of the connectivity check does
            not match the value of the should_connect param
        """
        # Assertion message depends on the polarity of the test
        if should_connect:
            msg = "Timed out waiting for %s to become reachable" % ip_address
        else:
            msg = "ip address %s is reachable" % ip_address
        self.assertTrue(self.ping_ip_address(ip_address,
                                             should_succeed=should_connect),
                        msg=msg)
        if should_connect:
            # no need to check ssh for negative connectivity
            self.get_remote_client(ip_address, username, private_key)
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None):
# The target login is assumed to have been configured for
# key-based authentication by cloud-init.
LOG.debug('checking network connections to IP %s with user: %s' %
(ip_address, username))
try:
self.check_vm_connectivity(ip_address,
username,
private_key,
should_connect=should_connect)
except Exception:
ex_msg = 'Public network connectivity check failed'
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
self._log_console_output(servers)
raise
def create_floating_ip(self, thing, pool_name=None):
"""Creates a floating IP and associates to a server using
Nova clients
"""
floating_ip = (self.floating_ips_client.create_floating_ip(pool_name)
['floating_ip'])
self.addCleanup(self.delete_wrapper,
self.floating_ips_client.delete_floating_ip,
floating_ip['id'])
self.floating_ips_client.associate_floating_ip_to_server(
floating_ip['ip'], thing['id'])
return floating_ip
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
This class provide helpers for network scenario tests, using the neutron
API. Helpers from ancestor which use the nova network API are overridden
with the neutron API.
This Class also enforces using Neutron instead of novanetwork.
Subclassed tests will be skipped if Neutron is not enabled
"""
credentials = ['primary', 'admin']
    @classmethod
    def skip_checks(cls):
        """Skip the entire class when Neutron is not deployed."""
        super(NetworkScenarioTest, cls).skip_checks()
        if not CONF.service_available.neutron:
            raise cls.skipException('Neutron not available')
    @classmethod
    def resource_setup(cls):
        """Cache the primary tenant id used when creating network resources."""
        super(NetworkScenarioTest, cls).resource_setup()
        cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, client=None, tenant_id=None,
namestart='network-smoke-'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_network(name=name, tenant_id=tenant_id)
network = net_resources.DeletableNetwork(client=client,
**result['network'])
self.assertEqual(network.name, name)
self.addCleanup(self.delete_wrapper, network.delete)
return network
def _list_networks(self, *args, **kwargs):
"""List networks using admin creds """
networks_list = self.admin_manager.network_client.list_networks(
*args, **kwargs)
return networks_list['networks']
def _list_subnets(self, *args, **kwargs):
"""List subnets using admin creds """
subnets_list = self.admin_manager.network_client.list_subnets(
*args, **kwargs)
return subnets_list['subnets']
def _list_routers(self, *args, **kwargs):
"""List routers using admin creds """
routers_list = self.admin_manager.network_client.list_routers(
*args, **kwargs)
return routers_list['routers']
def _list_ports(self, *args, **kwargs):
"""List ports using admin creds """
ports_list = self.admin_manager.network_client.list_ports(
*args, **kwargs)
return ports_list['ports']
def _list_agents(self, *args, **kwargs):
"""List agents using admin creds """
agents_list = self.admin_manager.network_client.list_agents(
*args, **kwargs)
return agents_list['agents']
    def _create_subnet(self, network, client=None, namestart='subnet-smoke',
                       **kwargs):
        """
        Create a subnet for the given network within the cidr block
        configured for tenant networks.
        """
        if not client:
            client = self.network_client

        def cidr_in_use(cidr, tenant_id):
            """
            :return True if subnet with cidr already exist in tenant
                False else
            """
            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
            return len(cidr_in_use) != 0

        # Select the tenant CIDR pool / prefix length for the IP version
        ip_version = kwargs.pop('ip_version', 4)
        if ip_version == 6:
            tenant_cidr = netaddr.IPNetwork(
                CONF.network.tenant_network_v6_cidr)
            num_bits = CONF.network.tenant_network_v6_mask_bits
        else:
            tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
            num_bits = CONF.network.tenant_network_mask_bits

        result = None
        str_cidr = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(num_bits):
            str_cidr = str(subnet_cidr)
            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
                continue

            subnet = dict(
                name=data_utils.rand_name(namestart),
                network_id=network.id,
                tenant_id=network.tenant_id,
                cidr=str_cidr,
                ip_version=ip_version,
                **kwargs
            )
            try:
                result = client.create_subnet(**subnet)
                break
            except lib_exc.Conflict as e:
                # Another test may have raced us to this block: move on
                # to the next candidate cidr; any other conflict is fatal.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')
        subnet = net_resources.DeletableSubnet(client=client,
                                               **result['subnet'])
        self.assertEqual(subnet.cidr, str_cidr)
        self.addCleanup(self.delete_wrapper, subnet.delete)
        return subnet
def _create_port(self, network_id, client=None, namestart='port-quotatest',
**kwargs):
if not client:
client = self.network_client
name = data_utils.rand_name(namestart)
result = client.create_port(
name=name,
network_id=network_id,
**kwargs)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_resources.DeletablePort(client=client,
**result['port'])
self.addCleanup(self.delete_wrapper, port.delete)
return port
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
ports = self._list_ports(device_id=server['id'],
fixed_ip=ip_addr)
# it might happen here that this port has more then one ip address
# as in case of dual stack- when this port is created on 2 subnets
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
if netaddr.valid_ipv4(fxip["ip_address"])]
self.assertEqual(len(port_map), 1,
"Found multiple IPv4 addresses: %s. "
"Unable to determine which port to target."
% port_map)
return port_map[0]
def _get_network_by_name(self, network_name):
net = self._list_networks(name=network_name)
self.assertNotEqual(len(net), 0,
"Unable to get network by name: %s" % network_name)
return net_resources.AttributeDict(net[0])
    def create_floating_ip(self, thing, external_network_id=None,
                           port_id=None, client=None):
        """Creates a floating IP and associates to a resource/port using
        Neutron client
        """
        if not external_network_id:
            external_network_id = CONF.network.public_network_id
        if not client:
            client = self.network_client
        if not port_id:
            # No port given: target the server's (single) IPv4 port
            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
        else:
            ip4 = None
        result = client.create_floatingip(
            floating_network_id=external_network_id,
            port_id=port_id,
            tenant_id=thing['tenant_id'],
            fixed_ip_address=ip4
        )
        floating_ip = net_resources.DeletableFloatingIp(
            client=client,
            **result['floatingip'])
        self.addCleanup(self.delete_wrapper, floating_ip.delete)
        return floating_ip
    def _associate_floating_ip(self, floating_ip, server):
        """Point the floating IP at the server's IPv4 port and verify."""
        port_id, _ = self._get_server_port_id_and_ip4(server)
        floating_ip.update(port_id=port_id)
        self.assertEqual(port_id, floating_ip.port_id)
        return floating_ip
    def _disassociate_floating_ip(self, floating_ip):
        """
        :param floating_ip: type DeletableFloatingIp
        """
        # Clearing the port id detaches the floating IP from any fixed IP
        floating_ip.update(port_id=None)
        self.assertIsNone(floating_ip.port_id)
        return floating_ip
    def check_floating_ip_status(self, floating_ip, status):
        """Verifies floatingip reaches the given status

        :param floating_ip: net_resources.DeletableFloatingIp floating IP to
            to check status
        :param status: target status
        :raises: AssertionError if status doesn't match
        """
        def refresh():
            floating_ip.refresh()
            return status == floating_ip.status

        # Poll until the status matches or the build timeout expires; the
        # assertion below is what actually reports the failure.
        tempest.test.call_until_true(refresh,
                                     CONF.network.build_timeout,
                                     CONF.network.build_interval)
        self.assertEqual(status, floating_ip.status,
                         message="FloatingIP: {fp} is at status: {cst}. "
                                 "failed to reach status: {st}"
                         .format(fp=floating_ip, cst=floating_ip.status,
                                 st=status))
        LOG.info("FloatingIP: {fp} is at status: {st}"
                 .format(fp=floating_ip, st=status))
    def _check_tenant_network_connectivity(self, server,
                                           username,
                                           private_key,
                                           should_connect=True,
                                           servers_for_debug=None):
        """Check reachability of every address the server has on tenant nets.

        No-op when tenant networks are not configured as reachable.
        """
        if not CONF.network.tenant_networks_reachable:
            msg = 'Tenant networks not configured to be reachable.'
            LOG.info(msg)
            return
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        try:
            for net_name, ip_addresses in six.iteritems(server['addresses']):
                for ip_address in ip_addresses:
                    self.check_vm_connectivity(ip_address['addr'],
                                               username,
                                               private_key,
                                               should_connect=should_connect)
        except Exception as e:
            LOG.exception('Tenant network connectivity check failed')
            # Dump server consoles and devstack net info before re-raising
            self._log_console_output(servers_for_debug)
            self._log_net_info(e)
            raise
    def _check_remote_connectivity(self, source, dest, should_succeed=True):
        """
        check ping server via source ssh connection

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: and IP to ping against
        :param should_succeed: boolean should ping succeed or not

        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def ping_remote():
            try:
                source.ping_host(dest)
            except lib_exc.SSHExecCommandFailed:
                LOG.warn('Failed to ping IP: %s via a ssh connection from: %s.'
                         % (dest, source.ssh_client.host))
                # Ping failed: only a success for the negative test case
                return not should_succeed
            return should_succeed

        return tempest.test.call_until_true(ping_remote,
                                            CONF.compute.ping_timeout,
                                            1)
def _create_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
if client is None:
client = self.network_client
if tenant_id is None:
tenant_id = client.tenant_id
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule(client=client,
secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, client=None, tenant_id=None,
namestart='secgroup-smoke'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
result = client.create_security_group(**sg_dict)
secgroup = net_resources.DeletableSecurityGroup(
client=client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.addCleanup(self.delete_wrapper, secgroup.delete)
return secgroup
def _default_security_group(self, client=None, tenant_id=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
return net_resources.DeletableSecurityGroup(client=client,
**sgs[0])
    def _create_security_group_rule(self, secgroup=None, client=None,
                                    tenant_id=None, **kwargs):
        """Create a rule from a dictionary of rule parameters.

        Create a rule in a secgroup. if secgroup not defined will search for
        default secgroup in tenant_id.

        :param secgroup: type DeletableSecurityGroup.
        :param tenant_id: if secgroup not passed -- the tenant in which to
            search for default secgroup
        :param kwargs: a dictionary containing rule parameters:
            for example, to allow incoming ssh:
            rule = {
                    direction: 'ingress'
                    protocol:'tcp',
                    port_range_min: 22,
                    port_range_max: 22
                    }
        """
        if client is None:
            client = self.network_client
        if not tenant_id:
            tenant_id = client.tenant_id
        if secgroup is None:
            # Fall back to the tenant's 'default' security group
            secgroup = self._default_security_group(client=client,
                                                    tenant_id=tenant_id)

        ruleset = dict(security_group_id=secgroup.id,
                       tenant_id=secgroup.tenant_id)
        ruleset.update(kwargs)

        sg_rule = client.create_security_group_rule(**ruleset)
        sg_rule = net_resources.DeletableSecurityGroupRule(
            client=client,
            **sg_rule['security_group_rule']
        )
        self.addCleanup(self.delete_wrapper, sg_rule.delete)
        self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
        self.assertEqual(secgroup.id, sg_rule.security_group_id)
        return sg_rule
    def _create_loginable_secgroup_rule(self, client=None, secgroup=None):
        """These rules are intended to permit inbound ssh and icmp
        traffic from all sources, so no group_id is provided.
        Setting a group_id would only permit traffic from ports
        belonging to the same security group.
        """
        if client is None:
            client = self.network_client
        rules = []
        rulesets = [
            dict(
                # ssh
                protocol='tcp',
                port_range_min=22,
                port_range_max=22,
            ),
            dict(
                # ping
                protocol='icmp',
            ),
            dict(
                # ipv6-icmp for ping6
                protocol='icmp',
                ethertype='IPv6',
            )
        ]
        for ruleset in rulesets:
            # Each ruleset is created in both directions
            for r_direction in ['ingress', 'egress']:
                ruleset['direction'] = r_direction
                try:
                    sg_rule = self._create_security_group_rule(
                        client=client, secgroup=secgroup, **ruleset)
                except lib_exc.Conflict as ex:
                    # if rule already exist - skip rule and continue
                    msg = 'Security group rule already exists'
                    if msg not in ex._error_string:
                        raise ex
                else:
                    self.assertEqual(r_direction, sg_rule.direction)
                    rules.append(sg_rule)

        return rules
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
username=ssh_login,
private_key=private_key)
def _get_router(self, client=None, tenant_id=None):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
body = client.show_router(router_id)
return net_resources.AttributeDict(**body['router'])
elif network_id:
router = self._create_router(client, tenant_id)
router.set_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
client = self.network_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
tenant_id=tenant_id)
router = net_resources.DeletableRouter(client=client,
**result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router
    def _update_router_admin_state(self, router, admin_state_up):
        # Flip the router's admin state and verify the change took effect.
        router.update(admin_state_up=admin_state_up)
        self.assertEqual(admin_state_up, router.admin_state_up)
    def create_networks(self, client=None, tenant_id=None,
                        dns_nameservers=None):
        """Create a network with a subnet connected to a router.

        The baremetal driver is a special case since all nodes are
        on the same shared network.

        :param client: network client to create resources with.
        :param tenant_id: id of tenant to create resources in.
        :param dns_nameservers: list of dns servers to send to subnet.
        :returns: network, subnet, router (subnet and router are None in
            the baremetal case).
        """
        if CONF.baremetal.driver_enabled:
            # NOTE(Shrews): This exception is for environments where tenant
            # credential isolation is available, but network separation is
            # not (the current baremetal case). Likely can be removed when
            # test account mgmt is reworked:
            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
            if not CONF.compute.fixed_network_name:
                m = 'fixed_network_name must be specified in config'
                raise exceptions.InvalidConfiguration(m)
            # All baremetal nodes share one pre-existing network; no subnet
            # or router is created in this branch.
            network = self._get_network_by_name(
                CONF.compute.fixed_network_name)
            router = None
            subnet = None
        else:
            network = self._create_network(client=client, tenant_id=tenant_id)
            router = self._get_router(client=client, tenant_id=tenant_id)
            subnet_kwargs = dict(network=network, client=client)
            # use explicit check because empty list is a valid option
            if dns_nameservers is not None:
                subnet_kwargs['dns_nameservers'] = dns_nameservers
            subnet = self._create_subnet(**subnet_kwargs)
            subnet.add_to_router(router.id)
        return network, subnet, router
def create_server(self, name=None, image=None, flavor=None,
wait_on_boot=True, wait_on_delete=True,
create_kwargs=None):
vnic_type = CONF.network.port_vnic_type
# If vnic_type is configured create port for
# every network
if vnic_type:
ports = []
networks = []
create_port_body = {'binding:vnic_type': vnic_type,
'namestart': 'port-smoke'}
if create_kwargs:
net_client = create_kwargs.get("network_client",
self.network_client)
# Convert security group names to security group ids
# to pass to create_port
if create_kwargs.get('security_groups'):
security_groups = net_client.list_security_groups().get(
'security_groups')
sec_dict = dict([(s['name'], s['id'])
for s in security_groups])
sec_groups_names = [s['name'] for s in create_kwargs[
'security_groups']]
security_groups_ids = [sec_dict[s]
for s in sec_groups_names]
if security_groups_ids:
create_port_body[
'security_groups'] = security_groups_ids
networks = create_kwargs.get('networks')
else:
net_client = self.network_client
# If there are no networks passed to us we look up
# for the tenant's private networks and create a port
# if there is only one private network. The same behaviour
# as we would expect when passing the call to the clients
# with no networks
if not networks:
networks = net_client.list_networks(filters={
'router:external': False})
self.assertEqual(1, len(networks),
"There is more than one"
" network for the tenant")
for net in networks:
net_id = net['uuid']
port = self._create_port(network_id=net_id,
client=net_client,
**create_port_body)
ports.append({'port': port.id})
if ports:
create_kwargs['networks'] = ports
self.ports = ports
return super(NetworkScenarioTest, self).create_server(
name=name, image=image, flavor=flavor,
wait_on_boot=wait_on_boot, wait_on_delete=wait_on_delete,
create_kwargs=create_kwargs)
# power/provision states as of icehouse
class BaremetalPowerStates(object):
    """Possible power states of an Ironic node.

    Values are compared against a node's 'power_state' field (see
    BaremetalScenarioTest.wait_power_state).
    """
    POWER_ON = 'power on'
    POWER_OFF = 'power off'
    REBOOT = 'rebooting'
    SUSPEND = 'suspended'
class BaremetalProvisionStates(object):
    """Possible provision states of an Ironic node.

    Values are compared against a node's 'provision_state' field (see
    BaremetalScenarioTest.wait_provisioning_state).
    """
    NOSTATE = None
    INIT = 'initializing'
    ACTIVE = 'active'
    BUILDING = 'building'
    DEPLOYWAIT = 'wait call-back'
    DEPLOYING = 'deploying'
    DEPLOYFAIL = 'deploy failed'
    DEPLOYDONE = 'deploy complete'
    DELETING = 'deleting'
    DELETED = 'deleted'
    ERROR = 'error'
class BaremetalScenarioTest(ScenarioTest):
    """Base class for scenario tests that drive Ironic baremetal nodes."""

    credentials = ['primary', 'admin']

    @classmethod
    def skip_checks(cls):
        super(BaremetalScenarioTest, cls).skip_checks()
        if (not CONF.service_available.ironic or
                not CONF.baremetal.driver_enabled):
            msg = 'Ironic not available or Ironic compute driver not enabled'
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        super(BaremetalScenarioTest, cls).setup_clients()
        # Node operations go through the admin manager's baremetal client.
        cls.baremetal_client = cls.admin_manager.baremetal_client

    @classmethod
    def resource_setup(cls):
        super(BaremetalScenarioTest, cls).resource_setup()
        # allow any issues obtaining the node list to raise early
        cls.baremetal_client.list_nodes()

    def _node_state_timeout(self, node_id, state_attr,
                            target_states, timeout=10, interval=1):
        # Poll *state_attr* of the node every *interval* seconds until it
        # reaches one of *target_states*, raising TimeoutException after
        # *timeout* seconds.
        if not isinstance(target_states, list):
            target_states = [target_states]

        def check_state():
            node = self.get_node(node_id=node_id)
            if node.get(state_attr) in target_states:
                return True
            return False

        if not tempest.test.call_until_true(
                check_state, timeout, interval):
            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
                   (node_id, state_attr, target_states))
            raise exceptions.TimeoutException(msg)

    def wait_provisioning_state(self, node_id, state, timeout):
        """Wait until the node's provision_state reaches *state*."""
        self._node_state_timeout(
            node_id=node_id, state_attr='provision_state',
            target_states=state, timeout=timeout)

    def wait_power_state(self, node_id, state):
        """Wait until the node's power_state reaches *state*."""
        self._node_state_timeout(
            node_id=node_id, state_attr='power_state',
            target_states=state, timeout=CONF.baremetal.power_timeout)

    def wait_node(self, instance_id):
        """Waits for a node to be associated with instance_id."""

        def _get_node():
            node = None
            try:
                node = self.get_node(instance_id=instance_id)
            except lib_exc.NotFound:
                pass
            return node is not None

        if not tempest.test.call_until_true(
                _get_node, CONF.baremetal.association_timeout, 1):
            msg = ('Timed out waiting to get Ironic node by instance id %s'
                   % instance_id)
            raise exceptions.TimeoutException(msg)

    def get_node(self, node_id=None, instance_id=None):
        """Fetch a node body, by node id or by associated instance uuid.

        Returns None when neither id is supplied, or when no node matches
        the given instance uuid.
        """
        if node_id:
            _, body = self.baremetal_client.show_node(node_id)
            return body
        elif instance_id:
            _, body = self.baremetal_client.show_node_by_instance_uuid(
                instance_id)
            if body['nodes']:
                return body['nodes'][0]

    def get_ports(self, node_uuid):
        """Return the ports attached to the node, fetched individually."""
        ports = []
        _, body = self.baremetal_client.list_node_ports(node_uuid)
        for port in body['ports']:
            # fetch each port on its own to get the full port body
            _, p = self.baremetal_client.show_port(port['uuid'])
            ports.append(p)
        return ports

    def add_keypair(self):
        # Create a keypair and remember it for later server boots.
        self.keypair = self.create_keypair()

    def verify_connectivity(self, ip=None):
        """Validate ssh authentication to *ip*, or to self.instance."""
        if ip:
            dest = self.get_remote_client(ip)
        else:
            dest = self.get_remote_client(self.instance)
        dest.validate_authentication()

    def boot_instance(self):
        """Boot a server on a baremetal node and wait for deployment."""
        create_kwargs = {
            'key_name': self.keypair['name']
        }
        self.instance = self.create_server(
            wait_on_boot=False, create_kwargs=create_kwargs)
        self.wait_node(self.instance['id'])
        self.node = self.get_node(instance_id=self.instance['id'])
        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
        # The node first enters deploy-wait (or may go straight to active)
        # within a short window...
        self.wait_provisioning_state(
            self.node['uuid'],
            [BaremetalProvisionStates.DEPLOYWAIT,
             BaremetalProvisionStates.ACTIVE],
            timeout=15)
        # ...then must become fully active within the configured timeout.
        self.wait_provisioning_state(self.node['uuid'],
                                     BaremetalProvisionStates.ACTIVE,
                                     timeout=CONF.baremetal.active_timeout)
        waiters.wait_for_server_status(self.servers_client,
                                       self.instance['id'], 'ACTIVE')
        # Refresh node and instance bodies now that the deploy finished.
        self.node = self.get_node(instance_id=self.instance['id'])
        self.instance = (self.servers_client.show_server(self.instance['id'])
                         ['server'])

    def terminate_instance(self):
        """Delete the server and wait for the node to be unprovisioned."""
        self.servers_client.delete_server(self.instance['id'])
        self.wait_power_state(self.node['uuid'],
                              BaremetalPowerStates.POWER_OFF)
        self.wait_provisioning_state(
            self.node['uuid'],
            BaremetalProvisionStates.NOSTATE,
            timeout=CONF.baremetal.unprovision_timeout)
class EncryptionScenarioTest(ScenarioTest):
    """
    Base class for encryption scenario tests
    """

    credentials = ['primary', 'admin']

    @classmethod
    def setup_clients(cls):
        super(EncryptionScenarioTest, cls).setup_clients()
        # Volume type management requires the admin client; pick the one
        # matching the configured volume API version.
        if CONF.volume_feature_enabled.api_v1:
            cls.admin_volume_types_client = cls.os_adm.volume_types_client
        else:
            cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client

    def _wait_for_volume_status(self, status):
        # Block until self.volume reaches *status*.
        self.status_timeout(
            self.volume_client.volumes, self.volume.id, status)

    def nova_boot(self):
        # Boot self.server from self.image with a freshly created keypair.
        self.keypair = self.create_keypair()
        create_kwargs = {'key_name': self.keypair['name']}
        self.server = self.create_server(image=self.image,
                                         create_kwargs=create_kwargs)

    def create_volume_type(self, client=None, name=None):
        """Create a randomly named volume type and schedule its deletion.

        :param client: volume types client; defaults to the admin client.
        :param name: suffix for the generated name ('generic' if omitted).
        :returns: the created volume type body (contains at least 'id').
        """
        if not client:
            client = self.admin_volume_types_client
        if not name:
            name = 'generic'
        randomized_name = data_utils.rand_name('scenario-type-' + name)
        LOG.debug("Creating a volume type: %s", randomized_name)
        body = client.create_volume_type(
            randomized_name)['volume_type']
        self.assertIn('id', body)
        self.addCleanup(client.delete_volume_type, body['id'])
        return body

    def create_encryption_type(self, client=None, type_id=None, provider=None,
                               key_size=None, cipher=None,
                               control_location=None):
        """Create an encryption type for *type_id* (created when omitted).

        NOTE: the encryption body from the response is not returned to
        the caller.
        """
        if not client:
            client = self.admin_volume_types_client
        if not type_id:
            volume_type = self.create_volume_type()
            type_id = volume_type['id']
        LOG.debug("Creating an encryption type for volume type: %s", type_id)
        client.create_encryption_type(
            type_id, provider=provider, key_size=key_size, cipher=cipher,
            control_location=control_location)['encryption']
class SwiftScenarioTest(ScenarioTest):
    """
    Provide harness to do Swift scenario tests.

    Subclasses implement the tests that use the methods provided by this
    class.
    """

    @classmethod
    def skip_checks(cls):
        super(SwiftScenarioTest, cls).skip_checks()
        if not CONF.service_available.swift:
            skip_msg = ("%s skipped as swift is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)

    @classmethod
    def setup_credentials(cls):
        cls.set_network_resources()
        super(SwiftScenarioTest, cls).setup_credentials()
        # Swift operations require a user holding the operator role.
        operator_role = CONF.object_storage.operator_role
        cls.os_operator = cls.get_client_manager(roles=[operator_role])

    @classmethod
    def setup_clients(cls):
        super(SwiftScenarioTest, cls).setup_clients()
        # Clients for Swift
        cls.account_client = cls.os_operator.account_client
        cls.container_client = cls.os_operator.container_client
        cls.object_client = cls.os_operator.object_client

    def get_swift_stat(self):
        """get swift status for our user account."""
        self.account_client.list_account_containers()
        LOG.debug('Swift status information obtained successfully')

    def create_container(self, container_name=None):
        """Create a container (random name if none given), with cleanup."""
        name = container_name or data_utils.rand_name(
            'swift-scenario-container')
        self.container_client.create_container(name)
        # look for the container to assure it is created
        self.list_and_check_container_objects(name)
        # FIX: pass the name as a lazy logging argument instead of eagerly
        # interpolating with '%' so formatting only happens when the debug
        # level is enabled.
        LOG.debug('Container %s created', name)
        self.addCleanup(self.delete_wrapper,
                        self.container_client.delete_container,
                        name)
        return name

    def delete_container(self, container_name):
        self.container_client.delete_container(container_name)
        # FIX: lazy logging argument (see create_container).
        LOG.debug('Container %s deleted', container_name)

    def upload_object_to_container(self, container_name, obj_name=None):
        """Upload an arbitrary object, with cleanup; return (name, data)."""
        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
        obj_data = data_utils.arbitrary_string()
        self.object_client.create_object(container_name, obj_name, obj_data)
        self.addCleanup(self.delete_wrapper,
                        self.object_client.delete_object,
                        container_name,
                        obj_name)
        return obj_name, obj_data

    def delete_object(self, container_name, filename):
        self.object_client.delete_object(container_name, filename)
        # Verify the object is really gone.
        self.list_and_check_container_objects(container_name,
                                              not_present_obj=[filename])

    def list_and_check_container_objects(self, container_name,
                                         present_obj=None,
                                         not_present_obj=None):
        """
        List objects for a given container and assert which are present and
        which are not.
        """
        if present_obj is None:
            present_obj = []
        if not_present_obj is None:
            not_present_obj = []
        _, object_list = self.container_client.list_container_contents(
            container_name)
        if present_obj:
            for obj in present_obj:
                self.assertIn(obj, object_list)
        if not_present_obj:
            for obj in not_present_obj:
                self.assertNotIn(obj, object_list)

    def change_container_acl(self, container_name, acl):
        """Set the container's read ACL and verify it was applied."""
        metadata_param = {'metadata_prefix': 'x-container-',
                          'metadata': {'read': acl}}
        self.container_client.update_container_metadata(container_name,
                                                        **metadata_param)
        resp, _ = self.container_client.list_container_metadata(container_name)
        self.assertEqual(resp['x-container-read'], acl)

    def download_and_verify(self, container_name, obj_name, expected_data):
        """Fetch the object and assert its payload equals *expected_data*."""
        _, obj = self.object_client.get_object(container_name, obj_name)
        self.assertEqual(obj, expected_data)
| |
from typing import Sequence, List, Optional, Union, Any, Dict
from enum import Enum
from numbers import Number
import numpy as np
import datetime
from pydantic import BaseModel, Field
from .config import SweepConfig
from ._types import floating
class RunState(str, Enum):
    """Lifecycle states of a wandb sweep run.

    Subclasses ``str`` so members compare equal to their string values.
    """
    pending = "pending"
    running = "running"
    finished = "finished"
    killed = "killed"
    crashed = "crashed"
    failed = "failed"
    preempted = "preempted"
    preempting = "preempting"
def is_number(x: Any) -> bool:
    """Return whether *x* is a finite scalar number (bools excluded)."""
    try:
        if not np.isscalar(x):
            return False
        if not np.isfinite(x):
            return False
        return isinstance(x, Number) and not isinstance(x, bool)
    except TypeError:
        # np.isfinite raises TypeError for non-numeric scalars (e.g. str).
        return False
class SweepRun(BaseModel):
    """A wandb Run that is part of a Sweep.

    >>> run = SweepRun(
    ...     name="my_run",
    ...     state=RunState.running,
    ...     config={"a": {"value": 1}},
    ... )

    Args:
        name: Name of the run.
        state: State of the run.
        config: `dict` representation of the run's wandb.config.
        summaryMetrics: `dict` of summary statistics for the run.
        history: List of dicts containing the arguments to calls of wandb.log made during the run.
        search_info: Dict containing information produced by the search algorithm.
        early_terminate_info: Dict containing information produced by the early terminate algorithm.
        stopped: Whether the run was stopped in the sweep
        shouldStop: Whether the run should stop in the sweep
        heartbeat_at: The last time the backend received a heart beat from the run
        exitcode: The exitcode of the process that trained the run
        running: Whether the run is currently running
    """

    name: Optional[str] = None
    # Aliased fields accept the backend's camelCase keys as well as the
    # snake_case attribute names (allow_population_by_field_name below).
    summary_metrics: Optional[dict] = Field(
        default_factory=lambda: {}, alias="summaryMetrics"
    )
    history: List[dict] = Field(default_factory=lambda: [], alias="sampledHistory")
    config: dict = Field(default_factory=lambda: {})
    state: RunState = RunState.pending
    search_info: Optional[Dict] = None
    early_terminate_info: Optional[Dict] = None
    stopped: bool = False
    should_stop: bool = Field(default=False, alias="shouldStop")
    heartbeat_at: Optional[datetime.datetime] = Field(default=None, alias="heartbeatAt")
    exitcode: Optional[int] = None
    running: Optional[bool] = None

    class Config:
        # Store enum fields by their value and allow construction from
        # field names in addition to aliases.
        use_enum_values = True
        allow_population_by_field_name = True

    def metric_history(
        self, metric_name: str, filter_invalid: bool = False
    ) -> List[floating]:
        """Return all logged values of *metric_name*, in history order.

        When *filter_invalid* is true, entries that are not finite
        numbers (per ``is_number``) are dropped.
        """
        return [
            d[metric_name]
            for d in self.history
            if metric_name in d
            and not (filter_invalid and not is_number(d[metric_name]))
        ]

    def summary_metric(self, metric_name: str) -> floating:
        """Return the summary value of *metric_name*.

        Raises:
            ValueError: if the run has no summary metrics at all.
            KeyError: if *metric_name* is not among the summary metrics.
        """
        if self.summary_metrics is None:
            raise ValueError("this run has no summary metrics")
        if metric_name not in self.summary_metrics:
            raise KeyError(f"{metric_name} is not a summary metric of this run.")
        return self.summary_metrics[metric_name]

    def metric_extremum(self, metric_name: str, kind: str) -> floating:
        """Calculate the maximum or minimum value of a specified metric.

        >>> run = SweepRun(history=[{'a': 1}, {'b': 3}, {'a': 2, 'b': 4}], summary_metrics={'a': 50})
        >>> assert run.metric_extremum('a', 'maximum') == 50

        Args:
            metric_name: The name of the target metric.
            kind: What kind of extremum to get (either "maximum" or "minimum").

        Returns:
            The maximum or minimum metric.
        """
        # any kind other than "maximum" is treated as "minimum"
        cmp_func = np.max if kind == "maximum" else np.min
        try:
            summary_metric = [self.summary_metric(metric_name)]
        except KeyError:
            # no summary value for this metric; rely on history alone
            summary_metric = []
        # consider both the logged history and the summary value
        all_metrics = self.metric_history(metric_name) + summary_metric
        if len(all_metrics) == 0:
            raise ValueError(f"Cannot extract metric {metric_name} from run")
        # drop non-finite / non-numeric entries before comparing
        all_metrics = list(filter(is_number, all_metrics))
        if len(all_metrics) == 0:
            raise ValueError("Run does not have any finite metric values")
        return cmp_func(all_metrics)
def next_run(
    sweep_config: Union[dict, SweepConfig],
    runs: List[SweepRun],
    validate: bool = False,
    **kwargs,
) -> Optional[SweepRun]:
    """Return a single suggested next run for the sweep (see `next_runs`)."""
    suggestions = next_runs(sweep_config, runs, validate, 1, **kwargs)
    return suggestions[0]
def next_runs(
    sweep_config: Union[dict, SweepConfig],
    runs: List[SweepRun],
    validate: bool = False,
    n: int = 1,
    **kwargs,
) -> Sequence[Optional[SweepRun]]:
    """Calculate the next runs in a sweep.

    >>> suggested_run = next_runs({
    ...    'method': 'grid',
    ...    'parameters': {'a': {'values': [1, 2, 3]}}
    ... }, [])
    >>> assert suggested_run[0].config['a']['value'] == 1

    Args:
        sweep_config: The config for the sweep.
        runs: List of runs in the sweep.
        validate: Whether to validate `sweep_config` against the SweepConfig JSONschema.
           If true, will raise a Validation error if `sweep_config` does not conform to
           the schema. If false, will attempt to run the sweep with an unvalidated schema.
        n: the number of runs to return

    Returns:
        The suggested runs.
    """
    # Imported lazily to avoid circular imports at module load time.
    from .grid_search import grid_search_next_runs
    from .random_search import random_search_next_runs
    from .bayes_search import bayes_search_next_runs

    if validate:
        # May raise a validation error when the config violates the schema.
        sweep_config = SweepConfig(sweep_config)

    if "method" not in sweep_config:
        raise ValueError("Sweep config must contain method section")
    if "parameters" not in sweep_config:
        raise ValueError("Sweep config must contain parameters section")
    parameters = sweep_config["parameters"]
    if not isinstance(parameters, dict) or len(parameters) == 0:
        raise ValueError(
            "Parameters section of sweep config must be a dict of at least length 1"
        )

    method = sweep_config["method"]
    if method == "grid":
        return grid_search_next_runs(
            runs, sweep_config, validate=validate, n=n, **kwargs
        )
    if method == "random":
        # Random search does not look at prior runs.
        return random_search_next_runs(sweep_config, validate=validate, n=n)
    if method == "bayes":
        return bayes_search_next_runs(
            runs, sweep_config, validate=validate, n=n, **kwargs
        )
    raise ValueError(
        f'Invalid search type {method}, must be one of ["grid", "random", "bayes"]'
    )
def stop_runs(
    sweep_config: Union[dict, SweepConfig],
    runs: List[SweepRun],
    validate: bool = False,
) -> List[SweepRun]:
    """Calculate the runs in a sweep to stop by early termination.

    >>> to_stop = stop_runs({
    ...    "method": "grid",
    ...    "metric": {"name": "loss", "goal": "minimize"},
    ...    "early_terminate": {
    ...        "type": "hyperband",
    ...        "max_iter": 5,
    ...        "eta": 2,
    ...        "s": 2,
    ...    },
    ...    "parameters": {"a": {"values": [1, 2, 3]}},
    ... }, [
    ...    SweepRun(
    ...        name="a",
    ...        state=RunState.finished,  # This is already stopped
    ...        history=[
    ...            {"loss": 10},
    ...            {"loss": 9},
    ...        ],
    ...    ),
    ...    SweepRun(
    ...        name="b",
    ...        state=RunState.running,  # This should be stopped
    ...        history=[
    ...            {"loss": 10},
    ...            {"loss": 10},
    ...        ],
    ...    ),
    ...    SweepRun(
    ...        name="c",
    ...        state=RunState.running,  # This passes band 1 but not band 2
    ...        history=[
    ...            {"loss": 10},
    ...            {"loss": 8},
    ...            {"loss": 8},
    ...        ],
    ...    ),
    ...    SweepRun(
    ...        name="d",
    ...        state=RunState.running,
    ...        history=[
    ...            {"loss": 10},
    ...            {"loss": 7},
    ...            {"loss": 7},
    ...        ],
    ...    ),
    ...    SweepRun(
    ...        name="e",
    ...        state=RunState.finished,
    ...        history=[
    ...            {"loss": 10},
    ...            {"loss": 6},
    ...            {"loss": 6},
    ...        ],
    ...    ),
    ... ])

    Args:
        sweep_config: The config for the sweep.
        runs: List of runs in the sweep.
        validate: Whether to validate `sweep_config` against the SweepConfig JSONschema.
           If true, will raise a Validation error if `sweep_config` does not conform to
           the schema. If false, will attempt to run the sweep with an unvalidated schema.

    Returns:
        A list of the runs to stop.
    """
    # Imported lazily to avoid circular imports at module load time.
    from .hyperband_stopping import hyperband_stop_runs

    if validate:
        # May raise a validation error when the config violates the schema.
        sweep_config = SweepConfig(sweep_config)

    if "metric" not in sweep_config:
        raise ValueError('early terminate requires "metric" section')
    if "early_terminate" not in sweep_config:
        raise ValueError('early terminate requires "early_terminate" section.')

    terminate_type = sweep_config["early_terminate"]["type"]
    if terminate_type != "hyperband":
        raise ValueError(
            f'Invalid early stopping type {terminate_type}, must be one of ["hyperband"]'
        )
    return hyperband_stop_runs(runs, sweep_config, validate=validate)
| |
# encoding: utf-8
"""
Plot-related objects. A plot is known as a chart group in the MS API. A chart
can have more than one plot overlaid on another, such as a line plot
layered over a bar plot.
"""
from __future__ import absolute_import, print_function, unicode_literals
from ..enum.chart import XL_CHART_TYPE as XL
from ..oxml.ns import qn
from ..oxml.simpletypes import ST_BarDir, ST_Grouping
from .series import SeriesCollection
from ..text import Font
from ..util import lazyproperty
class Plot(object):
    """
    A distinct plot appearing in the plot area of a chart. A chart can hold
    several plots, in which case they are drawn as superimposed layers, for
    example a line plot on top of a bar chart.
    """
    def __init__(self, xChart, chart):
        super(Plot, self).__init__()
        self._element = xChart
        self._chart = chart

    @property
    def categories(self):
        """
        Tuple of the category strings for this plot, ordered as they appear
        on the category axis.
        """
        return tuple(pt.v.text for pt in self._element.cat_pts)

    @property
    def chart(self):
        """
        The |Chart| object containing this plot.
        """
        return self._chart

    @property
    def data_labels(self):
        """
        |DataLabels| instance for the collection of data labels associated
        with this plot.
        """
        dLbls = self._element.dLbls
        if dLbls is None:
            raise ValueError(
                'plot has no data labels, set has_data_labels = True first'
            )
        return DataLabels(dLbls)

    @property
    def has_data_labels(self):
        """
        Read/write boolean, |True| if the plot has data labels. Assigning
        |True| adds data labels; assigning |False| removes any that exist.
        """
        return self._element.dLbls is not None

    @has_data_labels.setter
    def has_data_labels(self, value):
        """
        Add or remove the ``<c:dLbls>`` child element as needed so its
        presence matches the truthiness of *value*. A newly added element
        gets default children and settings.
        """
        if bool(value):
            if self._element.dLbls is None:
                self._element._add_dLbls()
        else:
            self._element._remove_dLbls()

    @lazyproperty
    def series(self):
        """
        Sequence of |Series| objects for this plot, in plot order.
        """
        return SeriesCollection(self._element)

    @property
    def vary_by_categories(self):
        """
        Read/write boolean controlling whether each point gets a different
        color. Only effective for a single series; PowerPoint varies color
        by series when more than one series is present.
        """
        varyColors = self._element.varyColors
        return True if varyColors is None else varyColors.val

    @vary_by_categories.setter
    def vary_by_categories(self, value):
        self._element.get_or_add_varyColors().val = bool(value)
class AreaPlot(Plot):
    """
    An area plot (``c:areaChart`` chart group). Behavior inherited from
    |Plot|.
    """
class Area3DPlot(Plot):
    """
    A 3-dimensional area plot (``c:area3DChart`` chart group).
    """
class BarPlot(Plot):
    """
    A bar chart-style plot (``c:barChart`` chart group).
    """
    @property
    def gap_width(self):
        """
        Width of the gap between the bar(s) of each category, expressed as
        an integer percentage of the bar width. Defaults to 150 (1.5 bar
        widths) when the element is absent.
        """
        gapWidth = self._element.gapWidth
        return 150 if gapWidth is None else gapWidth.val

    @gap_width.setter
    def gap_width(self, value):
        self._element.get_or_add_gapWidth().val = value

    @property
    def overlap(self):
        """
        Read/write int in -100..100: the percentage of bar width by which
        adjacent bars of a multi-series chart overlap. 0 when unset; -100
        leaves a full bar-width gap, 100 superimposes all bars in a
        category. Stacked bar plots default to 100.
        """
        overlap = self._element.overlap
        return 0 if overlap is None else overlap.val

    @overlap.setter
    def overlap(self, value):
        """
        Write *value* to the ``<c:overlap>`` child element; a zero value is
        expressed by removing the element entirely.
        """
        if value == 0:
            self._element._remove_overlap()
        else:
            self._element.get_or_add_overlap().val = value
class DataLabels(object):
    """
    The collection of data labels belonging to a plot. Series- and
    point-level label collections are not yet implemented.
    """
    def __init__(self, dLbls):
        super(DataLabels, self).__init__()
        self._element = dLbls

    @lazyproperty
    def font(self):
        """
        |Font| object providing access to the text properties of these
        data labels, such as bold and italic.
        """
        return Font(self._element.defRPr)

    @property
    def number_format(self):
        """
        Read/write string: the number format for this set of data labels,
        'General' when none has been set. Has no rendered effect while
        :meth:`number_format_is_linked` is |True|; assigning a format
        automatically unlinks it.
        """
        numFmt = self._element.numFmt
        return 'General' if numFmt is None else numFmt.formatCode

    @number_format.setter
    def number_format(self, value):
        self._element.get_or_add_numFmt().formatCode = value
        # setting an explicit format implies it is no longer source-linked
        self.number_format_is_linked = False

    @property
    def number_format_is_linked(self):
        """
        Read/write boolean: whether the number format should be taken from
        the source spreadsheet instead of :meth:`number_format`.
        """
        numFmt = self._element.numFmt
        if numFmt is None:
            return True
        source_linked = numFmt.sourceLinked
        if source_linked is None:
            return True
        return source_linked

    @number_format_is_linked.setter
    def number_format_is_linked(self, value):
        self._element.get_or_add_numFmt().sourceLinked = value

    @property
    def position(self):
        """
        Read/write :ref:`XlDataLabelPosition` member giving the position of
        the labels relative to their data point, or |None| when no position
        is specified (PowerPoint then uses the chart-type default).
        """
        dLblPos = self._element.dLblPos
        return None if dLblPos is None else dLblPos.val

    @position.setter
    def position(self, value):
        if value is None:
            # removing the element restores PowerPoint's default placement
            self._element._remove_dLblPos()
        else:
            self._element.get_or_add_dLblPos().val = value
class LinePlot(Plot):
    """
    A line chart-style plot (``c:lineChart`` chart group).
    """
class PiePlot(Plot):
    """
    A pie chart-style plot (``c:pieChart`` chart group).
    """
def PlotFactory(xChart, chart):
    """
    Return an instance of the |Plot| subclass matching the tagname of
    *xChart*, raising |ValueError| for an unsupported chart element.
    """
    plot_cls_for_tag = {
        qn('c:areaChart'): AreaPlot,
        qn('c:area3DChart'): Area3DPlot,
        qn('c:barChart'): BarPlot,
        qn('c:lineChart'): LinePlot,
        qn('c:pieChart'): PiePlot,
    }
    try:
        PlotCls = plot_cls_for_tag[xChart.tag]
    except KeyError:
        raise ValueError('unsupported plot type %s' % xChart.tag)
    return PlotCls(xChart, chart)
class PlotTypeInspector(object):
    """
    "One-shot" service object that knows how to identify the type of a plot
    as a member of the XL_CHART_TYPE enumeration.
    """
    @classmethod
    def chart_type(cls, plot):
        """
        Return the member of :ref:`XlChartType` that corresponds to the chart
        type of *plot*.

        Raises NotImplementedError for Plot subclasses with no
        differentiator registered.
        """
        try:
            # dispatch on the concrete Plot subclass name
            chart_type_method = {
                'AreaPlot': cls._differentiate_area_chart_type,
                'Area3DPlot': cls._differentiate_area_3d_chart_type,
                'BarPlot': cls._differentiate_bar_chart_type,
                'LinePlot': cls._differentiate_line_chart_type,
                'PiePlot': cls._differentiate_pie_chart_type,
            }[plot.__class__.__name__]
        except KeyError:
            raise NotImplementedError(
                "chart_type() not implemented for %s" %
                plot.__class__.__name__
            )
        return chart_type_method(plot)

    @classmethod
    def _differentiate_area_3d_chart_type(cls, plot):
        # the grouping (standard/stacked/percent-stacked) picks the variant
        return {
            ST_Grouping.STANDARD: XL.THREE_D_AREA,
            ST_Grouping.STACKED: XL.THREE_D_AREA_STACKED,
            ST_Grouping.PERCENT_STACKED: XL.THREE_D_AREA_STACKED_100,
        }[plot._element.grouping_val]

    @classmethod
    def _differentiate_area_chart_type(cls, plot):
        return {
            ST_Grouping.STANDARD: XL.AREA,
            ST_Grouping.STACKED: XL.AREA_STACKED,
            ST_Grouping.PERCENT_STACKED: XL.AREA_STACKED_100,
        }[plot._element.grouping_val]

    @classmethod
    def _differentiate_bar_chart_type(cls, plot):
        # bar direction distinguishes horizontal BAR_* from vertical COLUMN_*
        barChart = plot._element
        if barChart.barDir.val == ST_BarDir.BAR:
            return {
                ST_Grouping.CLUSTERED: XL.BAR_CLUSTERED,
                ST_Grouping.STACKED: XL.BAR_STACKED,
                ST_Grouping.PERCENT_STACKED: XL.BAR_STACKED_100,
            }[barChart.grouping_val]
        if barChart.barDir.val == ST_BarDir.COL:
            return {
                ST_Grouping.CLUSTERED: XL.COLUMN_CLUSTERED,
                ST_Grouping.STACKED: XL.COLUMN_STACKED,
                ST_Grouping.PERCENT_STACKED: XL.COLUMN_STACKED_100,
            }[barChart.grouping_val]
        raise ValueError(
            "invalid barChart.barDir value '%s'" % barChart.barDir.val
        )

    @classmethod
    def _differentiate_line_chart_type(cls, plot):
        lineChart = plot._element
        if cls._has_line_markers(lineChart):
            return {
                ST_Grouping.STANDARD: XL.LINE_MARKERS,
                ST_Grouping.STACKED: XL.LINE_MARKERS_STACKED,
                ST_Grouping.PERCENT_STACKED: XL.LINE_MARKERS_STACKED_100,
            }[plot._element.grouping_val]
        else:
            return {
                ST_Grouping.STANDARD: XL.LINE,
                ST_Grouping.STACKED: XL.LINE_STACKED,
                ST_Grouping.PERCENT_STACKED: XL.LINE_STACKED_100,
            }[plot._element.grouping_val]

    @classmethod
    def _has_line_markers(cls, lineChart):
        # treat the chart as unmarked as soon as any series explicitly
        # declares its marker symbol to be "none"
        none_marker_symbols = lineChart.xpath(
            './c:ser/c:marker/c:symbol[@val="none"]'
        )
        if none_marker_symbols:
            return False
        return True

    @classmethod
    def _differentiate_pie_chart_type(cls, plot):
        # a c:explosion element on any series marks the pie as exploded
        pieChart = plot._element
        explosion = pieChart.xpath('./c:ser/c:explosion')
        return XL.PIE_EXPLODED if explosion else XL.PIE
| |
'''
encounter_db.py
Provides methods to write a list of stellar close encounters to files in a systematic way.
The provided output is meant to be later loaded in a systematic way.
Each star cluster run is saved in a directory, categorized by the cluster parameters.
To load multiple runs, `os.walk` may be used.
'''
import os
import time
import hashlib
import zlib
import json
from amuse.units import units
# File names written inside each run's output directory.
ENCOUNTER_FILENAME = 'encounters.txt'
CLUSTER_FILENAME = 'cluster.txt'
CLUSTER_PARAMS_FILENAME = 'cluster_params.json'
def make_run_id():
    """Return a short quasi-unique hex id derived from the current time.

    The current GMT time (whole-second resolution) is CRC32-hashed, so two
    runs started within the same second receive the same id.
    """
    stamp = str(time.mktime(time.gmtime()))
    # FIX: zlib.crc32 requires bytes on Python 3, so encode the timestamp
    # string; the mask keeps the value unsigned (crc32 may return a signed
    # int on Python 2).
    return hex(zlib.crc32(stamp.encode('ascii')) & 0xffffffff)
# Snapshot of one body participating in an encounter
class EncounterBody(object):
    """
    A model for a single star of an encounter. All attributes should be in
    SI units.
    """
    def __init__(self, id, mass, radius, pos, velocity):
        """Store the body's identity and kinematic state."""
        self.id = id
        self.mass = mass
        self.radius = radius
        self.pos = pos
        self.velocity = velocity

    def copy_to_particle(self, particle):
        """Write this body's attributes onto *particle* in place."""
        particle.id = self.id
        particle.mass = self.mass
        particle.radius = self.radius
        particle.position = self.pos
        particle.velocity = self.velocity

    @staticmethod
    def from_particle(part, conv):
        """Build an EncounterBody from *part*, converting to SI via *conv*."""
        to_si = conv.to_si
        position = [to_si(part.x), to_si(part.y), to_si(part.z)]
        velocity = [to_si(part.vx), to_si(part.vy), to_si(part.vz)]
        return EncounterBody(part.id, to_si(part.mass), to_si(part.radius),
                             position, velocity)

    def __repr__(self):
        return '<Body {0}: mass={1}>'.format(self.id, self.mass)
class OrbitParams(object):
    """
    A model for binary orbital parameters of an encounter.
    All units should be in SI.
    """

    def __init__(self, M, a, e, r, E, t):
        """Record total mass M, semi-major axis a, eccentricity e,
        current separation r, orbital energy E and time t."""
        self.M = M
        self.a = a
        self.e = e
        self.r = r
        self.E = E
        self.t = t

    @property
    def peri(self):
        """Pericentre distance |a * (1 - e)|, expressed in AU."""
        return abs(self.a * (1.0 - self.e)).as_quantity_in(units.AU)

    @property
    def apo(self):
        """Apocentre distance |a * (1 + e)|, expressed in AU."""
        return abs(self.a * (1.0 + self.e)).as_quantity_in(units.AU)

    @staticmethod
    def from_nbody_params(M, a, e, r, E, t, conv):
        """Build an OrbitParams instance, converting every value to SI
        units with the supplied converter *conv*."""
        si = conv.to_si
        return OrbitParams(si(M), si(a), si(e), si(r), si(E), si(t))
# Records all relevant data of an encounter
class Encounter(object):
    """
    All data for a single encounter.
    """
    # bodies: pair of EncounterBody instances; orbit_params: OrbitParams;
    # time: AMUSE time quantity; id: left None until assigned by the writer.
    def __init__(self, bodies, orbit_params, time, id=None):
        self.bodies = bodies
        self.orbit_params = orbit_params
        self.time = time
        self.id = id
    # Return time, ids, object (star planet), and orbital params
    # NOTE(review): 'tapo' in the template below looks like a typo for 'apo'
    # (the value at {6} is the apocentre) — confirm before changing output.
    def __repr__(self):
        return \
            '''<Encounter @ t={0}
            \tperi={5}, tapo={6}, r_init={7}, ecc={8}
            \tBody {1}: {3}
            \tBody {2}: {4}
            >''' \
            .format(self.time.value_in(units.Myr), self.bodies[0].id,
                    self.bodies[1].id, self.bodies[0], self.bodies[1],
                    self.orbit_params.peri, self.orbit_params.apo, self.orbit_params.r,
                    self.orbit_params.e)
# Holds the cluster configuration: star count and intended end time.
class ClusterParameters(object):
    def __init__(self, n_bodies, t_end):
        self.n_bodies = n_bodies
        self.t_end = t_end

    def get_dir_string(self):
        """Directory-name fragment identifying this cluster configuration."""
        return 'king_n=' + str(self.n_bodies)
class EncounterDbWriter(object):
    """
    Writer class for emitting encounter records in the encounter db.
    A single EncounterDbWriter instance is intended to write all data for
    a given cluster.
    """
    def __init__(self, root_directory, cluster_params):
        """Create the run directory under *root_directory* and open the
        encounter file for writing.

        *cluster_params* may be None, in which case the run directory is
        created directly under the root.
        """
        self.root_directory = root_directory
        self.run_id = make_run_id()
        self.run_id_str = EncounterDbWriter.make_run_id_string(self.run_id)
        self.directory = self.root_directory
        self.cluster_params = cluster_params
        if cluster_params is not None:
            # Group runs that share a cluster configuration together.
            cluster_params_dir = cluster_params.get_dir_string()
            self.directory = os.path.join(self.root_directory, cluster_params_dir)
        self.directory = os.path.join(self.directory, self.run_id_str)
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)
        self.encounter_filename = os.path.join(self.directory, ENCOUNTER_FILENAME)
        self.encounter_file = open(self.encounter_filename, 'w')
        self.next_encounter_id = 0
    def output_directory_root(self):
        """Return the directory all files for this run are written into."""
        return self.directory
    @staticmethod
    def make_run_id_string(run_id):
        """Combine the current date/time with *run_id* into a directory name."""
        date_part = time.strftime('%Y_%m_%d-%H%M%S')
        return '{0}_{1}'.format(date_part, run_id)
    def finalize(self, end_time, expected_end, ex=None):
        """Write the run-summary JSON and close the encounter file.

        *end_time* / *expected_end* are AMUSE quantities; *ex* is an
        optional exception that aborted the run.
        """
        param_obj = {
            'end_time': end_time.number,
            'target_time': expected_end.number,
            # Guard: __init__ accepts cluster_params=None, so finalize must too.
            'num_bodies': self.cluster_params.n_bodies if self.cluster_params is not None else None,
            'exception': None
        }
        if ex is not None:
            # Store the string form: exception objects are not JSON-serializable.
            param_obj['exception'] = str(ex)
        filename = os.path.join(self.directory, CLUSTER_PARAMS_FILENAME)
        with open(filename, 'w') as json_file:
            json_file.write(json.dumps(param_obj))
        self.encounter_file.close()
    # Write out key information about the cluster
    def write_cluster(self, stars, conv):
        """Write one tab-separated row per star: id, mass (MSun),
        radius (RSun), position (AU) and velocity (km/s)."""
        cluster_filename = os.path.join(self.directory, CLUSTER_FILENAME)
        with open(cluster_filename, 'w') as f:
            for star in stars:
                fields = [
                    star.id,
                    conv.to_si(star.mass).value_in(units.MSun),
                    conv.to_si(star.radius).value_in(units.RSun),
                    conv.to_si(star.x).value_in(units.AU),
                    conv.to_si(star.y).value_in(units.AU),
                    conv.to_si(star.z).value_in(units.AU),
                    conv.to_si(star.vx).value_in(units.kms),
                    conv.to_si(star.vy).value_in(units.kms),
                    conv.to_si(star.vz).value_in(units.kms)
                ]
                f.write('\t'.join([str(x) for x in fields]) + '\n')
    # Write out information about the encounters
    def write_encounter(self, encounter, conv):
        """Append one tab-separated row describing *encounter*.

        Column layout matches EncounterDbReader: orbit columns first,
        the encounter id at index 8, then 9 columns per body.
        """
        if encounter.id is None:
            encounter.id = self.next_encounter_id
            self.next_encounter_id += 1
        orbit = encounter.orbit_params
        fields = [
            encounter.time.value_in(units.Myr),
            orbit.peri.value_in(units.AU),
            orbit.apo.value_in(units.AU),
            orbit.r.value_in(units.AU),
            orbit.e,
            orbit.a.value_in(units.AU),
            orbit.M.value_in(units.MSun),
            orbit.E.value_in(units.m**2 / units.s**2),
            # Bug fix: record this encounter's own id — the reader parses
            # this column as the id. Previously the already-incremented
            # next_encounter_id was written here.
            encounter.id
        ]
        for star in encounter.bodies:
            star_params = [
                star.mass.value_in(units.MSun),
                star.radius.value_in(units.km),
                star.pos[0].value_in(units.AU),
                star.pos[1].value_in(units.AU),
                star.pos[2].value_in(units.AU),
                star.velocity[0].value_in(units.kms),
                star.velocity[1].value_in(units.kms),
                star.velocity[2].value_in(units.kms),
                star.id
            ]
            fields.extend(star_params)
        self.encounter_file.write(
            '\t'.join([str(x) for x in fields]) + '\n')
        self.encounter_file.flush()
class EncounterDbReader(object):
    """Reads encounter records written by EncounterDbWriter."""
    # Per-body columns start at index 9; each body occupies 9 columns.
    BODY_IDX_START = 9
    BODY_IDX_SIZE = 9
    def __init__(self, directory):
        """*directory* is a single run directory produced by the writer.

        Raises IOError when the directory does not exist.
        """
        self.directory = directory
        if not os.path.exists(self.directory):
            raise IOError('Specified encounter directory does not exist')
    def encounters(self):
        """Yield an Encounter for every row in the encounter file."""
        encounter_filename = os.path.join(self.directory, ENCOUNTER_FILENAME)
        with open(encounter_filename, 'r') as f:
            for line in f:
                fields = line.split('\t')
                orbit = self._parse_orbital_params(fields)
                star1 = self._parse_body(fields, 0)
                star2 = self._parse_body(fields, 1)
                time = float(fields[0]) | units.Myr
                encounter_id = int(fields[8])
                yield Encounter([star1, star2], orbit, time, encounter_id)
    # Retrieve params from a cluster
    def get_cluster_details(self):
        """Return the run-summary dict written by EncounterDbWriter.finalize."""
        details_filename = os.path.join(self.directory, CLUSTER_PARAMS_FILENAME)
        with open(details_filename, 'r') as f:
            params = json.load(f)
        return params
    # Retrieve information about the stars in the cluster
    def cluster_stars(self):
        """Yield an EncounterBody for every star in the cluster file."""
        cluster_filename = os.path.join(self.directory, CLUSTER_FILENAME)
        with open(cluster_filename, 'r') as f:
            for line in f:
                fields = line.split('\t')
                # Bug fix: a trailing comma previously made id a 1-tuple.
                id = fields[0]
                mass = float(fields[1]) | units.MSun
                # Bug fix: the writer emits radius in RSun, not AU.
                radius = float(fields[2]) | units.RSun
                pos = [float(x) | units.AU for x in fields[3:6]]
                velocity = [float(x) | units.kms for x in fields[6:9]]
                yield EncounterBody(id, mass, radius, pos, velocity)
    # Retrieve orbital params
    def _parse_orbital_params(self, fields):
        """Build OrbitParams from the leading orbit columns of one row."""
        time = float(fields[0]) | units.Myr
        r = float(fields[3]) | units.AU
        e = float(fields[4])
        a = float(fields[5]) | units.AU
        M = float(fields[6]) | units.MSun
        E = float(fields[7]) | (units.m**2 / units.s**2)
        return OrbitParams(M, a, e, r, E, time)
    # Sets different attributes their correct datatype then calls EncounterBody
    def _parse_body(self, fields, body_idx):
        """Parse the 9-column block for body number *body_idx* (0 or 1)."""
        idx_base = self.BODY_IDX_START + body_idx*self.BODY_IDX_SIZE
        id = str(fields[idx_base+8]).strip()
        mass = float(fields[idx_base+0]) | units.MSun
        radius = float(fields[idx_base+1]) | units.km
        pos = [float(fields[idx_base+i]) | units.AU for i in [2, 3, 4]]
        velocity = [float(fields[idx_base+i]) | units.kms for i in [5, 6, 7]]
        return EncounterBody(id, mass, radius, pos, velocity)
| |
#!/usr/bin/env python
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
-------------------------------------------------------------------------------
Usage: singletest.py [options]
This script allows you to run mbed defined test cases for particular MCU(s)
and corresponding toolchain(s).
Options:
-h, --help show this help message and exit
-i FILE, --tests=FILE
Points to file with test specification
-M FILE, --MUTS=FILE Points to file with MUTs specification (overwrites
settings.py and private_settings.py)
-g, --goanna-for-tests
Run Goanna static analyse tool for tests
-G, --goanna-for-sdk Run Goanna static analyse tool for mbed SDK
-s, --suppress-summary
                        Suppresses display of well-formatted table with test
results
  -v, --verbose         Verbose mode (prints some extra information)
Example: singletest.py -i test_spec.json -M muts_all.json
-------------------------------------------------------------------------------
File format example: test_spec.json
{
"targets": {
"KL46Z": ["ARM", "GCC_ARM"],
"LPC1768": ["ARM", "GCC_ARM", "GCC_CR", "GCC_CS", "IAR"],
"LPC11U24": ["uARM"],
"NRF51822": ["ARM"]
}
}
File format example: muts_all.json
{
"1" : {"mcu": "LPC1768",
"port":"COM4", "disk":"J:\\",
"peripherals": ["TMP102", "digital_loop", "port_loop", "analog_loop", "SD"]
},
"2" : {"mcu": "KL25Z",
"port":"COM7", "disk":"G:\\",
"peripherals": ["digital_loop", "port_loop", "analog_loop"]
}
}
"""
# Check if 'prettytable' module is installed
try:
from prettytable import PrettyTable
except ImportError, e:
print "Error: Can't import 'prettytable' module: %s"% e
exit(-1)
# Check if 'serial' module is installed
try:
from serial import Serial
except ImportError, e:
print "Error: Can't import 'serial' module: %s"% e
exit(-1)
import sys
import json
import optparse
import pprint
import re
import os
from types import ListType
from os.path import join, abspath, dirname, exists, basename
from shutil import copy
from subprocess import call
from time import sleep, time
from subprocess import Popen, PIPE
from threading import Thread
from Queue import Queue, Empty
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Imports related to mbed build pi
from workspace_tools.build_api import build_project, build_mbed_libs, build_lib
from workspace_tools.build_api import mcu_toolchain_matrix
from workspace_tools.build_api import get_unique_supported_toolchains
from workspace_tools.build_api import get_target_supported_toolchains
from workspace_tools.paths import BUILD_DIR
from workspace_tools.paths import HOST_TESTS
from workspace_tools.targets import TARGET_MAP
from workspace_tools.tests import TEST_MAP
from workspace_tools.tests import TESTS
from workspace_tools.libraries import LIBRARIES, LIBRARY_MAP
# Be sure that the tools directory is in the search path
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
# Imports related to mbed build pi
from workspace_tools.settings import MUTs
class ProcessObserver(Thread):
    """Daemon thread that drains a child process's stdout one character at
    a time into a queue, so the owner can poll output with timeouts."""
    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        self.start()
    def run(self):
        # Pump stdout characters into the queue until stop() clears the flag.
        while self.active:
            self.queue.put(self.proc.stdout.read(1))
    def stop(self):
        self.active = False
        try:
            self.proc.terminate()
        except Exception:
            # Process may already have exited; termination failure is benign.
            pass
class SingleTestRunner(object):
    """ Object wrapper for single test run which may involve multiple MUTs."""
    # Compiled in __init__ from TEST_RESULT_MAPPING keys; matches tokens
    # like '{success}' emitted by the host test on its stdout.
    RE_DETECT_TESTCASE_RESULT = None
    # Return codes for test script
    TEST_RESULT_OK = "OK"
    TEST_RESULT_FAIL = "FAIL"
    TEST_RESULT_ERROR = "ERROR"
    TEST_RESULT_UNDEF = "UNDEF"
    TEST_RESULT_IOERR_COPY = "IOERR_COPY"
    TEST_RESULT_IOERR_DISK = "IOERR_DISK"
    TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
    TEST_RESULT_TIMEOUT = "TIMEOUT"
    GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
    TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
    TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
    # mbed test suite -> SingleTestRunner
    TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
                           "failure" : TEST_RESULT_FAIL,
                           "error" : TEST_RESULT_ERROR,
                           "ioerr_copy" : TEST_RESULT_IOERR_COPY,
                           "ioerr_disk" : TEST_RESULT_IOERR_DISK,
                           "ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
                           "timeout" : TEST_RESULT_TIMEOUT,
                           "end" : TEST_RESULT_UNDEF}
    def __init__(self, _global_loops_count=1, _test_loops_list=""):
        # Regex recognising any known result token, e.g. '{success}'.
        pattern = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
        self.RE_DETECT_TESTCASE_RESULT = re.compile(pattern)
        # Fall back to a single loop on malformed or non-positive input.
        try:
            _global_loops_count = int(_global_loops_count)
        except:
            _global_loops_count = 1
        if _global_loops_count < 1:
            _global_loops_count = 1
        self.GLOBAL_LOOPS_COUNT = _global_loops_count
        self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
        self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
    def test_loop_list_to_dict(self, test_loops_str):
        """ Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count} """
        result = {}
        if test_loops_str:
            test_loops = test_loops_str.split(',')
            for test_loop in test_loops:
                test_loop_count = test_loop.split('=')
                if len(test_loop_count) == 2:
                    _test_id, _test_loops = test_loop_count
                    # Entries with a non-integer loop count are skipped.
                    try:
                        _test_loops = int(_test_loops)
                    except:
                        continue
                    result[_test_id] = _test_loops
        return result
    def get_test_loop_count(self, test_id):
        """ This function returns no. of loops per test (deducted by test_id_.
            If test is not in list of redefined loop counts it will use default value. """
        result = self.GLOBAL_LOOPS_COUNT
        if test_id in self.TEST_LOOPS_DICT:
            result = self.TEST_LOOPS_DICT[test_id]
        return result
    def file_copy_method_selector(self, image_path, disk, copy_method):
        """ Copy file depending on method you want to use. Handles exception
        and return code from shell copy commands. """
        result = True
        resutl_msg = "" # NOTE(review): 'resutl_msg' is a typo for 'result_msg' (kept as-is)
        if copy_method == "cp" or copy_method == "copy" or copy_method == "xcopy":
            # Shell copy command, e.g. Windows 'copy'/'xcopy' or POSIX 'cp'.
            cmd = [copy_method,
                   image_path.encode('ascii', 'ignore'),
                   disk.encode('ascii', 'ignore') + basename(image_path).encode('ascii', 'ignore')]
            try:
                ret = call(cmd, shell=True)
                if ret:
                    resutl_msg = "Return code: %d. Command: "% ret + " ".join(cmd)
                    result = False
            except Exception, e:
                resutl_msg = e
                result = False
        else:
            copy_method = "shutils.copy()"
            # Default python method
            try:
                copy(image_path, disk)
            except Exception, e:
                resutl_msg = e
                result = False
        return result, resutl_msg, copy_method
    # NOTE(review): this method is missing the 'self' parameter — calling it
    # through an instance would raise TypeError. It appears unused in this
    # file; confirm intent before fixing.
    def delete_file(file_path):
        """ Remove file from the system """
        result = True
        resutl_msg = ""
        try:
            os.remove(file_path)
        except Exception, e:
            resutl_msg = e
            result = False
        return result, resutl_msg
    def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
        """ Function determines MUT's mbed disk/port and copies binary to
        target. Test is being invoked afterwards. """
        data = json.loads(test_spec)
        # Get test information, image and test timeout
        test_id = data['test_id']
        test = TEST_MAP[test_id]
        test_description = TEST_MAP[test_id].get_description()
        image = data["image"]
        duration = data.get("duration", 10)
        # Find a suitable MUT:
        mut = None
        for id, m in MUTs.iteritems():
            if m['mcu'] == data['mcu']:
                mut = m
                break
        if mut is None:
            print "Error: No mbed available: mut[%s]" % data['mcu']
            return
        disk = mut['disk']
        port = mut['port']
        target_by_mcu = TARGET_MAP[mut['mcu']]
        # Program
        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, )
        image_path = image
        if not exists(image_path):
            print "Error: Image file does not exist: %s" % image_path
            elapsed_time = 0
            test_result = "{error}"
            return (test_result, target_name, toolchain_name,
                    test_id, test_description, round(elapsed_time, 2), duration)
        # Program MUT with proper image file
        if not disk.endswith('/') and not disk.endswith('\\'):
            disk += '/'
        # Tests can be looped so test results must be stored for the same test
        test_all_result = []
        # NOTE(review): if test_loops < 1 the loop below never runs and
        # 'elapsed_time' is unbound at the final return — confirm callers
        # always pass a positive loop count.
        for test_index in range(test_loops):
            # Choose one method of copy files to mbed virtual drive
            # NOTE(review): relies on the module-level 'opts' global created
            # in __main__ — verify handle() is only called after parsing.
            _copy_res, _err_msg, _copy_method = self.file_copy_method_selector(image_path, disk, opts.copy_method)
            # Host test execution
            start_host_exec_time = time()
            if not _copy_res: # Serial port copy error
                test_result = "IOERR_COPY"
                print "Error: Copy method '%s'. %s"% (_copy_method, _err_msg)
            else:
                # Copy Extra Files
                if not target_by_mcu.is_disk_virtual and test.extra_files:
                    for f in test.extra_files:
                        copy(f, disk)
                sleep(target_by_mcu.program_cycle_s())
                # Host test execution
                start_host_exec_time = time()
                test_result = self.run_host_test(test.host_test, disk, port, duration, opts.verbose)
            test_all_result.append(test_result)
            elapsed_time = time() - start_host_exec_time
            print print_test_result(test_result, target_name, toolchain_name,
                                    test_id, test_description, elapsed_time, duration)
        return (self.shape_global_test_loop_result(test_all_result), target_name, toolchain_name,
                test_id, test_description, round(elapsed_time, 2),
                duration, self.shape_test_loop_ok_result_count(test_all_result))
    def shape_test_loop_ok_result_count(self, test_all_result):
        """ Reformats list of results to simple string """
        test_loop_count = len(test_all_result)
        test_loop_ok_result = test_all_result.count(self.TEST_RESULT_OK)
        return "%d/%d"% (test_loop_ok_result, test_loop_count)
    def shape_global_test_loop_result(self, test_all_result):
        """ Reformats list of results to simple string """
        result = self.TEST_RESULT_FAIL
        # All loop iterations must agree for the run to report that result.
        if all(test_all_result[0] == res for res in test_all_result):
            result = test_all_result[0]
        return result
    def run_host_test(self, name, disk, port, duration, verbose=False, extra_serial=""):
        """ Function creates new process with host test configured with particular test case.
        Function also is pooling for serial port activity from process to catch all data
        printed by test runner and host test during test execution."""
        # print "{%s} port:%s disk:%s" % (name, port, disk),
        cmd = ["python", "%s.py" % name, '-p', port, '-d', disk, '-t', str(duration), "-e", extra_serial]
        proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
        obs = ProcessObserver(proc)
        start = time()
        line = ''
        output = []
        # Poll the observer queue until the duration budget is exhausted.
        while (time() - start) < duration:
            try:
                c = obs.queue.get(block=True, timeout=1)
            except Empty, _:
                c = None
            if c:
                output.append(c)
                # Give the mbed under test a way to communicate the end of the test
                if c in ['\n', '\r']:
                    if '{end}' in line: break
                    line = ''
                else:
                    line += c
        # Stop test process
        obs.stop()
        # Handle verbose mode
        if verbose:
            print "Test::Output::Start"
            print "".join(output)
            print "Test::Output::Finish"
        # Parse test 'output' data
        result = self.TEST_RESULT_TIMEOUT
        for line in "".join(output).splitlines():
            search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result
def is_peripherals_available(target_mcu_name, peripherals=None):
    """ Checks if specified target should run specific peripheral test case."""
    # Normalise the requested peripherals into a set once, up front.
    required = set(peripherals) if peripherals is not None else None
    for id, mut in MUTs.iteritems():
        # Target MCU name check
        if mut["mcu"] != target_mcu_name:
            continue
        # Peripherals check: the MUT must list every requested peripheral.
        if required is not None:
            if 'peripherals' not in mut:
                continue
            if not required.issubset(set(mut['peripherals'])):
                continue
        return True
    return False
def print_test_result(test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """ Use specific convention to print test result and related data."""
    # 'TargetTest::<target>::<toolchain>::<id>::<description> [RESULT] in X of Y sec'
    header = "::".join(["TargetTest", target_name, toolchain_name,
                        test_id, test_description])
    time_info = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    return header + " [" + test_result + "]" + time_info
def shape_test_request(mcu, image_path, test_id, duration=10):
    """ Function prepares JSON structure describing test specification."""
    return json.dumps({
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    })
def get_json_data_from_file(json_spec_filename, verbose=False):
    """ Loads from file JSON formatted string to data structure """
    result = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                result = json.load(data_file)
            except ValueError as json_error_msg:
                # Malformed JSON: report the parse error and return None.
                result = None
                print "Error in '%s' file: %s" % (json_spec_filename, json_error_msg)
    except IOError as fileopen_error_msg:
        # Missing or unreadable file: report and return None.
        print "Error: %s" % (fileopen_error_msg)
    if verbose and result:
        # Pretty-print the parsed structure when verbose mode is on.
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(result)
    return result
def print_muts_configuration_from_json(json_data, join_delim=", "):
    """ Prints MUTs configuration passed to test script for verboseness. """
    # Collect the union of property names across all MUTs, preserving
    # first-seen order; each property becomes one table column.
    muts_info_cols = []
    for key in json_data:
        for prop in json_data[key]:
            if prop not in muts_info_cols:
                muts_info_cols.append(prop)
    # Prepare pretty table object to display all MUTs
    pt_cols = ["index"] + muts_info_cols
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"
    # One row per MUT; missing properties render as None.
    for key in json_data:
        info = json_data[key]
        row = [key]
        for col in muts_info_cols:
            cell_val = info[col] if col in info else None
            if type(cell_val) == ListType:
                cell_val = join_delim.join(cell_val)
            row.append(cell_val)
        pt.add_row(row)
    return pt.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness. """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)
    # Prepare pretty table object to display test specification
    # NOTE(review): headers use sorted(toolchains_info_cols) but the row loop
    # below iterates the unsorted list — when the orders differ, cells land
    # under the wrong toolchain column. Confirm and align the two orders.
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols)
    for col in pt_cols:
        pt.align[col] = "l"
    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            # Unknown targets get a trailing '*' marker in the first column.
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]
            for toolchain in toolchains_info_cols:
                # Check for conflicts
                conflict = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                row.append(cell_val)
            pt.add_row(row)
    # generate result string
    result = pt.get_string() # Test specification table
    if toolchain_conflicts: # Print conflicts if the exist
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = ", ".join(toolchain_conflicts[target])
            sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)
    return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=','):
    """ Generates table summary with all test cases and additional test cases
    information using pretty print functionality. Allows test suite user to
    see test cases. """
    # get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    # Per-prefix counters: automated tests vs all tests.
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
    test_properties = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration'] if cols is None else cols
    # All tests status table print
    pt = PrettyTable(test_properties)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"
    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1 # One space between column edges and contents (default)
    for test_id in TEST_MAP:
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)
        for col in test_properties:
            col_value = test[col]
            # Lists are joined for display; None renders as '-'.
            if type(test[col]) == ListType:
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"
    # NOTE(review): the percentage below divides by counter_all — this raises
    # ZeroDivisionError if TEST_MAP is empty. Confirm TEST_MAP is always
    # non-empty, or guard the division.
    if result_summary:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
        # Test automation coverage table print
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            # print "\t\t%s: %d / %d" % (unique_id, counter_dict_test_id_types[unique_id], counter_dict_test_id_types_all[unique_id])
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
def progress_bar(percent_progress, saturation=0):
    """ This function creates progress bar with optional simple saturation mark"""
    # Scale percent (0-100) down to a 50-character '#'/'.' bar.
    step = int(percent_progress / 2) # Scale by to (scale: 1 - 50)
    str_progress = '#' * step + '.' * int(50 - step)
    # Marker glyph depends on whether the bar has reached the check column.
    c = '!' if str_progress[38] == '.' else '|'
    if (saturation > 0):
        # Floor division: identical to the old '/' under Python 2 integers,
        # but keeps the slice index an int on Python 3 (where '/' yields a
        # float and broke the slicing below).
        saturation = saturation // 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
def get_unique_value_from_summary(test_summary, index):
    """ Gets list of unique target names """
    # Deduplicate via a set; sorting gives the same result the original
    # first-seen list produced once sorted.
    seen = set()
    for record in test_summary:
        seen.add(record[index])
    return sorted(seen)
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """ Gets list of unique target names and return dictionary """
    result = {}
    for record in test_summary:
        # setdefault keeps the first value seen per key, exactly like the
        # original 'if key not in result' guard.
        result.setdefault(record[index_key], record[index_val])
    return result
def generate_test_summary_by_target(test_summary):
    """ Prints well-formed summary with results (SQL table like)
    table shows text x toolchain test result matrix """
    # Column indices within each test_summary record.
    RESULT_INDEX = 0
    TARGET_INDEX = 1
    TOOLCHAIN_INDEX = 2
    TEST_INDEX = 3
    DESC_INDEX = 4
    unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
    unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
    unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
    unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)
    result = ""
    result_dict = {} # test : { toolchain : result }
    for target in unique_targets:
        # NOTE(review): 'result' is reset on every target iteration, so only
        # the final target's table survives in the return value — confirm
        # whether this should accumulate (result += ...) across targets.
        result = "Test summary:\n"
        for test in test_summary:
            if test[TEST_INDEX] not in result_dict:
                result_dict[test[TEST_INDEX]] = { }
            result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]
        pt_cols = ["Target", "Test ID", "Test Description"] + unique_toolchains
        pt = PrettyTable(pt_cols)
        for col in pt_cols:
            pt.align[col] = "l"
        pt.padding_width = 1 # One space between column edges and contents (default)
        for test in unique_tests:
            test_results = result_dict[test]
            row = [target, test, unique_test_desc[test]]
            # NOTE(review): raises KeyError if a test was not run on every
            # toolchain — verify the summary is always complete.
            for toolchain in unique_toolchains:
                row.append(test_results[toolchain])
            pt.add_row(row)
        result += pt.get_string()
        result += "\n"
    return result
def generate_test_summary(test_summary):
    """ Prints well-formed summary with results (SQL table like)
        table shows target x test results matrix across

        Each 'test_summary' row is expected to be a tuple:
        (result, target, toolchain, test_id, test_description,
        elapsed_time, timeout, loops).

        NOTE(review): depends on the module-level 'single_test'
        (SingleTestRunner) for the TEST_RESULT_* constants and uses the
        Python 2 only dict.iteritems() - not Python 3 compatible. """
    result = "Test summary:\n"
    # Pretty table package is used to print results
    pt = PrettyTable(["Result", "Target", "Toolchain", "Test ID", "Test Description",
                      "Elapsed Time (sec)", "Timeout (sec)", "Loops"])
    pt.align["Result"] = "l" # Left align
    pt.align["Target"] = "l" # Left align
    pt.align["Toolchain"] = "l" # Left align
    pt.align["Test ID"] = "l" # Left align
    pt.align["Test Description"] = "l" # Left align
    pt.padding_width = 1 # One space between column edges and contents (default)
    # Per-result-class counters used for the one-line totals printed
    # under the table.
    result_dict = {single_test.TEST_RESULT_OK : 0,
                   single_test.TEST_RESULT_FAIL : 0,
                   single_test.TEST_RESULT_ERROR : 0,
                   single_test.TEST_RESULT_UNDEF : 0,
                   single_test.TEST_RESULT_IOERR_COPY : 0,
                   single_test.TEST_RESULT_IOERR_DISK : 0,
                   single_test.TEST_RESULT_IOERR_SERIAL : 0,
                   single_test.TEST_RESULT_TIMEOUT : 0 }
    for test in test_summary:
        # test[0] is the result string; unknown result classes are listed
        # in the table but excluded from the totals line.
        if test[0] in result_dict:
            result_dict[test[0]] += 1
        pt.add_row(test)
    result += pt.get_string()
    result += "\n"
    # Print result count (only classes that actually occurred)
    result += "Result: " + ' / '.join(['%s %s' % (value, key) for (key, value) in {k: v for k, v in result_dict.items() if v != 0}.iteritems()])
    result += "\n"
    return result
if __name__ == '__main__':
    # Command line options
    # NOTE(review): this block is Python 2 only (print statements,
    # dict.iteritems()); it will not run under Python 3 without porting.
    parser = optparse.OptionParser()
    parser.add_option('-i', '--tests',
                      dest='test_spec_filename',
                      metavar="FILE",
                      help='Points to file with test specification')
    parser.add_option('-M', '--MUTS',
                      dest='muts_spec_filename',
                      metavar="FILE",
                      help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')
    # NOTE(review): metavar=False in the two options below looks unintended
    # (optparse expects a string); harmless for store_true options, but
    # worth confirming.
    parser.add_option('-g', '--goanna-for-tests',
                      dest='goanna_for_tests',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analyse tool for tests')
    parser.add_option('-G', '--goanna-for-sdk',
                      dest='goanna_for_mbed_sdk',
                      metavar=False,
                      action="store_true",
                      help='Run Goanna static analyse tool for mbed SDK')
    parser.add_option('-s', '--suppress-summary',
                      dest='suppress_summary',
                      default=False,
                      action="store_true",
                      help='Suppresses display of wellformatted table with test results')
    parser.add_option('-t', '--test-summary',
                      dest='test_x_toolchain_summary',
                      default=False,
                      action="store_true",
                      help='Displays wellformatted table with test x toolchain test result per target')
    parser.add_option('-r', '--test-automation-report',
                      dest='test_automation_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all tests and exits')
    parser.add_option('-R', '--test-case-report',
                      dest='test_case_report',
                      default=False,
                      action="store_true",
                      help='Prints information about all test cases and exits')
    parser.add_option('-P', '--only-peripherals',
                      dest='test_only_peripheral',
                      default=False,
                      action="store_true",
                      help='Test only peripheral declared for MUT and skip common tests')
    parser.add_option('-C', '--only-commons',
                      dest='test_only_common',
                      default=False,
                      action="store_true",
                      help='Test only board internals. Skip perpherials tests and perform common tests.')
    parser.add_option('-c', '--copy-method',
                      dest='copy_method',
                      help="You can choose which copy method you want to use put bin in mbed. You can choose from 'cp', 'copy', 'xcopy'. Default is python shutils.copy method.")
    parser.add_option('-n', '--test-by-names',
                      dest='test_by_names',
                      help='Runs only test enumerated it this switch')
    parser.add_option("-S", "--supported-toolchains",
                      action="store_true",
                      dest="supported_toolchains",
                      default=False,
                      help="Displays supported matrix of MCUs and toolchains")
    parser.add_option("-O", "--only-build",
                      action="store_true",
                      dest="only_build_tests",
                      default=False,
                      help="Only build tests, skips actual test procedures (flashing etc.)")
    parser.add_option('', '--config',
                      dest='verbose_test_configuration_only',
                      default=False,
                      action="store_true",
                      help='Displays full test specification and MUTs configration and exits')
    parser.add_option('', '--loops',
                      dest='test_loops_list',
                      help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')
    parser.add_option('', '--global-loops',
                      dest='test_global_loops_value',
                      help='Set global number of test loops per test. Default value is set 1')
    parser.add_option('', '--firmware-name',
                      dest='firmware_global_name',
                      help='Set global name for all produced projects. E.g. you can call all test binaries firmware.bin')
    parser.add_option('', '--verbose-skipped',
                      dest='verbose_skipped_tests',
                      default=False,
                      action="store_true",
                      help='Prints some extra information about skipped tests')
    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action="store_true",
                      help='Verbose mode (prints some extra information)')
    parser.description = """This script allows you to run mbed defined test cases for particular MCU(s) and corresponding toolchain(s)."""
    parser.epilog = """Example: singletest.py -i test_spec.json -M muts_all.json"""
    (opts, args) = parser.parse_args()
    # Print summary / information about automation test status
    if opts.test_automation_report:
        print get_avail_tests_summary_table()
        exit(0)
    # Print summary / information about automation test status
    if opts.test_case_report:
        test_case_report_cols = ['id', 'automated', 'description', 'peripherals', 'host_test', 'duration', 'source_dir']
        print get_avail_tests_summary_table(cols=test_case_report_cols, result_summary=False, join_delim='\n')
        exit(0)
    # Only prints matrix of supported toolchains
    if opts.supported_toolchains:
        mcu_toolchain_matrix()
        exit(0)
    # Open file with test specification
    # test_spec_filename tells script which targets and their toolchain(s)
    # should be covered by the test scenario
    test_spec = get_json_data_from_file(opts.test_spec_filename) if opts.test_spec_filename else None
    if test_spec is None:
        parser.print_help()
        exit(-1)
    # Get extra MUTs if applicable
    # NOTE(review): MUTs is only assigned inside this 'if'; the references
    # to MUTs below raise NameError when -M/--MUTS is not given - confirm
    # and consider initialising MUTs = None up front.
    if opts.muts_spec_filename:
        MUTs = get_json_data_from_file(opts.muts_spec_filename)
        if MUTs is None:
            parser.print_help()
            exit(-1)
    # Only prints read MUTs configuration
    if MUTs and opts.verbose_test_configuration_only:
        print "MUTs configuration in %s:"% opts.muts_spec_filename
        print print_muts_configuration_from_json(MUTs)
        print
        print "Test specification in %s:"% opts.test_spec_filename
        print print_test_configuration_from_json(test_spec)
        exit(0)
    # Verbose test specification and MUTs configuration
    if MUTs and opts.verbose:
        print print_muts_configuration_from_json(MUTs)
    if test_spec and opts.verbose:
        print print_test_configuration_from_json(test_spec)
    # Magic happens here... ;)
    start = time()
    single_test = SingleTestRunner(_global_loops_count=opts.test_global_loops_value, _test_loops_list=opts.test_loops_list)
    clean = test_spec.get('clean', False)
    test_ids = test_spec.get('test_ids', [])
    groups = test_spec.get('test_groups', [])  # NOTE(review): read but never used below
    # Here we store test results
    test_summary = []
    for target, toolchains in test_spec['targets'].iteritems():
        for toolchain in toolchains:
            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            if target not in TARGET_MAP:
                print 'Skipped tests for %s target. Target platform not found' % (target)
                continue
            T = TARGET_MAP[target]
            build_mbed_libs_options = ["analyze"] if opts.goanna_for_mbed_sdk else None
            build_mbed_libs_result = build_mbed_libs(T, toolchain, options=build_mbed_libs_options)
            if not build_mbed_libs_result:
                print 'Skipped tests for %s target. Toolchain %s is not yet supported for this target' % (T.name, toolchain)
                continue
            build_dir = join(BUILD_DIR, "test", target, toolchain)
            for test_id, test in TEST_MAP.iteritems():
                # Filtering: explicit test list (-n), spec test_ids, and
                # peripheral-only / common-only switches.
                if opts.test_by_names and test_id not in opts.test_by_names.split(','):
                    continue
                if test_ids and test_id not in test_ids:
                    continue
                if opts.test_only_peripheral and not test.peripherals:
                    if opts.verbose_skipped_tests:
                        print "TargetTest::%s::NotPeripheralTestSkipped()" % (target)
                    continue
                if opts.test_only_common and test.peripherals:
                    if opts.verbose_skipped_tests:
                        print "TargetTest::%s::PeripheralTestSkipped()" % (target)
                    continue
                if test.automated and test.is_supported(target, toolchain):
                    if not is_peripherals_available(target, test.peripherals):
                        if opts.verbose_skipped_tests:
                            test_peripherals = test.peripherals if test.peripherals else []
                            print "TargetTest::%s::TestSkipped(%s)" % (target, ",".join(test_peripherals))
                        continue
                    # This is basic structure storing test results
                    test_result = {
                        'target': target,
                        'toolchain': toolchain,
                        'test_id': test_id,
                    }
                    build_project_options = ["analyze"] if opts.goanna_for_tests else None
                    # Detect which lib should be added to test
                    # Some libs have to compiled like RTOS or ETH
                    libraries = []
                    for lib in LIBRARIES:
                        if lib['build_dir'] in test.dependencies:
                            libraries.append(lib['id'])
                    # Build libs for test
                    for lib_id in libraries:
                        build_lib(lib_id, T, toolchain, options=build_project_options,
                                  verbose=opts.verbose, clean=clean)
                    # TODO: move this 2 below loops to separate function
                    INC_DIRS = []
                    for lib_id in libraries:
                        if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                            INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])
                    MACROS = []
                    for lib_id in libraries:
                        if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                            MACROS.extend(LIBRARY_MAP[lib_id]['macros'])
                    project_name = opts.firmware_global_name if opts.firmware_global_name else None
                    path = build_project(test.source_dir, join(build_dir, test_id),
                                         T, toolchain, test.dependencies,
                                         options=build_project_options,
                                         clean=clean,
                                         verbose=opts.verbose,
                                         name=project_name,
                                         macros=MACROS,
                                         inc_dirs=INC_DIRS)
                    test_result_cache = join(dirname(path), "test_result.json")
                    if opts.only_build_tests:
                        # We are skipping testing phase, and suppress summary
                        opts.suppress_summary = True
                        continue
                    # For an automated test the duration act as a timeout after
                    # which the test gets interrupted
                    # NOTE(review): this rebinds the outer 'test_spec' name; it
                    # works only because test_spec['targets'] was captured by
                    # the iterator above and clean/test_ids were read earlier.
                    test_spec = shape_test_request(target, path, test_id, test.duration)
                    test_loops = single_test.get_test_loop_count(test_id)
                    single_test_result = single_test.handle(test_spec, target, toolchain, test_loops=test_loops)
                    test_summary.append(single_test_result)
                    # print test_spec, target, toolchain
    elapsed_time = time() - start
    # Human readable summary
    if not opts.suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print generate_test_summary(test_summary)
    if opts.test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows text x toolchain test result matrix
        print generate_test_summary_by_target(test_summary)
    print "Completed in %d sec" % (time() - start)
| |
# -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
""" This module is responsible to run the analyzer commands. """
import re
import os
import os.path
import tempfile
import functools
import subprocess
import logging
from libscanbuild.compilation import classify_source, compiler_language
from libscanbuild.clang import get_version, get_arguments
from libscanbuild.shell import decode
__all__ = ['run']
# To have good results from static analyzer certain compiler options shall be
# omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
    # Flags are matched by exact string comparison in classify_parameters;
    # the value is how many following arguments the flag consumes, which
    # are skipped together with it.
    '-c': 0,  # compile option will be overwritten
    '-fsyntax-only': 0,  # static analyzer option will be overwritten
    '-o': 1,  # will set up own output file
    # flags below are inherited from the perl implementation.
    '-g': 0,
    '-save-temps': 0,
    '-install_name': 1,
    '-exported_symbols_list': 1,
    '-current_version': 1,
    '-compatibility_version': 1,
    '-init': 1,
    '-e': 1,
    '-seg1addr': 1,
    '-bundle_loader': 1,
    '-multiply_defined': 1,
    '-sectorder': 3,
    '--param': 1,
    '--serialize-diagnostics': 1
}
def require(required):
    """ Decorator factory validating the state dictionary of a step.

    The wrapped function receives the state as its first positional
    argument; a KeyError is raised for the first key from 'required'
    that is missing from it. """
    def decorator(function):
        @functools.wraps(function)
        def wrapper(*args, **kwargs):
            state = args[0]
            missing = [key for key in required if key not in state]
            if missing:
                raise KeyError('{0} not passed to {1}'.format(
                    missing[0], function.__name__))
            return function(*args, **kwargs)
        return wrapper
    return decorator
@require(['command',  # entry from compilation database
          'directory',  # entry from compilation database
          'file',  # entry from compilation database
          'clang',  # clang executable name (and path)
          'direct_args',  # arguments from command line
          'force_debug',  # kill non debug macros
          'output_dir',  # where generated report files shall go
          'output_format',  # it's 'plist' or 'html' or both
          'output_failures'])  # generate crash reports or not
def run(opts):
    """ Entry point to run (or not) static analyzer against a single entry
    of the compilation database.

    The work is decomposed into small steps that call each other in a
    chain; any step may stop the chain by returning None.  Each step
    validates its input contract through the 'require' decorator, which
    acts as an assert on the contract between caller and callee. """
    try:
        raw_command = opts.pop('command')
        if not isinstance(raw_command, list):
            raw_command = decode(raw_command)
        logging.debug("Run analyzer against '%s'", raw_command)
        opts.update(classify_parameters(raw_command))
        return arch_check(opts)
    except Exception:
        logging.error("Problem occured during analyzis.", exc_info=1)
        return None
@require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language',
          'error_type', 'error_output', 'exit_code'])
def report_failure(opts):
    """ Create report when analyzer failed.

    The major report is the preprocessor output. The output filename generated
    randomly. The compiler output also captured into '.stderr.txt' file.
    And some more execution context also saved into '.info.txt' file.

    Returns a dict with the previous step's 'error_output' and
    'exit_code', so the caller can propagate them unchanged. """
    def extension(opts):
        """ Generate preprocessor file extension. """
        mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'}
        return mapping.get(opts['language'], '.i')
    def destination(opts):
        """ Creates failures directory if not exits yet. """
        name = os.path.join(opts['output_dir'], 'failures')
        if not os.path.isdir(name):
            os.makedirs(name)
        return name
    error = opts['error_type']
    # Reserve a uniquely-named file for the preprocessor output.
    (handle, name) = tempfile.mkstemp(suffix=extension(opts),
                                      prefix='clang_' + error + '_',
                                      dir=destination(opts))
    os.close(handle)
    cwd = opts['directory']
    # Re-run the compile as preprocess-only (-E) to capture the input
    # the analyzer choked on.
    cmd = get_arguments([opts['clang'], '-fsyntax-only', '-E'] +
                        opts['flags'] + [opts['file'], '-o', name], cwd)
    logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
    subprocess.call(cmd, cwd=cwd)
    # write general information about the crash
    # (note: 'handle' is reused here as a file object, shadowing the
    # mkstemp descriptor closed above; the explicit close() calls are
    # redundant inside the 'with' blocks but harmless)
    with open(name + '.info.txt', 'w') as handle:
        handle.write(opts['file'] + os.linesep)
        handle.write(error.title().replace('_', ' ') + os.linesep)
        handle.write(' '.join(cmd) + os.linesep)
        handle.write(' '.join(os.uname()) + os.linesep)
        handle.write(get_version(opts['clang']))
        handle.close()
    # write the captured output too
    with open(name + '.stderr.txt', 'w') as handle:
        handle.writelines(opts['error_output'])
        handle.close()
    # return with the previous step exit code and output
    return {
        'error_output': opts['error_output'],
        'exit_code': opts['exit_code']
    }
@require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir',
          'output_format'])
def run_analyzer(opts, continuation=report_failure):
    """ It assembles the analysis command line and executes it. Capture the
    output of the analysis and returns with it. If failure reports are
    requested, it calls the continuation to generate it. """
    def output():
        """ Creates output file name for reports.

        plist-style formats get a fresh uniquely-named .plist file;
        otherwise the report goes into the output directory itself. """
        if opts['output_format'] in {'plist', 'plist-html'}:
            (handle, name) = tempfile.mkstemp(prefix='report-',
                                              suffix='.plist',
                                              dir=opts['output_dir'])
            os.close(handle)
            return name
        return opts['output_dir']
    cwd = opts['directory']
    cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] +
                        opts['flags'] + [opts['file'], '-o', output()],
                        cwd)
    logging.debug('exec command in %s: %s', cwd, ' '.join(cmd))
    # stderr is folded into stdout so both end up in a single capture.
    child = subprocess.Popen(cmd,
                             cwd=cwd,
                             universal_newlines=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    # NOTE: this rebinds the local name 'output' from the helper function
    # to the captured lines; safe because the command was already built.
    output = child.stdout.readlines()
    child.stdout.close()
    # do report details if it were asked
    child.wait()
    if opts.get('output_failures', False) and child.returncode:
        # non-zero low seven bits of the exit code are treated as a crash
        error_type = 'crash' if child.returncode & 127 else 'other_error'
        opts.update({
            'error_type': error_type,
            'error_output': output,
            'exit_code': child.returncode
        })
        return continuation(opts)
    # return the output for logging and exit code for testing
    return {'error_output': output, 'exit_code': child.returncode}
@require(['flags', 'force_debug'])
def filter_debug_flags(opts, continuation=run_analyzer):
    """ Filter out nondebug macros when requested.

    When 'force_debug' is set, '-UNDEBUG' is appended to the compiler
    flags so NDEBUG-guarded code stays visible to the analyzer. """
    if opts.pop('force_debug'):
        # lazy implementation: just undefine the macro at the end
        opts['flags'] = opts['flags'] + ['-UNDEBUG']
    return continuation(opts)
@require(['file', 'directory'])
def set_file_path_relative(opts, continuation=filter_debug_flags):
    """ Rewrite the source file path relative to the working directory.

    The only purpose of this function is to pass the SATestBuild.py tests. """
    relative = os.path.relpath(opts['file'], opts['directory'])
    opts['file'] = relative
    return continuation(opts)
@require(['language', 'compiler', 'file', 'flags'])
def language_check(opts, continuation=set_file_path_relative):
    """ Find out the language from command line parameters or file name
    extension. The decision also influenced by the compiler invocation.

    Stops the chain (returns None) when the language is unknown or not
    analyzable; otherwise records it and passes '-x <language>'. """
    accepted = frozenset({
        'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output',
        'c++-cpp-output', 'objective-c-cpp-output'
    })
    # language can be given as a parameter...
    language = opts.pop('language')
    compiler = opts.pop('compiler')
    # ... or find out from source file extension
    if language is None and compiler is not None:
        language = classify_source(opts['file'], compiler == 'c')
    # guard clauses instead of the if/elif/else chain
    if language is None:
        logging.debug('skip analysis, language not known')
        return None
    if language not in accepted:
        logging.debug('skip analysis, language not supported')
        return None
    logging.debug('analysis, language: %s', language)
    opts.update({'language': language,
                 'flags': ['-x', language] + opts['flags']})
    return continuation(opts)
@require(['arch_list', 'flags'])
def arch_check(opts, continuation=language_check):
    """ Do run analyzer through one of the given architectures. """
    disabled = frozenset({'ppc', 'ppc64'})
    requested = opts.pop('arch_list')
    if not requested:
        # no -arch flags were seen: analyze with the compiler default
        logging.debug('analysis, on default arch')
        return continuation(opts)
    # drop the architectures the analyzer cannot handle
    usable = [arch for arch in requested if arch not in disabled]
    if not usable:
        logging.debug('skip analysis, found not supported arch')
        return None
    # There should be only one arch given (or the same one multiple
    # times).  If multiple different archs are given, they would not
    # change the pre-processing step - and that is the only pass we do
    # before running the analyzer - so picking one is good enough.
    current = usable.pop()
    logging.debug('analysis, on arch: %s', current)
    opts.update({'flags': ['-arch', current] + opts['flags']})
    return continuation(opts)
def classify_parameters(command):
    """ Prepare compiler flags (filters some and add others) and take out
    language (-x) and architecture (-arch) flags for future processing.

    Returns a dict with the keys 'flags', 'arch_list', 'language' and
    'compiler'. """
    result = {
        'flags': [],  # the filtered compiler flags
        'arch_list': [],  # list of architecture flags
        'language': None,  # compilation language, None, if not specified
        'compiler': compiler_language(command)  # 'c' or 'c++'
    }
    # iterate on the compile options (the first element is the compiler)
    argument_stream = iter(command[1:])
    for argument in argument_stream:
        if argument == '-arch':
            # architectures go into a separate basket
            result['arch_list'].append(next(argument_stream))
            continue
        if argument == '-x':
            # explicit language selection
            result['language'] = next(argument_stream)
            continue
        if re.match(r'^[^-].+', argument) and classify_source(argument):
            # parameters which look like a source file are not flags
            continue
        if argument in IGNORED_FLAGS:
            # drop the flag together with the arguments it consumes
            for _ in range(IGNORED_FLAGS[argument]):
                next(argument_stream)
            continue
        if re.match(r'^-W.+', argument) and not re.match(r'^-Wno-.+', argument):
            # extra warnings are dropped, but -Wno-* suppressions are kept
            continue
        # everything else counts as a regular compilation flag
        result['flags'].append(argument)
    return result
| |
# Copyright (c) 2010 Friedrich Romstedt <friedrichromstedt@gmail.com>
# See also <www.friedrichromstedt.org> (if e-mail has changed)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Developed since: May 2010
"""The FigureCanvasTk class draws a matplotlayers.backend.PIL.FigureCanvas to
Tkinter via a Tkinter.Canvas instance.
To actually control some Stack, a matplotlayers.backends.StackCanvas must be
created, and this StackCanvas has to be added to the FigureCanvas as a client
via figure_canvas.register(stack_canvas). This will forward all the mouse
events to the stack_canvas, making it controllable. The benefit of this
approach is, that several StackCanvases can be controlled by the same
FigureCanvas, thus making panning and zooming several Axes in a Figure easy."""
import Tkinter
import PIL.ImageTk
import matplotlayers.backends.tk # for .has_mainloop
import matplotlayers.backends.tk.stack_settings
import matplotlayers.backends.tk.figure_settings
class FigureCanvasTk:
def __init__(self, master,
figure,
shape = None,
tk_canvas = None):
"""figure is a matplotlib.figure.Figure instance. SHAPE is the extent
of the Tkinter.Canvas if a new one is created. TK_CANVAS can be used
to hand over an already-existing Tkinter.Canvas to draw upon. If a
new Tkinter.Canvas is created, it is .pack()ed with arguments
expand=True and fill=Tkinter.BOTH. The default SHAPE is (400, 400).
MASTER is only used when a new Tkinter.Canvas is created."""
if shape is None:
shape = (400, 400)
# Initialise attributes ...
self.figure = figure
self.PIL_canvas = matplotlayers.backends.PIL.FigureCanvasPIL(figure)
self.pixelsize = None
(self.photoimage, self.photoimage_tag) = (None, None)
# Create Tkinter.Canvas if necessary ...
if tk_canvas is None:
self.tk_canvas = Tkinter.Canvas(master,
highlightthickness=0,
background='white',
width=shape[0],
height=shape[1])
# Initialise client registry ...
self.clients = []
# Bind methods ...
# Windows: Use Buttons 1 & 3 (at least with my 3-button mouse)
# Mac: Use Buttons 1 & 2 (at least with my MacBook)
# => So we bind to both buttons 2 & 3 for the right-click.
self.tk_canvas.bind('<Configure>', self.tk_configure, add=True)
self.tk_canvas.bind('<ButtonPress-1>', self.tk_start_zoom, add=True)
self.tk_canvas.bind('<ButtonPress-2>', self.tk_start_pan, add=True)
self.tk_canvas.bind('<ButtonPress-3>', self.tk_start_pan, add=True)
self.tk_canvas.bind('<ButtonRelease-1>', self.tk_stop_zoom, add=True)
self.tk_canvas.bind('<ButtonRelease-2>', self.tk_stop_pan, add=True)
self.tk_canvas.bind('<ButtonRelease-3>', self.tk_stop_pan, add=True)
self.tk_canvas.bind('<Motion>', self.tk_motion, add=True)
self.tk_canvas.bind('<Double-Button-1>', self.tk_show_figure_settings,
add=True)
self.tk_canvas.bind('<Double-Button-2>', self.tk_show_stack_settings,
add=True)
self.tk_canvas.bind('<Double-Button-3>', self.tk_show_stack_settings,
add=True)
# Pack Tkinter.Canvas if newly created ...
if tk_canvas is None:
self.tk_canvas.pack(expand=True, fill=Tkinter.BOTH)
#
# Conversion method ...
#
def pixelcoords2figurecoords(self, (pixelx, pixely)):
"""Convert the canvas-relative pixel coordinate (PIXELX, PIXELY) to
figure-relative float coordinate (figurex, figurey). PIXELY is zero
at the top, figurey is zero at the bottom of the figure."""
return (float(pixelx) / self.pixelsize[0],
1 - float(pixely) / self.pixelsize[1])
#
# Client registry ...
#
def register(self, client):
"""Register the client CLIENT to the canvas."""
if client not in self.clients:
self.clients.append(client)
def unregister(self, client):
"""Unregister the client CLIENT from the canvas."""
if client in self.clients:
self.clients.remove(client)
#
# Tk callbacks ...
#
def tk_configure(self, event):
"""Called upon reconfiguration of the .tk_canvas ."""
self.pixelsize = (event.width, event.height)
self.update()
def tk_autozoom(self, event):
"""Called upon activation of autozooming."""
figurecoords = self.pixelcoords2figurecoords((event.x, event.y))
for client in self.clients:
if client.event_location_applies(figurecoords):
client.autozoom()
self.update()
def tk_start_zoom(self, event):
"""Called upon start of zooming."""
figurecoords = self.pixelcoords2figurecoords((event.x, event.y))
for client in self.clients:
if client.event_location_applies(figurecoords):
client.start_zoom(figurecoords)
self.update()
def tk_start_pan(self, event):
"""Called upon start of panning."""
figurecoords = self.pixelcoords2figurecoords((event.x, event.y))
for client in self.clients:
if client.event_location_applies(figurecoords):
client.start_pan(figurecoords)
self.update()
def tk_stop_zoom(self, event):
"""Called upon stop of zooming."""
for client in self.clients:
client.stop_zoom()
self.update()
def tk_stop_pan(self, event):
"""Called upon stop of panning."""
for client in self.clients:
client.stop_pan()
self.update()
def tk_show_stack_settings(self, event):
"""Called when the settings dialog shall be shown."""
figurecoords = self.pixelcoords2figurecoords((event.x, event.y))
for client in self.clients:
if client.event_location_applies(figurecoords):
# Create dialog:
matplotlayers.backends.tk.stack_settings.StackSettings(
self.tk_canvas, client.stack, self.update)
def tk_show_figure_settings(self, event):
"""Called when the Figure settings dialog shall be shown."""
# Create dialog:
matplotlayers.backends.tk.figure_settings.FigureSettings(
self.tk_canvas, self.figure)
def tk_motion(self, event):
"""Called when the cursor is moved."""
figurecoords = self.pixelcoords2figurecoords((event.x, event.y))
for client in self.clients:
# Pass motion events on the all clients. Once the start event
# did apply to a client, also motion events at locations where
# start events would not apply, shall be passed on.
client.motion(figurecoords)
self.update()
#
# Update method ...
#
def update(self):
"""Redraws the figure."""
if self.pixelsize is None:
# If this is called before .tk_configure() was initialised we
# ignore the call silently.
return
# Retrieve the image.
image = self.PIL_canvas.output_PIL(self.pixelsize)
# Store the old photoimage attributes before overwriting them.
(old_photoimage, old_photoimage_tag) = \
(self.photoimage, self.photoimage_tag)
# Create the new photoimage.
self.photoimage = PIL.ImageTk.PhotoImage(image)
# Put it on the Canvas before deleting the old one. This avoids
# flickering.
self.photoimage_tag = self.tk_canvas.create_image((0, 0),
image = self.photoimage, anchor='nw')
if old_photoimage is not None and old_photoimage_tag is not None:
# If there was a previous image, remove it from the Canvas.
self.tk_canvas.delete(old_photoimage_tag)
if not matplotlayers.backends.tk.has_mainloop:
# If we /have/ a mainloop, we /must/ avoid calling .update().
# This showed up when having a mainloop, crashing the program ...
# It hung up in irregular widely spread time intervals. When
# avoiding the .update() call in this case, everything worked like
# a charm again.
self.tk_canvas.update()
#
# Tk Destroy method ...
#
def destroy(self):
"""Destroys the Canvas associated with this FigureCanvas. The Canvas
is destroyed irrespective of whether it was created in .__init__() or
handed over by the user."""
if self.photoimage is not None and self.photoimage_tag is not None:
# Remove the image from the Canvas if it exists.
self.tk_canvas.delete(self.photoimage_tag)
# Destroy the drawing Canvas.
self.tk_canvas.destroy()
| |
#this program corresponds to special.py
from decimal import Decimal
import types
from numpy.testing import *
import scipy.signal as signal
from scipy.signal import lfilter, correlate, convolve, convolve2d
from numpy import array, arange
import numpy as np
class _TestConvolve(TestCase):
    # Shared test bodies for scipy.signal.convolve.  Concrete subclasses
    # (OldTestConvolve / TestConvolve below) set the class attribute
    # 'old_behavior' to select which convolve semantics are exercised.
    def test_basic(self):
        # 1-D integer convolution; full output has len(a)+len(b)-1 samples
        a = [3,4,5,6,5,4]
        b = [1,2,3]
        c = convolve(a,b, old_behavior=self.old_behavior)
        assert_array_equal(c,array([3,10,22,28,32,32,23,12]))
    def test_complex(self):
        # complex inputs are convolved without dropping imaginary parts
        x = array([1+1j, 2+1j, 3+1j])
        y = array([1+1j, 2+1j])
        z = convolve(x, y,old_behavior=self.old_behavior)
        assert_array_equal(z, array([2j, 2+6j, 5+8j, 5+5j]))
    def test_zero_order(self):
        # scalar inputs: convolution degenerates to a plain product
        a = 1289
        b = 4567
        c = convolve(a,b,old_behavior=self.old_behavior)
        assert_array_equal(c,a*b)
    def test_2d_arrays(self):
        # convolve also accepts 2-D inputs (full mode)
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve(a,b,old_behavior=self.old_behavior)
        d = array( [[2 ,7 ,16,17,12],\
                    [10,30,62,58,38],\
                    [12,31,58,49,30]])
        assert_array_equal(c,d)
    def test_valid_mode(self):
        # 'valid' keeps only samples with full overlap
        a = [1,2,3,6,5,3]
        b = [2,3,4,5,3,4,2,2,1]
        c = convolve(a,b,'valid',old_behavior=self.old_behavior)
        assert_array_equal(c,array([70,78,73,65]))
class OldTestConvolve(_TestConvolve):
    # Runs the shared convolve() tests with the deprecated legacy
    # semantics; each test is wrapped in dec.deprecated() because the
    # old_behavior=True path emits a DeprecationWarning.
    old_behavior = True
    @dec.deprecated()
    def test_basic(self):
        _TestConvolve.test_basic(self)
    @dec.deprecated()
    def test_complex(self):
        _TestConvolve.test_complex(self)
    @dec.deprecated()
    def test_2d_arrays(self):
        _TestConvolve.test_2d_arrays(self)
    @dec.deprecated()
    def test_valid_mode(self):
        a = [1,2,3,6,5,3]
        b = [2,3,4,5,3,4,2,2,1]
        c = convolve(a,b,'valid',old_behavior=self.old_behavior)
        assert_array_equal(c,array([70,78,73,65]))
    # NOTE: a previous duplicate definition of test_same_mode delegated to
    # _TestConvolve.test_same_mode, which does not exist (AttributeError if
    # ever run); it was dead code shadowed by this direct implementation
    # and has been removed.
    @dec.deprecated()
    def test_same_mode(self):
        a = [1,2,3,3,1,2]
        b = [1,4,3,4,5,6,7,4,3,2,1,1,3]
        c = convolve(a,b,'same',old_behavior=self.old_behavior)
        d = array([14,25,35,43,57,61,63,57,45,36,25,20,17])
        assert_array_equal(c,d)
class TestConvolve(_TestConvolve):
    # Runs the shared convolve() tests with the new semantics, plus the
    # cases whose expected results differ from the legacy behavior.
    old_behavior = False
    def test_valid_mode(self):
        # 'valid' mode if b.size > a.size does not make sense with the new
        # behavior
        a = [1,2,3,6,5,3]
        b = [2,3,4,5,3,4,2,2,1]
        def _test():
            convolve(a,b,'valid',old_behavior=self.old_behavior)
        # assertRaises is the non-deprecated spelling of failUnlessRaises
        self.assertRaises(ValueError, _test)
    def test_same_mode(self):
        a = [1,2,3,3,1,2]
        b = [1,4,3,4,5,6,7,4,3,2,1,1,3]
        c = convolve(a,b,'same',old_behavior=self.old_behavior)
        d = array([57,61,63,57,45,36])
        assert_array_equal(c,d)
class _TestConvolve2d(TestCase):
    # Shared test bodies for scipy.signal.convolve2d.  Concrete
    # subclasses set the class attribute 'old_behavior' to select which
    # convolve2d semantics are exercised.
    def test_2d_arrays(self):
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        d = array( [[2 ,7 ,16,17,12],\
                    [10,30,62,58,38],\
                    [12,31,58,49,30]])
        e = convolve2d(a,b,old_behavior=self.old_behavior)
        assert_array_equal(e,d)
    def test_valid_mode(self):
        # 'valid' keeps only positions with full kernel overlap
        e = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
        f = [[1,2,3],[3,4,5]]
        g = convolve2d(e,f,'valid',old_behavior=self.old_behavior)
        h = array([[62,80,98,116,134]])
        assert_array_equal(g,h)
    def test_fillvalue(self):
        # 'fill' boundary pads with the given constant instead of zeros
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        fillval = 1
        c = convolve2d(a,b,'full','fill',fillval,old_behavior=self.old_behavior)
        d = array([[24,26,31,34,32],\
                   [28,40,62,64,52],\
                   [32,46,67,62,48]])
        assert_array_equal(c,d)
    def test_wrap_boundary(self):
        # 'wrap' boundary treats the input as periodic
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve2d(a,b,'full','wrap',old_behavior=self.old_behavior)
        d = array([[80,80,74,80,80],\
                   [68,68,62,68,68],\
                   [80,80,74,80,80]])
        assert_array_equal(c,d)
    def test_sym_boundary(self):
        # 'symm' boundary mirrors the input at its edges
        a = [[1,2,3],[3,4,5]]
        b = [[2,3,4],[4,5,6]]
        c = convolve2d(a,b,'full','symm',old_behavior=self.old_behavior)
        d = array([[34,30,44, 62, 66],\
                   [52,48,62, 80, 84],\
                   [82,78,92,110,114]])
        # BUGFIX: the assertion was missing, so this test always passed
        # without checking anything.
        assert_array_equal(c,d)
class OldTestConvolve2d(_TestConvolve2d):
    """Runs the shared 2-D convolution tests under the deprecated old
    behavior, plus old-behavior-only cases."""
    old_behavior = True

    @dec.deprecated()
    def test_2d_arrays(self):
        super(OldTestConvolve2d, self).test_2d_arrays()

    @dec.deprecated()
    def test_same_mode(self):
        # Old behavior: 'same' output takes the size of the larger input.
        small = [[1, 2, 3], [3, 4, 5]]
        large = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
        result = convolve2d(small, large, 'same',
                            old_behavior=self.old_behavior)
        expected = array([[7, 16, 22, 28, 34, 40, 37],
                          [30, 62, 80, 98, 116, 134, 114]])
        assert_array_equal(result, expected)

    @dec.deprecated()
    def test_valid_mode(self):
        super(OldTestConvolve2d, self).test_valid_mode()

    @dec.deprecated()
    def test_fillvalue(self):
        super(OldTestConvolve2d, self).test_fillvalue()

    @dec.deprecated()
    def test_wrap_boundary(self):
        super(OldTestConvolve2d, self).test_wrap_boundary()

    @dec.deprecated()
    def test_sym_boundary(self):
        super(OldTestConvolve2d, self).test_sym_boundary()

    @dec.deprecated()
    def test_valid_mode2(self):
        # Test when in2.size > in1.size: old behavior is to do so that
        # convolve2d(in2, in1) == convolve2d(in1, in2)
        small = [[1, 2, 3], [3, 4, 5]]
        large = [[2, 3, 4, 5, 6, 7, 8], [4, 5, 6, 7, 8, 9, 10]]
        result = convolve2d(small, large, 'valid',
                            old_behavior=self.old_behavior)
        expected = array([[62, 80, 98, 116, 134]])
        assert_array_equal(result, expected)
#class TestConvolve2d(_TestConvolve2d):
# old_behavior = False
# def test_same_mode(self):
# e = [[1,2,3],[3,4,5]]
# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
# g = convolve2d(e,f,'same',old_behavior=self.old_behavior)
# h = array([[80,98,116],\
# [70,82,94]])
# assert_array_equal(g,h)
#
# def test_valid_mode2(self):
# # Test when in2.size > in1.size
# e = [[1,2,3],[3,4,5]]
# f = [[2,3,4,5,6,7,8],[4,5,6,7,8,9,10]]
# def _test():
# convolve2d(e,f,'valid',old_behavior=self.old_behavior)
# self.failUnlessRaises(ValueError, _test)
class TestFFTConvolve(TestCase):
def test_real(self):
x = array([1,2,3])
assert_array_almost_equal(signal.fftconvolve(x,x), [1,4,10,12,9.])
def test_complex(self):
x = array([1+1j,2+2j,3+3j])
assert_array_almost_equal(signal.fftconvolve(x,x),
[0+2.0j, 0+8j, 0+20j, 0+24j, 0+18j])
def test_2d_real_same(self):
a = array([[1,2,3],[4,5,6]])
assert_array_almost_equal(signal.fftconvolve(a,a),\
array([[1,4,10,12,9],\
[8,26,56,54,36],\
[16,40,73,60,36]]))
def test_2d_complex_same(self):
a = array([[1+2j,3+4j,5+6j],[2+1j,4+3j,6+5j]])
c = signal.fftconvolve(a,a)
d = array([[-3+4j,-10+20j,-21+56j,-18+76j,-11+60j],\
[10j,44j,118j,156j,122j],\
[3+4j,10+20j,21+56j,18+76j,11+60j]])
assert_array_almost_equal(c,d)
def test_real_same_mode(self):
a = array([1,2,3])
b = array([3,3,5,6,8,7,9,0,1])
c = signal.fftconvolve(a,b,'same')
d = array([9.,20.,25.,35.,41.,47.,39.,28.,2.])
assert_array_almost_equal(c,d)
def test_real_valid_mode(self):
a = array([3,2,1])
b = array([3,3,5,6,8,7,9,0,1])
c = signal.fftconvolve(a,b,'valid')
d = array([24.,31.,41.,43.,49.,25.,12.])
assert_array_almost_equal(c,d)
def test_zero_order(self):
a = array([4967])
b = array([3920])
c = signal.fftconvolve(a,b)
d = a*b
assert_equal(c,d)
def test_random_data(self):
np.random.seed(1234)
a = np.random.rand(1233) + 1j*np.random.rand(1233)
b = np.random.rand(1321) + 1j*np.random.rand(1321)
c = signal.fftconvolve(a, b, 'full')
d = np.convolve(a, b, 'full')
assert np.allclose(c, d, rtol=1e-10)
class TestMedFilt(TestCase):
    """Median filtering: medfilt and medfilt2d must produce the known
    reference output and agree with each other."""

    def test_basic(self):
        f = [[50, 50, 50, 50, 50, 92, 18, 27, 65, 46],
             [50, 50, 50, 50, 50, 0, 72, 77, 68, 66],
             [50, 50, 50, 50, 50, 46, 47, 19, 64, 77],
             [50, 50, 50, 50, 50, 42, 15, 29, 95, 35],
             [50, 50, 50, 50, 50, 46, 34, 9, 21, 66],
             [70, 97, 28, 68, 78, 77, 61, 58, 71, 42],
             [64, 53, 44, 29, 68, 32, 19, 68, 24, 84],
             [3, 33, 53, 67, 1, 78, 74, 55, 12, 83],
             [7, 11, 46, 70, 60, 47, 24, 43, 61, 26],
             [32, 61, 88, 7, 39, 4, 92, 64, 45, 61]]

        d = signal.medfilt(f, [7, 3])
        # BUG FIX: np.float was merely a deprecated alias of the builtin
        # float and was removed in numpy 1.24; use float directly.
        e = signal.medfilt2d(np.array(f, float), [7, 3])
        expected = [[0, 50, 50, 50, 42, 15, 15, 18, 27, 0],
                    [0, 50, 50, 50, 50, 42, 19, 21, 29, 0],
                    [50, 50, 50, 50, 50, 47, 34, 34, 46, 35],
                    [50, 50, 50, 50, 50, 50, 42, 47, 64, 42],
                    [50, 50, 50, 50, 50, 50, 46, 55, 64, 35],
                    [33, 50, 50, 50, 50, 47, 46, 43, 55, 26],
                    [32, 50, 50, 50, 50, 47, 46, 45, 55, 26],
                    [7, 46, 50, 50, 47, 46, 46, 43, 45, 21],
                    [0, 32, 33, 39, 32, 32, 43, 43, 43, 0],
                    [0, 7, 11, 7, 4, 4, 19, 19, 24, 0]]
        assert_array_equal(d, expected)
        # Both implementations must agree.
        assert_array_equal(d, e)
class TestWiener(TestCase):
    """Checks signal.wiener on a small 2-D example with reference output."""

    def test_basic(self):
        noisy = array([[5, 6, 4, 3],
                       [3, 5, 6, 2],
                       [2, 3, 5, 6],
                       [1, 6, 9, 7]], 'd')
        expected = array(
            [[2.16374269, 3.2222222222, 2.8888888889, 1.6666666667],
             [2.666666667, 4.33333333333, 4.44444444444, 2.8888888888],
             [2.222222222, 4.4444444444, 5.4444444444, 4.801066874837],
             [1.33333333333, 3.92735042735, 6.0712560386, 5.0404040404]])
        filtered = signal.wiener(noisy)
        assert_array_almost_equal(filtered, expected, decimal=6)
class TestCSpline1DEval(TestCase):
    """Round-trip test: cubic-spline coefficients evaluated back at knots."""

    def test_basic(self):
        knots = array([1, 2, 3, 4, 3, 2, 1, 2, 3.0])
        grid = arange(len(knots))
        spacing = grid[1] - grid[0]
        coeffs = signal.cspline1d(knots)
        fine_grid = arange(len(knots) * 10.0) / 10.0
        interpolated = signal.cspline1d_eval(coeffs, fine_grid,
                                             dx=spacing, x0=grid[0])
        # make sure interpolated values are on knot points
        assert_array_almost_equal(interpolated[::10], knots, decimal=5)
class TestOrderFilt(TestCase):
    """Minimal smoke test for signal.order_filter."""

    def test_basic(self):
        # Domain [1,0,1] considers the two neighbors (zero-padded at the
        # edges); rank 1 selects the larger of the two values.
        result = signal.order_filter([1, 2, 3], [1, 0, 1], 1)
        assert_array_equal(result, [2, 3, 2])
class TestChebWin:
    """Regression values for signal.chebwin (Dolph-Chebyshev window).

    NOTE(review): the attenuation is passed as at=-40; scipy documents `at`
    as an attenuation in dB -- confirm the sign convention expected by the
    scipy version this suite targets.
    """

    def test_cheb_odd(self):
        # Expected 53-point window; symmetric, peak value 1.0 at the center.
        cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348,
                               0.198891, 0.235450, 0.274846, 0.316836,
                               0.361119, 0.407338, 0.455079, 0.503883,
                               0.553248, 0.602637, 0.651489, 0.699227,
                               0.745266, 0.789028, 0.829947, 0.867485,
                               0.901138, 0.930448, 0.955010, 0.974482,
                               0.988591, 0.997138, 1.000000, 0.997138,
                               0.988591, 0.974482, 0.955010, 0.930448,
                               0.901138, 0.867485, 0.829947, 0.789028,
                               0.745266, 0.699227, 0.651489, 0.602637,
                               0.553248, 0.503883, 0.455079, 0.407338,
                               0.361119, 0.316836, 0.274846, 0.235450,
                               0.198891, 0.165348, 0.134941, 0.107729,
                               0.200938])

        cheb_odd = signal.chebwin(53, at=-40)
        assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4)

    def test_cheb_even(self):
        # Expected 54-point window; even length has a two-sample flat peak.
        cheb_even_true = array([0.203894, 0.107279, 0.133904,
                                0.163608, 0.196338, 0.231986,
                                0.270385, 0.311313, 0.354493,
                                0.399594, 0.446233, 0.493983,
                                0.542378, 0.590916, 0.639071,
                                0.686302, 0.732055, 0.775783,
                                0.816944, 0.855021, 0.889525,
                                0.920006, 0.946060, 0.967339,
                                0.983557, 0.994494, 1.000000,
                                1.000000, 0.994494, 0.983557,
                                0.967339, 0.946060, 0.920006,
                                0.889525, 0.855021, 0.816944,
                                0.775783, 0.732055, 0.686302,
                                0.639071, 0.590916, 0.542378,
                                0.493983, 0.446233, 0.399594,
                                0.354493, 0.311313, 0.270385,
                                0.231986, 0.196338, 0.163608,
                                0.133904, 0.107279, 0.203894])

        cheb_even = signal.chebwin(54, at=-40)
        assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4)
class _TestLinearFilter(TestCase):
dt = None
def test_rank1(self):
x = np.linspace(0, 5, 6).astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, -0.5]).astype(self.dt)
# Test simple IIR
y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
assert_array_almost_equal(lfilter(b, a, x), y_r)
# Test simple FIR
b = np.array([1, 1]).astype(self.dt)
a = np.array([1]).astype(self.dt)
y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
assert_array_almost_equal(lfilter(b, a, x), y_r)
# Test IIR with initial conditions
b = np.array([1, 1]).astype(self.dt)
a = np.array([1]).astype(self.dt)
zi = np.array([1]).astype(self.dt)
y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt)
zf_r = np.array([5]).astype(self.dt)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
b = np.array([1, 1, 1]).astype(self.dt)
a = np.array([1]).astype(self.dt)
zi = np.array([1, 1]).astype(self.dt)
y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt)
zf_r = np.array([9, 5]).astype(self.dt)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank2(self):
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
y_r2_a0 = np.array([[0, 2, 4], [6, 4, 2], [0, 2, 4], [6 ,4 ,2]],
dtype=self.dt)
y_r2_a1 = np.array([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], dtype=self.dt)
y = lfilter(b, a, x, axis = 0)
assert_array_almost_equal(y_r2_a0, y)
y = lfilter(b, a, x, axis = 1)
assert_array_almost_equal(y_r2_a1, y)
def test_rank2_init_cond_a1(self):
# Test initial condition handling along axis 1
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
y_r2_a0_1 = np.array([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]], dtype=self.dt)
zf_r = np.array([-5, -17, -29, -41])[:, np.newaxis].astype(self.dt)
y, zf = lfilter(b, a, x, axis = 1, zi = np.ones((4, 1)))
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
def test_rank2_init_cond_a0(self):
# Test initial condition handling along axis 0
shape = (4, 3)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
x = x.astype(self.dt)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
y_r2_a0_0 = np.array([[1, 3, 5], [5, 3, 1], [1, 3, 5], [5 ,3 ,1]],
dtype=self.dt)
zf_r = np.array([[-23, -23, -23]], dtype=self.dt)
y, zf = lfilter(b, a, x, axis = 0, zi = np.ones((1, 3)))
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank3(self):
shape = (4, 3, 2)
x = np.linspace(0, np.prod(shape) - 1, np.prod(shape)).reshape(shape)
b = np.array([1, -1]).astype(self.dt)
a = np.array([0.5, 0.5]).astype(self.dt)
# Test last axis
y = lfilter(b, a, x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
assert_array_almost_equal(y[i, j], lfilter(b, a, x[i, j]))
def test_empty_zi(self):
"""Regression test for #880: empty array for zi crashes."""
a = np.ones(1).astype(self.dt)
b = np.ones(1).astype(self.dt)
x = np.arange(5).astype(self.dt)
zi = np.ones(0).astype(self.dt)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
self.failUnless(zf.dtype == self.dt)
self.failUnless(zf.size == 0)
class TestLinearFilterFloat32(_TestLinearFilter):
    # Run the shared linear-filter tests with single-precision floats.
    dt = np.float32
class TestLinearFilterFloat64(_TestLinearFilter):
    # Run the shared linear-filter tests with double-precision floats.
    dt = np.float64
class TestLinearFilterFloatExtended(_TestLinearFilter):
    # Run the shared linear-filter tests with extended-precision floats.
    dt = np.longdouble
class TestLinearFilterComplex64(_TestLinearFilter):
    # Run the shared linear-filter tests with single-precision complex.
    dt = np.complex64
class TestLinearFilterComplex128(_TestLinearFilter):
    # Run the shared linear-filter tests with double-precision complex.
    dt = np.complex128
class TestLinearFilterComplexxxiExtended28(_TestLinearFilter):
    # Run the shared linear-filter tests with extended-precision complex.
    # NOTE(review): the class name looks garbled -- probably intended as
    # "TestLinearFilterComplexExtended". Kept as-is because renaming a
    # public test class would change the suite's interface.
    dt = np.longcomplex
class TestLinearFilterDecimal(_TestLinearFilter):
    # Run the shared linear-filter tests with an object dtype holding
    # decimal.Decimal values.
    dt = np.dtype(Decimal)
class _TestCorrelateReal(TestCase):
    """Shared correlate() tests for real dtypes; subclasses bind `dt`.

    BUG FIX applied throughout: failUnless is a deprecated unittest alias
    (removed in Python 3.12); replaced with assertTrue.
    """
    dt = None

    def _setup_rank1(self):
        # a.size should be greater than b.size for the tests
        a = np.linspace(0, 3, 4).astype(self.dt)
        b = np.linspace(1, 2, 2).astype(self.dt)

        y_r = np.array([0, 2, 5, 8, 3]).astype(self.dt)
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'valid', old_behavior=False)
        assert_array_almost_equal(y, y_r[1:4])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'same', old_behavior=False)
        assert_array_almost_equal(y, y_r[:-1])
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1()
        y = correlate(a, b, 'full', old_behavior=False)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_valid_old(self):
        # This test assume a.size > b.size
        a, b, y_r = self._setup_rank1()
        y = correlate(b, a, 'valid')
        assert_array_almost_equal(y, y_r[1:4])
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_same_old(self):
        # This test assume a.size > b.size
        a, b, y_r = self._setup_rank1()
        y = correlate(b, a, 'same')
        assert_array_almost_equal(y, y_r[:-1])
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_full_old(self):
        # This test assume a.size > b.size
        a, b, y_r = self._setup_rank1()
        y = correlate(b, a, 'full')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def _setup_rank3(self):
        a = np.linspace(0, 39, 40).reshape((2, 4, 5), order='F').astype(self.dt)
        b = np.linspace(0, 23, 24).reshape((2, 3, 4), order='F').astype(self.dt)

        y_r = array([[[0., 184., 504., 912., 1360., 888., 472., 160.],
                      [46., 432., 1062., 1840., 2672., 1698., 864., 266.],
                      [134., 736., 1662., 2768., 3920., 2418., 1168., 314.],
                      [260., 952., 1932., 3056., 4208., 2580., 1240., 332.],
                      [202., 664., 1290., 1984., 2688., 1590., 712., 150.],
                      [114., 344., 642., 960., 1280., 726., 296., 38.]],

                     [[23., 400., 1035., 1832., 2696., 1737., 904., 293.],
                      [134., 920., 2166., 3680., 5280., 3306., 1640., 474.],
                      [325., 1544., 3369., 5512., 7720., 4683., 2192., 535.],
                      [571., 1964., 3891., 6064., 8272., 4989., 2324., 565.],
                      [434., 1360., 2586., 3920., 5264., 3054., 1312., 230.],
                      [241., 700., 1281., 1888., 2496., 1383., 532., 39.]],

                     [[22., 214., 528., 916., 1332., 846., 430., 132.],
                      [86., 484., 1098., 1832., 2600., 1602., 772., 206.],
                      [188., 802., 1698., 2732., 3788., 2256., 1018., 218.],
                      [308., 1006., 1950., 2996., 4052., 2400., 1078., 230.],
                      [230., 692., 1290., 1928., 2568., 1458., 596., 78.],
                      [126., 354., 636., 924., 1212., 654., 234., 0.]]],
                    dtype=self.dt)
        return a, b, y_r

    def test_rank3_valid(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "valid", old_behavior=False)
        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_same(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, "same", old_behavior=False)
        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
        self.assertTrue(y.dtype == self.dt)

    def test_rank3_all(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(a, b, old_behavior=False)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank3_valid_old(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(b, a, "valid")
        assert_array_almost_equal(y, y_r[1:2, 2:4, 3:5])
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank3_same_old(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(b, a, "same")
        assert_array_almost_equal(y, y_r[0:-1, 1:-1, 1:-2])
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank3_all_old(self):
        a, b, y_r = self._setup_rank3()
        y = correlate(b, a)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)
# Generate a concrete TestCase per real dtype by subclassing
# _TestCorrelateReal with `dt` bound to that dtype.
# BUG FIX: types.ClassType exists only on Python 2; the three-argument
# type() builtin creates the same classes portably.
# BUG FIX: np.int was a deprecated alias of the builtin int (removed in
# numpy 1.24); using int directly keeps the generated class name
# "TestCorrelateInt". The duplicated np.ulonglong entry looks like a typo
# for np.longlong; both dtypes are now covered.
for i in [np.ubyte, np.byte, np.ushort, np.short, np.uint, int,
          np.longlong, np.ulonglong, np.float32, np.float64, np.longdouble,
          Decimal]:
    name = "TestCorrelate%s" % i.__name__.title()
    globals()[name] = type(name, (_TestCorrelateReal,), {"dt": i})
class _TestCorrelateComplex(TestCase):
    """Shared correlate() tests for complex dtypes; subclasses bind `dt`.

    The expected values are built from the real/imaginary parts via the
    identity corr(a, b) = corr(re, re) + corr(im, im)
                          + 1j*(corr(im, re) - corr(re, im)).

    BUG FIX applied throughout: failUnless is a deprecated unittest alias
    (removed in Python 3.12); replaced with assertTrue.
    """
    dt = None

    def _setup_rank1(self, mode):
        a = np.random.randn(10).astype(self.dt)
        a += 1j * np.random.randn(10).astype(self.dt)
        b = np.random.randn(8).astype(self.dt)
        b += 1j * np.random.randn(8).astype(self.dt)

        y_r = (correlate(a.real, b.real, mode=mode, old_behavior=False) +
               correlate(a.imag, b.imag, mode=mode, old_behavior=False)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, mode=mode, old_behavior=False) +
                     correlate(a.imag, b.real, mode=mode, old_behavior=False))
        return a, b, y_r

    def test_rank1_valid(self):
        a, b, y_r = self._setup_rank1('valid')
        y = correlate(a, b, 'valid', old_behavior=False)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_same(self):
        a, b, y_r = self._setup_rank1('same')
        y = correlate(a, b, 'same', old_behavior=False)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def test_rank1_full(self):
        a, b, y_r = self._setup_rank1('full')
        y = correlate(a, b, 'full', old_behavior=False)
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    def test_rank3(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)

        y_r = (correlate(a.real, b.real, old_behavior=False)
               + correlate(a.imag, b.imag, old_behavior=False)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, old_behavior=False) +
                     correlate(a.imag, b.real, old_behavior=False))

        y = correlate(a, b, 'full', old_behavior=False)
        assert_array_almost_equal(y, y_r, decimal=4)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_valid_old(self):
        a, b, y_r = self._setup_rank1('valid')
        y = correlate(b, a.conj(), 'valid')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_same_old(self):
        a, b, y_r = self._setup_rank1('same')
        y = correlate(b, a.conj(), 'same')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank1_full_old(self):
        a, b, y_r = self._setup_rank1('full')
        y = correlate(b, a.conj(), 'full')
        assert_array_almost_equal(y, y_r)
        self.assertTrue(y.dtype == self.dt)

    @dec.deprecated()
    def test_rank3_old(self):
        a = np.random.randn(10, 8, 6).astype(self.dt)
        a += 1j * np.random.randn(10, 8, 6).astype(self.dt)
        b = np.random.randn(8, 6, 4).astype(self.dt)
        b += 1j * np.random.randn(8, 6, 4).astype(self.dt)

        y_r = (correlate(a.real, b.real, old_behavior=False)
               + correlate(a.imag, b.imag, old_behavior=False)).astype(self.dt)
        y_r += 1j * (-correlate(a.real, b.imag, old_behavior=False) +
                     correlate(a.imag, b.real, old_behavior=False))

        y = correlate(b, a.conj(), 'full')
        assert_array_almost_equal(y, y_r, decimal=4)
        self.assertTrue(y.dtype == self.dt)
# Generate a concrete TestCase per complex dtype.
# BUG FIX: types.ClassType exists only on Python 2; the three-argument
# type() builtin creates the same classes portably.
for i in [np.csingle, np.cdouble, np.clongdouble]:
    name = "TestCorrelate%s" % i.__name__.title()
    globals()[name] = type(name, (_TestCorrelateComplex,), {"dt": i})
class TestFiltFilt:
    """Zero-phase filtering sanity check."""

    def test_basic(self):
        # Identical numerator and denominator make the filter an identity,
        # so filtfilt must return its input unchanged.
        data = np.arange(12)
        filtered = signal.filtfilt([1, 2, 3], [1, 2, 3], data)
        assert_equal(filtered, arange(12))
class TestDecimate:
def test_basic(self):
x = np.arange(6)
assert_array_equal(signal.decimate(x, 2, n=1).round(), x[::2])
if __name__ == "__main__":
    # Allow running this test module directly via numpy's test runner.
    run_module_suite()
| |
#!/usr/bin/env python
"""Library for processing of artifacts.
This file contains non-GRR specific pieces of artifact processing and is
intended to end up as an independent library.
"""
import re
from typing import Iterable
from typing import Text
from grr_response_core.lib import interpolation
from grr_response_core.lib import objectfilter
from grr_response_core.lib.rdfvalues import structs as rdf_structs
class Error(Exception):
    """Base exception for the artifact-processing library."""
class ConditionError(Error):
    """An invalid artifact condition was specified (see CheckCondition)."""
class ArtifactProcessingError(Error):
    """Unable to process artifact."""
class KbInterpolationMissingAttributesError(Error):
    """An exception class for missing knowledgebase attributes.

    Attributes:
      attrs: List of the knowledgebase attribute names that had no value.
    """

    def __init__(self, attrs: Iterable[Text]) -> None:
        # BUG FIX: materialize the iterable before using it twice. The
        # original joined `attrs` into the message and only then called
        # list(attrs), so a one-shot iterator (e.g. a generator) left
        # self.attrs empty.
        self.attrs = list(attrs)
        message = "Some attributes could not be located in the knowledgebase: {}"
        message = message.format(", ".join(self.attrs))
        super().__init__(message)
class KbInterpolationUnknownAttributesError(Error):
    """An exception class for non-existing knowledgebase attributes.

    Attributes:
      attrs: List of the attribute names not present in the knowledgebase.
    """

    def __init__(self, attrs: Iterable[Text]) -> None:
        # BUG FIX: materialize the iterable before using it twice. The
        # original joined `attrs` into the message and only then called
        # list(attrs), so a one-shot iterator (e.g. a generator) left
        # self.attrs empty.
        self.attrs = list(attrs)
        message = "Some attributes are not part of the knowledgebase: {}"
        message = message.format(", ".join(self.attrs))
        super().__init__(message)
class KnowledgeBaseUninitializedError(Error):
    """Attempt to process artifact without a valid Knowledge Base."""
class KnowledgeBaseAttributesMissingError(Error):
    """Knowledge Base is missing key attributes."""
# Matches a knowledgebase interpolation marker such as "%%users.username%%";
# group 1 captures the attribute path between the %% delimiters.
INTERPOLATED_REGEX = re.compile(r"%%([^%]+?)%%")
def InterpolateListKbAttributes(input_list, knowledge_base):
    """Interpolate knowledgebase attributes in every pattern of a list.

    Args:
      input_list: A list of patterns, each possibly containing %%...%%
        interpolation markers.
      knowledge_base: The knowledgebase to interpolate parameters from.

    Returns:
      A flat list with all expansions of every pattern, in input order.
    """
    return [
        expansion
        for pattern in input_list
        for expansion in InterpolateKbAttributes(pattern, knowledge_base)
    ]
def InterpolateKbAttributes(pattern, knowledge_base):
    """Interpolate all knowledgebase attributes in pattern.

    Args:
      pattern: A string with potential interpolation markers. For example:
        "/home/%%users.username%%/Downloads/"
      knowledge_base: The knowledge_base to interpolate parameters from.

    Raises:
      KbInterpolationMissingAttributesError: If any of the required pattern
        parameters is not present in the knowledgebase.
      KbInterpolationUnknownAttributesError: If any of the specified pattern
        parameters is not a valid knowledgebase attribute.

    Returns:
      An iterator over all unique strings generated by expanding the pattern.
    """
    # TODO(hanuszczak): Importing `rdf_client` module (where knowledgebase RDF
    # class is defined) causes a cyclic dependency. As a workaround, we get the
    # class object here but it is obviously a terrible solution and modules
    # should be refactored instead.
    kb_cls = knowledge_base.__class__

    # TODO(hanuszczak): Control flow feels a bit awkward here because of error
    # handling that tries not to break any functionality. With the new
    # utilities it should be possible to improve the code, changing the
    # behaviour to a more sane one.
    interpolator = interpolation.Interpolator(pattern)

    missing_attr_names = set()
    unknown_attr_names = set()

    # Validation pass: check every referenced variable and scope against the
    # knowledgebase schema, collecting ALL unknown names before failing so
    # the error reports everything at once.
    for var_id in interpolator.Vars():
        var_name = str(var_id).lower()

        if var_name not in kb_cls.type_infos:
            unknown_attr_names.add(var_name)

    for scope_id in interpolator.Scopes():
        scope_name = str(scope_id).lower()

        # A scope must name a repeated (ProtoList) knowledgebase field.
        if not (scope_name in kb_cls.type_infos and
                isinstance(kb_cls.type_infos[scope_name], rdf_structs.ProtoList)):
            unknown_attr_names.add(scope_name)
            continue

        scope_type = kb_cls.type_infos[scope_name].delegate.type

        # Scoped variables are validated against the sub-struct's schema and
        # reported in dotted "scope.attr" form.
        for var_id in interpolator.ScopeVars(scope_id):
            var_name = str(var_id).lower()

            if var_name not in scope_type.type_infos:
                unknown_attr_names.add("{}.{}".format(scope_name, var_name))
                continue

    if unknown_attr_names:
        raise KbInterpolationUnknownAttributesError(unknown_attr_names)

    # Binding pass for plain variables: unset (falsy) values are recorded as
    # missing instead of being bound.
    for vid in interpolator.Vars():
        attr_name = str(vid).lower()

        value = getattr(knowledge_base, attr_name)
        if not value:
            missing_attr_names.add(attr_name)
            continue

        interpolator.BindVar(attr_name, value)  # pytype: disable=wrong-arg-types

    # Binding pass for scopes: try to bind each sub-struct of the repeated
    # field; a scope only counts as missing if no struct could be bound with
    # ALL of its referenced variables set.
    for scope_id in interpolator.Scopes():
        scope_name = str(scope_id).lower()

        kb_structs = getattr(knowledge_base, scope_name)
        if not kb_structs:
            missing_attr_names.add(scope_name)
            continue

        scope_bound = False
        scope_missing_attr_names = set()
        for kb_struct in kb_structs:
            bindings = {}

            var_ids = interpolator.ScopeVars(scope_id)
            for var_id in var_ids:
                attr_name = str(var_id).lower()

                value = getattr(kb_struct, attr_name)
                if not value:
                    scope_missing_attr_names.add("{}.{}".format(scope_name, attr_name))
                    continue

                bindings[var_id] = value

            # Bind the scope only when every variable in it has a value.
            if len(bindings) == len(var_ids):
                interpolator.BindScope(scope_id, bindings)
                scope_bound = True

        if not scope_bound:
            missing_attr_names.update(scope_missing_attr_names)

    if missing_attr_names:
        raise KbInterpolationMissingAttributesError(missing_attr_names)

    return interpolator.Interpolate()
def GetWindowsEnvironmentVariablesMap(knowledge_base):
    """Return a dictionary of environment variables and their values.

    Implementation maps variables mentioned in
    https://en.wikipedia.org/wiki/Environment_variable#Windows to known
    KB definitions.

    Args:
      knowledge_base: A knowledgebase object.

    Returns:
      A dictionary built from a given knowledgebase object where keys are
      variables names and values are their values.
    """
    # Each knowledgebase field maps to one or more environment variable
    # names; a field that is unset (falsy) contributes nothing.
    kb_field_to_env_names = [
        ("environ_path", ("path",)),
        ("environ_temp", ("temp",)),
        ("environ_systemroot", ("systemroot",)),
        ("environ_windir", ("windir",)),
        ("environ_programfiles", ("programfiles", "programw6432")),
        ("environ_programfilesx86", ("programfiles(x86)",)),
        ("environ_systemdrive", ("systemdrive",)),
        ("environ_allusersprofile", ("allusersprofile", "programdata")),
        ("environ_allusersappdata", ("allusersappdata",)),
    ]

    environ_vars = {}
    for field, env_names in kb_field_to_env_names:
        value = getattr(knowledge_base, field)
        if value:
            for env_name in env_names:
                environ_vars[env_name] = value

    # Per-user variables are accumulated as lists, one entry per user that
    # has the attribute set.
    for user in knowledge_base.users:
        for attr in ("appdata", "localappdata", "userdomain", "userprofile"):
            value = getattr(user, attr)
            if value:
                environ_vars.setdefault(attr, []).append(value)

    return environ_vars
def ExpandWindowsEnvironmentVariables(data_string, knowledge_base):
    r"""Take a string and expand any windows environment variables.

    Args:
      data_string: A string, e.g. "%SystemRoot%\\LogFiles"
      knowledge_base: A knowledgebase object.

    Returns:
      A string with available environment variables expanded. If we can't
      expand we just return the string with the original variables.
    """

    def _Replacement(match):
        var_name = match.group(1)
        # KB environment variables are prefixed with environ_.
        kb_value = getattr(knowledge_base,
                           "environ_%s" % var_name.lower(), None)
        if isinstance(kb_value, str) and kb_value:
            return kb_value
        # Failed to expand: leave the variable as it was (original casing).
        return "%%%s%%" % var_name

    # re.sub with a callable inserts the return value literally, so no
    # escaping of the knowledgebase value is needed.
    return re.sub(r"%([^%]+?)%", _Replacement, data_string)
def CheckCondition(condition, check_object):
    """Check if a condition matches an object.

    Args:
      condition: A string condition e.g. "os == 'Windows'"
      check_object: Object to validate, e.g. an rdf_client.KnowledgeBase()

    Returns:
      True or False depending on whether the condition matches.

    Raises:
      ConditionError: If condition is bad.
    """
    try:
        of = objectfilter.Parser(condition).Parse()
        compiled_filter = of.Compile(objectfilter.BaseFilterImplementation)
        return compiled_filter.Matches(check_object)
    except objectfilter.Error as e:
        # Chain the original parser/compiler error explicitly so the root
        # cause is preserved in tracebacks (PEP 3134; flake8-bugbear B904).
        raise ConditionError(e) from e
def ExpandWindowsUserEnvironmentVariables(data_string,
                                          knowledge_base,
                                          sid=None,
                                          username=None):
    r"""Take a string and expand windows user environment variables based.

    Args:
      data_string: A string, e.g. "%TEMP%\\LogFiles"
      knowledge_base: A knowledgebase object.
      sid: A Windows SID for a user to expand for.
      username: A Windows user name to expand for.

    Returns:
      A string with available environment variables expanded.
    """

    def _Replacement(match):
        # Look the user up per match, mirroring the original per-match
        # GetUser call.
        kb_user = knowledge_base.GetUser(sid=sid, username=username)
        var_name = match.group(1)
        kb_value = None
        if kb_user:
            kb_value = getattr(kb_user, var_name.lower(), None)
        if isinstance(kb_value, str) and kb_value:
            return kb_value
        # Unknown or unset variable: keep it verbatim (original casing).
        return "%%%s%%" % var_name

    return re.sub(r"%([^%]+?)%", _Replacement, data_string)
| |
"Supporting objects and functions to convert Matplotlib objects into Bokeh."
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import itertools
import warnings
import matplotlib as mpl
import numpy as np
import pandas as pd
from six import string_types
from ..models import (ColumnDataSource, FactorRange, DataRange1d, DatetimeAxis, GlyphRenderer,
Grid, GridPlot, LinearAxis, Plot, CategoricalAxis, Legend)
from ..models.glyphs import (Asterisk, Circle, Cross, Diamond, InvertedTriangle,
Line, MultiLine, Patches, Square, Text, Triangle, X)
from ..plotting import DEFAULT_TOOLS
from ..plotting_helpers import _process_tools_arg
from .mplexporter.renderers import Renderer
from .mpl_helpers import convert_dashes, get_props_cycled, is_ax_end, xkcd_line
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class BokehRenderer(Renderer):
def __init__(self, pd_obj, xkcd):
    """Initial setup.

    Args:
        pd_obj: If True, draw_line tries to convert x values via pandas
            Periods (datetime axes).
        xkcd: If True, render lines and title font in xkcd sketch style.
    """
    self.fig = None      # Final figure (Plot or GridPlot); set in close_figure.
    self.pd_obj = pd_obj
    self.xkcd = xkcd
    self.zorder = {}     # renderer _id -> mpl zorder; used to sort renderers.
    self.handles = {}    # id(mpl artist) -> bokeh renderer; used by legends.
def open_figure(self, fig, props):
    "Get the main plot properties and create the plot."
    # Pixel size = figure size in inches * dpi.
    self.width = int(props['figwidth'] * props['dpi'])
    self.height = int(props['figheight'] * props['dpi'])
    # A single Plot collects all renderers; close_figure may later split it
    # into a GridPlot when the figure contains multiple axes.
    self.plot = Plot(x_range=DataRange1d(),
                     y_range=DataRange1d(),
                     plot_width=self.width,
                     plot_height=self.height)
def close_figure(self, fig):
    "Complete the plot: add tools."
    # Add tools
    tool_objs = _process_tools_arg(self.plot, DEFAULT_TOOLS)
    self.plot.add_tools(*tool_objs)

    # Simple or Grid plot setup
    if len(fig.axes) <= 1:
        self.fig = self.plot
        # Draw order follows matplotlib zorder, recorded per renderer id.
        self.plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
    else:
        # This list comprehension splits the plot.renderers list at the
        # "marker" points (the ax_end glyphs appended by close_axes),
        # returning small sublists corresponding with each subplot.
        subrends = [list(x[1]) for x in itertools.groupby(
            self.plot.renderers, lambda x: is_ax_end(x)) if not x[0]]
        plots = []
        for i, axes in enumerate(fig.axes):
            # create a new plot for each subplot
            _plot = Plot(x_range=self.plot.x_range,
                         y_range=self.plot.y_range,
                         plot_width=self.width,
                         plot_height=self.height)
            _plot.title = ""
            # and add new tools
            _tool_objs = _process_tools_arg(_plot, DEFAULT_TOOLS)
            _plot.add_tools(*_tool_objs)
            # clean the plot ref from axis and grids
            _plot_rends = subrends[i]
            for r in _plot_rends:
                if not isinstance(r, GlyphRenderer):
                    r.plot = None
            # add all the renderers into the new subplot
            for r in _plot_rends:
                if isinstance(r, GlyphRenderer):
                    _plot.renderers.append(r)
                elif isinstance(r, Grid):
                    _plot.add_layout(r)
                else:
                    # Axes keep the side (below/above/left/right) they had
                    # on the aggregate plot.
                    if r in self.plot.below:
                        _plot.add_layout(r, 'below')
                    elif r in self.plot.above:
                        _plot.add_layout(r, 'above')
                    elif r in self.plot.left:
                        _plot.add_layout(r, 'left')
                    elif r in self.plot.right:
                        _plot.add_layout(r, 'right')

            _plot.renderers.sort(key=lambda x: self.zorder.get(x._id, 0))
            plots.append(_plot)
        # Arrange the subplots into the figure's (rows, cols) geometry.
        (a, b, c) = fig.axes[0].get_geometry()
        p = np.array(plots)
        n = np.resize(p, (a, b))
        grid = GridPlot(children=n.tolist())
        self.fig = grid
def open_axes(self, ax, props):
    """Get axes data and create the axes and grids.

    Args:
        ax: the matplotlib Axes instance.
        props: dict of exported axes properties; props['axes'] lists
            per-axis property dicts with a 'position' key.
    """
    # Get axes, title and grid into class attributes.
    self.ax = ax
    self.plot.title = ax.get_title()
    # to avoid title conversion by draw_text later

    # Add axis.
    # BUG FIX: the loop variable used to shadow the `props` parameter
    # (`for props in props['axes']`), which worked only because the dict is
    # evaluated once, but made the code confusing; use a distinct name.
    for axis_props in props['axes']:
        if axis_props['position'] == "bottom":
            location, dim, thing = "below", 0, ax.xaxis
        elif axis_props['position'] == "top":
            location, dim, thing = "above", 0, ax.xaxis
        else:
            location, dim, thing = axis_props['position'], 1, ax.yaxis

        baxis = self.make_axis(thing, location, axis_props)

        if dim == 0:
            gridlines = ax.get_xgridlines()
        else:
            gridlines = ax.get_ygridlines()

        if gridlines:
            self.make_grid(baxis, dim, gridlines[0])
def close_axes(self, ax):
    "Complete the axes adding axes-dependent plot props"
    # NOTE(review): get_axis_bgcolor was removed in matplotlib 2.0
    # (get_facecolor is the modern spelling) -- this targets older mpl.
    background_fill = ax.get_axis_bgcolor()
    if background_fill == 'w':
        background_fill = 'white'
    self.plot.background_fill = background_fill
    if self.xkcd:
        # xkcd style: hand-drawn-looking title font.
        self.plot.title_text_font = "Comic Sans MS, Textile, cursive"
        self.plot.title_text_font_style = "bold"
        self.plot.title_text_color = "black"

    # Add a "marker" Glyph to help the plot.renderers splitting in the
    # GridPlot build (close_figure groups renderers at these markers).
    dummy_source = ColumnDataSource(data=dict(name="ax_end"))
    self.plot.renderers.append(GlyphRenderer(data_source=dummy_source, glyph=X()))
def open_legend(self, legend, props):
    """Build a bokeh Legend from the mpl legend's labels and handles."""
    lgnd = Legend(orientation="top_right")
    try:
        for label, obj in zip(props['labels'], props['handles']):
            # self.handles maps id(mpl artist) -> bokeh renderer.
            lgnd.legends.append((label, [self.handles[id(obj)]]))
        self.plot.add_layout(lgnd)
    except KeyError:
        # NOTE(review): a single unmapped handle silently drops the entire
        # legend (entries appended before the failure are discarded with
        # it). All-or-nothing -- confirm this is intentional.
        pass
def close_legend(self, legend):
    # Nothing to finalize: the Legend is fully built in open_legend.
    pass
def draw_line(self, data, coordinates, style, label, mplobj=None):
    """Given a mpl line2d instance create a Bokeh Line glyph.

    ``data`` is an (N, 2) array of xy points; ``style`` is the exported
    mplexporter style dict.  The glyph is registered in ``self.zorder`` and
    ``self.handles`` for later sorting and legend lookup.
    """
    _x = data[:, 0]
    if self.pd_obj is True:
        try:
            # Period x-axis: convert ordinals back to timestamps for Bokeh.
            x = [pd.Period(ordinal=int(i), freq=self.ax.xaxis.freq).to_timestamp() for i in _x]
        except AttributeError:  # we probably can make this one more intelligent later
            x = _x
    else:
        x = _x
    y = data[:, 1]
    if self.xkcd:
        x, y = xkcd_line(x, y)
    line = Line()
    source = ColumnDataSource()
    line.x = source.add(x)
    line.y = source.add(y)
    line.line_color = style['color']
    line.line_width = style['linewidth']
    line.line_alpha = style['alpha']
    # BUG FIX: the original compared with `is "none"` -- object identity
    # against a string literal, which only works by accident of CPython
    # interning (and warns on modern Python).  Use equality.
    line.line_dash = [] if style['dasharray'] == "none" else [int(i) for i in style['dasharray'].split(",")]  # str2list(int)
    # line.line_join = line2d.get_solid_joinstyle() # not in mplexporter
    # line.line_cap = cap_style_map[line2d.get_solid_capstyle()] # not in mplexporter
    if self.xkcd:
        line.line_width = 3
    r = self.plot.add_glyph(source, line)
    self.zorder[r._id] = style['zorder']
    self.handles[id(mplobj)] = r
def draw_markers(self, data, coordinates, style, label, mplobj=None):
    "Given a mpl line2d instance create a Bokeh Marker glyph."
    x = data[:, 0]
    y = data[:, 1]
    # Map matplotlib single-character marker codes to Bokeh marker classes.
    marker_map = {
        "o": Circle,
        "s": Square,
        "+": Cross,
        "^": Triangle,
        "v": InvertedTriangle,
        "x": X,
        "d": Diamond,
        "D": Diamond,
        "*": Asterisk,
    }
    # Not all matplotlib markers are currently handled; fall back to Circle if we encounter an
    # unhandled marker. See http://matplotlib.org/api/markers_api.html for a list of markers.
    try:
        marker = marker_map[style['marker']]()
    except KeyError:
        warnings.warn("Unable to handle marker: %s; defaulting to Circle" % style['marker'])
        marker = Circle()
    source = ColumnDataSource()
    marker.x = source.add(x)
    marker.y = source.add(y)
    # Copy stroke/fill styling straight from the exported mpl style dict.
    marker.line_color = style['edgecolor']
    marker.fill_color = style['facecolor']
    marker.line_width = style['edgewidth']
    marker.size = style['markersize']
    # One alpha value drives both stroke and fill.
    marker.fill_alpha = marker.line_alpha = style['alpha']
    r = self.plot.add_glyph(source, marker)
    # Record z-order and the mpl->bokeh handle for later sorting/legends.
    self.zorder[r._id] = style['zorder']
    self.handles[id(mplobj)] = r
def draw_path(self, data, coordinates, pathcodes, style,
              offset=None, offset_coordinates="data", mplobj=None):
    """Path drawing is not supported by this renderer; intentionally a no-op."""
    pass
def draw_text(self, text, position, coordinates, style,
              text_type=None, mplobj=None):
    "Given a mpl text instance create a Bokeh Text glyph."
    # mpl gives you the title and axes names as a text object (with specific
    # locations) inside the plot itself. That does not make sense inside
    # Bokeh, so we just skip the title and axes names from the conversion
    # and convert any other text.
    if text_type in ['xlabel', 'ylabel', 'title']:
        return
    # Only data-space text is supported; figure/axes-fraction text is skipped.
    if coordinates != 'data':
        return
    x, y = position
    text = Text(x=x, y=y, text=[text])
    alignment_map = {"center": "middle", "top": "top", "bottom": "bottom", "baseline": "bottom"}
    # "baseline" is not implemented in Bokeh, defaulting to "bottom".
    text.text_alpha = style['alpha']
    text.text_font_size = "%dpx" % style['fontsize']
    text.text_color = style['color']
    text.text_align = style['halign']
    text.text_baseline = alignment_map[style['valign']]
    text.angle = style['rotation']
    ## Using get_fontname() works, but it's oftentimes not available in the browser,
    ## so it's better to just use the font family here.
    #text.text_font = mplText.get_fontname()) not in mplexporter
    #text.text_font = mplText.get_fontfamily()[0] # not in mplexporter
    #text.text_font_style = fontstyle_map[mplText.get_fontstyle()] # not in mplexporter
    ## we don't really have the full range of font weights, but at least handle bold
    #if mplText.get_weight() in ("bold", "heavy"):
    #text.text_font_style = bold
    source = ColumnDataSource()
    r = self.plot.add_glyph(source, text)
    self.zorder[r._id] = style['zorder']
    self.handles[id(mplobj)] = r
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
    """Image drawing is not supported by this renderer; intentionally a no-op."""
    pass
def make_axis(self, ax, location, props):
    "Given a mpl axes instance, returns a Bokeh LinearAxis object."
    # TODO:
    # * handle log scaling
    # * map `labelpad` to `major_label_standoff`
    # * deal with minor ticks once BokehJS supports them
    # * handle custom tick locations once that is added to bokehJS
    tf = props['tickformat']
    if tf and any(isinstance(x, string_types) for x in tf):
        # String tick labels mean a categorical axis; the matching factor
        # range is installed on the plot as a side effect.
        laxis = CategoricalAxis(axis_label=ax.get_label_text())
        rng = FactorRange(factors=[str(x) for x in tf], offset=-1.0)
        if location in ["above", "below"]:
            self.plot.x_range = rng
        else:
            self.plot.y_range = rng
    else:
        if props['scale'] == "linear":
            laxis = LinearAxis(axis_label=ax.get_label_text())
        elif props['scale'] == "date":
            laxis = DatetimeAxis(axis_label=ax.get_label_text())
        # NOTE(review): any other scale (e.g. "log") leaves `laxis` unbound
        # and raises UnboundLocalError below -- confirm intended inputs.
    self.plot.add_layout(laxis, location)
    # First get the label properties by getting an mpl.Text object
    label = ax.get_label()
    self.text_props(label, laxis, prefix="axis_label_")
    # Set the tick properties (for now just turn off if necessary)
    # TODO: mirror tick properties
    if props['nticks'] == 0:
        laxis.major_tick_line_color = None
        laxis.minor_tick_line_color = None
        laxis.major_label_text_color = None
    # To get the tick label format, we look at the first of the tick labels
    # and assume the rest are formatted similarly.
    ticklabels = ax.get_ticklabels()
    if ticklabels:
        self.text_props(ticklabels[0], laxis, prefix="major_label_")
    #newaxis.bounds = axis.get_data_interval() # I think this is the right func...
    if self.xkcd:
        laxis.axis_line_width = 3
        laxis.axis_label_text_font = "Comic Sans MS, Textile, cursive"
        laxis.axis_label_text_font_style = "bold"
        laxis.axis_label_text_color = "black"
        laxis.major_label_text_font = "Comic Sans MS, Textile, cursive"
        laxis.major_label_text_font_style = "bold"
        laxis.major_label_text_color = "black"
    return laxis
def make_grid(self, baxis, dimension, gridline):
    """Attach a Bokeh Grid to the plot, styled after one mpl gridline.

    ``baxis`` supplies the ticker; ``dimension`` is 0 for x-grids, 1 for y.
    """
    grid = Grid(
        dimension=dimension,
        ticker=baxis.ticker,
        grid_line_color=gridline.get_color(),
        grid_line_width=gridline.get_linewidth(),
    )
    self.plot.add_layout(grid)
def make_line_collection(self, col):
    "Given a mpl collection instance create a Bokeh MultiLine glyph."
    # Each segment arrives as an (N, 2) array; transpose to per-axis lists.
    xydata = col.get_segments()
    t_xydata = [np.transpose(seg) for seg in xydata]
    xs = [t_xydata[x][0] for x in range(len(t_xydata))]
    ys = [t_xydata[x][1] for x in range(len(t_xydata))]
    if self.xkcd:
        # Distort every segment for the hand-drawn xkcd look.
        xkcd_xs = [xkcd_line(xs[i], ys[i])[0] for i in range(len(xs))]
        xkcd_ys = [xkcd_line(xs[i], ys[i])[1] for i in range(len(ys))]
        xs = xkcd_xs
        ys = xkcd_ys
    multiline = MultiLine()
    source = ColumnDataSource()
    multiline.xs = source.add(xs)
    multiline.ys = source.add(ys)
    # Styling (colors, widths, dashes) is applied by the shared helper.
    self.multiline_props(source, multiline, col)
    r = self.plot.add_glyph(source, multiline)
    self.zorder[r._id] = col.zorder
    self.handles[id(col)] = r
def make_poly_collection(self, col):
    """Given a mpl collection instance create a Bokeh Patches glyph."""
    # Flatten every path into its sub-polygons, one (x, y) pair per polygon.
    polys = [sub_poly.transpose()
             for path in col.get_paths()
             for sub_poly in path.to_polygons()]
    xs = [p[0] for p in polys]
    ys = [p[1] for p in polys]
    patches = Patches()
    source = ColumnDataSource()
    patches.xs = source.add(xs)
    patches.ys = source.add(ys)
    # Styling (fills, strokes, dashes) is applied by the shared helper.
    self.patches_props(source, patches, col)
    renderer = self.plot.add_glyph(source, patches)
    self.zorder[renderer._id] = col.zorder
    self.handles[id(col)] = renderer
def multiline_props(self, source, multiline, col):
    "Takes a mpl collection object to extract and set up some Bokeh multiline properties."
    # Colors/widths are cycled to match the number of line segments.
    colors = get_props_cycled(col, col.get_colors(), fx=lambda x: mpl.colors.rgb2hex(x))
    widths = get_props_cycled(col, col.get_linewidth())
    multiline.line_color = source.add(colors)
    multiline.line_width = source.add(widths)
    multiline.line_alpha = col.get_alpha()
    # First (and only inspected) linestyle entry: (offset, on/off pattern or None).
    offset = col.get_linestyle()[0][0]
    if not col.get_linestyle()[0][1]:
        on_off = []
    else:
        # NOTE(review): `map` is lazy on Python 3; tuple() below consumes it,
        # so this is safe either way.
        on_off = map(int,col.get_linestyle()[0][1])
    multiline.line_dash_offset = convert_dashes(offset)
    multiline.line_dash = list(convert_dashes(tuple(on_off)))
def patches_props(self, source, patches, col):
    """Copy fill/stroke/dash styling from an mpl collection onto a Patches glyph."""
    patches.fill_color = source.add(
        get_props_cycled(col, col.get_facecolors(), fx=lambda x: mpl.colors.rgb2hex(x)))
    patches.line_color = source.add(
        get_props_cycled(col, col.get_edgecolors(), fx=lambda x: mpl.colors.rgb2hex(x)))
    patches.line_width = source.add(get_props_cycled(col, col.get_linewidth()))
    alpha = col.get_alpha()
    patches.line_alpha = alpha
    patches.fill_alpha = alpha
    # First (and only inspected) linestyle entry: (offset, on/off pattern or None).
    dash_offset, dash_pattern = col.get_linestyle()[0]
    on_off = map(int, dash_pattern) if dash_pattern else []
    patches.line_dash_offset = convert_dashes(dash_offset)
    patches.line_dash = list(convert_dashes(tuple(on_off)))
def text_props(self, text, obj, prefix=""):
    """Copy font family/size/style from an mpl Text onto a Bokeh object.

    ``prefix`` selects the target property group, e.g. ``"axis_label_"``.
    """
    font = text.get_font_properties()
    values = {
        "text_font": font.get_family()[0],
        "text_font_size": "%fpt" % font.get_size_in_points(),
        "text_font_style": font.get_style(),
    }
    for key, value in values.items():
        setattr(obj, prefix + key, value)
| |
from __future__ import division, absolute_import, print_function
__all__ = ['column_stack', 'row_stack', 'dstack', 'array_split', 'split', 'hsplit',
'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
from numpy.core import hstack, vstack, atleast_3d
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    Apply a function to 1-D slices along the given axis.

    Execute `func1d(a, *args)` where `func1d` operates on 1-D arrays and `a`
    is a 1-D slice of `arr` along `axis`.

    Parameters
    ----------
    func1d : function
        This function should accept 1-D arrays. It is applied to 1-D
        slices of `arr` along the specified axis.
    axis : integer
        Axis along which `arr` is sliced.
    arr : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    kwargs: any
        Additional named arguments to `func1d`.

        .. versionadded:: 1.9.0

    Returns
    -------
    apply_along_axis : ndarray
        The output array. The shape of `outarr` is identical to the shape of
        `arr`, except along the `axis` dimension, where the length of `outarr`
        is equal to the size of the return value of `func1d`. If `func1d`
        returns a scalar `outarr` will have one fewer dimensions than `arr`.

    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.

    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([ 4.,  5.,  6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([ 2.,  5.,  8.])

    For a function that doesn't return a scalar, the number of dimensions in
    `outarr` is the same as `arr`.

    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
    >>> np.apply_along_axis(sorted, 1, b)
    array([[1, 7, 8],
           [3, 4, 9],
           [2, 5, 6]])

    """
    arr = asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis, nd))
    # `ind` is an (nd-1)-digit odometer over all axes except `axis`; `i` is
    # the full index object with a complete slice at position `axis`.
    ind = [0]*(nd-1)
    i = zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # Evaluate once up front to learn the output element's shape and dtype.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # Carry the odometer: reset overflowing digits right-to-left.
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # func1d returned an array: the output keeps arr's rank, with the
        # length along `axis` replaced by len(res).
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape, asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.

    ``func`` is called as ``res = func(a, axis)`` for each axis in `axes`
    in turn, feeding each result into the next call.  A result with one
    dimension fewer than the input has that dimension re-inserted before
    `axis`; any other shape raises ValueError.

    Parameters
    ----------
    func : function
        Must take two arguments, ``func(a, axis)``.
    a : array_like
        Input array.
    axes : array_like
        Axes over which `func` is applied; elements must be integers.

    Returns
    -------
    apply_over_axis : ndarray
        Same number of dimensions as `a`; the shape depends on `func`.

    Examples
    --------
    >>> np.apply_over_axes(np.sum, np.arange(24).reshape(2,3,4), [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    result = asarray(a)
    ndim = a.ndim
    if array(axes).ndim == 0:
        # A bare integer means a single axis.
        axes = (axes,)
    for ax in axes:
        ax = ax if ax >= 0 else ndim + ax
        res = func(result, ax)
        if res.ndim != result.ndim:
            # func collapsed the axis; put a length-one dimension back.
            res = expand_dims(res, ax)
        if res.ndim != result.ndim:
            raise ValueError("function is not returning "
                             "an array of the correct shape")
        result = res
    return result
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a new length-one axis at position `axis` of the result shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where the new axis is inserted; negative
        values count from the end of the *expanded* shape.

    Returns
    -------
    res : ndarray
        View of `a` with one more dimension.

    Examples
    --------
    >>> np.expand_dims(np.array([1, 2]), axis=0).shape
    (1, 2)
    >>> np.expand_dims(np.array([1, 2]), axis=1).shape
    (2, 1)
    """
    a = asarray(a)
    old_shape = a.shape
    if axis < 0:
        # Normalize against the length of the NEW shape (ndim + 1).
        axis = axis + len(old_shape) + 1
    new_shape = old_shape[:axis] + (1,) + old_shape[axis:]
    return a.reshape(new_shape)
# Backward-compatible alias: stacking 1-D arrays as rows is exactly `vstack`.
row_stack = vstack
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    1-D inputs are turned into columns; 2-D inputs are stacked as-is, just
    like with `hstack`.  All inputs must share the same first dimension.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack.

    Returns
    -------
    stacked : 2-D array

    Examples
    --------
    >>> np.column_stack((np.array((1,2,3)), np.array((2,3,4))))
    array([[1, 2],
           [2, 3],
           [3, 4]])
    """
    def as_2d(v):
        # Promote 1-D input to a 2-D row, then transpose it into a column.
        arr = array(v, copy=False, subok=True)
        if arr.ndim < 2:
            arr = array(arr, copy=False, subok=True, ndmin=2).T
        return arr

    return _nx.concatenate([as_2d(v) for v in tup], 1)
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along the third axis).

    Each input is promoted to at least 3-D first, so this is equivalent to
    ``np.concatenate(tup, axis=2)`` after `atleast_3d`.  Rebuilds arrays
    divided by `dsplit`.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays to stack; shapes must agree on all but the third axis.

    Returns
    -------
    stacked : ndarray

    See Also
    --------
    vstack, hstack, concatenate, dsplit

    Examples
    --------
    >>> np.dstack((np.array((1,2,3)), np.array((2,3,4))))
    array([[[1, 2],
            [2, 3],
            [3, 4]]])
    """
    promoted = list(map(atleast_3d, tup))
    return _nx.concatenate(promoted, 2)
def _replace_zero_by_x_arrays(sub_arys):
    # Normalize zero-size (or 0-d) sub-arrays in-place to 1-D empty arrays
    # of shape (0,), preserving dtype.  FIX: use `any` instead of the
    # `sometrue` alias, which was deprecated and removed in NumPy 2.0;
    # behavior is identical (`sometrue` was an alias of `any`).
    for i in range(len(sub_arys)):
        shape = _nx.shape(sub_arys[i])
        if len(shape) == 0 or _nx.any(_nx.equal(shape, 0)):
            sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
    return sub_arys
def array_split(ary,indices_or_sections,axis = 0):
    """
    Split an array into multiple sub-arrays.

    Please refer to the ``split`` documentation. The only difference
    between these functions is that ``array_split`` allows
    `indices_or_sections` to be an integer that does *not* equally
    divide the axis.

    See Also
    --------
    split : Split array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(8.0)
    >>> np.array_split(x, 3)
        [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.])]

    """
    try:
        # Prefer the ndarray fast path; fall back to len() for sequences.
        Ntotal = ary.shape[axis]
    except AttributeError:
        Ntotal = len(ary)
    try: # handle scalar case.
        Nsections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [Ntotal]
    except TypeError: #indices_or_sections is a scalar, not an array.
        Nsections = int(indices_or_sections)
        if Nsections <= 0:
            raise ValueError('number sections must be larger than 0.')
        # The first `extras` sections each get one extra element.
        Neach_section, extras = divmod(Ntotal, Nsections)
        section_sizes = [0] + \
                        extras * [Neach_section+1] + \
                        (Nsections-extras) * [Neach_section]
        div_points = _nx.array(section_sizes).cumsum()
    sub_arys = []
    # Rotate `axis` to the front, slice, then rotate back on each piece.
    sary = _nx.swapaxes(ary, axis, 0)
    for i in range(Nsections):
        st = div_points[i]; end = div_points[i+1]
        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
    # This "kludge" was introduced here to replace arrays shaped (0, 10)
    # or similar with an array shaped (0,).
    # There seems no need for this, so give a FutureWarning to remove later.
    if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:
        warnings.warn("in the future np.array_split will retain the shape of "
                      "arrays with a zero size, instead of replacing them by "
                      "`array([])`, which always has a shape of (0,).",
                      FutureWarning)
        sub_arys = _replace_zero_by_x_arrays(sub_arys)
    return sub_arys
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D array
        An integer N divides the array into N equal arrays along `axis`
        (raising ValueError when an equal split is impossible).  A 1-D
        array of sorted integers gives the split points along `axis`;
        indices beyond the axis length yield empty sub-arrays.
    axis : int, optional
        The axis along which to split, default is 0.

    Returns
    -------
    sub-arrays : list of ndarrays

    Raises
    ------
    ValueError
        If an integer `indices_or_sections` does not divide the axis
        length evenly.

    See Also
    --------
    array_split : Like `split`, but never raises on an unequal division.
    hsplit, vsplit, dsplit, concatenate, hstack, vstack, dstack

    Examples
    --------
    >>> np.split(np.arange(9.0), 3)
    [array([ 0.,  1.,  2.]), array([ 3.,  4.,  5.]), array([ 6.,  7.,  8.])]
    """
    try:
        len(indices_or_sections)
    except TypeError:
        # Scalar section count: the division must come out even.
        if ary.shape[axis] % indices_or_sections:
            raise ValueError('array split does not result in an equal division')
    return array_split(ary, indices_or_sections, axis)
def hsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays horizontally (column-wise).

    Equivalent to `split` with ``axis=1`` for arrays of two or more
    dimensions; 1-D arrays are split along their only axis.  Refer to the
    `split` documentation for `indices_or_sections`.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)[0]
    array([[  0.,   1.],
           [  4.,   5.],
           [  8.,   9.],
           [ 12.,  13.]])
    """
    if len(_nx.shape(ary)) == 0:
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    # 1-D input falls back to splitting along axis 0.
    axis = 1 if len(ary.shape) > 1 else 0
    return split(ary, indices_or_sections, axis)
def vsplit(ary, indices_or_sections):
    """
    Split an array into multiple sub-arrays vertically (row-wise).

    Equivalent to `split` with ``axis=0``; the array is always split along
    the first axis and must have at least two dimensions.  Refer to the
    `split` documentation for `indices_or_sections`.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.vsplit(x, 2)[0]
    array([[ 0.,  1.,  2.,  3.],
           [ 4.,  5.,  6.,  7.]])
    """
    ndim = len(_nx.shape(ary))
    if ndim < 2:
        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
    return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    Equivalent to `split` with ``axis=2``; the array must have at least
    three dimensions.  Refer to the `split` documentation for
    `indices_or_sections`.

    See Also
    --------
    split : Split an array into multiple sub-arrays of equal size.

    Examples
    --------
    >>> x = np.arange(8.0).reshape(2, 2, 2)
    >>> np.dsplit(x, 2)[0]
    array([[[ 0.],
            [ 2.]],
           [[ 4.],
            [ 6.]]])
    """
    if len(_nx.shape(ary)) >= 3:
        return split(ary, indices_or_sections, 2)
    raise ValueError('dsplit only works on arrays of 3 or more dimensions')
def get_array_prepare(*args):
    """Find the `__array_prepare__` wrapper with the highest priority.

    Sorts candidates by ``__array_priority__`` (ties broken toward the
    leftmost argument) and returns the winner, or None when no argument
    defines ``__array_prepare__``.
    """
    candidates = sorted(
        (getattr(x, '__array_priority__', 0), -i, x.__array_prepare__)
        for i, x in enumerate(args) if hasattr(x, '__array_prepare__'))
    return candidates[-1][-1] if candidates else None
def get_array_wrap(*args):
    """Find the `__array_wrap__` wrapper with the highest priority.

    Sorts candidates by ``__array_priority__`` (ties broken toward the
    leftmost argument) and returns the winner, or None when no argument
    defines ``__array_wrap__``.
    """
    candidates = sorted(
        (getattr(x, '__array_priority__', 0), -i, x.__array_wrap__)
        for i, x in enumerate(args) if hasattr(x, '__array_wrap__'))
    return candidates[-1][-1] if candidates else None
def kron(a, b):
    """
    Kronecker product of two arrays.

    Builds a composite array made of blocks of `b` scaled by the elements
    of `a`.  With ``a.shape == (r0,...,rN)`` and ``b.shape == (s0,...,sN)``
    (the smaller shape padded with leading ones), the result has shape
    ``(r0*s0, ..., rN*sN)`` and elements
    ``kron(a,b)[k0,...,kN] = a[i0,...,iN] * b[j0,...,jN]`` with
    ``kt = it * st + jt``.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([  5,   6,   7,  50,  60,  70, 500, 600, 700])
    """
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if nda == 0 or ndb == 0:
        # A scalar operand degenerates to an elementwise product.
        return _nx.multiply(a, b)
    a_shape = a.shape
    b_shape = b.shape
    # Force contiguity so the reshape of the outer product below is valid.
    if not a.flags.contiguous:
        a = reshape(a, a_shape)
    if not b.flags.contiguous:
        b = reshape(b, b_shape)
    # Pad the lower-rank shape with leading ones so both span `nd` axes.
    nd = ndb
    if ndb != nda:
        if ndb > nda:
            a_shape = (1,) * (ndb - nda) + a_shape
        else:
            b_shape = (1,) * (nda - ndb) + b_shape
            nd = nda
    # outer() gives every pairwise product; repeated concatenation folds the
    # interleaved axes back down to the Kronecker block layout.
    result = outer(a, b).reshape(a_shape + b_shape)
    axis = nd - 1
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    # Honor subclass wrapping hooks, highest-priority argument first.
    wrapper = get_array_prepare(a, b)
    if wrapper is not None:
        result = wrapper(result)
    wrapper = get_array_wrap(a, b)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
    """
    Construct an array by repeating A the number of times given by reps.

    If `reps` has length ``d``, the result has ``max(d, A.ndim)``
    dimensions: `A` is promoted with leading length-one axes when
    ``A.ndim < d``, and `reps` is padded with leading 1's when
    ``A.ndim > d``.

    Parameters
    ----------
    A : array_like
        The input array.
    reps : array_like
        The number of repetitions of `A` along each axis.

    Returns
    -------
    c : ndarray
        The tiled output array.

    See Also
    --------
    repeat : Repeat elements of an array.

    Examples
    --------
    >>> np.tile(np.array([0, 1, 2]), 2)
    array([0, 1, 2, 0, 1, 2])
    >>> np.tile(np.array([[1, 2], [3, 4]]), (2, 1))
    array([[1, 2],
           [3, 4],
           [1, 2],
           [3, 4]])
    """
    try:
        repetitions = tuple(reps)
    except TypeError:
        # Scalar reps means a single repetition count.
        repetitions = (reps,)
    d = len(repetitions)
    c = _nx.array(A, copy=False, subok=True, ndmin=d)
    if d < c.ndim:
        repetitions = (1,) * (c.ndim - d) + repetitions
    out_shape = list(c.shape)
    # `n` tracks the number of elements in the trailing axes not yet tiled.
    n = max(c.size, 1)
    for axis, nrep in enumerate(repetitions):
        if nrep != 1:
            # Flatten the leading axes, then repeat rows to tile this axis.
            c = c.reshape(-1, n).repeat(nrep, 0)
        dim_in = out_shape[axis]
        out_shape[axis] = dim_in * nrep
        n //= max(dim_in, 1)
    return c.reshape(out_shape)
| |
#!/usr/bin/env python
"""
STK500v2 protocol implementation for programming AVR chips.
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
"""
__copyright__ = "Copyright (C) 2013 David Braam - Released under terms of the AGPLv3 License"
import os, struct, sys, time
from serial import Serial
from serial import SerialException
from serial import SerialTimeoutException
import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
def __init__(self):
    """Set up an unconnected programmer; call connect() before use."""
    # Serial connection to the target board; None until connect() succeeds.
    self.serial = None
    # STK500v2 message sequence number (wraps at 0xFF in sendMessage).
    self.seq = 1
    # Last load address sent to the bootloader; -1 means "none yet".
    self.lastAddr = -1
    # Optional callable(progress, total) used by write/verify to report progress.
    self.progressCallback = None
def connect(self, port = '/dev/ttyMFD1', speed = 115200):
    """Open the serial port, hard-reset the target via GPIO, and enter
    STK500v2 programming mode.

    Raises ispBase.IspError when the port cannot be opened or the
    bootloader does not acknowledge programming mode.
    """
    if self.serial is not None:
        self.close()
    try:
        # NOTE(review): pyserial's write_timeout is expressed in seconds, so
        # 10000 looks like it was meant to be milliseconds -- confirm.
        self.serial = Serial(str(port), speed, timeout=1, write_timeout=10000)
    except SerialException as e:
        raise ispBase.IspError("Failed to open serial port")
    except:
        raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
    self.seq = 1
    #Reset the controller by pulsing its reset line through GPIO pin 36.
    import mraa
    ResetPin = mraa.Gpio(36)
    ResetPin.dir(mraa.DIR_OUT)
    ResetPin.write(0)
    time.sleep(0.1)
    ResetPin.write(1)
    time.sleep(0.1)
    self.serial.flushInput()
    self.serial.flushOutput()
    # 0x10 = enter-programming-mode command; expect echo [0x10, 0x00] (OK).
    if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
        self.close()
        raise ispBase.IspError("Failed to enter programming mode")
    # Initialize the load address to 0 (with the high/extension bit set).
    self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
    # Probe the optional checksum command (0xEE); status 0x00 means supported.
    if self.sendMessage([0xEE])[1] == 0x00:
        self._has_checksum = True
    else:
        self._has_checksum = False
    # Programming operations can be slow; relax the read timeout.
    self.serial.timeout = 5
def close(self):
    """Close the serial port if it is open; safe to call repeatedly."""
    if self.serial is not None:
        self.serial.close()
        self.serial = None
#Leave ISP does not reset the serial port, only resets the device, and returns the serial port after disconnecting it from the programming interface.
# This allows you to use the serial port without opening it again.
def leaveISP(self):
    """Leave programming mode and hand back the still-open serial port.

    Returns the Serial object (or None when not connected) so the caller
    can keep using the port without reopening it.  Raises ispBase.IspError
    if the bootloader refuses to leave programming mode.
    """
    if self.serial is not None:
        # 0x11 = leave-programming-mode; expect echo [0x11, 0x00] on success.
        if self.sendMessage([0x11]) != [0x11, 0x00]:
            raise ispBase.IspError("Failed to leave programming mode")
        ret = self.serial
        self.serial = None
        return ret
    return None
def isConnected(self):
    """Return True when a serial connection to the target is open."""
    return self.serial is not None
def hasChecksumFunction(self):
    """Return True if the bootloader supports the fast checksum-verify
    command (detected during connect())."""
    return self._has_checksum
def sendISP(self, data):
    """Send a raw 4-byte ISP (SPI) command to the target and return the
    4 response bytes (reply bytes 2..5)."""
    recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
    return recv[2:6]
def writeFlash(self, flashData):
    """Program ``flashData`` (a list of byte values) into the target's
    flash, one bootloader page at a time, reporting progress through
    ``progressCallback`` when it is set."""
    #Set load addr to 0, in case we have more then 64k flash we need to enable the address extension
    pageSize = self.chip['pageSize'] * 2
    flashSize = pageSize * self.chip['pageCount']
    if flashSize > 0xFFFF:
        self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
    else:
        self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
    # NOTE(review): Python 2 integer division; under Python 3 `/` would
    # yield a float and break xrange below.
    loadCount = (len(flashData) + pageSize - 1) / pageSize
    # step = loadCount/20
    for i in xrange(0, loadCount):
        # 0x13 = program-flash page command, followed by the page payload.
        recv = self.sendMessage([0x13, pageSize >> 8, pageSize & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flashData[(i * pageSize):(i * pageSize + pageSize)])
        if self.progressCallback is not None:
            if self._has_checksum:
                self.progressCallback(i + 1, loadCount)
            else:
                # Without checksum support, verify reads everything back,
                # so programming is only half of the total work.
                self.progressCallback(i + 1, loadCount*2)
        # if i % step ==0:
        # print '#'
def verifyFlash(self, flashData):
if self._has_checksum:
self.sendMessage([0x06, 0x00, (len(flashData) >> 17) & 0xFF, (len(flashData) >> 9) & 0xFF, (len(flashData) >> 1) & 0xFF])
res = self.sendMessage([0xEE])
checksum_recv = res[2] | (res[3] << 8)
checksum = 0
for d in flashData:
checksum += d
checksum &= 0xFFFF
if hex(checksum) != hex(checksum_recv):
raise ispBase.IspError('Verify checksum mismatch: 0x%x != 0x%x' % (checksum & 0xFFFF, checksum_recv))
else:
#Set load addr to 0, in case we have more then 64k flash we need to enable the address extension
flashSize = self.chip['pageSize'] * 2 * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + 0xFF) / 0x100
step = loadCount/20
for i in xrange(0, loadCount):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progressCallback is not None:
self.progressCallback(loadCount + i + 1, loadCount*2)
for j in xrange(0, 0x100):
if i * 0x100 + j < len(flashData) and flashData[i * 0x100 + j] != recv[j]:
raise ispBase.IspError('Verify error at: 0x%x' % (i * 0x100 + j))
if i % step ==0:
print '#'
	def sendMessage(self, data):
		""" Frame data as an STK500v2 message, send it, and return the decoded reply.

		Wire format: 0x1B <seq> <len_hi> <len_lo> 0x0E <payload> <xor-checksum>.
		NOTE: Python 2 only -- iterating a str yields 1-char strings, hence ord(c).
		Raises ispBase.IspError when the serial write times out.
		"""
		# print "*",
		message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
		for c in data:
			message += struct.pack(">B", c)
		checksum = 0
		for c in message:
			checksum ^= ord(c)
		message += struct.pack(">B", checksum)
		try:
			self.serial.write(message)
			self.serial.flush()
		except SerialTimeoutException:
			raise ispBase.IspError('Serial send timeout')
		# sequence number wraps at 8 bits
		self.seq = (self.seq + 1) & 0xFF
		return self.recvMessage()
	def recvMessage(self):
		""" Receive and decode one STK500v2 response frame.

		Small state machine over the wire format
		0x1B <seq> <size_hi> <size_lo> 0x0E <data...> <xor-checksum>;
		returns the payload as a list of ints.
		Raises ispBase.IspError on a serial read timeout.
		"""
		state = 'Start'
		checksum = 0
		while True:
			s = self.serial.read()
			if len(s) < 1:
				raise ispBase.IspError("Timeout")
			b = struct.unpack(">B", s)[0]
			checksum ^= b
			#print(hex(b))
			if state == 'Start':
				if b == 0x1B:
					state = 'GetSeq'
					checksum = 0x1B
			elif state == 'GetSeq':
				state = 'MsgSize1'
			elif state == 'MsgSize1':
				msgSize = b << 8
				state = 'MsgSize2'
			elif state == 'MsgSize2':
				msgSize |= b
				state = 'Token'
			elif state == 'Token':
				# bad token byte: resynchronise by hunting for a new start byte
				if b != 0x0E:
					state = 'Start'
				else:
					state = 'Data'
					data = []
			elif state == 'Data':
				data.append(b)
				if len(data) == msgSize:
					state = 'Checksum'
			elif state == 'Checksum':
				# running xor over the whole frame must cancel to 0
				if checksum != 0:
					state = 'Start'
				else:
					return data
def runProgrammer(port, filename):
	""" Run an STK500v2 program on serial port 'port' and write 'filename' into flash. """
	# NOTE(review): close() is skipped if programChip() raises -- consider try/finally.
	programmer = Stk500v2()
	programmer.connect(port = port)
	programmer.programChip(intelHex.readHex(filename))
	programmer.close()
def main():
	""" Entry point to call the stk500v2 programmer from the commandline. """
	import threading
	# Hard-coded serial port; presumably the board's built-in UART -- TODO confirm.
	port = '/dev/ttyMFD1'
	# Flash the hex file given as argv[1] in a background thread so the
	# caller is not blocked while programming runs.
	threading.Thread(target=runProgrammer, args=(port,sys.argv[1])).start()
	#threading.Thread(target=runProgrammer, args=(port,"./MarlinWitbox.hex")).start()
	# programmer = Stk500v2()
	# programmer.connect(port = sys.argv[1])
	# programmer.programChip(intelHex.readHex(sys.argv[2]))
	# sys.exit(1)
# Allow running this module directly as a command-line flasher.
if __name__ == '__main__':
	main()
| |
#!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/process -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_process
short_description: Module to process openshift templates
description:
- Process openshift templates programmatically.
options:
state:
description:
- State has a few different meanings when it comes to process.
- state: present - This state runs an `oc process <template>`. When used in
- conjunction with 'create: True' the process will be piped to | oc create -f
- state: absent - will remove a template
- state: list - will perform an `oc get template <template_name>`
default: present
choices: ["present", "absent", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
template_name:
description:
- Name of the openshift template that is being processed.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the template lives.
required: false
default: default
aliases: []
content:
description:
- Template content that will be processed.
required: false
default: None
aliases: []
params:
description:
- A list of parameters that will be inserted into the template.
required: false
default: None
aliases: []
create:
description:
- Whether or not to create the template after being processed. e.g. oc process | oc create -f -
required: False
default: False
aliases: []
reconcile:
description:
- Whether or not to attempt to determine if there are updates or changes in the incoming template.
default: true
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: process the cloud volume provisioner template with variables
oc_process:
namespace: openshift-infra
template_name: online-volume-provisioner
create: True
params:
PLAT: rhel7
register: processout
run_once: true
- debug: var=processout
'''
# -*- -*- -*- End included fragment: doc/process -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# pylint: disable=undefined-variable,missing-docstring
# noqa: E301,E302
class YeditException(Exception):
    ''' Exception class for Yedit.

    Raised for invalid key paths, type mismatches while walking a path,
    and file load/parse problems.
    '''
    pass
# pylint: disable=too-many-public-methods
class Yedit(object):
    ''' Class to modify yaml files.

    Values are addressed with separator-delimited key paths such as
    "a.b" or "a.b[0]"; the separator defaults to '.' and may be changed
    via the `separator` property.
    '''
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self,
                 filename=None,
                 content=None,
                 content_type='yaml',
                 separator='.',
                 backup=False):
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, inc_sep):
        ''' setter method for separator

        BUGFIX: this was previously declared as "def separator(self)" with
        no value parameter and returned instead of assigning, so any
        "yedit.separator = x" assignment raised TypeError.
        '''
        self._separator = inc_sep
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key allowing the appropriate separator'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and \
           isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # walk down to the parent of the entry being removed
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a#b
            return c

        Intermediate dicts are created as needed; raises YeditException
        when the existing structure conflicts with the requested path.
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                if isinstance(data, dict) and dict_key in data and data[dict_key]:  # noqa: E501
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    raise YeditException("Unexpected item type found while going through key " +
                                         "path: {} (at key: {})".format(key, dict_key))
                data[dict_key] = {}
                data = data[dict_key]
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                raise YeditException("Unexpected item type found while going through key path: {}".format(key))
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:  # noqa: E501
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        # didn't add/update to an existing list, nor add/update key to a dict
        # so we must have been provided some syntax like a.b.c[<int>] = "data" for a
        # non-existent array
        else:
            raise YeditException("Error adding to object at path: {}".format(key))
        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}}
            key = a.b
            return c
        '''
        if key == '':
            pass
        elif (not (key and Yedit.valid_key(key, sep)) and
              isinstance(data, (list, dict))):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif (arr_ind and isinstance(data, list) and
                  int(arr_ind) <= len(data) - 1):
                data = data[int(arr_ind)]
            else:
                return None
        return data
    @staticmethod
    def _write(filename, contents):
        ''' Actually write the file contents to disk. This helps with mocking. '''
        # write to a sibling temp file then rename for an atomic-ish replace
        tmp_filename = filename + '.yedit'
        with open(tmp_filename, 'w') as yfd:
            yfd.write(contents)
        os.rename(tmp_filename, filename)
    def write(self):
        ''' write to file '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # Try to set format attributes if supported
        try:
            self.yaml_dict.fa.set_block_style()
        except AttributeError:
            pass
        # Try to use RoundTripDumper if supported.
        try:
            Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except AttributeError:
            Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
        return (True, self.yaml_dict)
    def read(self):
        ''' read from file '''
        # check if it exists
        if self.filename is None or not self.file_exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents
    def file_exists(self):
        ''' return whether file exists '''
        if os.path.exists(self.filename):
            return True
        return False
    def load(self, content_type='yaml'):
        ''' return yaml file '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
                # Try to use RoundTripLoader if supported.
                try:
                    self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
                except AttributeError:
                    self.yaml_dict = yaml.safe_load(contents)
                # Try to set format attributes if supported
                try:
                    self.yaml_dict.fa.set_block_style()
                except AttributeError:
                    pass
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)
        return self.yaml_dict
    def get(self, key):
        ''' get a specified key'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None
        return entry
    def pop(self, path, key_or_item):
        ''' remove a key, value pair from a dict or an item for a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    def delete(self, path):
        ''' remove path from a dict'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            return value in entry
        return entry == value
    def append(self, path, value):
        '''append value to a list'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # AUDIT:maybe-no-member makes sense due to loading data from
        # a serialized format.
        # pylint: disable=maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in ' +
                                     'dict with non-dict type. value=[%s] [%s]' % (value, type(value)))  # noqa: E501
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # AUDIT:maybe-no-member makes sense due to fuzzy types
            # pylint: disable=maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put path, value into a dict '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work
        # Try to use ruamel.yaml and fallback to pyyaml
        try:
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                      default_flow_style=False),
                                 yaml.RoundTripLoader)
        except AttributeError:
            tmp_copy = copy.deepcopy(self.yaml_dict)
        # set the format attributes if available
        try:
            tmp_copy.fa.set_block_style()
        except AttributeError:
            pass
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' create a yaml file '''
        if not self.file_exists():
            # deepcopy didn't work
            # Try to use ruamel.yaml and fallback to pyyaml
            try:
                tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
                                                          default_flow_style=False),
                                     yaml.RoundTripLoader)
            except AttributeError:
                tmp_copy = copy.deepcopy(self.yaml_dict)
            # set the format attributes if available
            try:
                tmp_copy.fa.set_block_style()
            except AttributeError:
                pass
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    @staticmethod
    def get_curr_value(invalue, val_type):
        '''return the current value'''
        if invalue is None:
            return None
        curr_value = invalue
        if val_type == 'yaml':
            curr_value = yaml.load(invalue)
        elif val_type == 'json':
            curr_value = json.loads(invalue)
        return curr_value
    @staticmethod
    def parse_value(inc_value, vtype=''):
        '''determine value type passed'''
        true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
                      'on', 'On', 'ON', ]
        false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
                       'off', 'Off', 'OFF']
        # It came in as a string but you didn't specify value_type as string
        # we will convert to bool if it matches any of the above cases
        if isinstance(inc_value, str) and 'bool' in vtype:
            if inc_value not in true_bools and inc_value not in false_bools:
                raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
                                     % (inc_value, vtype))
        elif isinstance(inc_value, bool) and 'str' in vtype:
            inc_value = str(inc_value)
        # If vtype is not str then go ahead and attempt to yaml load it.
        if isinstance(inc_value, str) and 'str' not in vtype:
            try:
                inc_value = yaml.load(inc_value)
            except Exception:
                raise YeditException('Could not determine type of incoming ' +
                                     'value. value=[%s] vtype=[%s]'
                                     % (type(inc_value), vtype))
        return inc_value
    # pylint: disable=too-many-return-statements,too-many-branches
    @staticmethod
    def run_ansible(module):
        '''perform the idempotent crud operations'''
        yamlfile = Yedit(filename=module.params['src'],
                         backup=module.params['backup'],
                         separator=module.params['separator'])
        if module.params['src']:
            rval = yamlfile.load()
            if yamlfile.yaml_dict is None and \
               module.params['state'] != 'present':
                # NOTE(review): the '%s' below is never interpolated and
                # "is has" is a typo -- message preserved verbatim.
                return {'failed': True,
                        'msg': 'Error opening file [%s]. Verify that the ' +
                               'file exists, that it is has correct' +
                               ' permissions, and is valid yaml.'}
        if module.params['state'] == 'list':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content
            if module.params['key']:
                rval = yamlfile.get(module.params['key']) or {}
            return {'changed': False, 'result': rval, 'state': "list"}
        elif module.params['state'] == 'absent':
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                yamlfile.yaml_dict = content
            if module.params['update']:
                rval = yamlfile.pop(module.params['key'],
                                    module.params['value'])
            else:
                rval = yamlfile.delete(module.params['key'])
            if rval[0] and module.params['src']:
                yamlfile.write()
            return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
        elif module.params['state'] == 'present':
            # check if content is different than what is in the file
            if module.params['content']:
                content = Yedit.parse_value(module.params['content'],
                                            module.params['content_type'])
                # We had no edits to make and the contents are the same
                if yamlfile.yaml_dict == content and \
                   module.params['value'] is None:
                    return {'changed': False,
                            'result': yamlfile.yaml_dict,
                            'state': "present"}
                yamlfile.yaml_dict = content
            # we were passed a value; parse it
            if module.params['value']:
                value = Yedit.parse_value(module.params['value'],
                                          module.params['value_type'])
                key = module.params['key']
                if module.params['update']:
                    # pylint: disable=line-too-long
                    curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']),  # noqa: E501
                                                      module.params['curr_value_format'])  # noqa: E501
                    rval = yamlfile.update(key, value, module.params['index'], curr_value)  # noqa: E501
                elif module.params['append']:
                    rval = yamlfile.append(key, value)
                else:
                    rval = yamlfile.put(key, value)
                if rval[0] and module.params['src']:
                    yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1], 'state': "present"}
            # no edits to make
            if module.params['src']:
                # pylint: disable=redefined-variable-type
                rval = yamlfile.write()
                return {'changed': rval[0],
                        'result': rval[1],
                        'state': "present"}
        return {'failed': True, 'msg': 'Unkown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
    '''Exception class for openshiftcli.

    Base error for failures raised by OpenShiftCLI subclasses.
    '''
    pass
# Extra locations searched for the oc client beyond $PATH.
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
    ''' Return the path of the oc client binary.

    Searches $PATH plus a couple of extra locations, because ansible/sudo
    may strip /usr/local/bin from $PATH
    (https://github.com/openshift/openshift-ansible/issues/3410).
    Falls back to the bare name 'oc' when nothing is found.
    '''
    search_paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
    binary = 'oc'
    try:
        # Python 3: shutil.which honours an explicit path argument.
        located = shutil.which(binary, path=os.pathsep.join(search_paths))
        if located is not None:
            binary = located
    except AttributeError:
        # Python 2 has no shutil.which -- do a naive existence scan.
        for directory in search_paths:
            candidate = os.path.join(directory, binary)
            if os.path.exists(candidate):
                binary = candidate
                break
    return binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools.

    Every helper ultimately shells out to the oc binary (or `oc adm`)
    via openshift_cmd(); higher-level modules subclass or compose this.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI '''
        self.namespace = namespace
        self.verbose = verbose
        # work on a private copy of the kubeconfig so concurrent runs
        # cannot clobber the shared admin file
        self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
        self.all_namespaces = all_namespaces
        self.oc_binary = locate_oc_binary()
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            return res
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # only call `oc replace` when at least one put actually changed a value
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''replace the current object with oc replace'''
        cmd = ['replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''create a temporary file and then call oc create on it'''
        fname = Utils.create_tmpfile(rname + '-')
        yed = Yedit(fname, content=content)
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''call oc create on a filename'''
        return self.openshift_cmd(['create', '-f', fname])
    def _delete(self, resource, rname, selector=None):
        '''call oc delete on a resource'''
        cmd = ['delete', resource, rname]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):  # noqa: E501
        '''process a template

           template_name: the name of the template to process
           create: whether to send to oc create after processing
           params: the parameters for the template
           template_data: the incoming template's data; instead of a file
        '''
        cmd = ['process']
        if template_data:
            # feed the template text over stdin instead of naming it
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # write the processed objects to a tmp file and `oc create` them
        fname = Utils.create_tmpfile(template_name + '-')
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        elif rname:
            cmd.append(rname)
        cmd.extend(['-o', 'json'])
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
        if 'items' in rval:
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node scheduable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')  # noqa: E501
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm list pods

            node: the node in which to list pods
            selector: the label selector filter if provided
            pod_selector: the pod selector filter if provided
        '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    # pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _version(self):
        ''' return the openshift version'''
        return self.openshift_cmd(['version'], output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    def _run(self, cmds, input_data):
        ''' Actually executes the command. This makes mocking easier. '''
        curr_env = os.environ.copy()
        # point oc at our private kubeconfig copy
        curr_env.update({'KUBECONFIG': self.kubeconfig})
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=curr_env)
        stdout, stderr = proc.communicate(input_data)
        return proc.returncode, stdout.decode(), stderr.decode()
    # pylint: disable=too-many-arguments,too-many-branches
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc '''
        cmds = [self.oc_binary]
        if oadm:
            cmds.append('adm')
        cmds.extend(cmd)
        if self.all_namespaces:
            cmds.extend(['--all-namespaces'])
        # NOTE(review): 'emtpy' looks like a long-standing typo for 'empty';
        # preserved verbatim because callers may rely on the literal match.
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501
            cmds.extend(['-n', self.namespace])
        rval = {}
        results = ''
        err = None
        if self.verbose:
            print(' '.join(cmds))
        try:
            returncode, stdout, stderr = self._run(cmds, input_data)
        except OSError as ex:
            # binary missing / not executable: synthesize a failed result
            returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
        rval = {"returncode": returncode,
                "results": results,
                "cmd": ' '.join(cmds)}
        if returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # tolerate empty/non-JSON stdout; anything else re-raises
                        if "No JSON object could be decoded" in err.args:
                            err = err.args
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print("STDOUT: {0}".format(stdout))
                print("STDERR: {0}".format(stderr))
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds})
        else:
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {}})
        return rval
class Utils(object):
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
    @staticmethod
    def create_tmp_file_from_contents(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents'''
        tmp = Utils.create_tmpfile(prefix=rname)
        if ftype == 'yaml':
            # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
            # pylint: disable=no-member
            if hasattr(yaml, 'RoundTripDumper'):
                Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
            else:
                Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
        elif ftype == 'json':
            Utils._write(tmp, json.dumps(data))
        else:
            # any other ftype: data is assumed to already be serialized text
            Utils._write(tmp, data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [tmp])
        return tmp
    @staticmethod
    def create_tmpfile_copy(inc_file):
        '''create a temporary copy of a file.

        Used e.g. to give each run its own private kubeconfig.
        '''
        tmpfile = Utils.create_tmpfile('lib_openshift-')
        Utils._write(tmpfile, open(inc_file).read())
        # Cleanup the tmpfile
        atexit.register(Utils.cleanup, [tmpfile])
        return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import yum
yum_base = yum.YumBase()
if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
return True
return False
    # Disabling too-many-branches. This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

            Iterates the keys of result_def and verifies that user_def holds an
            equal value for each, recursing through nested dicts and
            element-wise through lists of dicts.  'metadata' and 'status'
            (plus any skip_keys) are ignored at every nesting level.

            user_def: definition supplied by the caller/module params
            result_def: definition returned by the cluster query
            skip_keys: additional keys to ignore at every level
            debug: when True, print the reason for any mismatch
            Returns True when the definitions match, False otherwise.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if key not in user_def:
                    if debug:
                        print('User data does not have key [%s]' % key)
                        print('User data: %s' % user_def)
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print("List lengths are not equal.")
                        print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
                        print("user_def: %s" % user_def[key])
                        print("value: %s" % value)
                    return False
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print('sending list - list')
                            print(type(values[0]))
                            print(type(values[1]))
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            # NOTE: this print is unconditional (not gated on debug)
                            print('list compare returned false')
                            return False
                    # non-dict elements: fall back to comparing the whole lists
                    elif value != user_def[key]:
                        if debug:
                            print('value should be identical')
                            print(user_def[key])
                            print(value)
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if key not in user_def:
                    if debug:
                        print("user_def does not have key [%s]" % key)
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print("dict returned false: not instance of dict")
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print("keys are not equal in dict")
                        print(user_values)
                        print(api_values)
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print("dict returned false")
                        print(result)
                    return False
            # Verify each key, value pair is the same
            else:
                if key not in user_def or value != user_def[key]:
                    if debug:
                        print("value not equal; user_def does not have key")
                        print(key)
                        print(value)
                        if key in user_def:
                            print(user_def[key])
                    return False
        if debug:
            print('returning true')
        return True
class OpenShiftCLIConfig(object):
    '''Generic config holder that renders its options as CLI flags.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        '''Dictionary of option name -> {include, value} entries.'''
        return self._options
    def to_option_list(self):
        '''Return all included options as a list of CLI parameter strings.'''
        return self.stringify()
    def stringify(self):
        '''Render each included option with a truthy (or integer) value as --key=value.'''
        params = []
        for key, data in self.config_options.items():
            if not data['include']:
                continue
            value = data['value']
            # integers (including 0) are always emitted; other falsy values are skipped
            if value or isinstance(value, int):
                params.append('--%s=%s' % (key.replace('_', '-'), value))
        return params
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_process.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCProcess(OpenShiftCLI):
    ''' Class to wrap the oc command line tools.

        Wraps "oc process" for a named template (or raw template content) in a
        namespace, and supports idempotent creation/update of the rendered
        objects via run_ansible().
    '''
    # pylint allows 5. we need 6
    # pylint: disable=too-many-arguments
    def __init__(self,
                 namespace,
                 tname=None,
                 params=None,
                 create=False,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 tdata=None,
                 verbose=False):
        ''' Constructor for OpenshiftOC

            namespace: project in which to process the template
            tname: name of an existing template object
            params: dict of template parameters passed to oc process
            create: default for whether processed objects get created
            tdata: raw template content (used instead of a named template)
        '''
        super(OCProcess, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
        self.name = tname
        self.data = tdata
        self.params = params
        self.create = create
        # cache for the processed template items; filled lazily by `template`
        self._template = None
    @property
    def template(self):
        '''template property: lazily process the template and cache its items'''
        if self._template is None:
            results = self._process(self.name, False, self.params, self.data)
            if results['returncode'] != 0:
                raise OpenShiftCLIError('Error processing template [%s].' % self.name)
            self._template = results['results']['items']
        return self._template
    def get(self):
        '''get the template; a missing template is normalized into a
           zero-returncode result with exists=False and empty results'''
        results = self._get('template', self.name)
        if results['returncode'] != 0:
            # Does the template exist??
            if 'not found' in results['stderr']:
                results['returncode'] = 0
                results['exists'] = False
                results['results'] = []
        return results
    def delete(self, obj):
        '''delete a resource by kind and metadata.name'''
        return self._delete(obj['kind'], obj['metadata']['name'])
    def create_obj(self, obj):
        '''create a resource from its (already rendered) content'''
        return self._create_from_content(obj['metadata']['name'], obj)
    def process(self, create=None):
        '''process a template

            create: optional override of the instance-level create flag.
        '''
        do_create = False
        if create != None:
            do_create = create
        else:
            do_create = self.create
        return self._process(self.name, do_create, self.params, self.data)
    def exists(self):
        '''return whether the template exists'''
        # Always return true if we're being passed template data
        if self.data:
            return True
        t_results = self._get('template', self.name)
        if t_results['returncode'] != 0:
            # Does the template exist??
            if 'not found' in t_results['stderr']:
                return False
            else:
                raise OpenShiftCLIError('Something went wrong. %s' % t_results)
        return True
    def needs_update(self):
        '''attempt to process the template and return it for comparison with oc objects

            Returns a list of (obj, needs_update) tuples, one per rendered object.
        '''
        obj_results = []
        for obj in self.template:
            # build a list of types to skip
            # (fields the cluster autogenerates; they must not be compared)
            skip = []
            if obj['kind'] == 'ServiceAccount':
                skip.extend(['secrets', 'imagePullSecrets'])
            if obj['kind'] == 'BuildConfig':
                skip.extend(['lastTriggeredImageID'])
            if obj['kind'] == 'ImageStream':
                skip.extend(['generation'])
            if obj['kind'] == 'DeploymentConfig':
                skip.extend(['lastTriggeredImage'])
            # fetch the current object
            curr_obj_results = self._get(obj['kind'], obj['metadata']['name'])
            if curr_obj_results['returncode'] != 0:
                # Does the template exist??
                if 'not found' in curr_obj_results['stderr']:
                    # object missing entirely -> needs creating
                    obj_results.append((obj, True))
                    continue
            # check the generated object against the existing object
            if not Utils.check_def_equal(obj, curr_obj_results['results'][0], skip_keys=skip):
                obj_results.append((obj, True))
                continue
            obj_results.append((obj, False))
        return obj_results
    # pylint: disable=too-many-return-statements
    @staticmethod
    def run_ansible(params, check_mode):
        '''run the ansible idempotent code

            params: the Ansible module params dict
            check_mode: True when running under --check
            Returns a result dict suitable for exit_json/fail_json.
        '''
        ocprocess = OCProcess(params['namespace'],
                              params['template_name'],
                              params['params'],
                              params['create'],
                              kubeconfig=params['kubeconfig'],
                              tdata=params['content'],
                              verbose=params['debug'])
        state = params['state']
        api_rval = ocprocess.get()
        if state == 'list':
            if api_rval['returncode'] != 0:
                return {"failed": True, "msg" : api_rval}
            return {"changed" : False, "results": api_rval, "state": "list"}
        elif state == 'present':
            if check_mode and params['create']:
                return {"changed": True, 'msg': "CHECK_MODE: Would have processed template."}
            if not ocprocess.exists() or not params['reconcile']:
            #FIXME: this code will never get run in a way that succeeds when
            # module.params['reconcile'] is true. Because oc_process doesn't
            # create the actual template, the check of ocprocess.exists()
            # is meaningless. Either it's already here and this code
            # won't be run, or this code will fail because there is no
            # template available for oc process to use. Have we conflated
            # the template's existence with the existence of the objects
            # it describes?
                # Create it here
                api_rval = ocprocess.process()
                if api_rval['returncode'] != 0:
                    return {"failed": True, "msg": api_rval}
                if params['create']:
                    return {"changed": True, "results": api_rval, "state": "present"}
                return {"changed": False, "results": api_rval, "state": "present"}
            # verify results
            update = False
            rval = []
            all_results = ocprocess.needs_update()
            for obj, status in all_results:
                if status:
                    # stale object: replace it with the freshly rendered one
                    ocprocess.delete(obj)
                    results = ocprocess.create_obj(obj)
                    results['kind'] = obj['kind']
                    rval.append(results)
                    update = True
            if not update:
                return {"changed": update, "results": api_rval, "state": "present"}
            for cmd in rval:
                if cmd['returncode'] != 0:
                    return {"failed": True, "changed": update, "results": rval, "state": "present"}
            return {"changed": update, "results": rval, "state": "present"}
# -*- -*- -*- End included fragment: class/oc_process.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_process.py -*- -*- -*-
def main():
    '''
    ansible oc module for processing templates
    '''
    argument_spec = dict(
        kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
        state=dict(default='present', type='str', choices=['present', 'list']),
        debug=dict(default=False, type='bool'),
        namespace=dict(default='default', type='str'),
        template_name=dict(default=None, type='str'),
        content=dict(default=None, type='str'),
        params=dict(default=None, type='dict'),
        create=dict(default=False, type='bool'),
        reconcile=dict(default=True, type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    result = OCProcess.run_ansible(module.params, module.check_mode)
    if 'failed' in result:
        module.fail_json(**result)
    module.exit_json(**result)
# Run the module entry point when invoked directly by Ansible.
if __name__ == '__main__':
    main()
# -*- -*- -*- End included fragment: ansible/oc_process.py -*- -*- -*-
| |
#!/usr/bin/python
import argparse
import collections
import ctypes
import math
import opcodes
import sys
class ParseError(Exception):
    """Raised when the assembler encounters invalid input."""
    def __init__(self, value):
        # Keep the message on .value (Exception.args intentionally left empty,
        # matching the original behavior).
        self.value = value
    def __str__(self):
        return '%s' % self.value
class Parser:
__MAGIC_JUMP = 'FIXME'
def __init__(self, data):
self.data = data
self.code = ''
self.output = collections.OrderedDict({})
self.debug = True
self.labels = {}
self.line = 0
self.regmap = {}
self.postdata = {}
self.opers = {
'==': 0,
'<': 1,
'>': 2,
'<=': 3,
'>=': 4,
'!=': 5,
'&&': 8,
'||': 9,
'^': 14,
'%': 15
}
def hexstr(self, s):
"""
>>> p = Parser('')
>>> p.hexstr('1234')
'0x31 0x32 0x33 0x34'
>>> p.hexstr('a')
'0x61'
>>> p.hexstr('\\0')
'0x0'
>>> p.hexstr('\\1')
'0x1'
>>> p.hexstr(' ')
'0x20'
>>> p.hexstr(' a g')
'0x20 0x61 0x20 0x20 0x67'
"""
res = ''
for c in s:
res += hex(ord(c)) + ' '
return res.strip()
def __str__(self):
r = ''
for l in self.output:
r += '%s: %s\n' % (l, self.hexstr(self.output[l]))
return r
def generate(self):
"""
>>> p = Parser('')
>>> p.generate()
''
>>> p.output[1] = 'a'
>>> p.output[2] = 'b'
>>> p.output[6] = 'c'
>>> p.generate()
'abc'
>>> p.output[10] = 'f'
>>> p.generate()
'abcf'
>>> p.output[5] = 'm'
>>> p.generate()
'abmcf'
"""
res = ''
for l in collections.OrderedDict(sorted(self.output.items())):
res += self.output[l]
return res
def parse_label(self, data):
"""
>>> p = Parser('')
>>> p.debug = False
>>> p.parse_label('a')
True
>>> 'a' in p.labels
True
>>> p.labels['a']
0
>>> p.parse_label('a12')
True
>>> 'a12' in p.labels
True
>>> p.labels['a12']
0
>>> p.parse_label('a12-,')
False
>>> p.parse_label('test_label')
True
>>> 'a12-,' in p.labels
False
>>> p.parse_label('daud8912eh8921he')
True
"""
for c in data:
if not (c.isalnum() or c == '_'):
return False
if self.debug and data not in self.labels:
print ('LABEL: %s' % (data))
self.labels[data] = self.line
return True
def raw_number_neg(self, num):
"""
>>> p = Parser('')
>>> p.raw_number_neg(-1)
255
>>> p.raw_number_neg(-12)
244
>>> p.raw_number_neg(-126)
130
>>> p.raw_number_neg(-127)
65409
>>> p.raw_number_neg(-200)
65336
>>> p.raw_number_neg(-40000) # doctest: +ELLIPSIS
4294927296...
>>> p.raw_number_neg(1) # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: Invalid logic, number should not be positive
"""
if num >= 0:
raise ValueError('Invalid logic, number should not be positive')
#print (ctypes.c_uint8(-num).value, -num, int(0xff/2))
if ctypes.c_uint8(-num).value == -num and -num < int(0xff/2):
return ctypes.c_uint8(num).value
if ctypes.c_uint16(-num).value == -num and -num < int(0xffff/2):
return ctypes.c_uint16(num).value
if ctypes.c_uint32(-num).value == -num and -num < int(0xffffffff/2):
return ctypes.c_uint32(num).value
return ctypes.c_uint64(num).value
def raw_number(self, num):
"""
>>> p = Parser('')
>>> p.raw_number(-1)
'\\xff'
>>> p.raw_number(-2)
'\\xfe'
>>> p.raw_number(-3)
'\\xfd'
>>> p.raw_number(-200)
'\\xff8'
>>> p.raw_number(48)
'0'
>>> p.raw_number(0x5048)
'PH'
>>> p.raw_number(0x434241)
'CBA'
>>> p.raw_number(0x00434241)
'CBA'
>>> p.raw_number(0x4400434241)
'D\\x00CBA'
>>> p.raw_number(2147483647)
'\\x7f\\xff\\xff\\xff'
>>> p.raw_number(2147483649)
'\\x80\\x00\\x00\\x01'
"""
res = ''
if num < 0:
num = self.raw_number_neg(num)
while num > 0:
val = num & 0xff
res += chr(val)
num = num >> 8
return res[::-1]
def fill_to_bytes(self, num):
"""
>>> p = Parser('')
>>> p.fill_to_bytes('')
(1, '\\x00')
>>> p.fill_to_bytes('0')
(1, '0')
>>> p.fill_to_bytes('AB')
(2, 'AB')
>>> p.fill_to_bytes('ABC')
(4, '\\x00ABC')
>>> p.fill_to_bytes('ABCD')
(4, 'ABCD')
>>> p.fill_to_bytes('ABCDE')
(8, '\\x00\\x00\\x00ABCDE')
>>> p.fill_to_bytes('ABCDEFGHI') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid number ABCDEFGHI @0
>>> p.fill_to_bytes('ABCDEFGH')
(8, 'ABCDEFGH')
"""
num_bytes = len(num)
if num_bytes <= 1:
num_bytes = 1
elif num_bytes <= 2:
num_bytes = 2
elif num_bytes <= 4:
num_bytes = 4
elif num_bytes <= 8:
num_bytes = 8
else:
raise ParseError('Invalid number %s @%s' % (num, self.line))
while len(num) < num_bytes:
num = '\x00' + num
return (num_bytes, num)
def is_float(self, val):
"""
>>> p = Parser('')
>>> p.is_float('')
False
>>> p.is_float('.')
False
>>> p.is_float('.0')
True
>>> p.is_float('1.0')
True
>>> p.is_float('1.00005')
True
>>> p.is_float('1.000.05')
False
"""
if not val:
return False
got_digit = False
got_dot = False
for c in val:
if not (c.isdigit() or c == '.'):
return False
if not got_digit and c.isdigit():
got_digit = True
elif c == '.':
if got_dot:
return False
got_dot = True
return got_digit
def is_int(self, data):
return data and (data.isdigit() or (data[0] == '-' and data[1:].isdigit()))
def parse_reg(self, regdata):
"""
>>> p = Parser('')
>>> p.parse_reg('a') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid register or immediate: A @0
>>> p.parse_reg('r') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: ''
>>> p.parse_reg('r0')
0
>>> p.parse_reg('r1')
1
>>> p.parse_reg('r112')
112
>>> p.parse_reg('R999')
999
>>> p.parse_reg('PC')
-1
>>> p.parse_reg('pc')
-1
"""
regdata = regdata.upper()
if regdata != 'PC' and regdata[0] != 'R':
if self.is_int(regdata) and int(regdata) < 16 and int(regdata) >= 0:
return (int(regdata) << 4)
else:
raise ParseError('Invalid register or immediate: %s @%s' % (regdata, self.line))
if regdata == 'PC':
return -1
reg = int(regdata[1:])
return reg
def output_num(self, val, full=True):
"""
>>> p = Parser('')
>>> p.output_num(12)
(1, '\\x0c')
>>> p.output_num(1234)
(2, '\\x04\\xd2')
>>> p.output_num(65)
(1, 'A')
>>> p.output_num(1234567890)
(4, 'I\\x96\\x02\\xd2')
>>> p.output_num(1234567890, False)
'I\\x96\\x02\\xd2'
"""
res = self.fill_to_bytes(self.raw_number(val))
if full:
return res
return res[1]
def format_string(self, s):
"""
>>> p = Parser('')
>>> p.format_string('a')
'a'
>>> p.format_string('aa\\\\taa')
'aa\\taa'
>>> p.format_string('aa\\\\naa')
'aa\\naa'
>>> p.format_string('aa\\\\"naa')
'aa"naa'
>>> p.format_string('aa\\\\fbb') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid escape: \\f @0
"""
res = ''
escape = False
for c in s:
if not escape and c == '\\':
escape = True
elif escape and c == 'n':
res += '\n'
escape = False
elif escape and c == 't':
res += '\t'
escape = False
elif escape and c == '"':
res += '"'
escape = False
elif escape:
raise ParseError('Invalid escape: \\%s @%s' % (c, self.line))
else:
res += c
return res
def parse_load_2args(self, data):
"""
>>> p = Parser('')
"""
reg = self.parse_reg(data[0])
value = data[1]
# Support int or negative int
if self.is_int(value):
# Int
val = int(value)
(cnt, val) = self.output_num(val)
if cnt == 1:
self.code += chr(opcodes.LOAD_INT8)
elif cnt == 2:
self.code += chr(opcodes.LOAD_INT16)
elif cnt == 4:
self.code += chr(opcodes.LOAD_INT32)
elif cnt == 8:
self.code += chr(opcodes.LOAD_INT64)
self.regmap[reg] = 'int'
self.code += self.output_num(reg, False)
self.code += val
elif self.is_float(value):
# TODO
# Float
self.regmap[reg] = 'float'
pass
elif value[0] == '"' and value[-1] == '"':
# String
self.code += chr(opcodes.LOAD_STR)
self.code += self.output_num(reg, False)
self.regmap[reg] = 'str'
self.code += self.format_string(value[1:-1]) + '\x00'
else:
raise ParseError('Invalid argument for LOAD: %s @%s' % (value, self.line))
    def parse_load_3args(self, data):
        """
        Assemble the three-argument LOAD forms: memory load from an address,
        label (with optional +/- offset, fixed up later via postdata), or
        register-indirect load.

        data: [register, byte-count, source] tokens.

        >>> p = Parser('')
        """
        reg = self.parse_reg(data[0])
        if not data[1].isdigit():
            raise ParseError('Invalid argument for LOAD: %s @%s' % (data[1], self.line))
        cnt = int(data[1])
        opt = data[2].strip()
        if opt[0] == '[' and opt[-1] == ']':
            # Memory operand: "[address]" or "[label (+|-) offset]".
            address = opt[1:-1]
            self.code += chr(opcodes.LOAD_INT_MEM)
            self.code += self.output_num(reg, False)
            self.code += self.output_num(cnt, False)
            address_entries = address.split(' ')
            if len(address_entries) == 0:
                raise ParseError('Invalid argument for LOAD: %s @%s' % (data[2], self.line))
            address = address_entries[0]
            if address.isdigit():
                # Absolute numeric address: emit immediately.
                address = int(address)
                self.code += self.output_num(address, False)
            elif address in self.labels:
                # Label address: emit an 8-byte placeholder and record a
                # fixup in postdata for resolution after assembly.
                target = self.labels[address]
                oper = 0
                diff = 0
                if len(address_entries) == 3:
                    oper = address_entries[1]
                    if oper != '+' and oper != '-':
                        raise ParseError('Invalid argument for LOAD: %s (%s) @%s' % (oper, data[2], self.line))
                    diff = address_entries[2]
                    if not diff.isdigit():
                        raise ParseError('Invalid argument for LOAD: %s (%s) @%s' % (diff, data[2], self.line))
                    diff = int(diff)
                self.code += '\x00' * 8
                self.postdata[self.line] = (opcodes.LOAD_INT_MEM, target, oper, diff)
                #self.code = '%s %s,%s,%s,%s,%s:' % (Parser.__MAGIC_JUMP, 0, 8, target, oper, diff) + self.code
                #self.code = '%s %s,%s,%s,%s,%s:' % (Parser.__MAGIC_JUMP, 0, 8, target, oper, diff) + self.code
            else:
                raise ParseError('Invalid argument for LOAD: %s @%s' % (data[2], self.line))
            #self.code += self.output_num(address, False)
            self.regmap[reg] = 'int'
        elif opt[0] == 'R':
            # Register-indirect load: LOAD_INT reg, cnt, src-reg.
            reg2 = self.parse_reg(opt)
            self.code += chr(opcodes.LOAD_INT)
            self.code += self.output_num(reg, False)
            self.code += self.output_num(cnt, False)
            self.code += self.output_num(reg2, False)
            self.regmap[reg] = 'int'
        elif opt.isdigit():
            # NOTE(review): this branch assigns a local and emits no code —
            # it looks unfinished (other operand forms fall through
            # silently with no error). TODO confirm intent.
            data = int(opt)
def parse_load(self, opts):
"""
>>> p = Parser('')
>>> p.parse_load('R0, 1')
'\\x05\\x00\\x01'
>>> p.code = ''
>>> p.parse_load('R1, 1')
'\\x05\\x01\\x01'
>>> p.code = ''
>>> p.parse_load('R11, 28')
'\\x05\\x0b\\x1c'
>>> p.code = ''
>>> p.parse_load('R11, 12345')
'\\x06\\x0b09'
>>> p.code = ''
>>> p.parse_load('R11, 123456789')
'\\x07\\x0b\\x07[\\xcd\\x15'
>>> p.parse_load('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid number argument for LOAD: 1 () @0
>>> p.parse_load('R1') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid number argument for LOAD: 1 (R1) @0
>>> p.parse_load('R1, R2, 3') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid number argument for LOAD: 3 (R1, R2, 3) @0
>>> p.code = ''
>>> p.parse_load('R1, "abc"')
'\\t\\x01abc\\x00'
>>> p.code = ''
>>> p.parse_load('R1, 1.234') # FIXME: Unimplemented
''
"""
data = [x.strip() for x in opts.split(',')]
if len(data) == 2:
self.parse_load_2args(data)
elif len(data) == 3:
self.parse_load_3args(data)
else:
raise ParseError('Invalid number argument for LOAD: %s (%s) @%s' % (len(data), opts, self.line))
return self.code
def parse_print(self, opts):
"""
>>> p = Parser('')
>>> p.parse_print('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: No mandatory parameter given for PRINT
>>> p.parse_print('a') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported PRINT: a @0
>>> p.parse_print('"a"') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported PRINT: "a" @0
>>> p.parse_print('R0') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Using unused register for PRINT: R0 @0
>>> p.regmap[1] = "int"
>>> p.regmap[2] = "str"
>>> p.parse_print('R1')
'\\x14\\x01'
>>> p.code = ''
>>> p.parse_print('R2')
'\\x16\\x02'
"""
opts = opts.strip()
if not opts:
raise ParseError('No mandatory parameter given for PRINT')
if opts[0] == 'R':
reg = self.parse_reg(opts)
if not reg in self.regmap:
raise ParseError('Using unused register for PRINT: %s @%s' % (opts, self.line))
if self.regmap[reg] == 'int':
self.code += chr(opcodes.PRINT_INT)
elif self.regmap[reg] == 'str':
self.code += chr(opcodes.PRINT_STR)
self.code += self.output_num(reg, False)
else:
raise ParseError('Unsupported PRINT: %s @%s' % (opts, self.line))
"""
elif opts in self.labels:
self.code += 'FIXME'
self.code += chr(opcodes.PRINT_STR)
print ('print %s' % (opts))
elif opts[0] == '"':
self.code += chr(opcodes.PRINT_STR)
self.code += opts
self.code += '\x00'
print ('print %s' % (opts))
"""
return self.code
def parse_inc(self, opts):
"""
>>> p = Parser('')
>>> p.parse_inc('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: No mandatory parameter given for INC
>>> p.parse_inc('R0')
'\\r\\x00'
>>> p.code = ''
>>> p.parse_inc('R18')
'\\r\\x12'
>>> p.parse_inc('a') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported INC: a @0
>>> p.parse_inc('PC') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported INC: PC @0
"""
opts = opts.strip()
if not opts:
raise ParseError('No mandatory parameter given for INC')
if opts[0] == 'R':
reg = self.parse_reg(opts)
self.code += chr(opcodes.INC_INT)
self.code += self.output_num(reg, False)
else:
raise ParseError('Unsupported INC: %s @%s' % (opts, self.line))
return self.code
def parse_dec(self, opts):
"""
>>> p = Parser('')
>>> p.parse_dec('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: No mandatory parameter given for DEC
>>> p.parse_dec('R0')
'\\x0e\\x00'
>>> p.code = ''
>>> p.parse_dec('R18')
'\\x0e\\x12'
>>> p.parse_dec('PC') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported DEC: PC @0
"""
opts = opts.strip()
if not opts:
raise ParseError('No mandatory parameter given for DEC')
if opts[0] == 'R':
reg = self.parse_reg(opts)
self.code += chr(opcodes.DEC_INT)
self.code += self.output_num(reg, False)
else:
raise ParseError('Unsupported DEC: %s @%s' % (opts, self.line))
return self.code
def parse_target(self, target):
"""
>>> p = Parser('')
>>> p.parse_target('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid target
>>> p.parse_target('R1')
('reg', 1)
>>> p.parse_target('R99')
('reg', 99)
>>> p.parse_target('123')
('imm', 123)
>>> p.parse_target('0')
('imm', 0)
>>> p.parse_target('label')
(None, None)
>>> p.labels['label'] = 17
>>> p.labels['label2'] = 42
>>> p.parse_target('label')
('label', 17)
"""
if target in self.labels:
return ('label', self.labels[target])
if not target:
raise ParseError('Invalid target')
if target[0] == 'R':
return ('reg', self.parse_reg(target))
if target.isdigit():
return ('imm', int(target))
return (None, None)
def find_output(self, pos):
"""
>>> p = Parser('')
>>> p.find_output(1) is None
True
>>> p.find_output(-1) is None
True
>>> p.output[1] = 'a'
>>> p.output[2] = 'b'
>>> p.output[6] = 'c'
>>> p.find_output(1)
'a'
>>> p.find_output(6)
'c'
"""
if pos in self.output:
return self.output[pos]
return None
"""
while pos < max(self.output):
pos += 1
if pos in self.output:
return self.output[pos]
raise ParseError("No code for line: %s" % (pos))
"""
def estimate_jump_len(self, target_line):
"""
>>> p = Parser('')
>>> p.estimate_jump_len(10)
(True, 100)
>>> p.line = 5
>>> p.estimate_jump_len(1)
(False, 0)
>>> p.output[1] = 'a'
>>> p.output[2] = 'bb42'
>>> p.output[6] = 'c'
>>> p.output[8] = 'eue'
>>> p.output[10] = 'ff'
>>> p.output[11] = 'ff12334567789'
>>> p.estimate_jump_len(1)
(False, -5)
>>> p.estimate_jump_len(3)
(False, 0)
>>> p.estimate_jump_len(2)
(False, -4)
>>> p.estimate_jump_len(8)
(True, 65)
>>> p.estimate_jump_len(100)
(True, 600)
"""
diff = target_line - self.line
fuzzy = False
s = min(target_line, self.line)
s_end = max(target_line, self.line)
jlen = 0
estlines = 0
while s < s_end:
outp = self.find_output(s)
s += 1
if outp is None:
estlines += 1
continue
jlen += len(outp)
if Parser.__MAGIC_JUMP in outp:
jlen += 10
fuzzy = True
if diff < 0:
return (fuzzy, -jlen)
# Just estimate roughly upwards.
# Does not result optimal code, but it should work in most cases
return (True, (jlen + 10 + estlines) * 5)
def parse_jmp(self, opts):
"""
>>> p = Parser('')
>>> p.parse_jmp('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid target
>>> p.parse_jmp('R1') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unimplemented jump to target: R1
>>> p.parse_jmp('R1 < R2, label') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported JMP target: label @0
>>> p.labels['label'] = 1
>>> p.labels['label2'] = 20
>>> p.line = 4
>>> p.parse_jmp('R1 < R2, label')
'\\x1d\\x01\\x01\\x02\\x01'
>>> p.code = ''
>>> p.parse_jmp('R1 < R2, label2')
'FIXME 1,1,20,0,0:\\x1d\\x01\\x01\\x02'
>>> p.code = ''
>>> p.labels['label2'] = 2000
>>> p.parse_jmp('R1 < R2, label2')
'FIXME 1,2,2000,0,0:\\x1e\\x01\\x01\\x02'
>>> p.code = ''
>>> p.parse_jmp('R1 R2, label2') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported JMP: R1 R2, label2 @4
"""
data = [x.strip() for x in opts.split(',')]
if len(data) == 2:
cmp_ops = [x.strip() for x in data[0].split(' ')]
cmp_op = 0
if len(cmp_ops) == 3:
cmp_op = self.opers[cmp_ops[1]]
reg1 = self.parse_reg(cmp_ops[0])
reg2 = self.parse_reg(cmp_ops[2])
else:
raise ParseError('Unsupported JMP: %s @%s' % (opts, self.line))
(ttype, target) = self.parse_target(data[1].strip())
if ttype == 'imm':
(cnt, val) = self.output_num(target)
if cnt == 1:
self.code += chr(opcodes.JMP8)
elif cnt == 2:
self.code += chr(opcodes.JMP16)
elif cnt == 4:
self.code += chr(opcodes.JMP32)
elif cnt == 4:
self.code += chr(opcodes.JMP64)
self.code += val
elif ttype == 'label':
(est, est_size) = self.estimate_jump_len(target)
if est_size < 0xff:
bits = 1
self.code += chr(opcodes.JMP_LE8)
elif est_size < 0xffff:
bits = 2
self.code += chr(opcodes.JMP_LE16)
elif est_size < 0xffffffff:
bits = 4
self.code += chr(opcodes.JMP_LE32)
else:
bits = 8
self.code += chr(opcodes.JMP_LE64)
self.code += self.output_num(cmp_op, False)
self.code += self.output_num(reg1, False)
self.code += self.output_num(reg2, False)
if not est:
if est_size < 0:
est_size -= 4
else:
(extra_bytes, _) = self.output_num(est_size + 8)
est_size += extra_bytes
self.code += self.output_num(est_size, False)
else:
self.code = '%s %s,%s,%s,0,0:' % (Parser.__MAGIC_JUMP, 1, bits, target) + self.code
else:
raise ParseError('Unsupported JMP target: %s @%s' % (data[1], self.line))
elif len(data) == 1:
target = self.parse_target(data[0].strip())
print (target)
raise ParseError('Unimplemented jump to target: %s' % (data[0]))
else:
raise ParseError('Invalid JMP: %s @%s' % (opts, self.line))
return self.code
def parse_db(self, opts):
"""
>>> p = Parser('')
>>> p.parse_db('')
''
>>> p.parse_db('a, b') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid DB data: a, b @0
>>> p.parse_db('"a"')
'a'
>>> p.code = ''
>>> p.parse_db('"a", "bcde"')
'abcde'
>>> p.code = ''
>>> p.parse_db('1, 2, 4, 6')
'\\x01\\x02\\x04\\x06'
>>> p.code = ''
>>> p.parse_db('0x42, 0x12')
'B\\x12'
>>> p.code = ''
>>> p.parse_db("'a', 'cd'")
'acd'
"""
data = [x.strip() for x in opts.split(',')]
for val in data:
if not val:
continue
if val[0] == '\'':
self.code += self.format_string(val[1:-1])
elif val[0] == '\"':
self.code += self.format_string(val[1:-1])
elif val[0] == '0' and val[1] == 'x':
self.code += self.output_num(int(val, 16), False)
elif val.isdigit():
self.code += self.output_num(int(val), False)
else:
raise ParseError('Invalid DB data: %s @%s' % (opts, self.line))
return self.code
def stub_2regs(self, opcode, name, opts):
"""
>>> p = Parser('')
>>> p.stub_2regs(8, 'name', 'R1, R2')
('\\x08\\x01\\x02', 1, 2)
>>> p.stub_2regs(8, 'name', 'a, b') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid register: A @0
>>> p.stub_2regs(8, '', '') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported : @0
>>> p.stub_2regs(8, 'name', '') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported name: @0
>>> p.code = ''
>>> p.stub_2regs(8, 'name', 'R6, R0')
('\\x08\\x06\\x00', 6, 0)
"""
data = [x.strip() for x in opts.split(',')]
if len(data) == 2:
reg1 = self.parse_reg(data[0])
reg2 = self.parse_reg(data[1])
self.code += chr(opcode)
self.code += self.output_num(reg1, False)
self.code += self.output_num(reg2, False)
else:
raise ParseError('Unsupported %s: %s @%s' % (name, opts, self.line))
return (self.code, reg1, reg2)
def stub_3regs(self, opcode, name, opts):
"""
>>> p = Parser('')
>>> p.stub_3regs(8, 'name', 'R1, R2') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported name: R1, R2 @0
>>> p.stub_3regs(8, 'name', 'R1, R2, R3')
'\\x08\\x01\\x02\\x03'
>>> p.stub_3regs(8, 'name', 'a, b, c') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid register: A @0
>>> p.stub_3regs(8, '', '') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported : @0
>>> p.stub_3regs(8, 'name', '') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported name: @0
>>> p.code = ''
>>> p.stub_3regs(8, 'name', 'R6, R0, R7')
'\\x08\\x06\\x00\\x07'
"""
data = [x.strip() for x in opts.split(',')]
if len(data) == 3:
reg1 = self.parse_reg(data[0])
reg2 = self.parse_reg(data[1])
reg3 = self.parse_reg(data[2])
self.code += chr(opcode)
self.code += self.output_num(reg1, False)
self.code += self.output_num(reg2, False)
self.code += self.output_num(reg3, False)
else:
raise ParseError('Unsupported %s: %s @%s' % (name, opts, self.line))
return self.code
def parse_mov(self, opts):
"""
>>> p = Parser('')
>>> p.parse_mov('R1, R2')
Traceback (most recent call last):
..
ParseError: Using unused register for MOV: R1, R2 @0
>>> p.regmap[2] = 'int'
>>> p.parse_mov('R1, R2')
'\"\\x01\\x02"\\x01\\x02'
"""
(res, reg1, reg2) = self.stub_2regs(opcodes.MOV, 'MOV', opts)
if not reg2 in self.regmap:
raise ParseError('Using unused register for MOV: %s @%s' % (opts, self.line))
self.regmap[reg1] = self.regmap[reg2]
return res
    def parse_add(self, opts):
        """Emit an integer ADD; *opts* is 'Rdest, Rsrc1, Rsrc2'."""
        return self.stub_3regs(opcodes.ADD_INT, 'ADD', opts)
    def parse_sub(self, opts):
        """Emit an integer SUB; *opts* is 'Rdest, Rsrc1, Rsrc2'."""
        return self.stub_3regs(opcodes.SUB_INT, 'SUB', opts)
    def parse_mul(self, opts):
        """Emit an integer MUL; *opts* is 'Rdest, Rsrc1, Rsrc2'."""
        return self.stub_3regs(opcodes.MUL_INT, 'MUL', opts)
    def parse_div(self, opts):
        """Emit an integer DIV; *opts* is 'Rdest, Rsrc1, Rsrc2'."""
        return self.stub_3regs(opcodes.DIV_INT, 'DIV', opts)
    def parse_mod(self, opts):
        """Emit an integer MOD; *opts* is 'Rdest, Rsrc1, Rsrc2'."""
        return self.stub_3regs(opcodes.MOD_INT, 'MOD', opts)
def parse_heap(self, opts):
"""
>>> p = Parser('')
>>> p.parse_heap('R1')
'#\\x01'
>>> p.parse_heap('1')
'#\\x01#\\x10'
>>> p.parse_heap('t1') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid register: 1 @0
>>> p.parse_heap('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid arguments for HEAP: @0
"""
opts = opts.strip()
if not opts:
raise ParseError('Invalid arguments for HEAP: %s @%s' % (opts, self.line))
reg = self.parse_reg(opts)
self.code += chr(opcodes.HEAP)
self.code += self.output_num(reg, False)
return self.code
def parse_info(self, opts):
"""
>>> p = Parser('')
>>> p.parse_info('R1, 1')
'$\\x01\\x01'
>>> p.parse_info('') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported INFO: @0
>>> p.parse_info('1') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported INFO: 1 @0
>>> p.parse_info('1, 1')
'$\\x01\\x01$\\x10\\x01'
>>> p.parse_info('1a, 1b') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Invalid register or immediate: 1A @0
"""
data = [x.strip() for x in opts.split(',')]
if len(data) == 2:
reg = self.parse_reg(data[0])
if not data[1].isdigit():
raise ParseError('Invalid argument for INT: %s @%s' % (data[1], self.line))
val = int(data[1])
self.regmap[reg] = 'int'
self.code += chr(opcodes.INFO)
self.code += self.output_num(reg, False)
self.code += self.output_num(val, False)
else:
raise ParseError('Unsupported INFO: %s @%s' % (opts, self.line))
return self.code
def parse_command(self, cmd, opts):
"""
>>> p = Parser('')
>>> p.parse_command('', '') is None
True
>>> p.parse_command('LOAD', 'R1, 1')
'\\x05\\x01\\x01'
>>> p.code = ''
>>> p.parse_command('PRINT', 'R1')
'\\x14\\x01'
>>> p.code = ''
>>> p.parse_command('ADD', 'R3, R1, R0')
'\\x0f\\x03\\x01\\x00'
>>> p.code = ''
>>> p.parse_command('NONE', '') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported command: NONE @0
"""
cmd = cmd.upper().strip()
if cmd == '':
return None
elif cmd == 'LOAD':
return self.parse_load(opts)
elif cmd == 'PRINT':
return self.parse_print(opts)
elif cmd == 'INC':
return self.parse_inc(opts)
elif cmd == 'DEC':
return self.parse_dec(opts)
elif cmd == 'JMP':
return self.parse_jmp(opts)
elif cmd == 'MOV':
return self.parse_mov(opts)
elif cmd == 'ADD':
return self.parse_add(opts)
elif cmd == 'SUB':
return self.parse_sub(opts)
elif cmd == 'MUL':
return self.parse_mul(opts)
elif cmd == 'DIV':
return self.parse_div(opts)
elif cmd == 'MOD':
return self.parse_mod(opts)
elif cmd == 'DB':
return self.parse_db(opts)
elif cmd == 'HEAP':
return self.parse_heap(opts)
elif cmd == 'INFO':
return self.parse_info(opts)
elif cmd == 'STOP':
self.code += chr(opcodes.STOP)
else:
raise ParseError('Unsupported command: %s @%s' % (cmd, self.line))
#print (cmd, opts)
return self.code
def pre_parse_line(self, line):
"""
>>> p = Parser('')
>>> p.labels
{}
>>> p.pre_parse_line('a')
>>> p.labels
{}
>>> p.debug = False
>>> p.pre_parse_line('a:')
>>> p.labels
{'a': 0}
"""
datas = line.split(':')
if len(datas) >= 2 and self.parse_label(datas[0]):
return
def parse_line(self, line):
"""
>>> p = Parser('')
>>> p.debug = False
>>> p.parse_line('a') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ParseError: Unsupported command: NONE @0
>>> p.parse_line('LOAD R1, 0')
'\\x05\\x01\\x00'
>>> p.parse_line('') is None
True
>>> p.code = ''
>>> p.labels
{}
>>> p.parse_line('tst: LOAD R1, 0')
'\\x05\\x01\\x00'
>>> p.labels
{'tst': 0}
"""
if not line:
return None
datas = line.split(':')
if len(datas) >= 2 and self.parse_label(datas[0]):
line = ':'.join(datas[1:]).strip()
cmds = line.split(' ')
if not cmds:
return None
return self.parse_command(cmds[0], ' '.join(cmds[1:]))
def pre_parse(self):
"""
>>> p = Parser('')
>>> p.pre_parse()
"""
for self.line, line in enumerate(self.data):
line = line.strip()
if not line or line[0] == ';':
continue
self.pre_parse_line(line)
    def try_fix(self, line):
        """
        Try to resolve one FIXME placeholder (a pending jump offset).

        *line* is a key into self.output.  Returns ``(resolved, size)``
        where *size* is the signed byte distance to the jump target;
        ``resolved`` is False when another unresolved placeholder still
        sits between this line and the target.

        >>> p = Parser('')
        >>> p.try_fix('')
        (False, 0)
        >>> p.try_fix('test: t') # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ParseError: Invalid data
        >>> p.output[1] = 'a'
        >>> p.try_fix(1) # doctest: +ELLIPSIS +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        ...
        ValueError: invalid literal for int() with base 10: ''
        >>> p.output[1] = 'FIXME 1, 1, 2, 0, 0: a'
        >>> p.try_fix(1)
        (True, 0)
        >>> p.output[1] = 'FIXME 1, 1, 5, 0, 0: a'
        >>> p.output[2] = 'bc'
        >>> p.try_fix(1)
        (True, 2)
        >>> p.output[3] = 'FIXME 1, 1, 1, 0, 0: d'
        >>> p.try_fix(1)
        (False, 2)
        """
        if not line:
            return (False, 0)
        if line not in self.output:
            raise ParseError('Invalid data')
        data = self.output[line].split(':')
        # Placeholder header: 'FIXME <append>, <bits>, <target>, <oper>, <diff>'
        # -- data[0][6:] skips the 'FIXME ' prefix.
        (_, bits, target, oper, diff) = [int(x) for x in data[0][6:].split(',')]
        if bits == 8:
            # 8-bit offsets appear to be measured from line 0, wider ones
            # from the line after the jump -- TODO confirm against the VM.
            tmp = 0
        else:
            tmp = line + 1
        inc = 1
        if tmp > target:
            # Backward jump: walk toward the target in reverse.
            inc = -1
        size = 0
        while tmp != target:
            if tmp == line:
                pass
            elif tmp in self.output:
                if self.output[tmp][:5] == Parser.__MAGIC_JUMP:
                    # Another unresolved placeholder in between -- its
                    # final length is unknown, so we cannot resolve yet.
                    return (False, size)
                size += len(self.output[tmp])
            tmp += inc
        # Recorded adjustment: oper 1 adds diff, oper 2 subtracts it.
        if oper == 1:
            size += diff
        elif oper == 2:
            size -= diff
        return (True, size * inc)
    def fix_line(self, line, size):
        """
        Replace a resolved FIXME entry in self.output with its final
        bytes: the payload after the header plus the encoded offset.

        >>> p = Parser('')
        >>> p.fix_line('', 0)
        >>> p.output[1] = 'FIXME 1, 1, 2, 0, 0: a'
        >>> p.fix_line(1, 0)
        """
        if not line:
            return
        data = self.output[line].split(':')
        # Header fields; data[0][6:] skips the 'FIXME ' prefix.
        (append_bits, bits, target, _, _) = [int(x) for x in data[0][6:].split(',')]
        # Everything after the first ':' is the instruction payload.
        data = ':'.join(data[1:])
        outnum = size
        if append_bits == 1:
            # Widen the offset away from zero by *bits* -- NOTE(review):
            # this adds the bit count, not a byte count; looks deliberate
            # but confirm against the offset encoding.
            if outnum < 0:
                outnum -= bits
            else:
                outnum += bits
        if outnum < 0:
            # Backward jumps also skip back over this line's payload.
            outnum -= len(data)
        num = self.output_num(outnum, False)
        # Left-pad the encoded offset to the field width (bits / 8 bytes).
        while len(num) < bits / 8:
            num = '\x00' + num
        self.output[line] = data + num
def try_fixes(self, lines):
"""
>>> p = Parser('')
>>> p.try_fixes([])
>>> p.output[1] = 'FIXME 1, 1, 2, 0, 0: a'
>>> p.try_fixes([1])
"""
for l in lines:
(res, size) = self.try_fix(l)
if res:
self.fix_line(l, size)
def fixme_lines(self):
"""
>>> p = Parser('')
>>> p.fixme_lines()
[]
"""
flines = []
for c in self.output:
if self.output[c][:5] == Parser.__MAGIC_JUMP:
flines.append(c)
return flines
def fix_fixmes(self):
"""
>>> p = Parser('')
>>> p.fix_fixmes()
"""
flines = self.fixme_lines()
tries = 0
while flines:
self.try_fixes(flines)
flines = self.fixme_lines()
tries += 1
if tries >= 100:
raise ParseError("Can't parse FIXME lines: %s" % (flines))
    def apply_post_data(self):
        """
        Patch each recorded post-data entry with the absolute byte offset
        of its target line, overwriting the 8-byte placeholder at the end
        of the output line.

        >>> p = Parser('')
        >>> p.apply_post_data()
        """
        for line in self.postdata:
            tmp = 0
            size = 0
            # opcode is recorded but unused here.
            (opcode, target, oper, diff) = self.postdata[line]
            # Sum the encoded length of every line before *target*.
            while tmp < target:
                if tmp in self.output:
                    if self.output[tmp][:5] == Parser.__MAGIC_JUMP:
                        # All FIXMEs must be resolved before this pass runs.
                        raise ParseError('Got invalid %s on %s' % (Parser.__MAGIC_JUMP, tmp))
                    size += len(self.output[tmp])
                tmp += 1
            # Apply the recorded '+'/'-' adjustment.
            if oper == '+':
                size += diff
            elif oper == '-':
                size -= diff
            # Left-pad to exactly 8 bytes and splice over the placeholder.
            num = self.output_num(size, False)
            num = '\x00' * (8 - len(num)) + num
            self.output[line] = self.output[line][:-8:] + num
def parse(self):
"""
>>> p = Parser('')
>>> p.parse()
"""
self.pre_parse()
for self.line, line in enumerate(self.data):
self.code = ''
line = line.strip()
if not line or line[0] == ';':
continue
self.parse_line(line)
self.output[self.line] = self.code
self.fix_fixmes()
self.apply_post_data()
if __name__ == '__main__':
    # Command-line entry point: assemble INPUT into OUTPUT.
    cli = argparse.ArgumentParser(description='Assembler for MinVM')
    cli.add_argument('-q', '--quiet', action='store_true')
    cli.add_argument('input', type=argparse.FileType('rb'))
    cli.add_argument('output', type=argparse.FileType('wb'))
    args = cli.parse_args()
    source = args.input.readlines()
    asm = Parser(source)
    if args.quiet:
        asm.debug = False
    asm.parse()
    if not args.quiet:
        print ('Code:\n%s' % (asm))
    args.output.write(asm.generate())
| |
import sys
import py, pytest
import _pytest.assertion as plugin
from _pytest.assertion import reinterpret, util
# Skip marker for tests that require the "new" assertion reinterpretation,
# which needs Python >= 2.6.
needsnewassert = pytest.mark.skipif("sys.version_info < (2,6)")
def interpret(expr):
    # Reinterpret *expr* in the caller's frame.  sys._getframe(1) makes the
    # call depth significant: do not wrap this call in extra layers.
    return reinterpret.reinterpret(expr, py.code.Frame(sys._getframe(1)))
class TestBinReprIntegration:
    """End-to-end check that the pytest_assertrepr_compare hook is called."""
    pytestmark = needsnewassert
    def test_pytest_assertrepr_compare_called(self, testdir):
        # The conftest hook records every (op, left, right) comparison it is
        # asked to explain; the second generated test inspects that log.
        testdir.makeconftest("""
            l = []
            def pytest_assertrepr_compare(op, left, right):
                l.append((op, left, right))
            def pytest_funcarg__l(request):
                return l
        """)
        testdir.makepyfile("""
            def test_hello():
                assert 0 == 1
            def test_check(l):
                assert l == [("==", 0, 1)]
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*test_hello*FAIL*",
            "*test_check*PASS*",
        ])
def callequal(left, right):
    """Return the plugin's explanation lines for ``left == right``."""
    return plugin.pytest_assertrepr_compare('==', left, right)
class TestAssert_reprcompare:
    """Unit tests for the default '==' comparison explanations."""
    def test_different_types(self):
        # No explanation is offered when the operand types differ.
        assert callequal([0, 1], 'foo') is None
    def test_summary(self):
        # The first explanation line is a short summary.
        summary = callequal([0, 1], [0, 2])[0]
        assert len(summary) < 65
    def test_text_diff(self):
        diff = callequal('spam', 'eggs')[1:]
        assert '- spam' in diff
        assert '+ eggs' in diff
    def test_multiline_text_diff(self):
        left = 'foo\nspam\nbar'
        right = 'foo\neggs\nbar'
        diff = callequal(left, right)
        assert '- spam' in diff
        assert '+ eggs' in diff
    def test_list(self):
        expl = callequal([0, 1], [0, 2])
        assert len(expl) > 1
    def test_list_different_lenghts(self):
        expl = callequal([0, 1], [0, 1, 2])
        assert len(expl) > 1
        expl = callequal([0, 1, 2], [0, 1])
        assert len(expl) > 1
    def test_dict(self):
        expl = callequal({'a': 0}, {'a': 1})
        assert len(expl) > 1
    def test_set(self):
        expl = callequal(set([0, 1]), set([0, 2]))
        assert len(expl) > 1
    def test_list_tuples(self):
        expl = callequal([], [(1,2)])
        assert len(expl) > 1
        expl = callequal([(1,2)], [])
        assert len(expl) > 1
    def test_list_bad_repr(self):
        # A crashing __repr__ must not break the explanation machinery.
        class A:
            def __repr__(self):
                raise ValueError(42)
        expl = callequal([], [A()])
        assert 'ValueError' in "".join(expl)
        expl = callequal({}, {'1': A()})
        assert 'faulty' in "".join(expl)
    def test_one_repr_empty(self):
        """
        The faulty empty-string repr used to trigger
        an unbound local error in _diff_text.
        """
        class A(str):
            def __repr__(self):
                return ''
        expl = callequal(A(), '')
        assert not expl
    def test_repr_no_exc(self):
        # A healthy repr must not be reported as having raised.
        expl = ' '.join(callequal('foo', 'bar'))
        assert 'raised in repr()' not in expl
@needsnewassert
def test_rewritten(testdir):
    # Rewritten test modules get the @py_builtins helper injected into
    # their globals -- its presence proves rewriting happened.
    testdir.makepyfile("""
        def test_rewritten():
            assert "@py_builtins" in globals()
    """)
    assert testdir.runpytest().ret == 0
def test_reprcompare_notin():
    """The 'not in' explanation pinpoints where the needle occurs."""
    explanation = plugin.pytest_assertrepr_compare('not in', 'foo', 'aaafoobbb')
    assert explanation[1:] == ["'foo' is contained here:", ' aaafoobbb', '? +++']
@needsnewassert
def test_pytest_assertrepr_compare_integration(testdir):
    # A failing set comparison should report the extra left-side item.
    testdir.makepyfile("""
        def test_hello():
            x = set(range(100))
            y = x.copy()
            y.remove(50)
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*50*",
    ])
@needsnewassert
def test_sequence_comparison_uses_repr(testdir):
    # Differing items must be shown with repr() (quoted), not str().
    testdir.makepyfile("""
        def test_hello():
            x = set("hello x")
            y = set("hello y")
            assert x == y
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*def test_hello():*",
        "*assert x == y*",
        "*E*Extra items*left*",
        "*E*'x'*",
        "*E*Extra items*right*",
        "*E*'y'*",
    ])
@pytest.mark.xfail("sys.version_info < (2,6)")
def test_assert_compare_truncate_longmessage(testdir):
    # Very long diffs are truncated by default but shown in full with -vv.
    testdir.makepyfile(r"""
        def test_long():
            a = list(range(200))
            b = a[::2]
            a = '\n'.join(map(str, a))
            b = '\n'.join(map(str, b))
            assert a == b
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        "*too verbose, truncated*",
    ])
    result = testdir.runpytest('-vv')
    result.stdout.fnmatch_lines([
        "*- 197",
    ])
@needsnewassert
def test_assertrepr_loaded_per_dir(testdir):
    """Each directory's conftest hook supplies its own compare summary."""
    testdir.makepyfile(test_base=['def test_base(): assert 1 == 2'])
    # Two sibling directories, each with a failing test and its own
    # pytest_assertrepr_compare conftest hook.
    for name in ('a', 'b'):
        subdir = testdir.mkdir(name)
        subdir.join('test_%s.py' % name).write(
            'def test_%s(): assert 1 == 2' % name)
        subdir.join('conftest.py').write(
            'def pytest_assertrepr_compare(): return ["summary %s"]' % name)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines([
        '*def test_base():*',
        '*E*assert 1 == 2*',
        '*def test_a():*',
        '*E*assert summary a*',
        '*def test_b():*',
        '*E*assert summary b*'])
def test_assertion_options(testdir):
    """Assertion detail shows by default and disappears with every
    combination of the off switches."""
    testdir.makepyfile("""
        def test_hello():
            x = 3
            assert x == 4
    """)
    result = testdir.runpytest()
    assert "3 == 4" in result.stdout.str()
    off_options = (("--no-assert",),
                   ("--nomagic",),
                   ("--no-assert", "--nomagic"),
                   ("--assert=plain",),
                   ("--assert=plain", "--no-assert"),
                   ("--assert=plain", "--nomagic"),
                   ("--assert=plain", "--no-assert", "--nomagic"))
    for combo in off_options:
        assert "3 == 4" not in testdir.runpytest(*combo).stdout.str()
def test_old_assert_mode(testdir):
    # In reinterpretation mode no rewriting happens, so the rewrite
    # helper must be absent from the module globals.
    testdir.makepyfile("""
        def test_in_old_mode():
            assert "@py_builtins" not in globals()
    """)
    result = testdir.runpytest("--assert=reinterp")
    assert result.ret == 0
def test_triple_quoted_string_issue113(testdir):
    # Regression test for issue 113: a triple-quoted string in the assert
    # must not produce a SyntaxError during reinterpretation.
    testdir.makepyfile("""
        def test_hello():
            assert "" == '''
    '''""")
    result = testdir.runpytest("--fulltrace")
    result.stdout.fnmatch_lines([
        "*1 failed*",
    ])
    assert 'SyntaxError' not in result.stdout.str()
def test_traceback_failure(testdir):
    # The failure report must show both traceback entries (the test and the
    # helper) with the reinterpreted 'assert 3 == 2 + where 2 = g()' detail.
    p1 = testdir.makepyfile("""
        def g():
            return 2
        def f(x):
            assert x == g()
        def test_onefails():
            f(3)
    """)
    result = testdir.runpytest(p1)
    result.stdout.fnmatch_lines([
        "*test_traceback_failure.py F",
        "====* FAILURES *====",
        "____*____",
        "",
        "    def test_onefails():",
        ">       f(3)",
        "",
        "*test_*.py:6: ",
        "_ _ _ *",
        #"",
        "    def f(x):",
        ">       assert x == g()",
        "E       assert 3 == 2",
        "E        +  where 2 = g()",
        "",
        "*test_traceback_failure.py:4: AssertionError"
    ])
@pytest.mark.skipif("sys.version_info < (2,5) or '__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" )
def test_warn_missing(testdir):
    """Running under -OO (asserts stripped) must print a warning."""
    p1 = testdir.makepyfile("")
    # Both a plain help invocation and --no-assert should warn.
    for extra in (("-h",), ("--no-assert",)):
        result = testdir.run(sys.executable, "-OO", "-m", "pytest", *extra)
        result.stderr.fnmatch_lines([
            "*WARNING*assert statements are not executed*",
        ])
| |
from __future__ import unicode_literals
from future.builtins import str
from django import VERSION
from django.contrib.auth.models import AnonymousUser
from django.db import connection
from django.template import Context, Template
from django.test.utils import override_settings
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from mezzanine.core.request import current_request
from mezzanine.pages.models import Page, RichTextPage
from mezzanine.urls import PAGES_SLUG
from mezzanine.utils.tests import TestCase
from mezzanine.utils.models import get_user_model
# Resolve the active user model once at import time (honours a custom
# AUTH_USER_MODEL).
User = get_user_model()
class PagesTests(TestCase):
    """Tests for the pages app: ascendant lookups, slugs, access control
    and menu rendering."""
    def test_page_ascendants(self):
        """
        Test the methods for looking up ascendants efficiently
        behave as expected.
        """
        # Create related pages.
        primary, created = RichTextPage.objects.get_or_create(title="Primary")
        secondary, created = primary.children.get_or_create(title="Secondary")
        tertiary, created = secondary.children.get_or_create(title="Tertiary")
        # Force a site ID to avoid the site query when measuring queries.
        setattr(current_request(), "site_id", settings.SITE_ID)
        # Test that get_ascendants() returns the right thing.
        page = Page.objects.get(id=tertiary.id)
        ascendants = page.get_ascendants()
        self.assertEqual(ascendants[0].id, secondary.id)
        self.assertEqual(ascendants[1].id, primary.id)
        # Test ascendants are returned in order for slug, using
        # a single DB query.
        connection.queries = []
        pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
        self.assertEqual(len(connection.queries), 1)
        self.assertEqual(pages_for_slug[0].id, tertiary.id)
        self.assertEqual(pages_for_slug[1].id, secondary.id)
        self.assertEqual(pages_for_slug[2].id, primary.id)
        # Test page.get_ascendants uses the cached attribute,
        # without any more queries.
        connection.queries = []
        ascendants = pages_for_slug[0].get_ascendants()
        self.assertEqual(len(connection.queries), 0)
        self.assertEqual(ascendants[0].id, secondary.id)
        self.assertEqual(ascendants[1].id, primary.id)
        # Use a custom slug in the page path, and test that
        # Page.objects.with_ascendants_for_slug fails, but
        # correctly falls back to recursive queries.
        secondary.slug += "custom"
        secondary.save()
        pages_for_slug = Page.objects.with_ascendants_for_slug(tertiary.slug)
        self.assertEqual(len(pages_for_slug[0]._ascendants), 0)
        connection.queries = []
        ascendants = pages_for_slug[0].get_ascendants()
        self.assertEqual(len(connection.queries), 2)  # 2 parent queries
        self.assertEqual(pages_for_slug[0].id, tertiary.id)
        self.assertEqual(ascendants[0].id, secondary.id)
        self.assertEqual(ascendants[1].id, primary.id)
    def test_set_parent(self):
        """Re-parenting a page rewrites its slug, and parent cycles are
        rejected."""
        old_parent, _ = RichTextPage.objects.get_or_create(title="Old parent")
        new_parent, _ = RichTextPage.objects.get_or_create(title="New parent")
        child, _ = RichTextPage.objects.get_or_create(
            title="Child", slug="kid")
        self.assertTrue(child.parent is None)
        self.assertTrue(child.slug == "kid")
        child.set_parent(old_parent)
        child.save()
        self.assertEqual(child.parent_id, old_parent.id)
        self.assertTrue(child.slug == "old-parent/kid")
        # Re-fetch to verify the change persisted.
        child = RichTextPage.objects.get(id=child.id)
        self.assertEqual(child.parent_id, old_parent.id)
        self.assertTrue(child.slug == "old-parent/kid")
        child.set_parent(new_parent)
        child.save()
        self.assertEqual(child.parent_id, new_parent.id)
        self.assertTrue(child.slug == "new-parent/kid")
        child = RichTextPage.objects.get(id=child.id)
        self.assertEqual(child.parent_id, new_parent.id)
        self.assertTrue(child.slug == "new-parent/kid")
        # Clearing the parent restores the bare slug.
        child.set_parent(None)
        child.save()
        self.assertTrue(child.parent is None)
        self.assertTrue(child.slug == "kid")
        child = RichTextPage.objects.get(id=child.id)
        self.assertTrue(child.parent is None)
        self.assertTrue(child.slug == "kid")
        child = RichTextPage(title="child2")
        child.set_parent(new_parent)
        self.assertEqual(child.slug, "new-parent/child2")
        # Assert that cycles are detected.
        p1, _ = RichTextPage.objects.get_or_create(title="p1")
        p2, _ = RichTextPage.objects.get_or_create(title="p2")
        p2.set_parent(p1)
        with self.assertRaises(AttributeError):
            p1.set_parent(p1)
        with self.assertRaises(AttributeError):
            p1.set_parent(p2)
        # Cycle detection must also hold for a freshly-loaded instance.
        p2c = RichTextPage.objects.get(title="p2")
        with self.assertRaises(AttributeError):
            p1.set_parent(p2c)
    def test_set_slug(self):
        """Changing a parent's slug cascades into its children's slugs."""
        parent, _ = RichTextPage.objects.get_or_create(
            title="Parent", slug="parent")
        child, _ = RichTextPage.objects.get_or_create(
            title="Child", slug="parent/child", parent_id=parent.id)
        parent.set_slug("new-parent-slug")
        self.assertTrue(parent.slug == "new-parent-slug")
        parent = RichTextPage.objects.get(id=parent.id)
        self.assertTrue(parent.slug == "new-parent-slug")
        child = RichTextPage.objects.get(id=child.id)
        self.assertTrue(child.slug == "new-parent-slug/child")
    def test_login_required(self):
        """login_required pages are hidden from anonymous users and redirect
        to the login URL, including view-name/pattern-name LOGIN_URL forms."""
        public, _ = RichTextPage.objects.get_or_create(
            title="Public", slug="public", login_required=False)
        private, _ = RichTextPage.objects.get_or_create(
            title="Private", slug="private", login_required=True)
        accounts_installed = ("mezzanine.accounts" in settings.INSTALLED_APPS)
        args = {"for_user": AnonymousUser()}
        self.assertTrue(public in RichTextPage.objects.published(**args))
        self.assertTrue(private not in RichTextPage.objects.published(**args))
        args = {"for_user": User.objects.get(username=self._username)}
        self.assertTrue(public in RichTextPage.objects.published(**args))
        self.assertTrue(private in RichTextPage.objects.published(**args))
        public_url = public.get_absolute_url()
        private_url = private.get_absolute_url()
        self.client.logout()
        response = self.client.get(private_url, follow=True)
        login = "%s?next=%s" % (settings.LOGIN_URL, private_url)
        if accounts_installed:
            # For an inaccessible page with mezzanine.accounts we should
            # see a login page, without it 404 is more appropriate than an
            # admin login.
            target_status_code = 200
        else:
            target_status_code = 404
        self.assertRedirects(response, login,
                             target_status_code=target_status_code)
        response = self.client.get(public_url, follow=True)
        self.assertEqual(response.status_code, 200)
        if accounts_installed and VERSION >= (1, 5):
            # Test if view name or URL pattern can be used as LOGIN_URL.
            with override_settings(LOGIN_URL="mezzanine.accounts.views.login"):
                # Note: With 1.7 this loops if the view app isn't installed.
                response = self.client.get(public_url, follow=True)
                self.assertEqual(response.status_code, 200)
                response = self.client.get(private_url, follow=True)
                self.assertRedirects(response, login)
            with override_settings(LOGIN_URL="login"):
                # Note: The "login" is a pattern name in accounts.urls.
                response = self.client.get(public_url, follow=True)
                self.assertEqual(response.status_code, 200)
                response = self.client.get(private_url, follow=True)
                self.assertRedirects(response, login)
        # Once logged in, both pages must be reachable.
        self.client.login(username=self._username, password=self._password)
        response = self.client.get(private_url, follow=True)
        self.assertEqual(response.status_code, 200)
        response = self.client.get(public_url, follow=True)
        self.assertEqual(response.status_code, 200)
        if accounts_installed and VERSION >= (1, 5):
            with override_settings(LOGIN_URL="mezzanine.accounts.views.login"):
                response = self.client.get(public_url, follow=True)
                self.assertEqual(response.status_code, 200)
                response = self.client.get(private_url, follow=True)
                self.assertEqual(response.status_code, 200)
            with override_settings(LOGIN_URL="login"):
                response = self.client.get(public_url, follow=True)
                self.assertEqual(response.status_code, 200)
                response = self.client.get(private_url, follow=True)
                self.assertEqual(response.status_code, 200)
    def test_page_menu_queries(self):
        """
        Test that rendering a page menu executes the same number of
        queries regardless of the number of pages or levels of
        children.
        """
        template = ('{% load pages_tags %}'
                    '{% page_menu "pages/menus/tree.html" %}')
        before = self.queries_used_for_template(template)
        self.assertTrue(before > 0)
        self.create_recursive_objects(RichTextPage, "parent", title="Page",
                                      status=CONTENT_STATUS_PUBLISHED)
        after = self.queries_used_for_template(template)
        self.assertEqual(before, after)
    def test_page_menu_flags(self):
        """
        Test that pages only appear in the menu templates they've been
        assigned to show in.
        """
        menus = []
        pages = []
        template = "{% load pages_tags %}"
        for i, label, path in settings.PAGE_MENU_TEMPLATES:
            menus.append(i)
            pages.append(RichTextPage.objects.create(in_menus=list(menus),
                title="Page for %s" % str(label),
                status=CONTENT_STATUS_PUBLISHED))
            template += "{%% page_menu '%s' %%}" % path
        rendered = Template(template).render(Context({}))
        for page in pages:
            self.assertEqual(rendered.count(page.title), len(page.in_menus))
    def test_page_menu_default(self):
        """
        Test that the settings-defined default value for the ``in_menus``
        field is used, also checking that it doesn't get forced to text,
        but that sequences are made immutable.
        """
        with override_settings(
                PAGE_MENU_TEMPLATES=((8, "a", "a"), (9, "b", "b"))):
            with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=None):
                page_in_all_menus = Page.objects.create()
                self.assertEqual(page_in_all_menus.in_menus, (8, 9))
            with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=tuple()):
                page_not_in_menus = Page.objects.create()
                self.assertEqual(page_not_in_menus.in_menus, tuple())
            with override_settings(PAGE_MENU_TEMPLATES_DEFAULT=[9]):
                page_in_a_menu = Page.objects.create()
                self.assertEqual(page_in_a_menu.in_menus, (9,))
    def test_overridden_page(self):
        """
        Test that a page with a slug matching a non-page urlpattern
        returns ``True`` from its ``overridden`` method.
        """
        # If BLOG_SLUG is empty, then urlpatterns for pages are prefixed
        # with PAGES_SLUG, and generally won't be overridden. In this
        # case, there aren't any overriding URLs by default, so bail
        # on the test.
        if PAGES_SLUG:
            return
        page, created = RichTextPage.objects.get_or_create(slug="edit")
        self.assertTrue(page.overridden())
    def test_unicode_slug_parm_to_processor_for(self):
        """
        Test that passing a unicode slug to processor_for works for
        python 2.x
        """
        from mezzanine.pages.page_processors import processor_for
        @processor_for(u'test unicode string')
        def test_page_processor(request, page):
            return {}
        page, _ = RichTextPage.objects.get_or_create(title="test page")
        self.assertEqual(test_page_processor(current_request(), page), {})
| |
"""
Tests for the CSScheme dumper
"""
import pytest
from ..tinycss.css21 import Stylesheet, Declaration, RuleSet
from ..tinycss.token_data import Token
from ..tinycss.tokenizer import tokenize_grouped
from .. import dumper
from ..dumper import DumpError
from ..parser import StringRule
from . import jsonify
# Shorthand functions for tinycss classes
def T(type_, value):  # noqa
    """Build a Token whose value and unparsed form are both *value*."""
    return Token(type_, value, value, None, 0, 0)
def SR(keyword, value):  # noqa
    """Build a StringRule for *keyword* from a single-token CSS *value*."""
    tokens = list(tokenize_grouped(value))
    # The value must tokenize to exactly one (grouped) token.
    assert len(tokens) == 1
    return StringRule(keyword, tokens[0], 0, 0)
def SS(rules):  # noqa
    """Wrap a list of rules in a Stylesheet with no errors or encoding."""
    return Stylesheet(rules, [], None)
def RS(sel, decl, at_rules=None):  # noqa
    """Build a RuleSet from a selector string, declarations and at-rules.

    ``at_rules`` now defaults to a fresh list per call; the previous
    ``at_rules=[]`` default was a single shared mutable list that every
    default-constructed RuleSet aliased.
    """
    if at_rules is None:
        at_rules = []
    sel = tokenize_grouped(sel)
    rs = RuleSet(sel, decl, 0, 0)
    rs.at_rules = at_rules
    return rs
def DC(name, value):  # noqa
    """Build a Declaration for *name* with *value* tokenized from CSS."""
    return Declaration(name, tokenize_grouped(value), None, 0, 0)
@pytest.mark.parametrize(('stylesheet', 'expected_data'), [
    (SS([SR('@name', "Test"),
         SR('@at-rule', "hi"),
         RS('*', []),
         # Should this be tested here?
         RS('source', [DC('foreground', "#123456")]),
         SR('@uuid', '2e3af29f-ebee-431f-af96-72bda5d4c144')
         ]),
     {'name': "Test",
      'at-rule': "hi",
      'uuid': "2e3af29f-ebee-431f-af96-72bda5d4c144",
      'settings': [
          {'settings': {}},
          {'scope': "source",
           'settings': {'foreground': "#123456"}},
      ]}
     ),
])
def test_datafy(stylesheet, expected_data):
    """A complete stylesheet converts to the expected scheme dict."""
    data = dumper.datafy_stylesheet(stylesheet)
    assert data == expected_data
@pytest.mark.parametrize(('stylesheet', 'expected_error'), [
    (SS([SR('@name', "Test"),
         ]),
     "Must contain '*' ruleset"
     ),
    (SS([SR('@name', "Test"),
         RS('*', []),
         RS('*', [])
         ]),
     "Only one *-rule allowed"
     ),
    (SS([SR('@settings', "value"),
         SR('@name', "Test"),
         RS('*', [])
         ]),
     "Can not override 'settings' key using at-rules."
     ),
])
def test_datafy_errors(stylesheet, expected_error):
    """Each malformed stylesheet raises a DumpError naming the cause."""
    # pytest.raises replaces the try/assert-False/except pattern and also
    # fails cleanly when no exception is raised at all.
    with pytest.raises(DumpError) as excinfo:
        dumper.datafy_stylesheet(stylesheet)
    assert expected_error in str(excinfo.value)
@pytest.mark.parametrize(('ruleset', 'expected_data'), [
    # declarations
    (RS('*', [DC('foreground', "#123456"),
              DC('someSetting', "yeah"),
              DC('anInteger', "2"),
              # Test if subcalls (translate_colors) function properly
              DC('another', "rgb(0,0,0)"),
              ],
        []),
     {'settings': {
         'foreground': "#123456",
         'someSetting': "yeah",
         'anInteger': "2",  # converted to string
         'another': "#000000",
     }}
     ),
    # string rules
    (RS("source",
        [DC('fontStyle', "bold"),
         ],
        [SR('@name', "\"Test name\""),
         SR('@arbitrary', "\"just some string\"")
         ]),
     {'name': "Test name",
      'arbitrary': "just some string",
      'scope': "source",
      'settings': {
          'fontStyle': "bold",
      }}
     ),
    # whitespace stripping of selector
    (RS("some other \nruleset with.blank.lines",
        []),
     {'scope': "some other ruleset with.blank.lines",
      'settings': {
      }}
     ),
    # escaped subtract operator (with legacy test)
    (RS("not '-' subtract, real \- subtract",
        []),
     {'scope': "not '-' subtract, real - subtract",
      'settings': {
      }}
     ),
    # other escaped operators
    (RS(R"\(a \| b\) \- \(c \& b.f, j.\1\)",
        []),
     {'scope': "(a | b) - (c & b.f, j.1)",
      'settings': {
      }}
     ),
])
def test_datafy_ruleset(ruleset, expected_data):
    """Rulesets convert to dicts: settings, scope and at-rule keys."""
    data = dumper.datafy_ruleset(ruleset)
    assert data == expected_data
@pytest.mark.parametrize(('ruleset', 'expected_error'), [
    (RS('*',
        [],
        [SR('@settings', "a"),
         ]),
     "You can not override the 'settings' key using at-rules"
     ),
    (RS('yeah', [],
        [SR('@scope', "a"),
         ]),
     "You can not override the 'scope' key using at-rules"
     ),
])
def test_datafy_ruleset_errors(ruleset, expected_error):
    """Reserved keys may not be overridden by at-rules."""
    # pytest.raises replaces the try/assert-False/except pattern and also
    # fails cleanly when no exception is raised at all.
    with pytest.raises(DumpError) as excinfo:
        dumper.datafy_ruleset(ruleset)
    assert expected_error in str(excinfo.value)
@pytest.mark.parametrize(('decl', 'expected_decl'), [
    # pass through
    (DC('prop', "#123456 #12345678 cyan"),
     ('prop', [('HASH', "#123456"),
               ('S', " "),
               ('HASH', "#12345678"),
               ('S', " "),
               ('IDENT', "cyan")])),
    # color
    (DC('background', "#123456"),
     ('background', [('HASH', "#123456")])),
    (DC('foreground', "black"),
     ('foreground', [('HASH', "#000000")])),
    (DC('background', "cyan"),
     ('background', [('HASH', "#00FFFF")])),
    # style list
    (DC('fontStyle', 'bold italic underline'),
     ('fontStyle', [('IDENT', "bold"),
                    ('S', " "),
                    ('IDENT', "italic"),
                    ('S', " "),
                    ('IDENT', "underline")])),
    (DC('fontStyle', 'none'),
     ('fontStyle', [('IDENT', "")])),
    # options list
    (DC('tagsOptions', 'foreground underline squiggly_underline stippled_underline'),
     ('tagsOptions', [('IDENT', "foreground"),
                      ('S', " "),
                      ('IDENT', "underline"),
                      ('S', " "),
                      ('IDENT', "squiggly_underline"),
                      ('S', " "),
                      ('IDENT', "stippled_underline")])),
    # integer
    (DC('shadowWidth', '4'),
     ('shadowWidth', [('INTEGER', 4)])),
    (DC('shadowWidth', '\"-4\"'),
     ('shadowWidth', [('STRING', "-4")])),
])
def test_validify_decl(decl, expected_decl):
    """Known properties validate in place and normalize their tokens."""
    dumper.validify_declaration(decl, '')
    assert (decl.name, list(jsonify(decl.value))) == expected_decl
@pytest.mark.parametrize(('decl', 'expected_error'), [
    # color
    (DC('background', "#123456 #12345678"),
     "expected 1 token for property background, got 3"),
    (DC('background', "'hi there'"),
     "unexpected STRING token for property background"),
    (DC('bracketsForeground', "\"#12345\""),
     "unexpected STRING token for property bracketsForeground"),
    (DC('foreground', "2"),
     "unexpected INTEGER token for property foreground"),
    (DC('foreground', "not-a-color"),
     "unknown color name 'not-a-color' for property foreground"),
    # style list
    (DC('fontStyle', "#000001"),
     "unexpected HASH token for property fontStyle"),
    (DC('fontStyle', "\"hi\""),
     "unexpected STRING token for property fontStyle"),
    (DC('fontStyle', "foreground"),
     "invalid value 'foreground' for style property fontStyle"),
    (DC('fontStyle', "bold none"),
     "'none' may not be used together with other styles"),
    # options list
    (DC('tagsOptions', "#000001"),
     "unexpected HASH token for property tagsOptions"),
    (DC('bracketsOptions', "italic"),
     "invalid value 'italic' for options property bracketsOptions"),
    # integer
    (DC('shadowWidth', "1 2"),
     "expected 1 token for property shadowWidth, got 3"),
    (DC('shadowWidth', "'a'"),
     "expected number in string for property shadowWidth, got 'a'"),
    (DC('shadowWidth', "1.23"),
     "unexpected NUMBER token for property shadowWidth"),
])
def test_validify_decl_errors(decl, expected_error):
    """Invalid property values raise a DumpError naming the problem."""
    # pytest.raises replaces the try/assert-False/except pattern and also
    # fails cleanly when no exception is raised at all.
    with pytest.raises(DumpError) as excinfo:
        dumper.validify_declaration(decl, '')
    assert expected_error in str(excinfo.value)
@pytest.mark.parametrize(('decl', 'expected_decl'), [
    # Does not access a declaration's name, only values
    # pass through
    (DC('prop', "'hi there' #123456 ident 5"),
     ('prop', [('STRING', "hi there"),
               ('S', " "),
               ('HASH', "#123456"),
               ('S', " "),
               ('IDENT', "ident"),
               ('S', " "),
               ('INTEGER', 5)])),
    # changes
    (DC('prop', "'#12345678'"),
     ('prop', [('HASH', "#12345678")])),
    (DC('prop', "#123"),
     ('prop', [('HASH', "#112233")])),
    (DC('prop', "rgb(16, 32, 50.2%)"),
     ('prop', [('HASH', "#102080")])),
    (DC('prop', "rgba(-100%, 312.6%, 5, .5)"),
     ('prop', [('HASH', "#00FF0580")])),
    (DC('prop', "hsl(0, 50%, 50%)"),
     ('prop', [('HASH', "#BF4040")])),
    (DC('prop', "hsla(123.4, 250%, 13.54%, 0.1)"),
     ('prop', [('HASH', "#0045041A")])),
])
def test_translate_colors(decl, expected_decl):
    """translate_colors() rewrites color values to HASH tokens in place."""
    dumper.translate_colors(decl, '')
    actual = (decl.name, list(jsonify(decl.value)))
    assert actual == expected_decl
@pytest.mark.parametrize(('decl', 'expected_error'), [
    (DC('prop', "#12345"),
     "unexpected length of 5 of color hash for property prop"),
    (DC('prop', "\"#12345\""),
     "unexpected length of 5 of color hash for property prop"),
    (DC('prop', "yolo()"),
     "unknown function 'yolo()' in property prop"),
    # NOTE: the original list contained this "rgb()" case twice; the
    # accidental duplicate parametrization has been removed.
    (DC('prop', "rgb()"),
     "expected 3 parameters for function 'rgb()', got 0"),
    (DC('prop', "rgba(1, 2, 3, 4, 5)"),
     "expected 4 parameters for function 'rgba()', got 5"),
    (DC('prop', "rgb(1, 2 3, 4)"),
     "expected 1 token for parameter 2 in function 'rgb()', got 3"),
    (DC('prop', "rgb(1, 2, 3}"),
     "expected 1 token for parameter 3 in function 'rgb()', got 2"),
    # Can't test all possible value types here, so only cover all params and
    # possible values as a whole
    (DC('prop', "rgb(hi, 2, 3)"),
     "unexpected IDENT value for parameter 1 in function 'rgb()'"),
    (DC('prop', "rgb(1, 2, 2.2)"),
     "unexpected NUMBER value for parameter 3 in function 'rgb()'"),
    (DC('prop', "rgba(1, 2, 2, 10%)"),
     "unexpected PERCENTAGE value for parameter 4 in function 'rgba()'"),
    (DC('prop', "hsl(\"string\", 2%, 3%)"),
     "unexpected STRING value for parameter 1 in function 'hsl()'"),
    (DC('prop', "hsl(0, 2, 3%)"),
     "unexpected INTEGER value for parameter 2 in function 'hsl()'"),
    (DC('prop', "hsla(0, 2%, 3%, #123)"),
     "unexpected HASH value for parameter 4 in function 'hsla()'"),
])
def test_translate_colors_errors(decl, expected_error):
    """Invalid color hashes and color functions raise DumpError."""
    with pytest.raises(DumpError) as exc_info:
        dumper.translate_colors(decl, '')
    assert expected_error in str(exc_info.value)
| |
import datetime
from io import BytesIO
import re
from warnings import catch_warnings
import numpy as np
import pytest
from pandas import (
CategoricalIndex,
DataFrame,
HDFStore,
MultiIndex,
_testing as tm,
date_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
ensure_clean_path,
ensure_clean_store,
)
from pandas.io.pytables import (
Term,
_maybe_adjust_name,
)
# Marks every test in this module so the suite scheduler runs it single-CPU.
pytestmark = pytest.mark.single_cpu
def test_pass_spec_to_storer(setup_path):
    """select() on a Fixed-format store rejects columns= and where= specs."""
    frame = tm.makeDataFrame()
    with ensure_clean_store(setup_path) as store:
        store.put("df", frame)

        columns_msg = (
            "cannot pass a column specification when reading a Fixed format "
            "store. this store must be selected in its entirety"
        )
        with pytest.raises(TypeError, match=columns_msg):
            store.select("df", columns=["A"])

        where_msg = (
            "cannot pass a where specification when reading from a Fixed "
            "format store. this store must be selected in its entirety"
        )
        with pytest.raises(TypeError, match=where_msg):
            store.select("df", where=[("columns=A")])
def test_table_index_incompatible_dtypes(setup_path):
    """Appending a frame whose index dtype conflicts with the stored one fails."""
    int_indexed = DataFrame({"a": [1, 2, 3]})
    date_indexed = DataFrame(
        {"a": [4, 5, 6]}, index=date_range("1/1/2000", periods=3)
    )
    with ensure_clean_store(setup_path) as store:
        store.put("frame", int_indexed, format="table")
        msg = re.escape("incompatible kind in col [integer - datetime64]")
        with pytest.raises(TypeError, match=msg):
            store.put("frame", date_indexed, format="table", append=True)
def test_unimplemented_dtypes_table_columns(setup_path):
    """Columns with unsupported dtypes must raise TypeError on append."""
    with ensure_clean_store(setup_path) as store:
        # Single (name, value) pair today; the loop keeps the test easy to
        # extend with more unsupported dtypes later.
        dtypes = [("date", datetime.date(2001, 1, 2))]
        # currently not supported dtypes ####
        for n, f in dtypes:
            df = tm.makeDataFrame()
            df[n] = f
            msg = re.escape(f"[{n}] is not implemented as a table column")
            with pytest.raises(TypeError, match=msg):
                store.append(f"df1_{n}", df)
    # frame
    df = tm.makeDataFrame()
    df["obj1"] = "foo"
    df["obj2"] = "bar"
    df["datetime1"] = datetime.date(2001, 1, 2)
    df = df._consolidate()._convert(datetime=True)
    with ensure_clean_store(setup_path) as store:
        # this fails because we have a date in the object block......
        msg = re.escape(
            """Cannot serialize the column [datetime1]
            because its data contents are not [string] but [date] object dtype"""
        )
        with pytest.raises(TypeError, match=msg):
            store.append("df_unimplemented", df)
def test_invalid_terms(setup_path):
    """Malformed where/Term expressions raise Type/Value/SyntaxError."""
    with ensure_clean_store(setup_path) as store:
        with catch_warnings(record=True):
            df = tm.makeTimeDataFrame()
            df["string"] = "foo"
            df.loc[df.index[0:4], "string"] = "bar"
            store.put("df", df, format="table")
            # some invalid terms
            msg = re.escape(
                "__init__() missing 1 required positional argument: 'where'"
            )
            with pytest.raises(TypeError, match=msg):
                Term()
            # more invalid
            msg = re.escape(
                "cannot process expression [df.index[3]], "
                "[2000-01-06 00:00:00] is not a valid condition"
            )
            with pytest.raises(ValueError, match=msg):
                store.select("df", "df.index[3]")
            msg = "invalid syntax"
            with pytest.raises(SyntaxError, match=msg):
                store.select("df", "index>")
    # from the docs
    with ensure_clean_path(setup_path) as path:
        dfq = DataFrame(
            np.random.randn(10, 4),
            columns=list("ABCD"),
            index=date_range("20130101", periods=10),
        )
        dfq.to_hdf(path, "dfq", format="table", data_columns=True)
        # check ok
        read_hdf(path, "dfq", where="index>Timestamp('20130104') & columns=['A', 'B']")
        read_hdf(path, "dfq", where="A>0 or C>0")
    # catch the invalid reference
    # NOTE: same query as above, but without data_columns=True the column
    # names A/C are not queryable, so the reference is invalid here.
    with ensure_clean_path(setup_path) as path:
        dfq = DataFrame(
            np.random.randn(10, 4),
            columns=list("ABCD"),
            index=date_range("20130101", periods=10),
        )
        dfq.to_hdf(path, "dfq", format="table")
        msg = (
            r"The passed where expression: A>0 or C>0\n\s*"
            r"contains an invalid variable reference\n\s*"
            r"all of the variable references must be a reference to\n\s*"
            r"an axis \(e.g. 'index' or 'columns'\), or a data_column\n\s*"
            r"The currently defined references are: index,columns\n"
        )
        with pytest.raises(ValueError, match=msg):
            read_hdf(path, "dfq", where="A>0 or C>0")
def test_append_with_diff_col_name_types_raises_value_error(setup_path):
    """Appending frames whose column labels differ in type must raise.

    The stored frame has the integer column label 0; appends with string,
    tuple, or mixed-tuple labels must fail with a structure mismatch.
    """
    df = DataFrame(np.random.randn(10, 1))
    df2 = DataFrame({"a": np.random.randn(10)})
    df3 = DataFrame({(1, 2): np.random.randn(10)})
    df4 = DataFrame({("1", 2): np.random.randn(10)})
    df5 = DataFrame({("1", 2, object): np.random.randn(10)})
    with ensure_clean_store(setup_path) as store:
        name = f"df_{tm.rands(10)}"
        store.append(name, df)
        # The message is identical for every frame; hoist the loop-invariant
        # re.escape() out of the loop instead of rebuilding it per iteration.
        msg = re.escape(
            "cannot match existing table structure for [0] on appending data"
        )
        for d in (df2, df3, df4, df5):
            with pytest.raises(ValueError, match=msg):
                store.append(name, d)
def test_invalid_complib(setup_path):
    """to_hdf rejects compression libraries it does not know about."""
    frame = DataFrame(
        np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
    )
    with tm.ensure_clean(setup_path) as path:
        msg = r"complib only supports \[.*\] compression."
        with pytest.raises(ValueError, match=msg):
            frame.to_hdf(path, "df", complib="foolib")
@pytest.mark.parametrize(
    "idx",
    [
        date_range("2019", freq="D", periods=3, tz="UTC"),
        CategoricalIndex(list("abc")),
    ],
)
def test_to_hdf_multiindex_extension_dtype(idx, setup_path):
    """Saving a MultiIndex with extension-dtype levels is unsupported (GH 7775)."""
    frame = DataFrame(0, index=MultiIndex.from_arrays([idx, idx]), columns=["a"])
    with ensure_clean_path(setup_path) as path:
        with pytest.raises(NotImplementedError, match="Saving a MultiIndex"):
            frame.to_hdf(path, "df")
def test_unsuppored_hdf_file_error(datapath):
    """read_hdf raises ValueError for HDF5 files pandas cannot interpret."""
    # GH 9539
    # NOTE(review): "unsuppored" is a typo for "unsupported"; the name is
    # kept unchanged so existing test-id selections keep working.
    data_path = datapath("io", "data", "legacy_hdf/incompatible_dataset.h5")
    message = (
        r"Dataset\(s\) incompatible with Pandas data types, "
        "not table, or no datasets found in HDF5 file."
    )
    with pytest.raises(ValueError, match=message):
        read_hdf(data_path)
def test_read_hdf_errors(setup_path):
    """read_hdf raises for a missing file and for a closed HDFStore handle."""
    frame = DataFrame(
        np.random.rand(4, 5), index=list("abcd"), columns=list("ABCDE")
    )
    with ensure_clean_path(setup_path) as path:
        # No file has been written yet.
        msg = r"File [\S]* does not exist"
        with pytest.raises(OSError, match=msg):
            read_hdf(path, "key")

        frame.to_hdf(path, "df")
        store = HDFStore(path, mode="r")
        store.close()
        # A closed store handle cannot be read from.
        msg = "The HDFStore must be open for reading."
        with pytest.raises(OSError, match=msg):
            read_hdf(store, "df")
def test_read_hdf_generic_buffer_errors():
    """read_hdf refuses arbitrary in-memory buffers."""
    expected = "Support for generic buffers has not been implemented."
    with pytest.raises(NotImplementedError, match=expected):
        read_hdf(BytesIO(b""), "df")
@pytest.mark.parametrize("bad_version", [(1, 2), (1,), [], "12", "123"])
def test_maybe_adjust_name_bad_version_raises(bad_version):
    """_maybe_adjust_name rejects versions that are not 3-integer sequences."""
    expected = "Version is incorrect, expected sequence of 3 integers"
    with pytest.raises(ValueError, match=expected):
        _maybe_adjust_name("values_block_0", version=bad_version)
| |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import logging
from collections import Counter
import torch
class EM:
    """
    EM algorithm used to quantize the columns of W to minimize
    ||W - W_hat||^2
    Args:
        - W: weight matrix of size (in_features x out_features)
        - n_iter: number of k-means iterations
        - n_centroids: number of centroids (size of codebook)
        - eps: for cluster reassignment when an empty cluster is found
        - max_tentatives for cluster reassignment when an empty cluster is found
        - verbose: print error after each iteration
    Remarks:
        - If one cluster is empty, the most populated cluster is split into
          two clusters
        - All the relevant dimensions are specified in the code
    """
    def __init__(
        self, W, n_centroids=256, n_iter=20, eps=1e-6, max_tentatives=30, verbose=True
    ):
        self.W = W
        self.n_centroids = n_centroids
        self.n_iter = n_iter
        self.eps = eps
        self.max_tentatives = max_tentatives
        self.verbose = verbose
        # centroids/assignments stay empty until initialize_centroids()/step()
        # (or load()) populate them.
        self.centroids = torch.Tensor()
        self.assignments = torch.Tensor()
        # L2 reconstruction error appended after every step(), for monitoring.
        self.objective = []
    def initialize_centroids(self):
        """
        Initializes the centroids by sampling random columns from W.
        """
        in_features, out_features = self.W.size()
        # Sampling is with replacement, so the same column can seed two
        # centroids; resolve_empty_clusters() later splits such collisions.
        indices = torch.randint(
            low=0, high=out_features, size=(self.n_centroids,)
        ).long()
        self.centroids = self.W[:, indices].t() # (n_centroids x in_features)
    def step(self, i):
        """
        There are two standard steps for each iteration: expectation (E) and
        minimization (M). The E-step (assignment) is performed with an exhaustive
        search and the M-step (centroid computation) is performed with
        the exact solution.
        Args:
            - i: step number
        Remarks:
            - The E-step heavily uses PyTorch broadcasting to speed up computations
              and reduce the memory overhead
        Raises:
            - EmptyClusterResolveError (via resolve_empty_clusters) when empty
              clusters cannot be fixed within max_tentatives attempts
        """
        # assignments (E-step)
        distances = self.compute_distances() # (n_centroids x out_features)
        self.assignments = torch.argmin(distances, dim=0) # (out_features)
        n_empty_clusters = self.resolve_empty_clusters()
        # centroids (M-step); every cluster is non-empty here (resolved above),
        # so mean(dim=1) cannot produce NaN from an empty slice.
        for k in range(self.n_centroids):
            W_k = self.W[:, self.assignments == k] # (in_features x size_of_cluster_k)
            self.centroids[k] = W_k.mean(dim=1) # (in_features)
        # book-keeping
        obj = (self.centroids[self.assignments].t() - self.W).norm(p=2).item()
        self.objective.append(obj)
        if self.verbose:
            logging.info(
                f"Iteration: {i},\t"
                f"objective: {obj:.6f},\t"
                f"resolved empty clusters: {n_empty_clusters}"
            )
    def resolve_empty_clusters(self):
        """
        If one cluster is empty, the most populated cluster is split into
        two clusters by shifting the respective centroids. This is done
        iteratively for a fixed number of tentatives.

        Returns the number of empty clusters found on entry.
        Raises EmptyClusterResolveError after max_tentatives attempts.
        """
        # empty clusters
        counts = Counter(map(lambda x: x.item(), self.assignments))
        empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
        n_empty_clusters = len(empty_clusters)
        tentatives = 0
        while len(empty_clusters) > 0:
            # given an empty cluster, find most populated cluster and split it into two
            k = random.choice(list(empty_clusters))
            m = counts.most_common(1)[0][0]
            # Nudge both halves by +/- eps noise so they attract different
            # columns on the next assignment pass.
            e = torch.randn_like(self.centroids[m]) * self.eps
            self.centroids[k] = self.centroids[m].clone()
            self.centroids[k] += e
            self.centroids[m] -= e
            # recompute assignments
            distances = self.compute_distances() # (n_centroids x out_features)
            self.assignments = torch.argmin(distances, dim=0) # (out_features)
            # check for empty clusters
            counts = Counter(map(lambda x: x.item(), self.assignments))
            empty_clusters = set(range(self.n_centroids)) - set(counts.keys())
            # increment tentatives
            if tentatives == self.max_tentatives:
                logging.info(
                    f"Could not resolve all empty clusters, {len(empty_clusters)} remaining"
                )
                raise EmptyClusterResolveError
            tentatives += 1
        return n_empty_clusters
    def compute_distances(self):
        """
        For every centroid m, computes
        ||M - m[None, :]||_2
        Remarks:
            - We rely on PyTorch's broadcasting to speed up computations
              and reduce the memory overhead
            - Without chunking, the sizes in the broadcasting are modified as:
              (n_centroids x n_samples x out_features) -> (n_centroids x out_features)
            - The broadcasting computation is automatically chunked so that
              the tensors fit into the memory of the GPU
        """
        nb_centroids_chunks = 1
        while True:
            try:
                return torch.cat(
                    [
                        (self.W[None, :, :] - centroids_c[:, :, None]).norm(p=2, dim=1)
                        for centroids_c in self.centroids.chunk(
                            nb_centroids_chunks, dim=0
                        )
                    ],
                    dim=0,
                )
            except RuntimeError:
                # NOTE(review): assumes the RuntimeError is an out-of-memory
                # error; any other RuntimeError would also halve the chunk
                # size and retry indefinitely — confirm upstream.
                nb_centroids_chunks *= 2
    def assign(self):
        """
        Assigns each column of W to its closest centroid, thus essentially
        performing the E-step in train().
        Remarks:
            - The function must be called after train() or after loading
              centroids using self.load(), otherwise it will return empty tensors
        """
        distances = self.compute_distances() # (n_centroids x out_features)
        self.assignments = torch.argmin(distances, dim=0) # (out_features)
    def save(self, path, layer):
        """
        Saves centroids and assignments.
        Args:
            - path: folder used to save centroids and assignments
            - layer: name prefix for the three saved files
        """
        torch.save(self.centroids, os.path.join(path, "{}_centroids.pth".format(layer)))
        torch.save(
            self.assignments, os.path.join(path, "{}_assignments.pth".format(layer))
        )
        torch.save(self.objective, os.path.join(path, "{}_objective.pth".format(layer)))
    def load(self, path, layer):
        """
        Loads centroids and assignments from a given path
        Args:
            - path: folder use to load centroids and assignments
            - layer: name prefix of the files written by save()
        """
        self.centroids = torch.load(
            os.path.join(path, "{}_centroids.pth".format(layer))
        )
        self.assignments = torch.load(
            os.path.join(path, "{}_assignments.pth".format(layer))
        )
        self.objective = torch.load(
            os.path.join(path, "{}_objective.pth".format(layer))
        )
class EmptyClusterResolveError(Exception):
    """Raised when empty clusters remain after max_tentatives split attempts."""
    pass
| |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
"""
Unit Test Cases for :mod:`tvecs.vector_space_mapper.vector_space_mapper`.
.. py:currentmodule:: tvecs.vector_space_mapper.vector_space_mapper
"""
import os
import pytest
import random
from tvecs.model_generator import model_generator as mod
from tvecs.bilingual_generator import bilingual_generator as bg
from tvecs.vector_space_mapper.vector_space_mapper import VectorSpaceMapper
class TestVectorSpaceMapper:
    """
    Unit tests for :mod:`tvecs.vector_space_mapper.vector_space_mapper`.

    Builds an English and a Hindi model once per class, maps their vector
    spaces through a bilingual dictionary, and then exercises:
        - :func:`VectorSpaceMapper._extract_vectors_and_words`
        - :func:`VectorSpaceMapper._predict_vec_from_word`
        - :func:`VectorSpaceMapper._predict_vec_from_vec`
        - :func:`VectorSpaceMapper.get_recommendations_from_vec`
        - :func:`VectorSpaceMapper.get_recommendations_from_word`
    """

    def setup_class(cls):
        """Generate both models, load the bilingual dict, map vector spaces."""
        try:
            eng_model = mod.generate_model(
                preprocessor_type='HcCorpusPreprocessor',
                language='english',
                corpus_fname='test_english',
                corpus_dir_path=os.path.join('tests', 'resources'),
                output_dir_path=os.path.join('tests', 'resources'),
                need_preprocessing=True,
                output_fname='model_1'
            )
            hin_model = mod.generate_model(
                preprocessor_type='HcCorpusPreprocessor',
                language='hindi',
                corpus_fname='test_hindi',
                corpus_dir_path=os.path.join('tests', 'resources'),
                output_dir_path=os.path.join('tests', 'resources'),
                need_preprocessing=True,
                output_fname='model_2'
            )
        except Exception as err:
            pytest.fail('Model construction failed: %s' % err)
        try:
            train_dict = bg.load_bilingual_dictionary(
                os.path.join(
                    'data', 'bilingual_dictionary', 'english_hindi_train_bd'
                )
            )
        except Exception as err:
            pytest.fail('Bilingual Dictionary Construction failed: %s' % err)
        try:
            cls.testing_obj = VectorSpaceMapper(
                eng_model, hin_model, train_dict
            )
            cls.testing_obj.map_vector_spaces()
        except BaseException as err:
            pytest.fail('Vector Space Mapping failed : %s' % err)

    def teardown_class(cls):
        """Remove the model files written to tests/resources by setup_class."""
        try:
            os.remove(os.path.join('tests', 'resources', 'model_1'))
            os.remove(os.path.join('tests', 'resources', 'model_2'))
        except (OSError, IOError):
            # Best effort: missing files simply mean there is nothing to clean.
            pass

    def test_english_extract_vectors_and_words(self):
        """English words map to (100,)-shaped vectors, unknown words to None."""
        obj = type(self).testing_obj
        expected_list = {
            u'has': (100,),
            u'have': (100,),
            u'\u0915\u093e': None
        }
        vectors, words = obj._extract_vectors_and_words(
            obj.model_1, expected_list.keys()
        )
        shapes = [None if vec is None else vec.shape for vec in vectors]
        result = dict(zip(words, shapes))
        assert_err_msg = ("_extract_vectors_and_words failed for english"
                         " model [ Data Structure test ]")
        for word, expected_shape in expected_list.items():
            assert result[word] == expected_shape, assert_err_msg

    def test_hindi_extract_vectors_and_words(self):
        """Hindi words map to (100,)-shaped vectors, unknown words to None."""
        obj = type(self).testing_obj
        expected_list = {
            u'\u0915\u093e': (100,),
            u'\u0925\u0940': (100,),
            u'english': None
        }
        vectors, words = obj._extract_vectors_and_words(
            obj.model_2, expected_list.keys()
        )
        shapes = [None if vec is None else vec.shape for vec in vectors]
        result = dict(zip(words, shapes))
        assert_err_msg = ("_extract_vectors_and_words failed for hindi"
                         " model [ Data Structure test ]")
        for word, expected_shape in expected_list.items():
            assert result[word] == expected_shape, assert_err_msg

    def test_predict_vec_from_word(self):
        """_predict_vec_from_word returns a 100-dimensional vector."""
        obj = type(self).testing_obj
        assert_err_msg = ("_predict_vec_from_word execution failed "
                         "[ Data Structure test ]")
        assert (100,) == obj._predict_vec_from_word('has').shape, assert_err_msg

    def test_predict_vec_from_vec(self):
        """_predict_vec_from_vec returns a 100-dimensional vector."""
        obj = type(self).testing_obj
        predicted = obj._predict_vec_from_vec(obj.model_1.wv['has'])
        assert_err_msg = "_predict_vec_from_vec failed [ Data Structure test ]"
        assert (100,) == predicted.shape, assert_err_msg

    def test_predict_vec_from_word_and_vec_match(self):
        """Word-based and vector-based predictions agree for the same token."""
        obj = type(self).testing_obj
        from_vec = obj._predict_vec_from_vec(obj.model_1.wv['has'])
        from_word = obj._predict_vec_from_word('has')
        assert_err_msg = ("Recommendations from _predict_vec_from_vec"
                         "and _predict_vec_from_word do not match")
        assert all(from_vec == from_word), assert_err_msg

    def test_num_recom_from_get_recommendations_from_word(self):
        """get_recommendations_from_word honours the topn argument."""
        obj = type(self).testing_obj
        requested = random.randint(1, 10)
        recommendations = obj.get_recommendations_from_word(
            'has', topn=requested
        )
        assert_err_msg = ("_get_recommendations_from_word returned"
                         "wrong number of recommendations")
        assert len(recommendations) == requested, assert_err_msg

    def test_num_recom_from_get_recommendations_from_vec(self):
        """get_recommendations_from_vec honours the topn argument."""
        obj = type(self).testing_obj
        requested = random.randint(1, 10)
        recommendations = obj.get_recommendations_from_vec(
            obj.model_1.wv['has'], topn=requested
        )
        assert_err_msg = ("_get_recommendations_from_word returned"
                         "wrong number of recommendations")
        assert len(recommendations) == requested, assert_err_msg

    def test_get_recommendations_from_word(self):
        """Recommendations from a word are (str word, float distance) pairs."""
        obj = type(self).testing_obj
        assert_err_msg = ("_get_recommendations_from_word"
                         "returned wrong type")
        for word, dist in obj.get_recommendations_from_word('has'):
            assert type(word) is str and type(dist) is float, assert_err_msg

    def test_get_recommendations_from_vec(self):
        """Recommendations from a vector are (str word, float distance) pairs."""
        obj = type(self).testing_obj
        assert_err_msg = ("_get_recommendations_from_word"
                         "returned wrong type")
        for word, dist in obj.get_recommendations_from_vec(
                obj.model_1.wv['has']):
            assert type(word) is str and type(dist) is float, assert_err_msg

    def test_get_recommendations_from_vec_and_word_match(self):
        """Word-based and vector-based recommendations are identical."""
        obj = type(self).testing_obj
        by_word = dict(obj.get_recommendations_from_word('has'))
        by_vec = dict(obj.get_recommendations_from_vec(obj.model_1.wv['has']))
        assert_err_msg = ("_get_recommendations_from_vec do not match"
                         "_get_recommendations_from_word")
        for word, dist in by_vec.items():
            assert dist == by_word[word], assert_err_msg
| |
import datetime
import logging
import re
from django.utils import timezone
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django_auth_policy import signals
from django_auth_policy.models import LoginAttempt
from django_auth_policy import BasePolicy
# Module-level logger shared by every policy class below.
logger = logging.getLogger(__name__)
class AuthenticationPolicy(BasePolicy):
    """ Checks run when authenticating.
    Policies can define:
    `pre_auth_check` for checks that should be run before attempting to
    authenticate provided credentials.
    `post_auth_check` for checks that should be run after attempting to
    authenticate provided credentials.
    Both `pre_auth_check` and `post_auth_check` raise a ValidationError
    when authentication fails to comply with the policy
    `auth_success` is run when the attempt was successful and should not
    raise a ValidationError
    """
    def pre_auth_check(self, loginattempt, password):
        # Hook for subclasses; the base policy performs no pre-auth check.
        pass
    def post_auth_check(self, loginattempt):
        # Hook for subclasses; the base policy performs no post-auth check.
        pass
    def auth_success(self, loginattempt):
        # Hook for subclasses; must not raise ValidationError.
        pass
class AuthenticationBasicChecks(AuthenticationPolicy):
    """Reject attempts with missing credentials, bad logins or inactive users."""
    # Deliberately generic message so a failure does not reveal which of
    # the username/password was wrong.
    text = _("Please enter a correct username and password. "
             "Note that both fields may be case-sensitive.")
    def pre_auth_check(self, loginattempt, password):
        # Fail fast when either credential is missing, before hitting the
        # authentication backends.
        if not loginattempt.username:
            logger.info(u'Authentication failure, address=%s, '
                        'no username supplied.',
                        loginattempt.source_address)
            raise ValidationError(self.text, code='invalid_login')
        if not password:
            logger.info(u'Authentication failure, username=%s, '
                        'address=%s, no password supplied.',
                        loginattempt.username,
                        loginattempt.source_address)
            raise ValidationError(self.text, code='invalid_login')
    def post_auth_check(self, loginattempt):
        # loginattempt.user is None when no backend accepted the credentials.
        if loginattempt.user is None:
            logger.info(u'Authentication failure, username=%s, '
                        'address=%s, invalid authentication.',
                        loginattempt.username, loginattempt.source_address)
            raise ValidationError(self.text, code='invalid_login')
        if not loginattempt.user.is_active:
            logger.info(u'Authentication failure, username=%s, '
                        'address=%s, user inactive.',
                        loginattempt.username, loginattempt.source_address)
            raise ValidationError(self.text, code='inactive')
class AuthenticationDisableExpiredUsers(AuthenticationPolicy):
    """ Disable ALL users that have been expired
    Users must have an `is_active` and a `last_login` field
    Reactivate user by setting is_active to True and last_login to
    now.
    """
    # Days after which users without a successful login expire, make sure
    # user sessions are short enough to enforce frequent re-logins
    inactive_period = 90
    def pre_auth_check(self, loginattempt, password):
        expire_at = timezone.now() - datetime.timedelta(
            days=self.inactive_period)
        expired = get_user_model().objects.filter(is_active=True,
                                                  last_login__lt=expire_at)
        for user in expired:
            # Fixed: the original called the Python-2-only builtin
            # ``unicode(user)``, a NameError on Python 3. Passing the user
            # object lets logging's %s formatting stringify it lazily.
            logger.info('User %s disabled because last login was at %s',
                        user, user.last_login)
            # Send signal to be used to alert admins
            signals.user_expired.send(sender=user, user=user)
        expired.update(is_active=False)
def _format_lockduration(seconds):
    """Return a human-readable, translated description of *seconds*."""
    delta = datetime.timedelta(seconds=seconds)
    days, secs = delta.days, delta.seconds
    if days > 1:
        return _('{days} days').format(days=days)
    if days == 1:
        return _('a day')
    if secs >= 120:
        return _('{mins} minutes').format(mins=secs // 60)
    if secs >= 60:
        return _('a minute')
    return _('{secs} seconds').format(secs=secs)
class AuthenticationLockedUsername(AuthenticationPolicy):
    """ Lockout usernames with too many failed login attempts within a certain
    period.
    """
    # Number of failed login attempts
    max_failed = 3
    # Period in seconds used to count number of failed login attempts,
    # None = indefinite
    period = None
    # Lockout duration in seconds
    lockout_duration = 60 * 10
    # Validation error
    text = _('Too many failed login attempts. Your account has been locked '
             'for {duration}.')

    def pre_auth_check(self, loginattempt, password):
        # Raise ValidationError when this username has accumulated
        # `max_failed` locking failures within `period` and the most
        # recent locking attempt is still inside `lockout_duration`.
        try:
            prev_login = LoginAttempt.objects.filter(
                username=loginattempt.username).exclude(
                    pk=loginattempt.pk).order_by('-id')[0]
        except IndexError:
            # No login attempts for this username and thus no lockout
            return
        # If previous login did not count towards a lockout one is certainly
        # not locked out
        if not prev_login.lockout:
            return
        # If previous login was before lockout duration one is not
        # locked out (anymore)
        lock_from = (timezone.now() -
                     datetime.timedelta(seconds=self.lockout_duration))
        if prev_login.timestamp < lock_from:
            return
        # Count number of locking login attempts
        user_lockout = LoginAttempt.objects.filter(
            username=loginattempt.username, successful=False,
            lockout=True).exclude(pk=loginattempt.pk)
        if self.period is not None:
            lockout_count_from = timezone.now() - datetime.timedelta(
                seconds=self.period)
            user_lockout = user_lockout.filter(
                timestamp__gt=lockout_count_from)
        if user_lockout.count() >= self.max_failed:
            logger.info(u'Authentication failure, username=%s, address=%s, '
                        'username locked', loginattempt.username,
                        loginattempt.source_address)
            raise ValidationError(self.validation_msg,
                                  code='username_locked_out')

    def auth_success(self, loginattempt):
        # Reset lockout counts for username
        LoginAttempt.objects.filter(username=loginattempt.username,
                                    lockout=True).update(lockout=False)

    @property
    def validation_msg(self):
        # Render the lockout message with a human-readable duration.
        dur = _format_lockduration(self.lockout_duration)
        return self.text.format(duration=dur)
class AuthenticationLockedRemoteAddress(AuthenticationPolicy):
    """ Lockout IP addresses with too many failed login attempts within a
    certain period.
    """
    # Number of failed login attempts
    max_failed = 3
    # Period in seconds used to count number of failed login attempts
    period = None
    # Lockout duration in seconds
    lockout_duration = 60 * 10
    # Validation error
    text = _('Too many failed login attempts. Your account has been locked '
             'for {duration}.')

    def pre_auth_check(self, loginattempt, password):
        # Same logic as AuthenticationLockedUsername.pre_auth_check but
        # keyed on the remote source address instead of the username.
        try:
            prev_login = LoginAttempt.objects.filter(
                source_address=loginattempt.source_address).exclude(
                    pk=loginattempt.pk).order_by('-id')[0]
        except IndexError:
            # No login attempts from this address and thus no lockout
            return
        # If previous login did not count towards a lockout one is certainly
        # not locked out
        if not prev_login.lockout:
            return
        # If previous login was before lockout duration one is not
        # locked out (anymore)
        lock_from = (timezone.now() -
                     datetime.timedelta(seconds=self.lockout_duration))
        if prev_login.timestamp < lock_from:
            return
        # Count number of locking login attempts
        user_lockout = LoginAttempt.objects.filter(
            source_address=loginattempt.source_address, successful=False,
            lockout=True).exclude(pk=loginattempt.pk)
        if self.period is not None:
            lockout_count_from = timezone.now() - datetime.timedelta(
                seconds=self.period)
            user_lockout = user_lockout.filter(
                timestamp__gt=lockout_count_from)
        if user_lockout.count() >= self.max_failed:
            logger.info(u'Authentication failure, username=%s, address=%s, '
                        'address locked',
                        loginattempt.username,
                        loginattempt.source_address)
            raise ValidationError(self.validation_msg,
                                  code='address_locked_out')

    def auth_success(self, loginattempt):
        # Reset lockout counts for this source address
        LoginAttempt.objects.filter(source_address=loginattempt.source_address,
                                    lockout=True).update(lockout=False)

    @property
    def validation_msg(self):
        # Render the lockout message with a human-readable duration.
        dur = _format_lockduration(self.lockout_duration)
        return self.text.format(duration=dur)
class AuthenticationUsernameWhitelist(AuthenticationPolicy):
    """ Only allow usernames that match regular expressions
    Useful to restrict login with email addresses with a certain domainname
    """
    # Regex pattern strings; override in subclasses.
    whitelist = []
    # Lazily-built cache of compiled patterns. Kept as a class attribute
    # for interface compatibility, but the cache itself is stored on the
    # instance (see pre_auth_check).
    _whitelist_regex = []
    text = _("Please enter a correct username and password. "
             "Note that both fields may be case-sensitive.")

    def pre_auth_check(self, loginattempt, password):
        """Raise ValidationError unless the username matches one of the
        whitelisted patterns.

        Note that an empty whitelist therefore rejects every username.
        """
        if not self._whitelist_regex:
            # Build a fresh list and bind it to the *instance*. The
            # original appended to the shared class-level list, so all
            # subclasses reused whichever whitelist happened to be
            # compiled first.
            self._whitelist_regex = [re.compile(pattern)
                                     for pattern in self.whitelist]
        for regex in self._whitelist_regex:
            if regex.search(loginattempt.username):
                logger.debug('Username matched whitelisted pattern %s',
                             regex.pattern)
                return
        logger.info(u'Authentication failure, username %s did not match '
                    'whitelisted pattern(s)', loginattempt.username)
        raise ValidationError(self.text, code='invalid_login')
| |
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
from __future__ import with_statement, unicode_literals
import copy
import logging
import socket as Socket
import ssl
import sys
import threading
import time
import types
import signal
try:
import queue
except ImportError:
import Queue as queue
from sleekxmpp.thirdparty.statemachine import StateMachine
from sleekxmpp.xmlstream import Scheduler, tostring
from sleekxmpp.xmlstream.stanzabase import StanzaBase, ET
# In Python 2.x, file socket objects are broken. A patched socket
# wrapper is provided for this case in filesocket.py.
if sys.version_info < (3, 0):
from sleekxmpp.xmlstream.filesocket import FileSocket, Socket26
# The time in seconds to wait before timing out waiting for response stanzas.
RESPONSE_TIMEOUT = 10

# The number of threads to use to handle XML stream events. This is not the
# same as the number of custom event handling threads. HANDLER_THREADS must
# be at least 1.
HANDLER_THREADS = 1

# Flag indicating if the SSL library is available for use.
SSL_SUPPORT = True

# Module-level logger for the XML stream machinery.
log = logging.getLogger(__name__)
class RestartStream(Exception):
    """
    Raised during stanza processing to signal that stream processing
    should start over, including resending the stream header.
    """
class XMLStream(object):
    """
    An XML stream connection manager and event dispatcher.

    The XMLStream class abstracts away the issues of establishing a
    connection with a server and sending and receiving XML "stanzas".
    A stanza is a complete XML element that is a direct child of a root
    document element. Two streams are used, one for each communication
    direction, over the same socket. Once the connection is closed, both
    streams should be complete and valid XML documents.

    Three types of events are provided to manage the stream:
        Stream    -- Triggered based on received stanzas, similar in concept
                     to events in a SAX XML parser.
        Custom    -- Triggered manually.
        Scheduled -- Triggered based on time delays.

    Typically, stanzas are first processed by a stream event handler which
    will then trigger custom events to continue further processing,
    especially since custom event handlers may run in individual threads.

    Attributes:
        address        -- The hostname and port of the server.
        default_ns     -- The default XML namespace that will be applied
                          to all non-namespaced stanzas.
        event_queue    -- A queue of stream, custom, and scheduled
                          events to be processed.
        filesocket     -- A filesocket created from the main connection
                          socket. Required for ElementTree.iterparse.
        namespace_map  -- Optional mapping of namespaces to namespace
                          prefixes.
        scheduler      -- A scheduler object for triggering events
                          after a given period of time.
        send_queue     -- A queue of stanzas to be sent on the stream.
        socket         -- The connection to the server.
        ssl_support    -- Indicates if a SSL library is available for use.
        ssl_version    -- The version of the SSL protocol to use.
                          Defaults to ssl.PROTOCOL_TLSv1.
        state          -- A state machine for managing the stream's
                          connection state.
        stream_header  -- The start tag and any attributes for the stream's
                          root element.
        stream_footer  -- The closing tag of the stream's root element.
        use_ssl        -- Flag indicating if SSL should be used.
        use_tls        -- Flag indicating if TLS should be used.
        stop           -- threading Event used to stop all threads.
        auto_reconnect -- Flag to determine whether we auto reconnect.

    Methods:
        add_event_handler    -- Add a handler for a custom event.
        add_handler          -- Shortcut method for registerHandler.
        connect              -- Connect to the given server.
        del_event_handler    -- Remove a handler for a custom event.
        disconnect           -- Disconnect from the server and terminate
                                processing.
        event                -- Trigger a custom event.
        get_id               -- Return the current stream ID.
        incoming_filter      -- Optionally filter stanzas before processing.
        new_id               -- Generate a new, unique ID value.
        process              -- Read XML stanzas from the stream and apply
                                matching stream handlers.
        reconnect            -- Reestablish a connection to the server.
        register_handler     -- Add a handler for a stream event.
        register_stanza      -- Add a new stanza object type that may appear
                                as a direct child of the stream's root.
        remove_handler       -- Remove a stream handler.
        remove_stanza        -- Remove a stanza object type.
        schedule             -- Schedule an event handler to execute after a
                                given delay.
        send                 -- Send a stanza object on the stream.
        send_raw             -- Send a raw string on the stream.
        send_xml             -- Send an XML string on the stream.
        set_socket           -- Set the stream's socket and generate a new
                                filesocket.
        start_stream_handler -- Perform any stream initialization such
                                as handshakes.
        start_tls            -- Establish a TLS connection and restart
                                the stream.
    """

    def __init__(self, socket=None, host='', port=0):
        """
        Establish a new XML stream.

        Arguments:
            socket -- Use an existing socket for the stream.
                      Defaults to None to generate a new socket.
            host   -- The name of the target server.
                      Defaults to the empty string.
            port   -- The port to use for the connection.
                      Defaults to 0.
        """
        # To comply with PEP8, method names now use underscores.
        # Deprecated method names are re-mapped for backwards compatibility.
        self.startTLS = self.start_tls
        self.registerStanza = self.register_stanza
        self.removeStanza = self.remove_stanza
        self.registerHandler = self.register_handler
        self.removeHandler = self.remove_handler
        self.setSocket = self.set_socket
        self.sendRaw = self.send_raw
        self.getId = self.get_id
        self.getNewId = self.new_id
        self.sendXML = self.send_xml

        self.ssl_support = SSL_SUPPORT
        self.ssl_version = ssl.PROTOCOL_TLSv1

        self.state = StateMachine(('disconnected', 'connected'))
        self.state._set_state('disconnected')

        self.address = (host, int(port))
        self.filesocket = None
        self.set_socket(socket)
        if sys.version_info < (3, 0):
            self.socket_class = Socket26
        else:
            self.socket_class = Socket.socket

        self.use_ssl = False
        self.use_tls = False

        self.default_ns = ''
        self.stream_header = "<stream>"
        self.stream_footer = "</stream>"

        self.stop = threading.Event()
        self.stream_end_event = threading.Event()
        self.stream_end_event.set()

        self.event_queue = queue.Queue()
        self.send_queue = queue.Queue()
        self.scheduler = Scheduler(self.event_queue, self.stop)

        self.namespace_map = {}

        self.__thread = {}
        self.__root_stanza = []
        self.__handlers = []
        self.__event_handlers = {}
        self.__event_handlers_lock = threading.Lock()

        self._id = 0
        self._id_lock = threading.Lock()

        self.auto_reconnect = True
        self.is_client = False

        try:
            if hasattr(signal, 'SIGHUP'):
                signal.signal(signal.SIGHUP, self._handle_kill)
            if hasattr(signal, 'SIGTERM'):
                # Used in Windows
                signal.signal(signal.SIGTERM, self._handle_kill)
        except:
            # Signal handlers may only be installed from the main thread.
            log.debug("Can not set interrupt signal handlers. " + \
                      "SleekXMPP is not running from a main thread.")

    def _handle_kill(self, signum, frame):
        """
        Capture kill event and disconnect cleanly after first
        spawning the "killed" event.
        """
        self.event("killed", direct=True)
        self.disconnect()

    def new_id(self):
        """
        Generate and return a new stream ID in hexadecimal form.

        Many stanzas, handlers, or matchers may require unique
        ID values. Using this method ensures that all new ID values
        are unique in this stream.
        """
        with self._id_lock:
            self._id += 1
            return self.get_id()

    def get_id(self):
        """
        Return the current unique stream ID in hexadecimal form.
        """
        return "%X" % self._id

    def connect(self, host='', port=0, use_ssl=False,
                use_tls=True, reattempt=True):
        """
        Create a new socket and connect to the server.

        Setting reattempt to True will cause connection attempts to be made
        every second until a successful connection is established.

        Arguments:
            host      -- The name of the desired server for the connection.
            port      -- Port to connect to on the server.
            use_ssl   -- Flag indicating if SSL should be used.
            use_tls   -- Flag indicating if TLS should be used.
            reattempt -- Flag indicating if the socket should reconnect
                         after disconnections.
        """
        if host and port:
            self.address = (host, int(port))
        self.is_client = True
        # Respect previous SSL and TLS usage directives.
        # NOTE(review): with defaults of False/True these checks always
        # pass, so previous directives are in fact always overwritten --
        # confirm whether the defaults were meant to be None.
        if use_ssl is not None:
            self.use_ssl = use_ssl
        if use_tls is not None:
            self.use_tls = use_tls

        # Repeatedly attempt to connect until a successful connection
        # is established.
        connected = self.state.transition('disconnected', 'connected',
                                          func=self._connect)
        while reattempt and not connected:
            connected = self.state.transition('disconnected', 'connected',
                                              func=self._connect)
        return connected

    def _connect(self):
        """Create the socket, optionally wrap it in SSL, and connect.

        Returns True on success; on failure sleeps one second and
        returns False so the state machine can retry.
        """
        self.stop.clear()
        self.socket = self.socket_class(Socket.AF_INET, Socket.SOCK_STREAM)
        self.socket.settimeout(None)
        if self.use_ssl and self.ssl_support:
            log.debug("Socket Wrapped for SSL")
            ssl_socket = ssl.wrap_socket(self.socket)
            if hasattr(self.socket, 'socket'):
                # We are using a testing socket, so preserve the top
                # layer of wrapping.
                self.socket.socket = ssl_socket
            else:
                self.socket = ssl_socket

        try:
            log.debug("Connecting to %s:%s" % self.address)
            self.socket.connect(self.address)
            self.set_socket(self.socket, ignore=True)
            # This event is where you should set your application state.
            self.event("connected", direct=True)
            return True
        except Socket.error as serr:
            error_msg = "Could not connect to %s:%s. Socket Error #%s: %s"
            log.error(error_msg % (self.address[0], self.address[1],
                                   serr.errno, serr.strerror))
            time.sleep(1)
            return False

    def disconnect(self, reconnect=False):
        """
        Terminate processing and close the XML streams.

        Optionally, the connection may be reconnected and
        resume processing afterwards.

        Arguments:
            reconnect -- Flag indicating if the connection
                         and processing should be restarted.
                         Defaults to False.
        """
        self.state.transition('connected', 'disconnected', wait=0.0,
                              func=self._disconnect, args=(reconnect,))

    def _disconnect(self, reconnect=False):
        """Close the stream and socket; always returns True."""
        # Send the end of stream marker.
        self.send_raw(self.stream_footer)
        # Wait for confirmation that the stream was
        # closed in the other direction.
        if not reconnect:
            self.auto_reconnect = False
        self.stream_end_event.wait(4)
        if not self.auto_reconnect:
            self.stop.set()
        # Shut down before closing. (The original closed the socket
        # first, so shutdown() always failed on an already-closed
        # socket.) Close is attempted regardless of shutdown errors.
        try:
            self.socket.shutdown(Socket.SHUT_RDWR)
        except Socket.error:
            pass
        try:
            self.socket.close()
            self.filesocket.close()
        except Socket.error:
            pass
        finally:
            # Clear your application state.
            self.event("disconnected", direct=True)
            return True

    def reconnect(self):
        """
        Reset the stream's state and reconnect to the server.
        """
        log.debug("reconnecting...")
        self.state.transition('connected', 'disconnected', wait=2.0,
                              func=self._disconnect, args=(True,))
        log.debug("connecting...")
        return self.state.transition('disconnected', 'connected',
                                     wait=2.0, func=self._connect)

    def set_socket(self, socket, ignore=False):
        """
        Set the socket to use for the stream.

        The filesocket will be recreated as well.

        Arguments:
            socket -- The new socket to use.
            ignore -- don't set the state
        """
        self.socket = socket
        if socket is not None:
            # ElementTree.iterparse requires a file.
            # 0 buffer files have to be binary.

            # Use the correct fileobject type based on the Python
            # version to work around a broken implementation in
            # Python 2.x.
            if sys.version_info < (3, 0):
                self.filesocket = FileSocket(self.socket)
            else:
                self.filesocket = self.socket.makefile('rb', 0)
            if not ignore:
                self.state._set_state('connected')

    def start_tls(self):
        """
        Perform handshakes for TLS.

        If the handshake is successful, the XML stream will need
        to be restarted.
        """
        if self.ssl_support:
            log.info("Negotiating TLS")
            log.info("Using SSL version: %s" % str(self.ssl_version))
            ssl_socket = ssl.wrap_socket(self.socket,
                                         ssl_version=self.ssl_version,
                                         do_handshake_on_connect=False)
            if hasattr(self.socket, 'socket'):
                # We are using a testing socket, so preserve the top
                # layer of wrapping.
                self.socket.socket = ssl_socket
            else:
                self.socket = ssl_socket
            self.socket.do_handshake()
            self.set_socket(self.socket)
            return True
        else:
            log.warning("Tried to enable TLS, but ssl module not found.")
            return False

    def start_stream_handler(self, xml):
        """
        Perform any initialization actions, such as handshakes, once the
        stream header has been sent.

        Meant to be overridden.
        """
        pass

    def register_stanza(self, stanza_class):
        """
        Add a stanza object class as a known root stanza. A root stanza is
        one that appears as a direct child of the stream's root element.

        Stanzas that appear as substanzas of a root stanza do not need to
        be registered here. That is done using register_stanza_plugin() from
        sleekxmpp.xmlstream.stanzabase.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.

        Arguments:
            stanza_class -- The top-level stanza object's class.
        """
        self.__root_stanza.append(stanza_class)

    def remove_stanza(self, stanza_class):
        """
        Remove a stanza from being a known root stanza. A root stanza is
        one that appears as a direct child of the stream's root element.

        Stanzas that are not registered will not be converted into
        stanza objects, but may still be processed using handlers and
        matchers.
        """
        # The original used `del self.__root_stanza[stanza_class]`,
        # which raises TypeError since __root_stanza is a list.
        self.__root_stanza.remove(stanza_class)

    def add_handler(self, mask, pointer, name=None, disposable=False,
                    threaded=False, filter=False, instream=False):
        """
        A shortcut method for registering a handler using XML masks.

        Arguments:
            mask       -- An XML snippet matching the structure of the
                          stanzas that will be passed to this handler.
            pointer    -- The handler function itself.
            name       -- A unique name for the handler. A name will
                          be generated if one is not provided.
            disposable -- Indicates if the handler should be discarded
                          after one use.
            threaded   -- Deprecated. Remains for backwards compatibility.
            filter     -- Deprecated. Remains for backwards compatibility.
            instream   -- Indicates if the handler should execute during
                          stream processing and not during normal event
                          processing.
        """
        # To prevent circular dependencies, we must load the matcher
        # and handler classes here.
        from sleekxmpp.xmlstream.matcher import MatchXMLMask
        from sleekxmpp.xmlstream.handler import XMLCallback

        if name is None:
            name = 'add_handler_%s' % self.new_id()
        self.register_handler(XMLCallback(name, MatchXMLMask(mask), pointer,
                                          once=disposable,
                                          instream=instream))

    def register_handler(self, handler, before=None, after=None):
        """
        Add a stream event handler that will be executed when a matching
        stanza is received.

        Arguments:
            handler -- The handler object to execute.
        """
        if handler.stream is None:
            self.__handlers.append(handler)
            handler.stream = self

    def remove_handler(self, name):
        """
        Remove any stream event handlers with the given name.

        Arguments:
            name -- The name of the handler.
        """
        idx = 0
        for handler in self.__handlers:
            if handler.name == name:
                self.__handlers.pop(idx)
                return True
            idx += 1
        return False

    def add_event_handler(self, name, pointer,
                          threaded=False, disposable=False):
        """
        Add a custom event handler that will be executed whenever
        its event is manually triggered.

        Arguments:
            name       -- The name of the event that will trigger
                          this handler.
            pointer    -- The function to execute.
            threaded   -- If set to True, the handler will execute
                          in its own thread. Defaults to False.
            disposable -- If set to True, the handler will be
                          discarded after one use. Defaults to False.
        """
        if not name in self.__event_handlers:
            self.__event_handlers[name] = []
        self.__event_handlers[name].append((pointer, threaded, disposable))

    def del_event_handler(self, name, pointer):
        """
        Remove a function as a handler for an event.

        Arguments:
            name    -- The name of the event.
            pointer -- The function to remove as a handler.
        """
        if not name in self.__event_handlers:
            return

        # Need to keep handlers that do not use
        # the given function pointer
        def filter_pointers(handler):
            return handler[0] != pointer

        # Materialize with list(): on Python 3, filter() returns an
        # iterator, which would break later append()/len() calls on
        # this entry.
        self.__event_handlers[name] = list(filter(
            filter_pointers, self.__event_handlers[name]))

    def event_handled(self, name):
        """
        Indicates if an event has any associated handlers.

        Returns the number of registered handlers.

        Arguments:
            name -- The name of the event to check.
        """
        return len(self.__event_handlers.get(name, []))

    def event(self, name, data=None, direct=False):
        """
        Manually trigger a custom event.

        Arguments:
            name   -- The name of the event to trigger.
            data   -- Data that will be passed to each event handler.
                      Defaults to an empty dictionary.
            direct -- Runs the event directly if True, skipping the
                      event queue. All event handlers will run in the
                      same thread.
        """
        # Avoid a mutable default argument; handlers still receive a
        # fresh copy of an empty dict when no data is given.
        if data is None:
            data = {}
        for handler in self.__event_handlers.get(name, []):
            if direct:
                try:
                    handler[0](copy.copy(data))
                except Exception as e:
                    error_msg = 'Error processing event handler: %s'
                    log.exception(error_msg % str(handler[0]))
                    if hasattr(data, 'exception'):
                        data.exception(e)
            else:
                self.event_queue.put(('event', handler, copy.copy(data)))
            if handler[2]:
                # If the handler is disposable, we will go ahead and
                # remove it now instead of waiting for it to be
                # processed in the queue.
                with self.__event_handlers_lock:
                    try:
                        h_index = self.__event_handlers[name].index(handler)
                        self.__event_handlers[name].pop(h_index)
                    except:
                        pass

    def schedule(self, name, seconds, callback, args=None,
                 kwargs=None, repeat=False):
        """
        Schedule a callback function to execute after a given delay.

        Arguments:
            name     -- A unique name for the scheduled callback.
            seconds  -- The time in seconds to wait before executing.
            callback -- A pointer to the function to execute.
            args     -- A tuple of arguments to pass to the function.
            kwargs   -- A dictionary of keyword arguments to pass to
                        the function.
            repeat   -- Flag indicating if the scheduled event should
                        be reset and repeat after executing.
        """
        self.scheduler.add(name, seconds, callback, args, kwargs,
                           repeat, qpointer=self.event_queue)

    def incoming_filter(self, xml):
        """
        Filter incoming XML objects before they are processed.

        Possible uses include remapping namespaces, or correcting elements
        from sources with incorrect behavior.

        Meant to be overridden.
        """
        return xml

    def send(self, data, mask=None, timeout=RESPONSE_TIMEOUT):
        """
        A wrapper for send_raw for sending stanza objects.

        May optionally block until an expected response is received.

        Arguments:
            data    -- The stanza object to send on the stream.
            mask    -- Deprecated. An XML snippet matching the structure
                       of the expected response. Execution will block
                       in this thread until the response is received
                       or a timeout occurs.
            timeout -- Time in seconds to wait for a response before
                       continuing. Defaults to RESPONSE_TIMEOUT.
        """
        if hasattr(mask, 'xml'):
            mask = mask.xml
        data = str(data)
        if mask is not None:
            # Deprecated blocking-response path. Import here (as
            # add_handler does) to avoid circular imports; the original
            # referenced Waiter and MatchXMLMask without importing them
            # and raised NameError on this path.
            from sleekxmpp.xmlstream.matcher import MatchXMLMask
            from sleekxmpp.xmlstream.handler import Waiter
            log.warning("Use of send mask waiters is deprecated.")
            wait_for = Waiter("SendWait_%s" % self.new_id(),
                              MatchXMLMask(mask))
            self.register_handler(wait_for)
        self.send_raw(data)
        if mask is not None:
            return wait_for.wait(timeout)

    def send_raw(self, data):
        """
        Send raw data across the stream.

        Arguments:
            data -- Any string value.
        """
        self.send_queue.put(data)
        return True

    def send_xml(self, data, mask=None, timeout=RESPONSE_TIMEOUT):
        """
        Send an XML object on the stream, and optionally wait
        for a response.

        Arguments:
            data    -- The XML object to send on the stream.
            mask    -- Deprecated. An XML snippet matching the structure
                       of the expected response. Execution will block
                       in this thread until the response is received
                       or a timeout occurs.
            timeout -- Time in seconds to wait for a response before
                       continuing. Defaults to RESPONSE_TIMEOUT.
        """
        return self.send(tostring(data), mask, timeout)

    def process(self, threaded=True):
        """
        Initialize the XML streams and begin processing events.

        The number of threads used for processing stream events is
        determined by HANDLER_THREADS.

        Arguments:
            threaded -- If threaded=True then event dispatcher will run
                        in a separate thread, allowing for the stream to be
                        used in the background for another application.
                        Defaults to True.

                        Event handlers and the send queue will be threaded
                        regardless of this parameter's value.
        """
        # The scheduler always runs threaded, independent of `threaded`.
        self.scheduler.process(threaded=True)

        def start_thread(name, target):
            self.__thread[name] = threading.Thread(name=name, target=target)
            self.__thread[name].start()

        for t in range(0, HANDLER_THREADS):
            log.debug("Starting HANDLER THREAD")
            start_thread('stream_event_handler_%s' % t, self._event_runner)

        start_thread('send_thread', self._send_thread)

        if threaded:
            # Run the XML stream in the background for another application.
            start_thread('process', self._process)
        else:
            self._process()

    def _process(self):
        """
        Start processing the XML streams.

        Processing will continue after any recoverable errors
        if reconnections are allowed.
        """
        firstrun = True

        # The body of this loop will only execute once per connection.
        # Additional passes will be made only if an error occurs and
        # reconnecting is permitted.
        while firstrun or (self.auto_reconnect and not self.stop.isSet()):
            firstrun = False
            try:
                if self.is_client:
                    self.send_raw(self.stream_header)
                # The call to self.__read_xml will block and prevent
                # the body of the loop from running until a disconnect
                # occurs. After any reconnection, the stream header will
                # be resent and processing will resume.
                while not self.stop.isSet() and self.__read_xml():
                    # Ensure the stream header is sent for any
                    # new connections.
                    if self.is_client:
                        self.send_raw(self.stream_header)
            except KeyboardInterrupt:
                log.debug("Keyboard Escape Detected in _process")
                self.stop.set()
            except SystemExit:
                log.debug("SystemExit in _process")
                self.stop.set()
            except Socket.error:
                log.exception('Socket Error')
            except:
                if not self.stop.isSet():
                    log.exception('Connection error.')
            if not self.stop.isSet() and self.auto_reconnect:
                self.reconnect()
            else:
                self.disconnect()
                self.event_queue.put(('quit', None, None))
        self.scheduler.run = False

    def __read_xml(self):
        """
        Parse the incoming XML stream, raising stream events for
        each received stanza.

        Returns False when the stream root element closes; returns True
        when a handler requested a stream restart (RestartStream).
        """
        depth = 0
        root = None
        for (event, xml) in ET.iterparse(self.filesocket, (b'end', b'start')):
            if event == b'start':
                if depth == 0:
                    # We have received the start of the root element.
                    root = xml
                    # Perform any stream initialization actions, such
                    # as handshakes.
                    self.stream_end_event.clear()
                    self.start_stream_handler(root)
                depth += 1
            if event == b'end':
                depth -= 1
                if depth == 0:
                    # The stream's root element has closed,
                    # terminating the stream.
                    log.debug("End of stream recieved")
                    self.stream_end_event.set()
                    return False
                elif depth == 1:
                    # We only raise events for stanzas that are direct
                    # children of the root element.
                    try:
                        self.__spawn_event(xml)
                    except RestartStream:
                        return True
                    if root:
                        # Keep the root element empty of children to
                        # save on memory use.
                        root.clear()
        log.debug("Ending read XML loop")

    def _build_stanza(self, xml, default_ns=None):
        """
        Create a stanza object from a given XML object.

        If a specialized stanza type is not found for the XML, then
        a generic StanzaBase stanza will be returned.

        Arguments:
            xml        -- The XML object to convert into a stanza object.
            default_ns -- Optional default namespace to use instead of the
                          stream's current default namespace.
        """
        if default_ns is None:
            default_ns = self.default_ns
        stanza_type = StanzaBase
        for stanza_class in self.__root_stanza:
            if xml.tag == "{%s}%s" % (default_ns, stanza_class.name):
                stanza_type = stanza_class
                break
        stanza = stanza_type(self, xml)
        return stanza

    def __spawn_event(self, xml):
        """
        Analyze incoming XML stanzas and convert them into stanza
        objects if applicable and queue stream events to be processed
        by matching handlers.

        Arguments:
            xml -- The XML stanza to analyze.
        """
        log.debug("RECV: %s" % tostring(xml,
                                        xmlns=self.default_ns,
                                        stream=self))
        # Apply any preprocessing filters.
        xml = self.incoming_filter(xml)

        # Convert the raw XML object into a stanza object. If no registered
        # stanza type applies, a generic StanzaBase stanza will be used.
        # (Delegates to _build_stanza instead of duplicating its lookup.)
        stanza = self._build_stanza(xml)
        stanza_type = type(stanza)

        # Match the stanza against registered handlers. Handlers marked
        # to run "in stream" will be executed immediately; the rest will
        # be queued.
        unhandled = True
        for handler in self.__handlers:
            if handler.match(stanza):
                stanza_copy = stanza_type(self, copy.deepcopy(xml))
                handler.prerun(stanza_copy)
                self.event_queue.put(('stanza', handler, stanza_copy))
                try:
                    if handler.check_delete():
                        self.__handlers.pop(self.__handlers.index(handler))
                except:
                    pass  # not thread safe
                unhandled = False

        # Some stanzas require responses, such as Iq queries. A default
        # handler will be executed immediately for this case.
        if unhandled:
            stanza.unhandled()

    def _threaded_event_wrapper(self, func, args):
        """
        Capture exceptions for event handlers that run
        in individual threads.

        Arguments:
            func -- The event handler to execute.
            args -- Arguments to the event handler.
        """
        try:
            func(*args)
        except Exception as e:
            error_msg = 'Error processing event handler: %s'
            log.exception(error_msg % str(func))
            if hasattr(args[0], 'exception'):
                args[0].exception(e)

    def _event_runner(self):
        """
        Process the event queue and execute handlers.

        The number of event runner threads is controlled by HANDLER_THREADS.

        Stream event handlers will all execute in this thread. Custom event
        handlers may be spawned in individual threads.
        """
        log.debug("Loading event runner")
        try:
            while not self.stop.isSet():
                try:
                    event = self.event_queue.get(True, timeout=5)
                except queue.Empty:
                    event = None
                if event is None:
                    continue

                etype, handler = event[0:2]
                args = event[2:]

                if etype == 'stanza':
                    try:
                        handler.run(args[0])
                    except Exception as e:
                        error_msg = 'Error processing stream handler: %s'
                        log.exception(error_msg % handler.name)
                        args[0].exception(e)
                elif etype == 'schedule':
                    try:
                        log.debug(args)
                        handler(*args[0])
                    except:
                        log.exception('Error processing scheduled task')
                elif etype == 'event':
                    func, threaded, disposable = handler
                    try:
                        if threaded:
                            x = threading.Thread(
                                    name="Event_%s" % str(func),
                                    target=self._threaded_event_wrapper,
                                    args=(func, args))
                            x.start()
                        else:
                            func(*args)
                    except Exception as e:
                        error_msg = 'Error processing event handler: %s'
                        log.exception(error_msg % str(func))
                        if hasattr(args[0], 'exception'):
                            args[0].exception(e)
                elif etype == 'quit':
                    log.debug("Quitting event runner thread")
                    return False
        except KeyboardInterrupt:
            log.debug("Keyboard Escape Detected in _event_runner")
            self.disconnect()
            return
        except SystemExit:
            self.disconnect()
            self.event_queue.put(('quit', None, None))
            return

    def _send_thread(self):
        """
        Extract stanzas from the send queue and send them on the stream.
        """
        try:
            while not self.stop.isSet():
                try:
                    data = self.send_queue.get(True, 1)
                except queue.Empty:
                    continue
                log.debug("SEND: %s" % data)
                try:
                    self.socket.send(data.encode('utf-8'))
                except:
                    log.warning("Failed to send %s" % data)
                    self.disconnect(self.auto_reconnect)
        except KeyboardInterrupt:
            log.debug("Keyboard Escape Detected in _send_thread")
            self.disconnect()
            return
        except SystemExit:
            self.disconnect()
            self.event_queue.put(('quit', None, None))
            return
| |
"""
Support for Google - Calendar Event Devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/google/
NOTE TO OTHER DEVELOPERS: IF YOU ADD MORE SCOPES TO THE OAUTH THAN JUST
CALENDAR THEN USERS WILL NEED TO DELETE THEIR TOKEN_FILE. THEY WILL LOSE THEIR
REFRESH_TOKEN PIECE WHEN RE-AUTHENTICATING TO ADD MORE API ACCESS
IT'S BEST TO JUST HAVE SEPARATE OAUTH FOR DIFFERENT PIECES OF GOOGLE
"""
import logging
import os
import yaml
import voluptuous as vol
from voluptuous.error import Error as VoluptuousError
import homeassistant.helpers.config_validation as cv
import homeassistant.loader as loader
from homeassistant.setup import setup_component
from homeassistant.helpers import discovery
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.event import track_time_change
from homeassistant.util import convert, dt
# Third-party packages Home Assistant installs on demand for this component.
REQUIREMENTS = [
    'google-api-python-client==1.6.2',
    'oauth2client==4.0.0',
]
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'google'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Keys accepted in configuration.yaml under the `google:` section.
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'
CONF_TRACK_NEW = 'track_new_calendar'
# Keys used per-calendar in the generated google_calendars.yaml file.
CONF_CAL_ID = 'cal_id'
CONF_DEVICE_ID = 'device_id'
CONF_NAME = 'name'
CONF_ENTITIES = 'entities'
CONF_TRACK = 'track'
CONF_SEARCH = 'search'
CONF_OFFSET = 'offset'
# Newly discovered calendars are tracked by default.
DEFAULT_CONF_TRACK_NEW = True
# Marker prefix an event title can use to set a state offset.
DEFAULT_CONF_OFFSET = '!!'
NOTIFICATION_ID = 'google_calendar_notification'
NOTIFICATION_TITLE = 'Google Calendar Setup'
GROUP_NAME_ALL_CALENDARS = "Google Calendar Sensors"
# Service names registered by setup_services().
SERVICE_SCAN_CALENDARS = 'scan_for_calendars'
SERVICE_FOUND_CALENDARS = 'found_calendar'
# hass.data key holding the dict of known calendars (cal_id -> config).
DATA_INDEX = 'google_calendars'
# Per-user files kept in the Home Assistant config directory.
YAML_DEVICES = '{}_calendars.yaml'.format(DOMAIN)
# Read-only scope: see the module docstring before adding more scopes.
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
TOKEN_FILE = '.{}.token'.format(DOMAIN)
# configuration.yaml schema for the `google:` section.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_CLIENT_ID): cv.string,
        vol.Required(CONF_CLIENT_SECRET): cv.string,
        vol.Optional(CONF_TRACK_NEW): cv.boolean,
    })
}, extra=vol.ALLOW_EXTRA)
# Schema of one entity entry inside a calendar device definition.
_SINGLE_CALSEARCH_CONFIG = vol.Schema({
    vol.Required(CONF_NAME): cv.string,
    vol.Required(CONF_DEVICE_ID): cv.string,
    vol.Optional(CONF_TRACK): cv.boolean,
    vol.Optional(CONF_SEARCH): vol.Any(cv.string, None),
    vol.Optional(CONF_OFFSET): cv.string,
})
# Schema of one calendar entry in google_calendars.yaml.
DEVICE_SCHEMA = vol.Schema({
    vol.Required(CONF_CAL_ID): cv.string,
    vol.Required(CONF_ENTITIES, None):
        vol.All(cv.ensure_list, [_SINGLE_CALSEARCH_CONFIG]),
}, extra=vol.ALLOW_EXTRA)
def do_authentication(hass, config):
    """Notify user of actions and authenticate.

    Notify user of user_code and verification_url then poll
    until we have an access token. Returns False if the device-code
    request itself fails, True once polling has been scheduled.
    """
    from oauth2client.client import (
        OAuth2WebServerFlow,
        OAuth2DeviceCodeError,
        FlowExchangeError
    )
    from oauth2client.file import Storage

    # CONSISTENCY: use the module-level SCOPES constant instead of
    # repeating the scope URL inline.
    oauth = OAuth2WebServerFlow(
        config[CONF_CLIENT_ID],
        config[CONF_CLIENT_SECRET],
        SCOPES,
        'Home-Assistant.io',
    )
    persistent_notification = loader.get_component('persistent_notification')
    try:
        dev_flow = oauth.step1_get_device_and_user_codes()
    except OAuth2DeviceCodeError as err:
        persistent_notification.create(
            hass, 'Error: {}<br />You will need to restart hass after fixing.'
            ''.format(err),
            title=NOTIFICATION_TITLE,
            notification_id=NOTIFICATION_ID)
        return False

    persistent_notification.create(
        hass, 'In order to authorize Home-Assistant to view your calendars '
        'you must visit: <a href="{}" target="_blank">{}</a> and enter '
        'code: {}'.format(dev_flow.verification_url,
                          dev_flow.verification_url,
                          dev_flow.user_code),
        title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID
    )

    def step2_exchange(now):
        """Keep trying to validate the user_code until it expires."""
        if now >= dt.as_local(dev_flow.user_code_expiry):
            # FIX: typo in user-facing message ("Authenication").
            persistent_notification.create(
                hass, 'Authentication code expired, please restart '
                'Home-Assistant and try again',
                title=NOTIFICATION_TITLE,
                notification_id=NOTIFICATION_ID)
            listener()
            # BUGFIX: stop here — the original fell through and still
            # attempted step2_exchange on the expired device flow.
            return

        try:
            credentials = oauth.step2_exchange(device_flow_info=dev_flow)
        except FlowExchangeError:
            # not ready yet, call again
            return

        storage = Storage(hass.config.path(TOKEN_FILE))
        storage.put(credentials)
        do_setup(hass, config)
        listener()
        persistent_notification.create(
            hass, 'We are all setup now. Check {} for calendars that have '
            'been found'.format(YAML_DEVICES),
            title=NOTIFICATION_TITLE, notification_id=NOTIFICATION_ID)

    # Poll at the interval the device flow asks for.
    listener = track_time_change(hass, step2_exchange,
                                 second=range(0, 60, dev_flow.interval))
    return True
def setup(hass, config):
    """Set up the Google platform.

    Chooses between interactive OAuth authentication and normal setup
    depending on whether a token file already exists.
    """
    # Make sure the shared calendar index exists before anything uses it.
    hass.data.setdefault(DATA_INDEX, {})

    conf = config.get(DOMAIN, {})
    if os.path.isfile(hass.config.path(TOKEN_FILE)):
        # A stored token exists: proceed straight to full setup.
        do_setup(hass, conf)
    else:
        # No token yet: walk the user through device-code authentication.
        do_authentication(hass, conf)

    return True
def setup_services(hass, track_new_found_calendars, calendar_service):
    """Setup service listeners.

    Registers two services:

    * ``found_calendar`` -- record a newly discovered calendar, persist it
      to the devices YAML file, and load a calendar platform for it.
    * ``scan_for_calendars`` -- list the user's calendars via the Google
      API and fire ``found_calendar`` for each one.
    """
    def _found_calendar(call):
        """Check if we know about a calendar and generate PLATFORM_DISCOVER."""
        calendar = get_calendar_info(hass, call.data)
        if hass.data[DATA_INDEX].get(calendar[CONF_CAL_ID], None) is not None:
            # Already known; nothing to do.
            return

        hass.data[DATA_INDEX].update({calendar[CONF_CAL_ID]: calendar})

        update_config(
            hass.config.path(YAML_DEVICES),
            hass.data[DATA_INDEX][calendar[CONF_CAL_ID]]
        )

        discovery.load_platform(hass, 'calendar', DOMAIN,
                                hass.data[DATA_INDEX][calendar[CONF_CAL_ID]])

    hass.services.register(
        DOMAIN, SERVICE_FOUND_CALENDARS, _found_calendar,
        None, schema=None)

    def _scan_for_calendars(service):
        """Scan for new calendars."""
        # FIX: keep the Google API client in its own local instead of
        # shadowing the service-call argument (`service`).
        google_service = calendar_service.get()
        cal_list = google_service.calendarList()  # pylint: disable=no-member
        calendars = cal_list.list().execute()['items']
        for calendar in calendars:
            calendar['track'] = track_new_found_calendars
            hass.services.call(DOMAIN, SERVICE_FOUND_CALENDARS,
                               calendar)

    hass.services.register(
        DOMAIN, SERVICE_SCAN_CALENDARS,
        _scan_for_calendars,
        None, schema=None)
    return True
def do_setup(hass, config):
    """Run the setup after we have everything configured.

    Loads the user's configured calendars, wires up the services, makes
    sure the calendar component is loaded, spins up a platform per known
    calendar and finally triggers a scan for new ones.
    """
    # Start from whatever calendars are already on disk.
    hass.data[DATA_INDEX] = load_config(hass.config.path(YAML_DEVICES))

    calendar_service = GoogleCalendarService(hass.config.path(TOKEN_FILE))
    track_new = convert(config.get(CONF_TRACK_NEW),
                        bool, DEFAULT_CONF_TRACK_NEW)
    setup_services(hass, track_new, calendar_service)

    # Ensure the calendar component itself is loaded.
    setup_component(hass, 'calendar', config)

    for calendar in hass.data[DATA_INDEX].values():
        discovery.load_platform(hass, 'calendar', DOMAIN, calendar)

    # Look for any calendars we don't know about yet.
    hass.services.call(DOMAIN, SERVICE_SCAN_CALENDARS, None)
    return True
class GoogleCalendarService(object):
    """Thin wrapper that builds an authenticated Google Calendar client."""

    def __init__(self, token_file):
        """Remember the OAuth token file path; no I/O happens here."""
        self.token_file = token_file

    def get(self):
        """Return a ready-to-use Google Calendar v3 API service object."""
        # Imports deferred so the component can be imported before its
        # REQUIREMENTS are installed.
        import httplib2
        from oauth2client.file import Storage
        from googleapiclient import discovery as google_discovery

        credentials = Storage(self.token_file).get()
        http = credentials.authorize(httplib2.Http())
        return google_discovery.build('calendar', 'v3', http=http,
                                      cache_discovery=False)
def get_calendar_info(hass, calendar):
    """Convert data from Google into DEVICE_SCHEMA.

    Builds a single tracked entity from the Google calendar summary and
    validates the whole structure against DEVICE_SCHEMA.
    """
    entity = {
        CONF_TRACK: calendar['track'],
        CONF_NAME: calendar['summary'],
        CONF_DEVICE_ID: generate_entity_id('{}', calendar['summary'],
                                           hass=hass),
    }
    return DEVICE_SCHEMA({
        CONF_CAL_ID: calendar['id'],
        CONF_ENTITIES: [entity],
    })
def load_config(path):
    """Load the google_calendars.yaml devices file.

    Returns a dict mapping cal_id -> validated DEVICE_SCHEMA entry.
    Invalid entries are logged and skipped; a missing or empty file
    yields an empty dict.
    """
    calendars = {}
    try:
        # FIX: safe_load instead of yaml.load — the devices file only
        # contains plain YAML and must never construct arbitrary Python
        # objects; also avoids the bare-load deprecation warning.
        with open(path) as calendar_file:
            data = yaml.safe_load(calendar_file)
        # FIX: an empty file parses to None, which the original would
        # try to iterate and crash on; `or []` makes it a clean no-op.
        for calendar in data or []:
            try:
                calendars.update({calendar[CONF_CAL_ID]:
                                  DEVICE_SCHEMA(calendar)})
            except VoluptuousError as exception:
                # keep going
                _LOGGER.warning('Calendar Invalid Data: %s', exception)
    except FileNotFoundError:
        # First run: the devices file has not been created yet.
        return {}
    return calendars
def update_config(path, calendar):
    """Write the google_calendar_devices.yaml.

    Append-only: the calendar is dumped as a one-element YAML list,
    separated from earlier entries by a blank line.
    """
    with open(path, 'a') as yaml_out:
        yaml_out.write('\n')
        yaml.dump([calendar], yaml_out, default_flow_style=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.